Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec
author David S. Miller <davem@davemloft.net>
Wed, 27 Dec 2017 15:58:23 +0000 (10:58 -0500)
committer David S. Miller <davem@davemloft.net>
Wed, 27 Dec 2017 15:58:23 +0000 (10:58 -0500)
Steffen Klassert says:

====================
pull request (net): ipsec 2017-12-22

1) Check for a valid id proto in validate_tmpl(); otherwise
   we may trigger a warning in xfrm_state_fini().
   From Cong Wang.

2) Fix a typo in the XFRMA_OUTPUT_MARK policy attribute.
   From Michal Kubecek.

3) Verify the state is valid when encap_type < 0;
   otherwise we may crash on IPsec GRO.
   From Aviv Heller.

4) Fix stack-out-of-bounds read on socket policy lookup.
   We access the flowi of the wrong address family in the
   IPv4 mapped IPv6 case; fix this by catching address
   family mismatches before we do the lookup.

5) Fix xfrm_do_migrate() with AEAD to copy the geniv
   field too. Otherwise the state is not fully initialized
   and migration fails. From Antony Antony.

6) Fix stack-out-of-bounds with misconfigured transport
   mode policies. Our policy template validation is not
   strict enough. It is possible to configure policies
   with a transport mode template where the address family
   of the template does not match the selector's address
   family. Fix this by refusing such a configuration;
   the address family cannot change in transport mode.
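
   The check boils down to something like the sketch below.
   This is only an illustration with made-up names, not the
   actual patch:

      /* Sketch only (hypothetical names): reject a transport mode
       * template whose address family differs from the selector
       * family; tunnel mode may switch families, transport mode
       * may not. */
      enum sketch_mode { SKETCH_TRANSPORT, SKETCH_TUNNEL };

      struct tmpl_sketch {
              enum sketch_mode mode;
              int family;             /* AF_INET, AF_INET6 or 0 */
      };

      static int tmpl_family_ok(const struct tmpl_sketch *t,
                                int sel_family)
      {
              if (t->mode != SKETCH_TRANSPORT)
                      return 1;       /* tunnel: family may differ */
              return t->family == 0 || t->family == sel_family;
      }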

7) Fix a policy reference leak when reusing a pcpu xdst
   entry. From Florian Westphal.

8) Reinject transport-mode packets through a tasklet;
   otherwise it is possible to create a recursion
   loop. From Herbert Xu.
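
   The pattern in (8), roughly: instead of calling back into the
   receive path directly (which lets the call chain grow with every
   reinjected packet), queue the packet and drain the queue from a
   deferred context. In the kernel that context is a tasklet; the
   stand-alone sketch below uses a plain FIFO drained after the
   original call returns, and all names are made up:

      /* Sketch only: defer reinjection instead of recursing. */
      struct pkt_sketch {
              struct pkt_sketch *next;
              /* packet payload omitted */
      };

      static struct pkt_sketch *pending;
      static struct pkt_sketch **pending_tail = &pending;

      static void receive(struct pkt_sketch *p)
      {
              (void)p;        /* normal input processing */
      }

      /* Called where the code used to recurse into receive(). */
      static void reinject_deferred(struct pkt_sketch *p)
      {
              p->next = NULL;
              *pending_tail = p;
              pending_tail = &p->next;
      }

      /* Drained after the original receive() returns (the tasklet's
       * job in the kernel), so the stack depth stays bounded. */
      static void drain_pending(void)
      {
              while (pending) {
                      struct pkt_sketch *p = pending;

                      pending = p->next;
                      if (!pending)
                              pending_tail = &pending;
                      receive(p);
              }
      }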

Please pull or let me know if there are problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2349 files changed:
Documentation/admin-guide/kernel-parameters.txt
Documentation/arm64/silicon-errata.txt
Documentation/cgroup-v2.txt
Documentation/core-api/genericirq.rst
Documentation/core-api/local_ops.rst
Documentation/devicetree/bindings/arm/ccn.txt
Documentation/devicetree/bindings/arm/omap/crossbar.txt
Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt
Documentation/devicetree/bindings/clock/axi-clkgen.txt
Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.txt
Documentation/devicetree/bindings/clock/exynos4-clock.txt
Documentation/devicetree/bindings/clock/exynos5250-clock.txt
Documentation/devicetree/bindings/clock/exynos5410-clock.txt
Documentation/devicetree/bindings/clock/exynos5420-clock.txt
Documentation/devicetree/bindings/clock/exynos5440-clock.txt
Documentation/devicetree/bindings/clock/ti-keystone-pllctrl.txt
Documentation/devicetree/bindings/clock/zx296702-clk.txt
Documentation/devicetree/bindings/crypto/fsl-sec4.txt
Documentation/devicetree/bindings/devfreq/event/rockchip-dfi.txt
Documentation/devicetree/bindings/display/atmel,lcdc.txt
Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
Documentation/devicetree/bindings/dma/zxdma.txt
Documentation/devicetree/bindings/eeprom/at25.txt
Documentation/devicetree/bindings/gpio/gpio-altera.txt
Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
Documentation/devicetree/bindings/hwmon/jc42.txt
Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
Documentation/devicetree/bindings/iio/pressure/hp03.txt
Documentation/devicetree/bindings/input/touchscreen/bu21013.txt
Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
Documentation/devicetree/bindings/interrupt-controller/img,meta-intc.txt
Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.txt
Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt
Documentation/devicetree/bindings/mailbox/altera-mailbox.txt
Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt
Documentation/devicetree/bindings/media/exynos5-gsc.txt
Documentation/devicetree/bindings/media/mediatek-vcodec.txt
Documentation/devicetree/bindings/media/rcar_vin.txt
Documentation/devicetree/bindings/media/samsung-fimc.txt
Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
Documentation/devicetree/bindings/media/video-interfaces.txt
Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
Documentation/devicetree/bindings/mfd/ti-keystone-devctrl.txt
Documentation/devicetree/bindings/misc/brcm,kona-smc.txt
Documentation/devicetree/bindings/mmc/brcm,kona-sdhci.txt
Documentation/devicetree/bindings/mmc/brcm,sdhci-iproc.txt
Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
Documentation/devicetree/bindings/mtd/gpmc-nor.txt
Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
Documentation/devicetree/bindings/mtd/mtk-nand.txt
Documentation/devicetree/bindings/net/altera_tse.txt
Documentation/devicetree/bindings/net/mdio.txt
Documentation/devicetree/bindings/net/socfpga-dwmac.txt
Documentation/devicetree/bindings/nios2/nios2.txt
Documentation/devicetree/bindings/pci/altera-pcie.txt
Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
Documentation/devicetree/bindings/pinctrl/brcm,cygnus-pinmux.txt
Documentation/devicetree/bindings/pinctrl/pinctrl-atlas7.txt
Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt
Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
Documentation/devicetree/bindings/regulator/regulator.txt
Documentation/devicetree/bindings/serial/efm32-uart.txt
Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt
Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt
Documentation/devicetree/bindings/sound/adi,axi-i2s.txt
Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
Documentation/devicetree/bindings/sound/ak4613.txt
Documentation/devicetree/bindings/sound/ak4642.txt
Documentation/devicetree/bindings/sound/max98371.txt
Documentation/devicetree/bindings/sound/max9867.txt
Documentation/devicetree/bindings/sound/renesas,fsi.txt
Documentation/devicetree/bindings/sound/rockchip-spdif.txt
Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
Documentation/devicetree/bindings/spi/efm32-spi.txt
Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
Documentation/devicetree/bindings/thermal/thermal.txt
Documentation/devicetree/bindings/ufs/ufs-qcom.txt
Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
Documentation/devicetree/bindings/usb/am33xx-usb.txt
Documentation/devicetree/bindings/usb/ehci-st.txt
Documentation/devicetree/bindings/usb/ohci-st.txt
Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt
Documentation/driver-api/dmaengine/client.rst
Documentation/driver-api/pci.rst
Documentation/filesystems/overlayfs.txt
Documentation/ia64/xen.txt
Documentation/locking/crossrelease.txt [deleted file]
Documentation/media/dvb-drivers/frontends.rst [new file with mode: 0644]
Documentation/media/dvb-drivers/index.rst
Documentation/printk-formats.txt
Documentation/scheduler/sched-deadline.txt
Documentation/scsi/scsi_mid_low_api.txt
Documentation/sysctl/vm.txt
Documentation/virtual/kvm/api.txt
Documentation/virtual/kvm/devices/arm-vgic-its.txt
Documentation/vm/zswap.txt
Documentation/x86/protection-keys.txt
MAINTAINERS
Makefile
arch/alpha/include/uapi/asm/Kbuild
arch/alpha/kernel/srmcons.c
arch/arc/Kconfig
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/include/asm/arcregs.h
arch/arc/include/uapi/asm/Kbuild
arch/arc/kernel/perf_event.c
arch/arc/kernel/setup.c
arch/arc/mm/tlb.c
arch/arc/plat-axs10x/Kconfig
arch/arc/plat-axs10x/axs10x.c
arch/arm/Kconfig.debug
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am437x-cm-t43.dts
arch/arm/boot/dts/armada-385-db-ap.dts
arch/arm/boot/dts/armada-385-linksys.dtsi
arch/arm/boot/dts/armada-385-synology-ds116.dts
arch/arm/boot/dts/armada-388-gp.dts
arch/arm/boot/dts/bcm-nsp.dtsi
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/bcm958623hr.dts
arch/arm/boot/dts/bcm958625hr.dts
arch/arm/boot/dts/dm814x.dtsi
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
arch/arm/boot/dts/logicpd-som-lv.dtsi
arch/arm/boot/dts/meson.dtsi
arch/arm/boot/dts/nspire.dtsi
arch/arm/boot/dts/omap3-beagle-xm.dts
arch/arm/boot/dts/omap3-beagle.dts
arch/arm/boot/dts/omap3-cm-t3x.dtsi
arch/arm/boot/dts/omap3-evm-common.dtsi
arch/arm/boot/dts/omap3-gta04.dtsi
arch/arm/boot/dts/omap3-igep0020-common.dtsi
arch/arm/boot/dts/omap3-igep0030-common.dtsi
arch/arm/boot/dts/omap3-lilly-a83x.dtsi
arch/arm/boot/dts/omap3-overo-base.dtsi
arch/arm/boot/dts/omap3-pandora-common.dtsi
arch/arm/boot/dts/omap3-tao3530.dtsi
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap4-droid4-xt894.dts
arch/arm/boot/dts/omap4-duovero.dtsi
arch/arm/boot/dts/omap4-panda-common.dtsi
arch/arm/boot/dts/omap4-var-som-om44.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/omap5-cm-t54.dts
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/r8a7790.dtsi
arch/arm/boot/dts/r8a7792.dtsi
arch/arm/boot/dts/r8a7793.dtsi
arch/arm/boot/dts/r8a7794.dtsi
arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
arch/arm/include/asm/assembler.h
arch/arm/include/asm/kvm_arm.h
arch/arm/include/asm/kvm_host.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/pgtable.h
arch/arm/include/uapi/asm/Kbuild
arch/arm/kernel/entry-header.S
arch/arm/kvm/Kconfig
arch/arm/kvm/Makefile
arch/arm/lib/csumpartialcopyuser.S
arch/arm/mach-iop32x/n2100.c
arch/arm/mach-ixp4xx/dsmg600-setup.c
arch/arm/mach-ixp4xx/nas100d-setup.c
arch/arm/mach-meson/platsmp.c
arch/arm/mach-omap2/cm_common.c
arch/arm/mach-omap2/omap-secure.c
arch/arm/mach-omap2/omap-secure.h
arch/arm/mach-omap2/omap_device.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/pm.h
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/prcm-common.h
arch/arm/mach-omap2/prm33xx.c
arch/arm/mach-omap2/sleep34xx.S
arch/arm/mach-orion5x/db88f5281-setup.c
arch/arm/mach-uniphier/Makefile
arch/arm/mm/dump.c
arch/arm/mm/init.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/Makefile
arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/module.h
arch/arm64/include/asm/perf_event.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
arch/arm64/kernel/Makefile
arch/arm64/kernel/cpu-reset.S
arch/arm64/kernel/cpu_ops.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/efi-entry.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/ftrace-mod.S [deleted file]
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/head.S
arch/arm64/kernel/hw_breakpoint.c
arch/arm64/kernel/module-plts.c
arch/arm64/kernel/module.lds
arch/arm64/kernel/perf_event.c
arch/arm64/kernel/process.c
arch/arm64/kernel/relocate_kernel.S
arch/arm64/kvm/Kconfig
arch/arm64/kvm/Makefile
arch/arm64/kvm/debug.c
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/hyp-init.S
arch/arm64/kvm/hyp/debug-sr.c
arch/arm64/kvm/hyp/switch.c
arch/arm64/mm/context.c
arch/arm64/mm/dump.c
arch/arm64/mm/fault.c
arch/arm64/mm/init.c
arch/arm64/mm/pgd.c
arch/blackfin/include/uapi/asm/Kbuild
arch/blackfin/kernel/nmi.c
arch/c6x/include/uapi/asm/Kbuild
arch/cris/include/uapi/asm/Kbuild
arch/frv/include/uapi/asm/Kbuild
arch/h8300/include/uapi/asm/Kbuild
arch/hexagon/include/uapi/asm/Kbuild
arch/ia64/include/uapi/asm/Kbuild
arch/m32r/include/uapi/asm/Kbuild
arch/m68k/amiga/amisound.c
arch/m68k/configs/stmark2_defconfig
arch/m68k/include/uapi/asm/Kbuild
arch/m68k/kernel/vmlinux-nommu.lds
arch/m68k/kernel/vmlinux-std.lds
arch/m68k/kernel/vmlinux-sun3.lds
arch/m68k/mac/macboing.c
arch/metag/include/uapi/asm/Kbuild
arch/microblaze/include/asm/mmu_context_mm.h
arch/microblaze/include/uapi/asm/Kbuild
arch/mips/boot/dts/brcm/Makefile
arch/mips/boot/dts/cavium-octeon/Makefile
arch/mips/boot/dts/img/Makefile
arch/mips/boot/dts/ingenic/Makefile
arch/mips/boot/dts/lantiq/Makefile
arch/mips/boot/dts/mti/Makefile
arch/mips/boot/dts/netlogic/Makefile
arch/mips/boot/dts/ni/Makefile
arch/mips/boot/dts/pic32/Makefile
arch/mips/boot/dts/qca/Makefile
arch/mips/boot/dts/ralink/Makefile
arch/mips/boot/dts/xilfpga/Makefile
arch/mips/include/asm/Kbuild
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/serial.h [new file with mode: 0644]
arch/mips/include/uapi/asm/Kbuild
arch/mips/kvm/mips.c
arch/mips/lasat/picvue_proc.c
arch/mips/mti-malta/malta-display.c
arch/mn10300/include/uapi/asm/Kbuild
arch/nios2/include/uapi/asm/Kbuild
arch/openrisc/include/uapi/asm/Kbuild
arch/parisc/boot/compressed/misc.c
arch/parisc/include/asm/thread_info.h
arch/parisc/include/uapi/asm/Kbuild
arch/parisc/kernel/entry.S
arch/parisc/kernel/hpmc.S
arch/parisc/kernel/pdc_cons.c
arch/parisc/kernel/unwind.c
arch/parisc/lib/delay.c
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/imc-pmu.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/include/asm/machdep.h
arch/powerpc/include/asm/setup.h
arch/powerpc/include/uapi/asm/Kbuild
arch/powerpc/kernel/cpu_setup_power.S
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/fadump.c
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/tau_6xx.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/lib/code-patching.c
arch/powerpc/mm/hash_native_64.c
arch/powerpc/mm/slice.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/oprofile/op_model_cell.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/imc-pmu.c
arch/powerpc/platforms/cell/spufs/sched.c
arch/powerpc/platforms/powermac/low_i2c.c
arch/powerpc/platforms/powernv/opal-imc.c
arch/powerpc/platforms/powernv/vas.c
arch/powerpc/platforms/ps3/setup.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/xmon/xmon.c
arch/riscv/include/asm/Kbuild
arch/riscv/include/asm/asm.h
arch/riscv/include/asm/atomic.h
arch/riscv/include/asm/barrier.h
arch/riscv/include/asm/bitops.h
arch/riscv/include/asm/bug.h
arch/riscv/include/asm/cacheflush.h
arch/riscv/include/asm/io.h
arch/riscv/include/asm/mmu.h
arch/riscv/include/asm/mmu_context.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/spinlock.h
arch/riscv/include/asm/timex.h
arch/riscv/include/asm/tlbflush.h
arch/riscv/include/asm/vdso-syscalls.h [new file with mode: 0644]
arch/riscv/include/asm/vdso.h
arch/riscv/include/uapi/asm/Kbuild
arch/riscv/kernel/head.S
arch/riscv/kernel/riscv_ksyms.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/smp.c
arch/riscv/kernel/sys_riscv.c
arch/riscv/kernel/syscall_table.c
arch/riscv/kernel/vdso/Makefile
arch/riscv/kernel/vdso/clock_getres.S [new file with mode: 0644]
arch/riscv/kernel/vdso/clock_gettime.S [new file with mode: 0644]
arch/riscv/kernel/vdso/flush_icache.S [new file with mode: 0644]
arch/riscv/kernel/vdso/getcpu.S [new file with mode: 0644]
arch/riscv/kernel/vdso/gettimeofday.S [new file with mode: 0644]
arch/riscv/kernel/vdso/vdso.lds.S
arch/riscv/lib/delay.c
arch/riscv/mm/Makefile
arch/riscv/mm/cacheflush.c [new file with mode: 0644]
arch/riscv/mm/ioremap.c
arch/s390/Kbuild
arch/s390/Makefile
arch/s390/appldata/Makefile
arch/s390/appldata/appldata_base.c
arch/s390/appldata/appldata_mem.c
arch/s390/appldata/appldata_net_sum.c
arch/s390/appldata/appldata_os.c
arch/s390/boot/compressed/vmlinux.scr
arch/s390/boot/install.sh
arch/s390/crypto/aes_s390.c
arch/s390/crypto/arch_random.c
arch/s390/crypto/crc32-vx.c
arch/s390/crypto/des_s390.c
arch/s390/crypto/ghash_s390.c
arch/s390/crypto/paes_s390.c
arch/s390/crypto/prng.c
arch/s390/crypto/sha.h
arch/s390/crypto/sha1_s390.c
arch/s390/crypto/sha256_s390.c
arch/s390/crypto/sha512_s390.c
arch/s390/crypto/sha_common.c
arch/s390/hypfs/Makefile
arch/s390/hypfs/inode.c
arch/s390/include/asm/Kbuild
arch/s390/include/asm/alternative.h
arch/s390/include/asm/ap.h
arch/s390/include/asm/bugs.h
arch/s390/include/asm/cpu_mf.h
arch/s390/include/asm/elf.h
arch/s390/include/asm/kprobes.h
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/kvm_para.h
arch/s390/include/asm/livepatch.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/perf_event.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/ptrace.h
arch/s390/include/asm/segment.h
arch/s390/include/asm/switch_to.h
arch/s390/include/asm/syscall.h
arch/s390/include/asm/sysinfo.h
arch/s390/include/asm/topology.h
arch/s390/include/asm/vga.h
arch/s390/include/uapi/asm/Kbuild
arch/s390/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
arch/s390/include/uapi/asm/kvm.h
arch/s390/include/uapi/asm/kvm_para.h
arch/s390/include/uapi/asm/kvm_perf.h
arch/s390/include/uapi/asm/perf_regs.h
arch/s390/include/uapi/asm/ptrace.h
arch/s390/include/uapi/asm/sthyi.h
arch/s390/include/uapi/asm/virtio-ccw.h
arch/s390/include/uapi/asm/vmcp.h
arch/s390/include/uapi/asm/zcrypt.h
arch/s390/kernel/alternative.c
arch/s390/kernel/compat_linux.c
arch/s390/kernel/debug.c
arch/s390/kernel/dis.c
arch/s390/kernel/dumpstack.c
arch/s390/kernel/entry.S
arch/s390/kernel/ipl.c
arch/s390/kernel/kprobes.c
arch/s390/kernel/lgr.c
arch/s390/kernel/module.c
arch/s390/kernel/nmi.c
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/perf_event.c
arch/s390/kernel/perf_regs.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/stacktrace.c
arch/s390/kernel/sthyi.c
arch/s390/kernel/syscalls.S
arch/s390/kernel/time.c
arch/s390/kernel/topology.c
arch/s390/kernel/vdso.c
arch/s390/kernel/vdso32/clock_getres.S
arch/s390/kernel/vdso32/clock_gettime.S
arch/s390/kernel/vdso32/gettimeofday.S
arch/s390/kernel/vdso64/clock_getres.S
arch/s390/kernel/vdso64/clock_gettime.S
arch/s390/kernel/vdso64/gettimeofday.S
arch/s390/kernel/vdso64/note.S
arch/s390/kernel/vtime.c
arch/s390/kvm/Makefile
arch/s390/kvm/diag.c
arch/s390/kvm/gaccess.h
arch/s390/kvm/guestdbg.c
arch/s390/kvm/intercept.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/irq.h
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/kvm/priv.c
arch/s390/kvm/sigp.c
arch/s390/kvm/vsie.c
arch/s390/mm/cmm.c
arch/s390/mm/gmap.c
arch/s390/mm/mmap.c
arch/s390/mm/pgalloc.c
arch/s390/mm/pgtable.c
arch/s390/net/Makefile
arch/s390/net/bpf_jit_comp.c
arch/s390/numa/Makefile
arch/s390/pci/Makefile
arch/s390/pci/pci.c
arch/s390/pci/pci_debug.c
arch/s390/pci/pci_dma.c
arch/s390/pci/pci_insn.c
arch/s390/tools/gen_opcode_table.c
arch/score/include/uapi/asm/Kbuild
arch/sh/drivers/heartbeat.c
arch/sh/drivers/pci/common.c
arch/sh/drivers/push-switch.c
arch/sh/include/uapi/asm/Kbuild
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/uapi/asm/Kbuild
arch/sparc/lib/Makefile
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/sparc/net/bpf_jit_comp_64.c
arch/tile/include/asm/pgtable.h
arch/tile/include/uapi/asm/Kbuild
arch/um/include/asm/Kbuild
arch/um/kernel/trap.c
arch/unicore32/include/uapi/asm/Kbuild
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/head_64.S
arch/x86/boot/compressed/kaslr.c
arch/x86/boot/compressed/misc.c
arch/x86/boot/compressed/pgtable_64.c [new file with mode: 0644]
arch/x86/boot/genimage.sh
arch/x86/crypto/salsa20_glue.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/entry/vdso/vclock_gettime.c
arch/x86/events/intel/core.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore.h
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/desc.h
arch/x86/include/asm/elf.h
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/hypertransport.h [deleted file]
arch/x86/include/asm/hypervisor.h
arch/x86/include/asm/insn-eval.h
arch/x86/include/asm/io.h
arch/x86/include/asm/irqdomain.h
arch/x86/include/asm/irqflags.h
arch/x86/include/asm/kdebug.h
arch/x86/include/asm/kmemcheck.h [deleted file]
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/segment.h
arch/x86/include/asm/stacktrace.h
arch/x86/include/asm/suspend_32.h
arch/x86/include/asm/suspend_64.h
arch/x86/include/asm/switch_to.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/traps.h
arch/x86/include/asm/unwind.h
arch/x86/include/uapi/asm/Kbuild
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/Makefile
arch/x86/kernel/apic/htirq.c [deleted file]
arch/x86/kernel/apic/vector.c
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/doublefault.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/ioport.c
arch/x86/kernel/irq.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/sys_x86_64.c
arch/x86/kernel/traps.c
arch/x86/kernel/umip.c
arch/x86/kernel/unwind_orc.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kvm/cpuid.h
arch/x86/kvm/emulate.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/delay.c
arch/x86/lib/insn-eval.c
arch/x86/lib/x86-opcode-map.txt
arch/x86/mm/extable.c
arch/x86/mm/fault.c
arch/x86/mm/hugetlbpage.c
arch/x86/mm/ioremap.c
arch/x86/mm/kasan_init_64.c
arch/x86/mm/kmemcheck/error.c [deleted file]
arch/x86/mm/kmemcheck/error.h [deleted file]
arch/x86/mm/kmemcheck/opcode.c [deleted file]
arch/x86/mm/kmemcheck/opcode.h [deleted file]
arch/x86/mm/kmemcheck/pte.c [deleted file]
arch/x86/mm/kmemcheck/pte.h [deleted file]
arch/x86/mm/kmemcheck/selftest.c [deleted file]
arch/x86/mm/kmemcheck/selftest.h [deleted file]
arch/x86/mm/kmemcheck/shadow.h [deleted file]
arch/x86/mm/kmmio.c
arch/x86/mm/mmap.c
arch/x86/pci/broadcom_bus.c
arch/x86/pci/fixup.c
arch/x86/platform/uv/uv_nmi.c
arch/x86/power/cpu.c
arch/x86/xen/apic.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/xen-asm_64.S
arch/xtensa/include/uapi/asm/Kbuild
block/bio.c
block/blk-core.c
block/blk-map.c
block/blk-stat.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk-wbt.c
block/bounce.c
block/genhd.c
block/kyber-iosched.c
crypto/af_alg.c
crypto/algif_aead.c
crypto/algif_skcipher.c
crypto/asymmetric_keys/pkcs7_parser.c
crypto/asymmetric_keys/pkcs7_trust.c
crypto/asymmetric_keys/pkcs7_verify.c
crypto/asymmetric_keys/public_key.c
crypto/asymmetric_keys/x509_cert_parser.c
crypto/asymmetric_keys/x509_public_key.c
crypto/hmac.c
crypto/rsa_helper.c
crypto/salsa20_generic.c
crypto/shash.c
crypto/skcipher.c
drivers/Makefile
drivers/acpi/apei/erst.c
drivers/acpi/cppc_acpi.c
drivers/acpi/device_pm.c
drivers/acpi/device_sysfs.c
drivers/acpi/ec.c
drivers/acpi/internal.h
drivers/acpi/scan.c
drivers/android/binder.c
drivers/ata/ahci_mtk.c
drivers/ata/ahci_qoriq.c
drivers/ata/libata-core.c
drivers/ata/pata_pdc2027x.c
drivers/atm/ambassador.c
drivers/atm/firestream.c
drivers/atm/horizon.c
drivers/atm/idt77105.c
drivers/atm/idt77252.c
drivers/atm/iphase.c
drivers/atm/lanai.c
drivers/atm/nicstar.c
drivers/auxdisplay/Kconfig
drivers/base/Kconfig
drivers/base/isa.c
drivers/base/power/main.c
drivers/base/power/runtime.c
drivers/base/power/wakeup.c
drivers/block/DAC960.c
drivers/block/DAC960.h
drivers/block/aoe/aoecmd.c
drivers/block/ataflop.c
drivers/block/null_blk.c
drivers/block/rsxx/cregs.c
drivers/block/rsxx/dma.c
drivers/block/skd_main.c
drivers/block/sunvdc.c
drivers/block/swim3.c
drivers/block/umem.c
drivers/block/xsysace.c
drivers/bus/arm-cci.c
drivers/bus/arm-ccn.c
drivers/char/dtlk.c
drivers/char/hangcheck-timer.c
drivers/char/ipmi/bt-bmc.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_si_parisc.c
drivers/char/ipmi/ipmi_si_pci.c
drivers/char/ipmi/ipmi_ssif.c
drivers/char/mem.c
drivers/char/nwbutton.c
drivers/char/nwbutton.h
drivers/char/rtc.c
drivers/char/tpm/tpm-dev-common.c
drivers/clocksource/timer-of.c
drivers/clocksource/timer-of.h
drivers/cpufreq/Kconfig
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/cpufreq/mediatek-cpufreq.c
drivers/dax/device.c
drivers/dma/at_hdmac.c
drivers/dma/dma-jz4740.c
drivers/dma/dmatest.c
drivers/dma/fsl-edma.c
drivers/dma/ioat/init.c
drivers/firmware/arm_scpi.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/esrt.c
drivers/firmware/efi/runtime-map.c
drivers/firmware/google/vpd.c
drivers/firmware/psci_checker.c
drivers/firmware/qemu_fw_cfg.c
drivers/gpio/gpio-74x164.c
drivers/gpio/gpio-davinci.c
drivers/gpio/gpio-pca953x.c
drivers/gpu/drm/amd/acp/Makefile
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdkfd/Makefile
drivers/gpu/drm/amd/amdkfd/kfd_module.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/display/Makefile
drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/Makefile
drivers/gpu/drm/amd/display/dc/basics/Makefile
drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
drivers/gpu/drm/amd/display/dc/bios/Makefile
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
drivers/gpu/drm/amd/display/dc/calcs/Makefile
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_debug.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dc_helper.c
drivers/gpu/drm/amd/display/dc/dce/Makefile
drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dce100/Makefile
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
drivers/gpu/drm/amd/display/dc/dce110/Makefile
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
drivers/gpu/drm/amd/display/dc/dce112/Makefile
drivers/gpu/drm/amd/display/dc/dce120/Makefile
drivers/gpu/drm/amd/display/dc/dce80/Makefile
drivers/gpu/drm/amd/display/dc/dcn10/Makefile
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/gpio/Makefile
drivers/gpu/drm/amd/display/dc/i2caux/Makefile
drivers/gpu/drm/amd/display/dc/inc/core_status.h
drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
drivers/gpu/drm/amd/display/dc/irq/Makefile
drivers/gpu/drm/amd/display/dc/virtual/Makefile
drivers/gpu/drm/amd/display/modules/freesync/Makefile
drivers/gpu/drm/amd/lib/Makefile
drivers/gpu/drm/amd/powerplay/Makefile
drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
drivers/gpu/drm/amd/powerplay/inc/smu72.h
drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
drivers/gpu/drm/amd/powerplay/smumgr/Makefile
drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
drivers/gpu/drm/arm/hdlcd_crtc.c
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/arm/malidp_crtc.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/arm/malidp_hw.c
drivers/gpu/drm/arm/malidp_hw.h
drivers/gpu/drm/arm/malidp_planes.c
drivers/gpu/drm/bridge/adv7511/adv7511.h
drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/bridge/lvds-encoder.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_crtc_internal.h
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_lease.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_mode_config.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/drm_vblank.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_gemfs.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_uncore.h
drivers/gpu/drm/i915/selftests/lib_sw_fence.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/omapdrm/displays/Kconfig
drivers/gpu/drm/omapdrm/dss/dpi.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/rockchip/dw-mipi-dsi.c
drivers/gpu/drm/rockchip/rockchip_drm_psr.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/vc4/vc4_bo.c
drivers/gpu/drm/vc4/vc4_gem.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_irq.c
drivers/gpu/drm/vgem/vgem_fence.c
drivers/gpu/drm/via/via_dmablit.c
drivers/hid/hid-appleir.c
drivers/hid/hid-prodikeys.c
drivers/hid/hid-wiimote-core.c
drivers/hv/channel.c
drivers/hv/channel_mgmt.c
drivers/hwmon/jc42.c
drivers/hwmon/pmbus/pmbus_core.c
drivers/hwtracing/stm/ftrace.c
drivers/i2c/busses/i2c-cht-wc.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-piix4.c
drivers/i2c/busses/i2c-stm32.h
drivers/i2c/busses/i2c-stm32f4.c
drivers/i2c/busses/i2c-stm32f7.c
drivers/i2c/i2c-boardinfo.c
drivers/iio/adc/cpcap-adc.c
drivers/iio/adc/meson_saradc.c
drivers/iio/common/ssp_sensors/ssp_dev.c
drivers/iio/health/max30102.c
drivers/iio/industrialio-core.c
drivers/iio/proximity/sx9500.c
drivers/infiniband/Kconfig
drivers/infiniband/core/cma.c
drivers/infiniband/core/device.c
drivers/infiniband/core/iwcm.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/security.c
drivers/infiniband/core/umem.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hns/hns_roce_alloc.c
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hem.c
drivers/infiniband/hw/hns/hns_roce_hem.h
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/i40iw/i40iw_cm.c
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
drivers/infiniband/hw/i40iw/i40iw_d.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mthca/mthca_catas.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/input/gameport/gameport.c
drivers/input/input.c
drivers/input/joystick/db9.c
drivers/input/joystick/gamecon.c
drivers/input/joystick/turbografx.c
drivers/input/touchscreen/s3c2410_ts.c
drivers/iommu/intel-iommu.c
drivers/iommu/iova.c
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic-v4.c
drivers/irqchip/irq-imgpdc.c
drivers/irqchip/irq-s3c24xx.c
drivers/irqchip/irq-sni-exiu.c
drivers/irqchip/qcom-irq-combiner.c
drivers/isdn/capi/capidrv.c
drivers/isdn/divert/isdn_divert.c
drivers/isdn/hardware/eicon/divasi.c
drivers/isdn/hardware/mISDN/hfcmulti.c
drivers/isdn/hardware/mISDN/hfcpci.c
drivers/isdn/hardware/mISDN/mISDNisar.c
drivers/isdn/i4l/isdn_common.c
drivers/isdn/i4l/isdn_net.c
drivers/isdn/i4l/isdn_ppp.c
drivers/isdn/i4l/isdn_tty.c
drivers/lightnvm/pblk-core.c
drivers/lightnvm/pblk-gc.c
drivers/lightnvm/pblk-init.c
drivers/lightnvm/pblk-rl.c
drivers/lightnvm/pblk.h
drivers/lightnvm/rrpc.c
drivers/md/bcache/alloc.c
drivers/md/bcache/btree.c
drivers/md/bcache/extents.c
drivers/md/bcache/journal.c
drivers/md/bcache/request.c
drivers/md/dm-bufio.c
drivers/md/dm-cache-target.c
drivers/md/dm-mpath.c
drivers/md/dm-snap.c
drivers/md/dm-table.c
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5-cache.c
drivers/md/raid5.c
drivers/media/common/saa7146/saa7146_vbi.c
drivers/media/common/siano/smscoreapi.c
drivers/media/dvb-core/dvb_ca_en50221.c
drivers/media/dvb-core/dvb_frontend.c
drivers/media/dvb-core/dvb_net.c
drivers/media/dvb-frontends/af9013.h
drivers/media/dvb-frontends/ascot2e.h
drivers/media/dvb-frontends/cxd2820r.h
drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h
drivers/media/dvb-frontends/drx39xyj/drx_driver.h
drivers/media/dvb-frontends/drx39xyj/drxj.c
drivers/media/dvb-frontends/drx39xyj/drxj.h
drivers/media/dvb-frontends/drxk.h
drivers/media/dvb-frontends/drxk_hard.c
drivers/media/dvb-frontends/dvb-pll.h
drivers/media/dvb-frontends/helene.h
drivers/media/dvb-frontends/horus3a.h
drivers/media/dvb-frontends/ix2505v.c
drivers/media/dvb-frontends/ix2505v.h
drivers/media/dvb-frontends/l64781.c
drivers/media/dvb-frontends/m88ds3103.h
drivers/media/dvb-frontends/mb86a20s.h
drivers/media/dvb-frontends/mn88472.h
drivers/media/dvb-frontends/rtl2830.h
drivers/media/dvb-frontends/rtl2832.h
drivers/media/dvb-frontends/rtl2832_sdr.h
drivers/media/dvb-frontends/sp887x.c
drivers/media/dvb-frontends/stb6000.h
drivers/media/dvb-frontends/stv0299.c
drivers/media/dvb-frontends/tda10071.h
drivers/media/dvb-frontends/tda826x.h
drivers/media/dvb-frontends/tua6100.c
drivers/media/dvb-frontends/tua6100.h
drivers/media/dvb-frontends/zd1301_demod.h
drivers/media/dvb-frontends/zl10036.c
drivers/media/dvb-frontends/zl10036.h
drivers/media/i2c/Kconfig
drivers/media/i2c/et8ek8/Kconfig
drivers/media/i2c/imx274.c
drivers/media/i2c/lm3560.c
drivers/media/i2c/m5mols/m5mols_capture.c
drivers/media/i2c/m5mols/m5mols_controls.c
drivers/media/i2c/m5mols/m5mols_core.c
drivers/media/i2c/ov5647.c
drivers/media/i2c/s5k6a3.c
drivers/media/i2c/s5k6aa.c
drivers/media/i2c/tvp514x.c
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
drivers/media/pci/solo6x10/solo6x10-enc.c
drivers/media/pci/sta2x11/sta2x11_vip.c
drivers/media/pci/tw68/tw68-risc.c
drivers/media/platform/davinci/vpif.c
drivers/media/platform/davinci/vpif_capture.c
drivers/media/platform/davinci/vpif_display.c
drivers/media/platform/exynos4-is/fimc-capture.c
drivers/media/platform/exynos4-is/media-dev.c
drivers/media/platform/exynos4-is/mipi-csis.c
drivers/media/platform/fsl-viu.c
drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
drivers/media/platform/mtk-vpu/mtk_vpu.c
drivers/media/platform/pxa_camera.c
drivers/media/platform/rcar_fdp1.c
drivers/media/platform/rcar_jpu.c
drivers/media/platform/s3c-camif/camif-core.c
drivers/media/platform/s5p-mfc/s5p_mfc.c
drivers/media/platform/sh_veu.c
drivers/media/platform/soc_camera/soc_scale_crop.c
drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
drivers/media/platform/sti/hva/hva-h264.c
drivers/media/platform/ti-vpe/vpe.c
drivers/media/platform/vim2m.c
drivers/media/platform/vsp1/vsp1_dl.c
drivers/media/radio/radio-si476x.c
drivers/media/radio/radio-wl1273.c
drivers/media/rc/img-ir/img-ir-hw.c
drivers/media/rc/imon.c
drivers/media/rc/ir-jvc-decoder.c
drivers/media/rc/ir-lirc-codec.c
drivers/media/rc/ir-nec-decoder.c
drivers/media/rc/ir-sanyo-decoder.c
drivers/media/rc/ir-sharp-decoder.c
drivers/media/rc/ir-xmp-decoder.c
drivers/media/rc/rc-ir-raw.c
drivers/media/rc/rc-main.c
drivers/media/rc/sir_ir.c
drivers/media/rc/st_rc.c
drivers/media/rc/streamzap.c
drivers/media/tuners/mt2063.c
drivers/media/usb/au0828/au0828-dvb.c
drivers/media/usb/au0828/au0828-video.c
drivers/media/usb/dvb-usb/cinergyT2-fe.c
drivers/media/usb/dvb-usb/dib0700_devices.c
drivers/media/usb/dvb-usb/dibusb-common.c
drivers/media/usb/dvb-usb/friio-fe.c
drivers/media/usb/dvb-usb/friio.c
drivers/media/usb/gspca/ov519.c
drivers/media/usb/pwc/pwc-dec23.c
drivers/media/usb/siano/smsusb.c
drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
drivers/media/usb/usbtv/usbtv-core.c
drivers/media/v4l2-core/tuner-core.c
drivers/media/v4l2-core/v4l2-async.c
drivers/media/v4l2-core/v4l2-dv-timings.c
drivers/media/v4l2-core/v4l2-fwnode.c
drivers/media/v4l2-core/v4l2-mem2mem.c
drivers/media/v4l2-core/videobuf-core.c
drivers/media/v4l2-core/videobuf-dma-sg.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/media/v4l2-core/videobuf2-memops.c
drivers/media/v4l2-core/videobuf2-v4l2.c
drivers/memstick/core/ms_block.c
drivers/mfd/cros_ec_spi.c
drivers/mfd/rtsx_usb.c
drivers/mfd/twl4030-audio.c
drivers/mfd/twl6040.c
drivers/misc/cxl/pci.c
drivers/misc/eeprom/at24.c
drivers/misc/pti.c
drivers/mmc/core/block.c
drivers/mmc/core/bus.c
drivers/mmc/core/card.h
drivers/mmc/core/debugfs.c
drivers/mmc/core/host.c
drivers/mmc/core/mmc.c
drivers/mmc/core/quirks.h
drivers/mmc/core/sd.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci.c
drivers/mtd/mtdcore.c
drivers/mtd/mtdsuper.c
drivers/mtd/nand/brcmnand/brcmnand.c
drivers/mtd/nand/gpio.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/sm_ftl.c
drivers/net/caif/caif_hsi.c
drivers/net/can/flexcan.c
drivers/net/can/peak_canfd/peak_canfd.c
drivers/net/can/peak_canfd/peak_pciefd_main.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/ti_hecc.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/can/usb/mcba_usb.c
drivers/net/can/usb/usb_8dev.c
drivers/net/cris/eth_v10.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/bcm_sf2_cfp.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/phy.c
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/eql.c
drivers/net/ethernet/adi/bfin_mac.c
drivers/net/ethernet/agere/et131x.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
drivers/net/ethernet/aquantia/atlantic/ver.h
drivers/net/ethernet/arc/emac.h
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/arc/emac_rockchip.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/cisco/enic/enic_clsf.c
drivers/net/ethernet/cisco/enic/enic_clsf.h
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlx5/core/rl.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/netronome/nfp/bpf/main.c
drivers/net/ethernet/netronome/nfp/bpf/main.h
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qualcomm/emac/emac-phy.c
drivers/net/ethernet/qualcomm/emac/emac.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/rocker/rocker_ofdpa.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/norm_desc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/ethernet/ti/tlan.c
drivers/net/ethernet/toshiba/spider_net.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/xilinx/Kconfig
drivers/net/hamradio/scc.c
drivers/net/hippi/rrunner.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/phy/at803x.c
drivers/net/phy/marvell.c
drivers/net/phy/mdio-xgene.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/meson-gxl.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/phy/sfp.c
drivers/net/slip/slip.c
drivers/net/tap.c
drivers/net/tun.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wan/hdlc_ppp.c
drivers/net/wireless/atmel/at76c50x-usb.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/intel/iwlwifi/dvm/main.c
drivers/net/wireless/intel/iwlwifi/dvm/tt.c
drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/intersil/hostap/hostap_ap.c
drivers/net/wireless/intersil/hostap/hostap_hw.c
drivers/net/wireless/intersil/orinoco/orinoco_usb.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
drivers/net/wireless/quantenna/qtnfmac/core.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/ti/wlcore/main.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netfront.c
drivers/nfc/nfcmrvl/fw_dnld.c
drivers/nfc/pn533/pn533.c
drivers/nfc/st-nci/ndlc.c
drivers/nfc/st-nci/se.c
drivers/nfc/st21nfca/se.c
drivers/ntb/test/ntb_pingpong.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/fc.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/fc.c
drivers/nvme/target/loop.c
drivers/of/dynamic.c
drivers/of/of_mdio.c
drivers/of/overlay.c
drivers/of/unittest.c
drivers/parisc/lba_pci.c
drivers/pci/Kconfig
drivers/pci/Makefile
drivers/pci/host/pcie-rcar.c
drivers/pci/htirq.c [deleted file]
drivers/pci/pci-driver.c
drivers/pinctrl/intel/pinctrl-denverton.c
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
drivers/pinctrl/pinctrl-gemini.c
drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
drivers/platform/x86/asus-wireless.c
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/dell-wmi.c
drivers/platform/x86/sony-laptop.c
drivers/pps/clients/pps-ktimer.c
drivers/rtc/rtc-dev.c
drivers/s390/Makefile
drivers/s390/block/Kconfig
drivers/s390/block/dasd.c
drivers/s390/block/dasd_devmap.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dasd_int.h
drivers/s390/block/dcssblk.c
drivers/s390/block/scm_blk.c
drivers/s390/block/xpram.c
drivers/s390/char/Kconfig
drivers/s390/char/defkeymap.map
drivers/s390/char/fs3270.c
drivers/s390/char/hmcdrv_mod.c
drivers/s390/char/monreader.c
drivers/s390/char/monwriter.c
drivers/s390/char/raw3270.c
drivers/s390/char/sclp.c
drivers/s390/char/sclp_async.c
drivers/s390/char/tape_34xx.c
drivers/s390/char/tape_3590.c
drivers/s390/char/tape_class.c
drivers/s390/char/tape_core.c
drivers/s390/char/tty3270.c
drivers/s390/char/vmlogrdr.c
drivers/s390/char/vmur.c
drivers/s390/char/zcore.c
drivers/s390/cio/blacklist.h
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/chp.c
drivers/s390/cio/chsc.c
drivers/s390/cio/chsc_sch.c
drivers/s390/cio/cio.c
drivers/s390/cio/cmf.c
drivers/s390/cio/css.c
drivers/s390/cio/device.c
drivers/s390/cio/device_fsm.c
drivers/s390/cio/device_ops.c
drivers/s390/cio/eadm_sch.c
drivers/s390/cio/isc.c
drivers/s390/cio/qdio_main.c
drivers/s390/cio/qdio_setup.c
drivers/s390/cio/scm.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/pkey_api.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/crypto/zcrypt_api.h
drivers/s390/crypto/zcrypt_card.c
drivers/s390/crypto/zcrypt_cca_key.h
drivers/s390/crypto/zcrypt_cex2a.c
drivers/s390/crypto/zcrypt_cex2a.h
drivers/s390/crypto/zcrypt_cex4.c
drivers/s390/crypto/zcrypt_error.h
drivers/s390/crypto/zcrypt_msgtype50.c
drivers/s390/crypto/zcrypt_msgtype50.h
drivers/s390/crypto/zcrypt_msgtype6.c
drivers/s390/crypto/zcrypt_msgtype6.h
drivers/s390/crypto/zcrypt_pcixcc.c
drivers/s390/crypto/zcrypt_pcixcc.h
drivers/s390/crypto/zcrypt_queue.c
drivers/s390/net/Kconfig
drivers/s390/net/ctcm_main.c
drivers/s390/net/fsm.c
drivers/s390/net/lcs.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3.h
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/s390/net/smsgiucv.c
drivers/s390/net/smsgiucv_app.c
drivers/s390/scsi/Makefile
drivers/s390/scsi/zfcp_aux.c
drivers/s390/scsi/zfcp_fsf.c
drivers/s390/virtio/Makefile
drivers/s390/virtio/virtio_ccw.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/commsup.c
drivers/scsi/aacraid/linit.c
drivers/scsi/aacraid/rx.c
drivers/scsi/aacraid/src.c
drivers/scsi/aic94xx/aic94xx_hwi.c
drivers/scsi/aic94xx/aic94xx_tmf.c
drivers/scsi/arcmsr/arcmsr_hba.c
drivers/scsi/arm/fas216.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/bfa/bfad.c
drivers/scsi/bfa/bfad_bsg.c
drivers/scsi/bfa/bfad_drv.h
drivers/scsi/bfa/bfad_im.c
drivers/scsi/bfa/bfad_im.h
drivers/scsi/bnx2fc/bnx2fc_tgt.c
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/esas2r/esas2r_main.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/fnic/fnic_main.c
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
drivers/scsi/ipr.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/libsas/sas_scsi_host.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/mvsas/mv_sas.c
drivers/scsi/ncr53c8xx.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/pmcraid.c
drivers/scsi/scsi_debugfs.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_spi.c
drivers/scsi/sd.c
drivers/scsi/sym53c8xx_2/sym_glue.c
drivers/scsi/ufs/ufshcd.c
drivers/soc/amlogic/meson-gx-socinfo.c
drivers/spi/spi-armada-3700.c
drivers/spi/spi-atmel.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sun4i.c
drivers/spi/spi-xilinx.c
drivers/staging/ccree/ssi_hash.c
drivers/staging/comedi/drivers/ni_atmio.c
drivers/staging/greybus/operation.c
drivers/staging/irda/include/net/irda/timer.h
drivers/staging/lustre/lnet/lnet/lib-socket.c
drivers/staging/lustre/lnet/lnet/net_fault.c
drivers/staging/lustre/lustre/llite/file.c
drivers/staging/lustre/lustre/llite/llite_lib.c
drivers/staging/lustre/lustre/ptlrpc/service.c
drivers/staging/media/atomisp/include/linux/atomisp.h
drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h
drivers/staging/media/imx/imx-ic-prpencvf.c
drivers/staging/media/imx/imx-media-csi.c
drivers/staging/most/hdm-usb/hdm_usb.c
drivers/staging/octeon-usb/octeon-hcd.c
drivers/staging/pi433/rf69.c
drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
drivers/staging/rtl8712/recv_linux.c
drivers/staging/rtl8712/rtl8712_led.c
drivers/staging/speakup/main.c
drivers/staging/speakup/synth.c
drivers/staging/unisys/visorbus/visorbus_main.c
drivers/staging/unisys/visornic/visornic_main.c
drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
drivers/target/iscsi/cxgbit/cxgbit.h
drivers/target/iscsi/cxgbit/cxgbit_cm.c
drivers/target/iscsi/cxgbit/cxgbit_ddp.c
drivers/target/iscsi/cxgbit/cxgbit_main.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_seq_pdu_list.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/target_core_alua.c
drivers/target/target_core_alua.h
drivers/target/target_core_configfs.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_file.c
drivers/target/target_core_internal.h
drivers/target/target_core_pr.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/tee/optee/core.c
drivers/tty/cyclades.c
drivers/tty/ipwireless/hardware.c
drivers/tty/isicom.c
drivers/tty/moxa.c
drivers/tty/n_gsm.c
drivers/tty/n_r3964.c
drivers/tty/rocket.c
drivers/tty/serdev/serdev-ttyport.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_early.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/crisv10.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/ifx6x60.c
drivers/tty/serial/imx.c
drivers/tty/serial/kgdb_nmi.c
drivers/tty/serial/max3100.c
drivers/tty/serial/mux.c
drivers/tty/serial/pnx8xxx_uart.c
drivers/tty/serial/sa1100.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/sn_console.c
drivers/tty/synclink.c
drivers/tty/synclink_gt.c
drivers/tty/synclinkmp.c
drivers/tty/vt/keyboard.c
drivers/tty/vt/vt.c
drivers/usb/atm/cxacru.c
drivers/usb/atm/speedtch.c
drivers/usb/atm/usbatm.c
drivers/usb/common/ulpi.c
drivers/usb/core/config.c
drivers/usb/core/devio.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/hcd_queue.c
drivers/usb/dwc2/params.c
drivers/usb/dwc3/dwc3-of-simple.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/legacy/Kconfig
drivers/usb/gadget/udc/at91_udc.c
drivers/usb/gadget/udc/bdc/bdc_core.c
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/gadget/udc/m66592-udc.c
drivers/usb/gadget/udc/omap_udc.c
drivers/usb/gadget/udc/pxa25x_udc.c
drivers/usb/gadget/udc/r8a66597-udc.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/ehci-dbg.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/oxu210hp-hcd.c
drivers/usb/host/r8a66597-hcd.c
drivers/usb/host/sl811-hcd.c
drivers/usb/host/uhci-hcd.c
drivers/usb/host/uhci-q.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/musb/da8xx.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/option.c
drivers/usb/serial/usb_debug.c
drivers/usb/storage/realtek_cr.c
drivers/usb/storage/uas-detect.h
drivers/usb/storage/unusual_devs.h
drivers/usb/storage/unusual_uas.h
drivers/usb/typec/Kconfig
drivers/usb/typec/ucsi/Kconfig
drivers/usb/usbip/stub_rx.c
drivers/usb/usbip/stub_tx.c
drivers/usb/usbip/usbip_common.h
drivers/usb/usbip/vhci_hcd.c
drivers/usb/usbip/vhci_sysfs.c
drivers/uwb/drp.c
drivers/uwb/neh.c
drivers/uwb/rsv.c
drivers/uwb/uwb-internal.h
drivers/vhost/net.c
drivers/virtio/virtio.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_mmio.c
drivers/watchdog/alim7101_wdt.c
drivers/watchdog/at91sam9_wdt.c
drivers/watchdog/bcm47xx_wdt.c
drivers/watchdog/bcm63xx_wdt.c
drivers/watchdog/cpu5wdt.c
drivers/watchdog/machzwd.c
drivers/watchdog/mixcomwd.c
drivers/watchdog/mpc8xxx_wdt.c
drivers/watchdog/mtx-1_wdt.c
drivers/watchdog/nuc900_wdt.c
drivers/watchdog/pcwd.c
drivers/watchdog/pika_wdt.c
drivers/watchdog/rdc321x_wdt.c
drivers/watchdog/sbc60xxwdt.c
drivers/watchdog/sc520_wdt.c
drivers/watchdog/shwdt.c
drivers/watchdog/via_wdt.c
drivers/watchdog/w83877f_wdt.c
drivers/xen/Kconfig
drivers/xen/grant-table.c
drivers/xen/pvcalls-front.c
firmware/Makefile
fs/9p/vfs_super.c
fs/adfs/super.c
fs/affs/amigaffs.c
fs/affs/bitmap.c
fs/affs/super.c
fs/afs/cell.c
fs/afs/dir.c
fs/afs/flock.c
fs/afs/internal.h
fs/afs/rotate.c
fs/afs/security.c
fs/afs/server_list.c
fs/afs/super.c
fs/afs/write.c
fs/autofs4/root.c
fs/autofs4/waitq.c
fs/befs/ChangeLog
fs/befs/linuxvfs.c
fs/btrfs/compression.c
fs/btrfs/compression.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/relocation.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/btrfs/tests/extent-io-tests.c
fs/btrfs/tests/inode-tests.c
fs/btrfs/tree-checker.c
fs/btrfs/tree-checker.h
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/mds_client.c
fs/ceph/super.c
fs/cifs/cifs_fs_sb.h
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/inode.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/xattr.c
fs/coda/inode.c
fs/cramfs/Kconfig
fs/cramfs/inode.c
fs/ecryptfs/main.c
fs/efs/super.c
fs/exec.c
fs/ext2/balloc.c
fs/ext2/ialloc.c
fs/ext2/super.c
fs/ext4/extents.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/super.c
fs/f2fs/checkpoint.c
fs/f2fs/f2fs.h
fs/f2fs/gc.c
fs/f2fs/recovery.c
fs/f2fs/super.c
fs/fat/fatent.c
fs/fat/inode.c
fs/fat/misc.c
fs/fat/namei_msdos.c
fs/freevxfs/vxfs_super.c
fs/fs-writeback.c
fs/fuse/inode.c
fs/gfs2/ops_fstype.c
fs/gfs2/super.c
fs/gfs2/trans.c
fs/hfs/mdb.c
fs/hfs/super.c
fs/hfsplus/super.c
fs/hpfs/dir.c
fs/hpfs/dnode.c
fs/hpfs/map.c
fs/hpfs/super.c
fs/hugetlbfs/inode.c
fs/inode.c
fs/isofs/inode.c
fs/jffs2/fs.c
fs/jffs2/os-linux.h
fs/jffs2/super.c
fs/jfs/super.c
fs/kernfs/mount.c
fs/libfs.c
fs/lockd/host.c
fs/lockd/mon.c
fs/lockd/svc.c
fs/lockd/svcsubs.c
fs/locks.c
fs/mbcache.c
fs/minix/inode.c
fs/namei.c
fs/namespace.c
fs/ncpfs/inode.c
fs/nfs/client.c
fs/nfs/dir.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4client.c
fs/nfs/nfs4state.c
fs/nfs/super.c
fs/nfs/write.c
fs/nfs_common/grace.c
fs/nfsd/auth.c
fs/nfsd/export.c
fs/nfsd/netns.h
fs/nfsd/nfs4idmap.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsctl.c
fs/nfsd/nfssvc.c
fs/nilfs2/segment.c
fs/nilfs2/super.c
fs/nilfs2/the_nilfs.c
fs/notify/fsnotify.c
fs/nsfs.c
fs/ntfs/super.c
fs/ocfs2/cluster/tcp.c
fs/ocfs2/file.c
fs/ocfs2/super.c
fs/ocfs2/xattr.c
fs/openpromfs/inode.c
fs/orangefs/super.c
fs/overlayfs/Kconfig
fs/overlayfs/dir.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/ovl_entry.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/proc/base.c
fs/proc/inode.c
fs/proc/root.c
fs/proc_namespace.c
fs/pstore/platform.c
fs/qnx4/inode.c
fs/qnx6/inode.c
fs/quota/dquot.c
fs/reiserfs/inode.c
fs/reiserfs/journal.c
fs/reiserfs/prints.c
fs/reiserfs/super.c
fs/reiserfs/xattr.c
fs/romfs/super.c
fs/squashfs/super.c
fs/statfs.c
fs/super.c
fs/sysfs/mount.c
fs/sysv/inode.c
fs/sysv/super.c
fs/ubifs/file.c
fs/ubifs/io.c
fs/ubifs/super.c
fs/ubifs/ubifs.h
fs/udf/super.c
fs/ufs/balloc.c
fs/ufs/ialloc.c
fs/ufs/super.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_ialloc.c
fs/xfs/libxfs/xfs_ialloc.h
fs/xfs/scrub/inode.c
fs/xfs/scrub/quota.c
fs/xfs/scrub/scrub.c
fs/xfs/scrub/trace.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_bmap_item.c
fs/xfs/xfs_bmap_item.h
fs/xfs/xfs_buf.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_dquot_item.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_iomap.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_qm.c
fs/xfs/xfs_refcount_item.c
fs/xfs/xfs_refcount_item.h
fs/xfs/xfs_reflink.c
fs/xfs/xfs_super.c
fs/xfs/xfs_super.h
fs/xfs/xfs_symlink.c
fs/xfs/xfs_trace.c
include/acpi/acpi_bus.h
include/acpi/acpi_drivers.h
include/asm-generic/pgtable.h
include/crypto/if_alg.h
include/crypto/internal/hash.h
include/drm/drm_connector.h
include/drm/drm_edid.h
include/drm/drm_mode_config.h
include/drm/ttm/ttm_page_alloc.h
include/kvm/arm_arch_timer.h
include/kvm/arm_vgic.h
include/linux/bio.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/bpf_verifier.h
include/linux/compiler.h
include/linux/completion.h
include/linux/cred.h
include/linux/debugfs.h
include/linux/dma-mapping.h
include/linux/fs.h
include/linux/htirq.h [deleted file]
include/linux/hugetlb.h
include/linux/hyperv.h
include/linux/idr.h
include/linux/iio/timer/stm32-lptim-trigger.h
include/linux/intel-pti.h [moved from include/linux/pti.h with 94% similarity]
include/linux/ipv6.h
include/linux/irq.h
include/linux/irqchip/arm-gic-v4.h
include/linux/irqdesc.h
include/linux/kallsyms.h
include/linux/kmemcheck.h [deleted file]
include/linux/kthread.h
include/linux/kvm_host.h
include/linux/libgcc.h [moved from include/lib/libgcc.h with 100% similarity]
include/linux/lockdep.h
include/linux/mfd/rtsx_pci.h
include/linux/migrate.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/oom.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/pm.h
include/linux/ptr_ring.h
include/linux/rbtree.h
include/linux/rculist_nulls.h
include/linux/rwlock_types.h
include/linux/sched.h
include/linux/sched/coredump.h
include/linux/serdev.h
include/linux/skbuff.h
include/linux/spi/spi.h
include/linux/spinlock.h
include/linux/spinlock_types.h
include/linux/string.h
include/linux/sunrpc/cache.h
include/linux/sysfs.h
include/linux/tcp.h
include/linux/timekeeper_internal.h
include/linux/timekeeping.h
include/linux/timer.h
include/linux/trace.h
include/linux/usb/usbnet.h
include/linux/workqueue.h
include/linux/writeback.h
include/net/cfg80211.h
include/net/gue.h
include/net/ip.h
include/net/pkt_cls.h
include/net/red.h
include/net/sch_generic.h
include/net/sctp/structs.h
include/net/sock.h
include/net/tc_act/tc_sample.h
include/net/tcp.h
include/scsi/libsas.h
include/target/target_core_base.h
include/trace/events/kvm.h
include/trace/events/preemptirq.h
include/trace/events/sched.h
include/trace/events/tcp.h
include/trace/events/xdp.h
include/uapi/asm-generic/bpf_perf_event.h [new file with mode: 0644]
include/uapi/linux/bcache.h
include/uapi/linux/bfs_fs.h
include/uapi/linux/bpf_perf_event.h
include/uapi/linux/kfd_ioctl.h
include/uapi/linux/kvm.h
include/uapi/linux/pkt_sched.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/usb/ch9.h
init/main.c
ipc/mqueue.c
kernel/bpf/core.c
kernel/bpf/hashtab.c
kernel/bpf/offload.c
kernel/bpf/verifier.c
kernel/cgroup/debug.c
kernel/cgroup/stat.c
kernel/cpu.c
kernel/debug/kdb/kdb_io.c
kernel/events/core.c
kernel/exit.c
kernel/futex.c
kernel/groups.c
kernel/irq/manage.c
kernel/irq/matrix.c
kernel/irq/spurious.c
kernel/jump_label.c
kernel/kallsyms.c
kernel/kcov.c
kernel/kthread.c
kernel/locking/lockdep.c
kernel/locking/spinlock.c
kernel/module.c
kernel/padata.c
kernel/printk/printk.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/wait.c
kernel/time/Kconfig
kernel/time/clocksource.c
kernel/time/posix-timers.c
kernel/time/timekeeping.c
kernel/time/timer.c
kernel/time/timer_list.c
kernel/trace/Kconfig
kernel/trace/blktrace.c
kernel/trace/bpf_trace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_stack.c
kernel/uid16.c
kernel/workqueue.c
lib/Kconfig.debug
lib/ashldi3.c
lib/ashrdi3.c
lib/asn1_decoder.c
lib/cmpdi2.c
lib/lshrdi3.c
lib/muldi3.c
lib/nlattr.c
lib/oid_registry.c
lib/random32.c
lib/rbtree.c
lib/test_bpf.c
lib/test_printf.c
lib/ucmpdi2.c
lib/vsprintf.c
mm/backing-dev.c
mm/early_ioremap.c
mm/frame_vector.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/kasan/report.c
mm/kmemcheck.c [deleted file]
mm/kmemleak.c
mm/madvise.c
mm/memcontrol.c
mm/memory.c
mm/mmap.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/percpu.c
mm/shmem.c
mm/slab.c
net/802/garp.c
net/802/mrp.c
net/9p/trans_fd.c
net/appletalk/aarp.c
net/appletalk/ddp.c
net/atm/lec.c
net/atm/mpc.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v.c
net/batman-adv/fragmentation.c
net/batman-adv/tp_meter.c
net/bluetooth/hidp/core.c
net/bluetooth/rfcomm/core.c
net/bluetooth/sco.c
net/bridge/br_netlink.c
net/can/proc.c
net/core/dev.c
net/core/drop_monitor.c
net/core/gen_estimator.c
net/core/neighbour.c
net/core/net_namespace.c
net/core/netprio_cgroup.c
net/core/skbuff.c
net/dccp/minisocks.c
net/dccp/proto.c
net/decnet/dn_route.c
net/decnet/dn_timer.c
net/dsa/slave.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/igmp.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel.c
net/ipv4/ipmr.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/raw.c
net/ipv4/tcp.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_rate.c
net/ipv4/tcp_recovery.c
net/ipv4/tcp_timer.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_MASQUERADE.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/kcm/kcmsock.c
net/lapb/lapb_timer.c
net/mac80211/ht.c
net/ncsi/ncsi-manage.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_h323_asn1.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_exthdr.c
net/netfilter/x_tables.c
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_LED.c
net/netfilter/xt_bpf.c
net/netfilter/xt_osf.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/netrom/nr_loopback.c
net/netrom/nr_timer.c
net/nfc/nci/core.c
net/openvswitch/flow.c
net/rds/rdma.c
net/rds/send.c
net/rose/rose_link.c
net/rose/rose_timer.c
net/rxrpc/af_rxrpc.c
net/rxrpc/call_event.c
net/rxrpc/call_object.c
net/rxrpc/conn_event.c
net/rxrpc/conn_object.c
net/rxrpc/input.c
net/rxrpc/sendmsg.c
net/sched/act_meta_mark.c
net/sched/act_meta_skbtcindex.c
net/sched/act_sample.c
net/sched/cls_api.c
net/sched/cls_bpf.c
net/sched/cls_u32.c
net/sched/sch_api.c
net/sched/sch_choke.c
net/sched/sch_generic.c
net/sched/sch_gred.c
net/sched/sch_ingress.c
net/sched/sch_red.c
net/sched/sch_sfq.c
net/sctp/chunk.c
net/sctp/debug.c
net/sctp/outqueue.c
net/sctp/socket.c
net/sctp/ulpqueue.c
net/socket.c
net/sunrpc/auth_gss/gss_rpc_xdr.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/cache.c
net/sunrpc/clnt.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcauth_unix.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/sunrpc/xprtsock.c
net/tipc/bearer.c
net/tipc/group.c
net/tipc/monitor.c
net/tipc/server.c
net/tipc/socket.c
net/tipc/udp_media.c
net/vmw_vsock/hyperv_transport.c
net/wireless/Makefile
net/wireless/certs/sforshee.hex [new file with mode: 0644]
net/wireless/certs/sforshee.x509 [deleted file]
net/wireless/lib80211.c
net/wireless/nl80211.c
net/x25/af_x25.c
net/x25/x25_link.c
net/x25/x25_timer.c
net/xfrm/xfrm_state.c
samples/bpf/Makefile
samples/bpf/bpf_load.c
samples/hidraw/Makefile
samples/seccomp/Makefile
samples/sockmap/Makefile
samples/statx/Makefile
samples/uhid/Makefile
scripts/Makefile.build
scripts/Makefile.lib
scripts/bloat-o-meter
scripts/checkpatch.pl
scripts/coccicheck
scripts/coccinelle/api/setup_timer.cocci [deleted file]
scripts/faddr2line
scripts/kconfig/symbol.c
scripts/kernel-doc
scripts/package/Makefile
security/apparmor/apparmorfs.c
security/apparmor/include/audit.h
security/apparmor/include/lib.h
security/keys/gc.c
security/keys/key.c
security/keys/keyctl.c
security/keys/request_key.c
sound/core/pcm.c
sound/core/rawmidi.c
sound/core/seq/seq_timer.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/usb/line6/driver.c
sound/usb/mixer.c
sound/usb/quirks.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm_perf.h
tools/arch/s390/include/uapi/asm/ptrace.h [new file with mode: 0644]
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/disabled-features.h
tools/bpf/bpftool/Documentation/Makefile
tools/bpf/bpftool/Makefile
tools/bpf/bpftool/main.c
tools/bpf/bpftool/main.h
tools/hv/hv_kvp_daemon.c
tools/include/linux/compiler.h
tools/include/linux/kmemcheck.h [deleted file]
tools/include/linux/lockdep.h
tools/include/uapi/asm-generic/bpf_perf_event.h [new file with mode: 0644]
tools/include/uapi/asm-generic/mman.h
tools/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
tools/include/uapi/drm/drm.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/bpf_perf_event.h
tools/include/uapi/linux/kcmp.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/perf_event.h
tools/include/uapi/linux/prctl.h
tools/kvm/kvm_stat/kvm_stat
tools/kvm/kvm_stat/kvm_stat.txt
tools/objtool/.gitignore
tools/objtool/Makefile
tools/objtool/arch/x86/Build
tools/objtool/arch/x86/decode.c
tools/objtool/arch/x86/include/asm/inat.h [moved from tools/objtool/arch/x86/insn/inat.h with 95% similarity]
tools/objtool/arch/x86/include/asm/inat_types.h [moved from tools/objtool/arch/x86/insn/inat_types.h with 100% similarity]
tools/objtool/arch/x86/include/asm/insn.h [moved from tools/objtool/arch/x86/insn/insn.h with 99% similarity]
tools/objtool/arch/x86/include/asm/orc_types.h [moved from tools/objtool/orc_types.h with 100% similarity]
tools/objtool/arch/x86/lib/inat.c [moved from tools/objtool/arch/x86/insn/inat.c with 99% similarity]
tools/objtool/arch/x86/lib/insn.c [moved from tools/objtool/arch/x86/insn/insn.c with 99% similarity]
tools/objtool/arch/x86/lib/x86-opcode-map.txt [moved from tools/objtool/arch/x86/insn/x86-opcode-map.txt with 99% similarity]
tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk [moved from tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk with 100% similarity]
tools/objtool/orc.h
tools/objtool/orc_dump.c
tools/objtool/sync-check.sh [new file with mode: 0755]
tools/perf/arch/s390/Makefile
tools/perf/arch/s390/util/dwarf-regs.c
tools/perf/bench/numa.c
tools/perf/builtin-help.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/check-headers.sh
tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
tools/perf/tests/shell/trace+probe_vfs_getname.sh
tools/perf/tests/task-exit.c
tools/perf/trace/beauty/mmap.c
tools/perf/util/annotate.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/intel-pt-decoder/inat.h
tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
tools/perf/util/machine.c
tools/perf/util/mmap.h
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/pmu.c
tools/power/cpupower/Makefile
tools/power/cpupower/bench/system.c
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
tools/scripts/Makefile.include
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/test_progs.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/bpf/test_verifier_log.c
tools/testing/selftests/net/config
tools/testing/selftests/x86/5lvl.c [new file with mode: 0644]
tools/testing/selftests/x86/Makefile
tools/testing/selftests/x86/mpx-hw.h
tools/testing/selftests/x86/pkey-helpers.h
tools/testing/selftests/x86/protection_keys.c
tools/usb/usbip/libsrc/vhci_driver.c
tools/virtio/ringtest/ptr_ring.c
tools/vm/slabinfo-gnuplot.sh
virt/kvm/arm/arch_timer.c
virt/kvm/arm/arm.c
virt/kvm/arm/hyp/timer-sr.c
virt/kvm/arm/hyp/vgic-v2-sr.c
virt/kvm/arm/hyp/vgic-v3-sr.c
virt/kvm/arm/mmio.c
virt/kvm/arm/mmu.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-irqfd.c
virt/kvm/arm/vgic/vgic-its.c
virt/kvm/arm/vgic/vgic-mmio-v3.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/arm/vgic/vgic-v4.c [new file with mode: 0644]
virt/kvm/arm/vgic/vgic.c
virt/kvm/arm/vgic/vgic.h
virt/kvm/kvm_main.c

index b44217290e5776ee616437bf8f1b86be93adac0a..6571fbfdb2a1527c25b3a01e9c4228c84adce639 100644 (file)
                        [KVM,ARM] Trap guest accesses to GICv3 common
                        system registers
 
+       kvm-arm.vgic_v4_enable=
+                       [KVM,ARM] Allow use of GICv4 for direct injection of
+                       LPIs.
+
        kvm-intel.ept=  [KVM,Intel] Disable extended page tables
                        (virtualized MMU) support on capable Intel chips.
                        Default is 1 (enabled)
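
On GICv4-capable hardware the new option would normally be passed on the kernel command line; assuming the usual boolean module-parameter syntax (an assumption, since the hunk above does not spell out the accepted values), that is simply:

    kvm-arm.vgic_v4_enable=1
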
index 304bf22bb83cc0ec8dfbbcf2a48b206ecb781afb..fc1c884fea10497357f889b11e33c6d323fecf55 100644 (file)
@@ -75,3 +75,4 @@ stable kernels.
 | Qualcomm Tech. | Falkor v1       | E1003           | QCOM_FALKOR_ERRATUM_1003    |
 | Qualcomm Tech. | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009    |
 | Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
+| Qualcomm Tech. | Falkor v{1,2}   | E1041           | QCOM_FALKOR_ERRATUM_1041    |
index 779211fbb69ffac450f22b0ad6864c7c6c2bd98f..2cddab7efb20df0dcf07c4d7d64a5611138a8d7c 100644 (file)
@@ -898,6 +898,13 @@ controller implements weight and absolute bandwidth limit models for
 normal scheduling policy and absolute bandwidth allocation model for
 realtime scheduling policy.
 
+WARNING: cgroup2 doesn't yet support control of realtime processes and
+the cpu controller can only be enabled when all RT processes are in
+the root cgroup.  Be aware that system management software may already
+have placed RT processes into nonroot cgroups during the system boot
+process, and these processes may need to be moved to the root cgroup
+before the cpu controller can be enabled.
+
 
 CPU Interface Files
 ~~~~~~~~~~~~~~~~~~~
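
The warning above boils down to a small amount of work against the cgroup2 interface files before "+cpu" can be written to cgroup.subtree_control. A minimal user-space sketch, assuming cgroup2 is mounted at /sys/fs/cgroup and that the PID of one stray RT task is given on the command line (error handling kept to a minimum):

    #include <stdio.h>

    static int write_str(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");
            int ret;

            if (!f)
                    return -1;
            ret = (fputs(val, f) < 0) ? -1 : 0;
            if (fclose(f))          /* cgroupfs reports write errors on close, too */
                    ret = -1;
            return ret;
    }

    int main(int argc, char **argv)
    {
            if (argc < 2) {
                    fprintf(stderr, "usage: %s <pid-of-rt-task>\n", argv[0]);
                    return 1;
            }
            /* 1) Move the RT task back into the root cgroup... */
            if (write_str("/sys/fs/cgroup/cgroup.procs", argv[1]))
                    perror("cgroup.procs");
            /* 2) ...after which the cpu controller can be enabled below it. */
            if (write_str("/sys/fs/cgroup/cgroup.subtree_control", "+cpu"))
                    perror("cgroup.subtree_control");
            return 0;
    }

In practice every RT task on the system has to be moved, and this is usually done by the service manager rather than by hand.
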
index 0054bd48be849035146239cb6efc0d7dba0c0a62..4da67b65cecfa68e536efe7a6905cc0757df0417 100644 (file)
@@ -225,9 +225,9 @@ interrupts.
 
 The following control flow is implemented (simplified excerpt)::
 
-    :c:func:`desc->irq_data.chip->irq_mask_ack`;
+    desc->irq_data.chip->irq_mask_ack();
     handle_irq_event(desc->action);
-    :c:func:`desc->irq_data.chip->irq_unmask`;
+    desc->irq_data.chip->irq_unmask();
 
 
 Default Fast EOI IRQ flow handler
@@ -239,7 +239,7 @@ which only need an EOI at the end of the handler.
 The following control flow is implemented (simplified excerpt)::
 
     handle_irq_event(desc->action);
-    :c:func:`desc->irq_data.chip->irq_eoi`;
+    desc->irq_data.chip->irq_eoi();
 
 
 Default Edge IRQ flow handler
@@ -251,15 +251,15 @@ interrupts.
 The following control flow is implemented (simplified excerpt)::
 
     if (desc->status & running) {
-        :c:func:`desc->irq_data.chip->irq_mask_ack`;
+        desc->irq_data.chip->irq_mask_ack();
         desc->status |= pending | masked;
         return;
     }
-    :c:func:`desc->irq_data.chip->irq_ack`;
+    desc->irq_data.chip->irq_ack();
     desc->status |= running;
     do {
         if (desc->status & masked)
-            :c:func:`desc->irq_data.chip->irq_unmask`;
+            desc->irq_data.chip->irq_unmask();
         desc->status &= ~pending;
         handle_irq_event(desc->action);
     } while (status & pending);
@@ -293,10 +293,10 @@ simplified version without locking.
 The following control flow is implemented (simplified excerpt)::
 
     if (desc->irq_data.chip->irq_ack)
-        :c:func:`desc->irq_data.chip->irq_ack`;
+        desc->irq_data.chip->irq_ack();
     handle_irq_event(desc->action);
     if (desc->irq_data.chip->irq_eoi)
-            :c:func:`desc->irq_data.chip->irq_eoi`;
+        desc->irq_data.chip->irq_eoi();
 
 
 EOI Edge IRQ flow handler
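
For reference, the flow handlers whose internals are excerpted above are normally chosen per interrupt line by the irqchip driver. A rough kernel-side sketch, in which the chip, its callbacks and every my_* identifier are hypothetical:

    #include <linux/irq.h>

    /* Hardware accesses are stubbed out; a real driver would poke registers. */
    static void my_mask(struct irq_data *d)   { /* mask the line */ }
    static void my_unmask(struct irq_data *d) { /* unmask the line */ }
    static void my_ack(struct irq_data *d)    { /* clear the pending bit */ }

    static struct irq_chip my_chip = {
            .name       = "my-chip",
            .irq_mask   = my_mask,
            .irq_unmask = my_unmask,
            .irq_ack    = my_ack,
    };

    static void my_setup_line(unsigned int irq, bool is_edge)
    {
            /*
             * handle_level_irq() runs the mask/ack flow and handle_edge_irq()
             * the ack-and-loop flow; both invoke the chip callbacks exactly
             * as in the simplified excerpts.
             */
            irq_set_chip_and_handler(irq, &my_chip,
                                     is_edge ? handle_edge_irq : handle_level_irq);
    }
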
index 1062ddba62c7608bb96f4211e2a6a0863f8a47c3..2ac3f9f2984531dda8a28ac9daaf3aa3d0ea8985 100644 (file)
@@ -177,18 +177,14 @@ Here is a sample module which implements a basic per cpu counter using
                     printk("Read : CPU %d, count %ld\n", cpu,
                             local_read(&per_cpu(counters, cpu)));
             }
-            del_timer(&test_timer);
-            test_timer.expires = jiffies + 1000;
-            add_timer(&test_timer);
+            mod_timer(&test_timer, jiffies + 1000);
     }
 
     static int __init test_init(void)
     {
             /* initialize the timer that will increment the counter */
-            init_timer(&test_timer);
-            test_timer.function = do_test_timer;
-            test_timer.expires = jiffies + 1;
-            add_timer(&test_timer);
+            timer_setup(&test_timer, do_test_timer, 0);
+            mod_timer(&test_timer, jiffies + 1);
 
             return 0;
     }
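
The same conversion, shown once as a self-contained sketch so the new API is visible in one place; struct my_dev and its members are invented for illustration. timer_setup() binds the callback, from_timer() recovers the enclosing object inside it, and a single mod_timer() call replaces the old del_timer()/add_timer() pair:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_dev {
            struct timer_list poll_timer;
            unsigned long count;
    };

    static void my_poll(struct timer_list *t)
    {
            /* from_timer() maps the timer back to its containing structure. */
            struct my_dev *dev = from_timer(dev, t, poll_timer);

            dev->count++;
            /* Re-arm one second from now; no del_timer()/add_timer() dance. */
            mod_timer(&dev->poll_timer, jiffies + HZ);
    }

    static void my_dev_start(struct my_dev *dev)
    {
            timer_setup(&dev->poll_timer, my_poll, 0);
            mod_timer(&dev->poll_timer, jiffies + 1);
    }
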
index 29801456c9ee9a3701481d8c57cc149df2c312d2..43b5a71a5a9dde70aeebc38510e5a478e1d8d742 100644 (file)
@@ -15,7 +15,7 @@ Required properties:
 
 Example:
 
-       ccn@0x2000000000 {
+       ccn@2000000000 {
                compatible = "arm,ccn-504";
                reg = <0x20 0x00000000 0 0x1000000>;
                interrupts = <0 181 4>;
index bb5727ae004ac287c6725d53ef6a4b90a9e41480..ecb360ed0e332b70a84cb139eba8fd16ca847145 100644 (file)
@@ -49,7 +49,7 @@ An interrupt consumer on an SoC using crossbar will use:
        interrupts = <GIC_SPI request_number interrupt_level>
 
 Example:
-       device_x@0x4a023000 {
+       device_x@4a023000 {
                /* Crossbar 8 used */
                interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                ...
index 866d93421eba20ba81fa66e905ecd58213636d95..f9632bacbd04aebd4ad68c46012777eb0c263204 100644 (file)
@@ -8,7 +8,7 @@ Required properties:
 - interrupts : Should contain MC General interrupt.
 
 Example:
-       memory-controller@0x7000f000 {
+       memory-controller@7000f000 {
                compatible = "nvidia,tegra20-mc";
                reg = <0x7000f000 0x024
                       0x7000f03c 0x3c4>;
index fb40da303d25c8f13093aafe43df83eaf2f88f47..aca94fe9416f009ef387e9f7b7880bc5b6b5127e 100644 (file)
@@ -17,7 +17,7 @@ Optional properties:
 - clock-output-names : From common clock binding.
 
 Example:
-       clock@0xff000000 {
+       clock@ff000000 {
                compatible = "adi,axi-clkgen";
                #clock-cells = <0>;
                reg = <0xff000000 0x1000>;
index 7a837d2182acf74e0104b4da227cc2a4715d7504..4acfc8f641b63c83e6ad9c5e2efb510b43022213 100644 (file)
@@ -23,7 +23,7 @@ Example:
                clocks = <&clk_osc>;
        };
 
-       aux: aux@0x7e215004 {
+       aux: aux@7e215004 {
                compatible = "brcm,bcm2835-aux";
                #clock-cells = <1>;
                reg = <0x7e215000 0x8>;
index bc61c952cb0b7221ccd47c099199d06598daf91e..17bb11365354d6d6126446874d562da2dfe7e45d 100644 (file)
@@ -24,7 +24,7 @@ tree sources.
 
 Example 1: An example of a clock controller node is listed below.
 
-       clock: clock-controller@0x10030000 {
+       clock: clock-controller@10030000 {
                compatible = "samsung,exynos4210-clock";
                reg = <0x10030000 0x20000>;
                #clock-cells = <1>;
index 536eacd1063f88ccf2e9ef0e0bdcf06137558cf8..aff266a12eeb71bf7cf05a8cbeecbe4390f21ef4 100644 (file)
@@ -22,7 +22,7 @@ tree sources.
 
 Example 1: An example of a clock controller node is listed below.
 
-       clock: clock-controller@0x10010000 {
+       clock: clock-controller@10010000 {
                compatible = "samsung,exynos5250-clock";
                reg = <0x10010000 0x30000>;
                #clock-cells = <1>;
index 4527de3ea205d176f0425f9512e6bf560a2d3ce3..c68b0d29b3d031636f827ef904a60a6794270275 100644 (file)
@@ -30,7 +30,7 @@ Example 1: An example of a clock controller node is listed below.
                #clock-cells = <0>;
        };
 
-       clock: clock-controller@0x10010000 {
+       clock: clock-controller@10010000 {
                compatible = "samsung,exynos5410-clock";
                reg = <0x10010000 0x30000>;
                #clock-cells = <1>;
index d54f42cf0440945396e039e3e37053bef04bbc9b..717a7b1531c78278e606c16f545627651dce2ccd 100644 (file)
@@ -23,7 +23,7 @@ tree sources.
 
 Example 1: An example of a clock controller node is listed below.
 
-       clock: clock-controller@0x10010000 {
+       clock: clock-controller@10010000 {
                compatible = "samsung,exynos5420-clock";
                reg = <0x10010000 0x30000>;
                #clock-cells = <1>;
index 5f7005f73058f74a1fe308814eb11f75d8100435..c7d227c31e95bd944f351dd977682d3337b4a2e5 100644 (file)
@@ -21,7 +21,7 @@ tree sources.
 
 Example: An example of a clock controller node is listed below.
 
-       clock: clock-controller@0x10010000 {
+       clock: clock-controller@10010000 {
                compatible = "samsung,exynos5440-clock";
                reg = <0x160000 0x10000>;
                #clock-cells = <1>;
index 3e6a81e99804435c003560e9f2145a6a5e6e74c9..c35cb6c4af4d9fd7e681de3c9d6a5bdbe629e034 100644 (file)
@@ -14,7 +14,7 @@ Required properties:
 
 Example:
 
-pllctrl: pll-controller@0x02310000 {
+pllctrl: pll-controller@02310000 {
        compatible = "ti,keystone-pllctrl", "syscon";
        reg = <0x02310000 0x200>;
 };
index e85ecb510d56daad83ffbbe3572a96dc110a43a3..5c91c9e4f1beb47f35feb64361f3de23bcc35389 100644 (file)
@@ -20,13 +20,13 @@ ID in its "clocks" phandle cell. See include/dt-bindings/clock/zx296702-clock.h
 for the full list of zx296702 clock IDs.
 
 
-topclk: topcrm@0x09800000 {
+topclk: topcrm@09800000 {
         compatible = "zte,zx296702-topcrm-clk";
         reg = <0x09800000 0x1000>;
         #clock-cells = <1>;
 };
 
-uart0: serial@0x09405000 {
+uart0: serial@09405000 {
         compatible = "zte,zx296702-uart";
         reg = <0x09405000 0x1000>;
         interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
index 7aef0eae58d43cce4a2d94aa6e20fc5c40343ffa..76aec8a3724d62120392be98cd093acd0f4015b5 100644 (file)
@@ -456,7 +456,7 @@ System ON/OFF key driver
       Definition: this is phandle to the register map node.
 
 EXAMPLE:
-       snvs-pwrkey@0x020cc000 {
+       snvs-pwrkey@020cc000 {
                compatible = "fsl,sec-v4.0-pwrkey";
                regmap = <&snvs>;
                interrupts = <0 4 0x4>
@@ -545,7 +545,7 @@ FULL EXAMPLE
                        interrupts = <93 2>;
                };
 
-               snvs-pwrkey@0x020cc000 {
+               snvs-pwrkey@020cc000 {
                        compatible = "fsl,sec-v4.0-pwrkey";
                        regmap = <&sec_mon>;
                        interrupts = <0 4 0x4>;
index 001dd63979a974802fe69b6e72b12fe79c7c1c6b..148191b0fc15864725da6907a0571b64337024d6 100644 (file)
@@ -9,7 +9,7 @@ Required properties:
 - clock-names : the name of clock used by the DFI, must be "pclk_ddr_mon";
 
 Example:
-       dfi: dfi@0xff630000 {
+       dfi: dfi@ff630000 {
                compatible = "rockchip,rk3399-dfi";
                reg = <0x00 0xff630000 0x00 0x4000>;
                rockchip,pmu = <&pmugrf>;
index 1a21202778ee354af7e5fc1f3d9ce65b0a7a964a..acb5a01321279e13d0c0c2780be8018b330152d8 100644 (file)
@@ -27,7 +27,7 @@ Optional properties:
 
 Example:
 
-       fb0: fb@0x00500000 {
+       fb0: fb@00500000 {
                compatible = "atmel,at91sam9g45-lcdc";
                reg = <0x00500000 0x1000>;
                interrupts = <23 3 0>;
@@ -41,7 +41,7 @@ Example:
 
 Example for fixed framebuffer memory:
 
-       fb0: fb@0x00500000 {
+       fb0: fb@00500000 {
                compatible = "atmel,at91sam9263-lcdc";
                reg = <0x00700000 0x1000 0x70000000 0x200000>;
                [...]
index 55492c264d1779539ffb838f3e2f020c07e45709..b3408cc57be6d398f2c206f5d599d7134004dd0b 100644 (file)
@@ -73,7 +73,7 @@ Hypervisor OS configuration:
                max-read-transactions = <31>;
                channel-reset-timeout-cycles = <0x500>;
 
-               hidma_24: dma-controller@0x5c050000 {
+               hidma_24: dma-controller@5c050000 {
                        compatible = "qcom,hidma-1.0";
                        reg = <0 0x5c050000 0x0 0x1000>,
                              <0 0x5c0b0000 0x0 0x1000>;
@@ -85,7 +85,7 @@ Hypervisor OS configuration:
 
 Guest OS configuration:
 
-       hidma_24: dma-controller@0x5c050000 {
+       hidma_24: dma-controller@5c050000 {
                compatible = "qcom,hidma-1.0";
                reg = <0 0x5c050000 0x0 0x1000>,
                      <0 0x5c0b0000 0x0 0x1000>;
index abec59f35fde949baf60210017407e45e8cffce6..0ab80f69e566ef7c66001486a7cd5acea5685df8 100644 (file)
@@ -13,7 +13,7 @@ Required properties:
 Example:
 
 Controller:
-       dma: dma-controller@0x09c00000{
+       dma: dma-controller@09c00000{
                compatible = "zte,zx296702-dma";
                reg = <0x09c00000 0x1000>;
                clocks = <&topclk ZX296702_DMA_ACLK>;
index 1d3447165c374f673aa9a717f94f2387cfd852f1..e823d90b802f7f8f293f9c0ba4f06533361824bd 100644 (file)
@@ -1,7 +1,12 @@
 EEPROMs (SPI) compatible with Atmel at25.
 
 Required properties:
-- compatible : "atmel,at25".
+- compatible : Should be "<vendor>,<type>", and generic value "atmel,at25".
+  Example "<vendor>,<type>" values:
+    "microchip,25lc040"
+    "st,m95m02"
+    "st,m95256"
+
 - reg : chip select number
 - spi-max-frequency : max spi frequency to use
 - pagesize : size of the eeprom page
@@ -13,7 +18,7 @@ Optional properties:
 - spi-cpol : SPI inverse clock polarity, as per spi-bus bindings.
 - read-only : this parameter-less property disables writes to the eeprom
 
-Obsolete legacy properties are can be used in place of "size", "pagesize",
+Obsolete legacy properties can be used in place of "size", "pagesize",
 "address-width", and "read-only":
 - at25,byte-len : total eeprom size in bytes
 - at25,addr-mode : addr-mode flags, as defined in include/linux/spi/eeprom.h
@@ -22,8 +27,8 @@ Obsolete legacy properties are can be used in place of "size", "pagesize",
 Additional compatible properties are also allowed.
 
 Example:
-       at25@0 {
-               compatible = "atmel,at25", "st,m95256";
+       eeprom@0 {
+               compatible = "st,m95256", "atmel,at25";
                reg = <0>
                spi-max-frequency = <5000000>;
                spi-cpha;
index 826a7208ca93a8f5cd31f4a94c96b36ce6c7dab1..146e554b3c6769cfe10238acc1e9b48d9a5e3d28 100644 (file)
@@ -30,7 +30,7 @@ Optional properties:
 
 Example:
 
-gpio_altr: gpio@0xff200000 {
+gpio_altr: gpio@ff200000 {
        compatible = "altr,pio-1.0";
        reg = <0xff200000 0x10>;
        interrupts = <0 45 4>;
index 7f57271df2bc96b41168d337c911511ccb726e33..0d0158728f897bd9fbeaa41e884955445ca1f062 100644 (file)
@@ -27,7 +27,7 @@ Required properties:
        ti,tca6424
        ti,tca9539
        ti,tca9554
-       onsemi,pca9654
+       onnn,pca9654
        exar,xra1202
 
 Optional properties:
index 07a250498fbb4cccb3210a418c572907d8310c43..f569db58f64a100b6c72e1053ce443b77ae102c2 100644 (file)
@@ -34,6 +34,10 @@ Required properties:
 
 - reg: I2C address
 
+Optional properties:
+- smbus-timeout-disable: When set, the smbus timeout function will be disabled.
+                        This is not supported on all chips.
+
 Example:
 
 temp-sensor@1a {
index 231e4cc4008cbcf0783c6ab438fa3eca1815c4fb..d4a082acf92f0f7b98cdd0702c47de3bbed30725 100644 (file)
@@ -18,7 +18,7 @@ Optional properties:
 Example
 
 / {
-       i2c4: i2c4@0x10054000 {
+       i2c4: i2c4@10054000 {
                compatible = "ingenic,jz4780-i2c";
                reg = <0x10054000 0x1000>;
 
index 54e7e70bcea52da324caf4f6c277573cca91f823..831dbee7a5c35abee4e2d64e5c2c30de653fbe7a 100644 (file)
@@ -10,7 +10,7 @@ Required properties:
 
 Example:
 
-hp03@0x77 {
+hp03@77 {
        compatible = "hoperf,hp03";
        reg = <0x77>;
        xclr-gpio = <&portc 0 0x0>;
index ca5a2c86480cff1ec8f68168a9f7c94c1a29ef01..56d835242af2d834f440e3d149011b38e5a7a65f 100644 (file)
@@ -15,7 +15,7 @@ Optional properties:
 Example:
 
        i2c@80110000 {
-               bu21013_tp@0x5c {
+               bu21013_tp@5c {
                        compatible = "rohm,bu21013_tp";
                        reg = <0x5c>;
                        touch-gpio = <&gpio2 20 0x4>;
index 560d8a727b8f84e951b227d8fcc838579ddc94b1..2f324464864658a80a13b1cdab0395282f280b8e 100644 (file)
@@ -155,7 +155,7 @@ Example:
                      <0x0 0xe112f000 0 0x02000>,
                      <0x0 0xe1140000 0 0x10000>,
                      <0x0 0xe1160000 0 0x10000>;
-               v2m0: v2m@0x8000 {
+               v2m0: v2m@8000 {
                        compatible = "arm,gic-v2m-frame";
                        msi-controller;
                        reg = <0x0 0x80000 0 0x1000>;
@@ -163,7 +163,7 @@ Example:
 
                ....
 
-               v2mN: v2m@0x9000 {
+               v2mN: v2m@9000 {
                        compatible = "arm,gic-v2m-frame";
                        msi-controller;
                        reg = <0x0 0x90000 0 0x1000>;
index 80994adab3928905de7c44bbc725a1f6f345ca74..42431f44697fb22cee723d176ef4ce9d1cdc4ebd 100644 (file)
@@ -71,7 +71,7 @@ Example 2:
         * An interrupt generating device that is wired to a Meta external
         * trigger block.
         */
-       uart1: uart@0x02004c00 {
+       uart1: uart@02004c00 {
                // Interrupt source '5' that is level-sensitive.
                // Note that there are only two cells as specified in the
                // interrupt parent's '#interrupt-cells' property.
index a691185503441ec2ed37e292c8412af12a116f8e..5dc2a55ad81143d86d8e781f147342ad6a21848e 100644 (file)
@@ -51,7 +51,7 @@ Example 1:
        /*
         * TZ1090 PDC block
         */
-       pdc: pdc@0x02006000 {
+       pdc: pdc@02006000 {
                // This is an interrupt controller node.
                interrupt-controller;
 
index 715a013ed4bdef1ada6217e5f5540df142d385ce..2ab0ea39867b516dd9462e8b71142ddf8d8a5f8d 100644 (file)
@@ -39,7 +39,7 @@ Example:
 
 The following is an example from the SPEAr320 SoC dtsi file.
 
-shirq: interrupt-controller@0xb3000000 {
+shirq: interrupt-controller@b3000000 {
        compatible = "st,spear320-shirq";
        reg = <0xb3000000 0x1000>;
        interrupts = <28 29 30 1>;
index c2619797ce0c92f06b41117940209f4c9e37ca1c..49cfc8c337c4644a5b7cd271660fe88b5ec28412 100644 (file)
@@ -14,7 +14,7 @@ Optional properties:
                        depends on the interrupt controller parent.
 
 Example:
-       mbox_tx: mailbox@0x100 {
+       mbox_tx: mailbox@100 {
                compatible = "altr,mailbox-1.0";
                reg = <0x100 0x8>;
                interrupt-parent = < &gic_0 >;
@@ -22,7 +22,7 @@ Example:
                #mbox-cells = <1>;
        };
 
-       mbox_rx: mailbox@0x200 {
+       mbox_rx: mailbox@200 {
                compatible = "altr,mailbox-1.0";
                reg = <0x200 0x8>;
                interrupt-parent = < &gic_0 >;
@@ -40,7 +40,7 @@ support only one channel).The equivalent "mbox-names" property value can be
 used to give a name to the communication channel to be used by the client user.
 
 Example:
-       mclient0: mclient0@0x400 {
+       mclient0: mclient0@400 {
                compatible = "client-1.0";
                reg = <0x400 0x10>;
                mbox-names = "mbox-tx", "mbox-rx";
index 0f3ee81d92c297022adcdbb1f9689f4b924ea143..9bcdf2087625c108a8180060f3dd9ebb4c111a46 100644 (file)
@@ -15,7 +15,7 @@ Optional properties:
 - brcm,use-bcm-hdr:  present if a BCM header precedes each frame.
 
 Example:
-       pdc0: iproc-pdc0@0x612c0000 {
+       pdc0: iproc-pdc0@612c0000 {
                compatible = "brcm,iproc-pdc-mbox";
                reg = <0 0x612c0000 0 0x445>;  /* PDC FS0 regs */
                interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
index 0d4fdaedc6f1e82aec78f8080b6c312104b3cb0e..bc963a6d305a8bbc0e0ca2a231eea89d3247087c 100644 (file)
@@ -17,7 +17,7 @@ Optional properties:
 
 Example:
 
-gsc_0:  gsc@0x13e00000 {
+gsc_0:  gsc@13e00000 {
        compatible = "samsung,exynos5250-gsc";
        reg = <0x13e00000 0x1000>;
        interrupts = <0 85 0>;
index 46c15c54175d87cca499ceb81c33fdc6560df5b0..2a615d84a682563d28499b9a1f26a5ca2abcd984 100644 (file)
@@ -68,7 +68,7 @@ vcodec_dec: vcodec@16000000 {
                   "vdec_bus_clk_src";
   };
 
-  vcodec_enc: vcodec@0x18002000 {
+  vcodec_enc: vcodec@18002000 {
     compatible = "mediatek,mt8173-vcodec-enc";
     reg = <0 0x18002000 0 0x1000>,    /*VENC_SYS*/
           <0 0x19002000 0 0x1000>;    /*VENC_LT_SYS*/
index 6e4ef8caf759e5d31179c00120eccec349f3f8cd..19357d0bbe6539b3fbb7c73a4adbe4509fc905de 100644 (file)
@@ -44,7 +44,7 @@ Device node example
               vin0 = &vin0;
        };
 
-        vin0: vin@0xe6ef0000 {
+        vin0: vin@e6ef0000 {
                 compatible = "renesas,vin-r8a7790", "renesas,rcar-gen2-vin";
                 clocks = <&mstp8_clks R8A7790_CLK_VIN0>;
                 reg = <0 0xe6ef0000 0 0x1000>;
index e4e15d8d752157909e03344ee04ce7621ba5b967..48c599dacbdf7baafbc2cc3a64c170a44ba95313 100644 (file)
@@ -138,7 +138,7 @@ Example:
                };
 
                /* MIPI CSI-2 bus IF sensor */
-               s5c73m3: sensor@0x1a {
+               s5c73m3: sensor@1a {
                        compatible = "samsung,s5c73m3";
                        reg = <0x1a>;
                        vddio-supply = <...>;
index 1ce4e46bcbb768a91ba16e27f19fa415769ee802..17a8e81ca0cc0ecd3749bc65a32abd7a4ce58bfb 100644 (file)
@@ -8,7 +8,7 @@ Bindings, specific for the sh_mobile_ceu_camera.c driver:
 
 Example:
 
-ceu0: ceu@0xfe910000 {
+ceu0: ceu@fe910000 {
        compatible = "renesas,sh-mobile-ceu";
        reg = <0xfe910000 0xa0>;
        interrupt-parent = <&intcs>;
index 3994b0143dd1ef37c9ce28462f686e51ccdadc4e..258b8dfddf48c1e218b5e3e1a8a41b70b4baeeec 100644 (file)
@@ -154,7 +154,7 @@ imx074 is linked to ceu0 through the MIPI CSI-2 receiver (csi2). ceu0 has a
 'port' node which may indicate that at any time only one of the following data
 pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
 
-       ceu0: ceu@0xfe910000 {
+       ceu0: ceu@fe910000 {
                compatible = "renesas,sh-mobile-ceu";
                reg = <0xfe910000 0xa0>;
                interrupts = <0x880>;
@@ -193,9 +193,9 @@ pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
                };
        };
 
-       i2c0: i2c@0xfff20000 {
+       i2c0: i2c@fff20000 {
                ...
-               ov772x_1: camera@0x21 {
+               ov772x_1: camera@21 {
                        compatible = "ovti,ov772x";
                        reg = <0x21>;
                        vddio-supply = <&regulator1>;
@@ -219,7 +219,7 @@ pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
                        };
                };
 
-               imx074: camera@0x1a {
+               imx074: camera@1a {
                        compatible = "sony,imx074";
                        reg = <0x1a>;
                        vddio-supply = <&regulator1>;
@@ -239,7 +239,7 @@ pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
                };
        };
 
-       csi2: csi2@0xffc90000 {
+       csi2: csi2@ffc90000 {
                compatible = "renesas,sh-mobile-csi2";
                reg = <0xffc90000 0x1000>;
                interrupts = <0x17a0>;
index fd823d6091b29514e0db29a3b73b1c47edbf064f..152eeccbde1ccd7f3e7686867404733c3f0f8a29 100644 (file)
@@ -46,7 +46,7 @@ Optional properties:
 
 Example:
 
-emif1: emif@0x4c000000 {
+emif1: emif@4c000000 {
        compatible      = "ti,emif-4d";
        ti,hwmods       = "emif2";
        phy-type        = <1>;
index 20963c76b4bcbd30d8d5081ddab600d1c78ef39a..71a1f5963936b855d2ec7fc195e754cf5ad6ec02 100644 (file)
@@ -13,7 +13,7 @@ Required properties:
 
 Example:
 
-devctrl: device-state-control@0x02620000 {
+devctrl: device-state-control@02620000 {
        compatible = "ti,keystone-devctrl", "syscon";
        reg = <0x02620000 0x1000>;
 };
index 6c9f176f35717acf1a2fd3dcd7ca2cee86daeb88..05b47232ed9ed647a97df99e7b4c175352748fb6 100644 (file)
@@ -9,7 +9,7 @@ Required properties:
 - reg : Location and size of bounce buffer
 
 Example:
-       smc@0x3404c000 {
+       smc@3404c000 {
                compatible = "brcm,bcm11351-smc", "brcm,kona-smc";
                reg = <0x3404c000 0x400>; //1 KiB in SRAM
        };
index aaba2483b4ff8c79f34030b28b3f546c03084032..7f5dd83f5bd95afd1b696e186681caae75c268b1 100644 (file)
@@ -12,7 +12,7 @@ Refer to clocks/clock-bindings.txt for generic clock consumer properties.
 
 Example:
 
-sdio2: sdio@0x3f1a0000 {
+sdio2: sdio@3f1a0000 {
        compatible = "brcm,kona-sdhci";
        reg = <0x3f1a0000 0x10000>;
        clocks = <&sdio3_clk>;
index 954561d09a8e6acca1cdfcd433c07da29027c1ca..fa90d253dc7ea00f79de2b7cb99477a87f1682b7 100644 (file)
@@ -24,7 +24,7 @@ Optional properties:
 
 Example:
 
-sdhci0: sdhci@0x18041000 {
+sdhci0: sdhci@18041000 {
        compatible = "brcm,sdhci-iproc-cygnus";
        reg = <0x18041000 0x100>;
        interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
index 3a4ac401e6f93a9d8ce3becd4d75410f95f1e50d..19f5508a75696b722624c550700ee74f72b2362f 100644 (file)
@@ -55,7 +55,7 @@ Examples:
 
 [hwmod populated DMA resources]
 
-       mmc1: mmc@0x4809c000 {
+       mmc1: mmc@4809c000 {
                compatible = "ti,omap4-hsmmc";
                reg = <0x4809c000 0x400>;
                ti,hwmods = "mmc1";
@@ -67,7 +67,7 @@ Examples:
 
 [generic DMA request binding]
 
-       mmc1: mmc@0x4809c000 {
+       mmc1: mmc@4809c000 {
                compatible = "ti,omap4-hsmmc";
                reg = <0x4809c000 0x400>;
                ti,hwmods = "mmc1";
index 131d3a74d0bd453f48c3f31e5487db4e3c71c6e1..c8567b40fe13a02abaee0ffd2f037d0a4282a950 100644 (file)
@@ -82,15 +82,15 @@ gpmc: gpmc@6e000000 {
                        label = "bootloader-nor";
                        reg = <0 0x40000>;
                };
-               partition@0x40000 {
+               partition@40000 {
                        label = "params-nor";
                        reg = <0x40000 0x40000>;
                };
-               partition@0x80000 {
+               partition@80000 {
                        label = "kernel-nor";
                        reg = <0x80000 0x200000>;
                };
-               partition@0x280000 {
+               partition@280000 {
                        label = "filesystem-nor";
                        reg = <0x240000 0x7d80000>;
                };
index 376fa2f50e6bc9b41052928037acd4b3a382d380..956bb046e599d576e3f881b2901e0d369a3c9802 100644 (file)
@@ -13,7 +13,6 @@ Required properties:
                  at25df321a
                  at25df641
                  at26df081a
-                 en25s64
                  mr25h128
                  mr25h256
                  mr25h10
@@ -33,7 +32,6 @@ Required properties:
                  s25fl008k
                  s25fl064k
                  sst25vf040b
-                 sst25wf040b
                  m25p40
                  m25p80
                  m25p16
index dbf9e054c11c0f3a68ba2783a042ca25e12d6b78..0431841de781334ce38aab9877ccd7a8d3698079 100644 (file)
@@ -131,7 +131,7 @@ Example:
                                read-only;
                                reg = <0x00000000 0x00400000>;
                        };
-                       android@0x00400000 {
+                       android@00400000 {
                                label = "android";
                                reg = <0x00400000 0x12c00000>;
                        };
index a706297998e9b48a5eb8924b0e622ae807532757..0e21df94a53ffa221e6262f4323dc2ff60068a00 100644 (file)
@@ -52,7 +52,7 @@ Optional properties:
 
 Example:
 
-       tse_sub_0_eth_tse_0: ethernet@0x1,00000000 {
+       tse_sub_0_eth_tse_0: ethernet@1,00000000 {
                compatible = "altr,tse-msgdma-1.0";
                reg =   <0x00000001 0x00000000 0x00000400>,
                        <0x00000001 0x00000460 0x00000020>,
@@ -90,7 +90,7 @@ Example:
                };
        };
 
-       tse_sub_1_eth_tse_0: ethernet@0x1,00001000 {
+       tse_sub_1_eth_tse_0: ethernet@1,00001000 {
                compatible = "altr,tse-msgdma-1.0";
                reg =   <0x00000001 0x00001000 0x00000400>,
                        <0x00000001 0x00001460 0x00000020>,
index 96a53f89aa6e2fa7f9a0d4c6c3b18d03a3b75994..e3e1603f256c1a55ce956a403fce855e7bf58e0e 100644 (file)
@@ -18,7 +18,7 @@ Example :
 This example shows these optional properties, plus other properties
 required for the TI Davinci MDIO driver.
 
-       davinci_mdio: ethernet@0x5c030000 {
+       davinci_mdio: ethernet@5c030000 {
                compatible = "ti,davinci_mdio";
                reg = <0x5c030000 0x1000>;
                #address-cells = <1>;
index b30d04b54ee94f93910421caca3badda8fcae01e..17d6819669c8ccee0186f4dc797be638a1828097 100644 (file)
@@ -28,7 +28,7 @@ Required properties:
 
 Example:
 
-gmii_to_sgmii_converter: phy@0x100000240 {
+gmii_to_sgmii_converter: phy@100000240 {
        compatible = "altr,gmii-to-sgmii-2.0";
        reg = <0x00000001 0x00000240 0x00000008>,
                <0x00000001 0x00000200 0x00000040>;
index d6d0a94cb3bbba6634a1c365df92670c84499a94..b95e831bcba3f9824bebf3ce75ee11c2a0b4f203 100644 (file)
@@ -36,7 +36,7 @@ Optional properties:
 
 Example:
 
-cpu@0x0 {
+cpu@0 {
        device_type = "cpu";
        compatible = "altr,nios2-1.0";
        reg = <0>;
index 495880193adc8c9e9403b5fda893df580718630c..a1dc9366a8fcac6683c99d4d1aeb961b836d44cc 100644 (file)
@@ -25,7 +25,7 @@ Optional properties:
 - bus-range:   PCI bus numbers covered
 
 Example
-       pcie_0: pcie@0xc00000000 {
+       pcie_0: pcie@c00000000 {
                compatible = "altr,pcie-root-port-1.0";
                reg = <0xc0000000 0x20000000>,
                        <0xff220000 0x00004000>;
index 7b1e48bf172b74ba51755409cf8510dc0a15fb6d..149d8f7f86b06ed943b7c0c1331d777a593db55f 100644 (file)
@@ -52,7 +52,7 @@ Additional required properties for imx7d-pcie:
 
 Example:
 
-       pcie@0x01000000 {
+       pcie@01000000 {
                compatible = "fsl,imx6q-pcie", "snps,dw-pcie";
                reg = <0x01ffc000 0x04000>,
                      <0x01f00000 0x80000>;
index bdb7ab39d2d7effafede0803adef4eeef1d9bf9a..7bf9df047a1ee4992ee7f8b9f8e3f742b26bc8a0 100644 (file)
@@ -21,7 +21,7 @@ Optional properties:
 - dma-coherent: Present if DMA operations are coherent.
 
 Hip05 Example (note that Hip06 is the same except compatible):
-       pcie@0xb0080000 {
+       pcie@b0080000 {
                compatible = "hisilicon,hip05-pcie", "snps,dw-pcie";
                reg = <0 0xb0080000 0 0x10000>, <0x220 0x00000000 0 0x2000>;
                reg-names = "rc_dbi", "config";
index cbc7847dbf6c59ffeb845f95682d9b9a427d81b8..c1ce5a0a652ecbbbb9e61b251b6d41eaad52433f 100644 (file)
@@ -45,7 +45,7 @@ Optional properties:
 - usb3_vbus-supply : regulator phandle for controller usb3 vbus
 
 Example:
-       usbphy: phy@0x01c13400 {
+       usbphy: phy@01c13400 {
                #phy-cells = <1>;
                compatible = "allwinner,sun4i-a10-usb-phy";
                /* phy base regs, phy1 pmu reg, phy2 pmu reg */
index 3600d5c6c4d7d5372f405fbd4c146e41dfa24071..3914529a3214b61b6dbc8c2fa6e49cd90bc8d6a5 100644 (file)
@@ -25,7 +25,7 @@ Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
 
 For example:
 
-       pinmux: pinmux@0x0301d0c8 {
+       pinmux: pinmux@0301d0c8 {
                compatible = "brcm,cygnus-pinmux";
                reg = <0x0301d0c8 0x1b0>;
 
index eecf028ff4854bbe994b6232a7f88890e759d3cf..bf9b07016c8730f29a199e100065d19e90e9200a 100644 (file)
@@ -96,14 +96,14 @@ For example, pinctrl might have subnodes like the following:
 
 For a specific board, if it wants to use sd1,
 it can add the following to its board-specific .dts file.
-sd1: sd@0x12340000 {
+sd1: sd@12340000 {
        pinctrl-names = "default";
        pinctrl-0 = <&sd1_pmx0>;
 }
 
 or
 
-sd1: sd@0x12340000 {
+sd1: sd@12340000 {
        pinctrl-names = "default";
        pinctrl-0 = <&sd1_pmx1>;
 }
index 5f55be59d914a33aa71f4f432237782fd45f46ad..f8420520e14bf6d48467585c8260455f12da1540 100644 (file)
@@ -41,7 +41,7 @@ For example, pinctrl might have subnodes like the following:
 
 For a specific board, if it wants to use uart2 without hardware flow control,
 it can add the following to its board-specific .dts file.
-uart2: uart@0xb0070000 {
+uart2: uart@b0070000 {
        pinctrl-names = "default";
        pinctrl-0 = <&uart2_noflow_pins_a>;
 }
index 4864e3a74de311ff02e6f3c54a04ae2fbba5ae2d..a01a3b8a23632a8707ff9e528c6c010500d8da9a 100644 (file)
@@ -136,7 +136,7 @@ Example for rk3188:
                #size-cells = <1>;
                ranges;
 
-               gpio0: gpio0@0x2000a000 {
+               gpio0: gpio0@2000a000 {
                        compatible = "rockchip,rk3188-gpio-bank0";
                        reg = <0x2000a000 0x100>;
                        interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
@@ -149,7 +149,7 @@ Example for rk3188:
                        #interrupt-cells = <2>;
                };
 
-               gpio1: gpio1@0x2003c000 {
+               gpio1: gpio1@2003c000 {
                        compatible = "rockchip,gpio-bank";
                        reg = <0x2003c000 0x100>;
                        interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
index 378f6dc8b8bd102b64cbf477aa203c41115d9b00..3cbf56ce66ea9871ad2e41068baaef6171ae7ca3 100644 (file)
@@ -107,7 +107,7 @@ regulators (twl_reg1 and twl_reg2),
                ...
        };
 
-       mmc: mmc@0x0 {
+       mmc: mmc@0 {
                ...
                ...
                vmmc-supply = <&twl_reg1>;
index 8adbab268ca3997f50c134081d2f84409d8d10e3..4f8d8fde0c1c2a0728ac3c331d694e7a1bdbd423 100644 (file)
@@ -12,7 +12,7 @@ Optional properties:
 
 Example:
 
-uart@0x4000c400 {
+uart@4000c400 {
        compatible = "energymicro,efm32-uart";
        reg = <0x4000c400 0x400>;
        interrupts = <15>;
index f311472990a7e5f081dad3cdba160e2d5edc7abc..75996b6111bb7d95f5f6effb28d27679fc1a5400 100644 (file)
@@ -14,7 +14,7 @@ Required properties:
 
 
 Example:
-       ps20: ps2@0x01c2a000 {
+       ps20: ps2@01c2a000 {
                compatible = "allwinner,sun4i-a10-ps2";
                reg = <0x01c2a000 0x400>;
                interrupts = <0 62 4>;
index 64c66a5644e7551c2637b4aebff3d81f12cdf48f..77cd42cc5f54897b60371af222d62f847e97326f 100644 (file)
@@ -220,7 +220,7 @@ qmss: qmss@2a40000 {
                #address-cells = <1>;
                #size-cells = <1>;
                ranges;
-               pdsp0@0x2a10000 {
+               pdsp0@2a10000 {
                        reg = <0x2a10000 0x1000>,
                              <0x2a0f000 0x100>,
                              <0x2a0c000 0x3c8>,
index 5875ca459ed11355feccc4c106e04e5df2a71ce3..4248b662deff04e4fbbbc3effa487e09fa140bee 100644 (file)
@@ -21,7 +21,7 @@ please check:
 
 Example:
 
-       i2s: i2s@0x77600000 {
+       i2s: i2s@77600000 {
                compatible = "adi,axi-i2s-1.00.a";
                reg = <0x77600000 0x1000>;
                clocks = <&clk 15>, <&audio_clock>;
index 4eb7997674a09006dfa0a991b634082ddc64974d..7b664e7cb4ae99dfae7f1beb52bba140f6ee1dd7 100644 (file)
@@ -20,7 +20,7 @@ please check:
 
 Example:
 
-       spdif: spdif@0x77400000 {
+       spdif: spdif@77400000 {
                compatible = "adi,axi-spdif-tx-1.00.a";
                reg = <0x77600000 0x1000>;
                clocks = <&clk 15>, <&audio_clock>;
index 1783f9ef093096a0f06c1daf8ba854bba91e397d..49a2e74fd9cb1489b145b1132f2f892209c8b448 100644 (file)
@@ -20,7 +20,7 @@ Optional properties:
 Example:
 
 &i2c {
-       ak4613: ak4613@0x10 {
+       ak4613: ak4613@10 {
                compatible = "asahi-kasei,ak4613";
                reg = <0x10>;
        };
index 340784db6808809eecf4ae0e310023550713e2c6..58e48ee9717547f2766a2c0a89fec19536431634 100644 (file)
@@ -17,7 +17,7 @@ Optional properties:
 Example 1:
 
 &i2c {
-       ak4648: ak4648@0x12 {
+       ak4648: ak4648@12 {
                compatible = "asahi-kasei,ak4642";
                reg = <0x12>;
        };
index 6c285235e64be59e0cf55bfee5b54920a58dea36..8b2b2704b574e248c426565514076e2a42aa0631 100644 (file)
@@ -10,7 +10,7 @@ Required properties:
 Example:
 
 &i2c {
-       max98371: max98371@0x31 {
+       max98371: max98371@31 {
                compatible = "maxim,max98371";
                reg = <0x31>;
        };
index 394cd4eb17ec47531edabc161f93506925775805..b8bd914ee697b1435f0ebb47e4d3062961c61859 100644 (file)
@@ -10,7 +10,7 @@ Required properties:
 Example:
 
 &i2c {
-       max9867: max9867@0x18 {
+       max9867: max9867@18 {
                compatible = "maxim,max9867";
                reg = <0x18>;
        };
index 0d0ab51105b01ece9c0dfbe6571e6d2086550f42..0cf0f819b8236e250fd0f0db1bc0dba879e9dcd4 100644 (file)
@@ -20,7 +20,7 @@ Required properties:
 
 Example:
 
-sh_fsi2: sh_fsi2@0xec230000 {
+sh_fsi2: sh_fsi2@ec230000 {
        compatible = "renesas,sh_fsi2";
        reg = <0xec230000 0x400>;
        interrupts = <0 146 0x4>;
index 0a1dc4e1815cb524faee4c228a14ddeeea626578..ec20c1271e929c17e8c24e736989cdbbd4ec9ce9 100644 (file)
@@ -33,7 +33,7 @@ Required properties on RK3288:
 
 Example for the rk3188 SPDIF controller:
 
-spdif: spdif@0x1011e000 {
+spdif: spdif@1011e000 {
        compatible = "rockchip,rk3188-spdif", "rockchip,rk3066-spdif";
        reg = <0x1011e000 0x2000>;
        interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
index 40068ec0e9a561db3128fe37d581b1a4499b5ddf..9c1ee52fed5bff6300b5863b0c3fc07aeceb5dc8 100644 (file)
@@ -51,7 +51,7 @@ Optional properties:
 
 Example:
 
-       sti_uni_player1: sti-uni-player@0x8D81000 {
+       sti_uni_player1: sti-uni-player@8D81000 {
                compatible = "st,stih407-uni-player-hdmi";
                #sound-dai-cells = <0>;
                st,syscfg = <&syscfg_core>;
@@ -63,7 +63,7 @@ Example:
                st,tdm-mode = <1>;
        };
 
-       sti_uni_player2: sti-uni-player@0x8D82000 {
+       sti_uni_player2: sti-uni-player@8D82000 {
                compatible = "st,stih407-uni-player-pcm-out";
                #sound-dai-cells = <0>;
                st,syscfg = <&syscfg_core>;
@@ -74,7 +74,7 @@ Example:
                dma-names = "tx";
        };
 
-       sti_uni_player3: sti-uni-player@0x8D85000 {
+       sti_uni_player3: sti-uni-player@8D85000 {
                compatible = "st,stih407-uni-player-spdif";
                #sound-dai-cells = <0>;
                st,syscfg = <&syscfg_core>;
@@ -85,7 +85,7 @@ Example:
                dma-names = "tx";
        };
 
-       sti_uni_reader1: sti-uni-reader@0x8D84000 {
+       sti_uni_reader1: sti-uni-reader@8D84000 {
                compatible = "st,stih407-uni-reader-hdmi";
                #sound-dai-cells = <0>;
                st,syscfg = <&syscfg_core>;
index 2c1e6a43930baa88426aa4822f6ec79ee8bbc10e..e0fa61a1be0c864258574787d8fa496db2118916 100644 (file)
@@ -19,7 +19,7 @@ Recommended properties :
 
 Example:
 
-spi1: spi@0x4000c400 { /* USART1 */
+spi1: spi@4000c400 { /* USART1 */
        #address-cells = <1>;
        #size-cells = <0>;
        compatible = "energymicro,efm32-spi";
index 5bf13960f7f4a3c826c10b1e15a618df82d82403..e3c48b20b1a691b37d0b425251a257c682a38eca 100644 (file)
@@ -12,24 +12,30 @@ Required properties:
   - "fsl,imx53-ecspi" for SPI compatible with the one integrated on i.MX53 and later Soc
 - reg : Offset and length of the register set for the device
 - interrupts : Should contain CSPI/eCSPI interrupt
-- cs-gpios : Specifies the gpio pins to be used for chipselects.
 - clocks : Clock specifiers for both ipg and per clocks.
 - clock-names : Clock names should include both "ipg" and "per"
 See the clock consumer binding,
        Documentation/devicetree/bindings/clock/clock-bindings.txt
-- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
-               Documentation/devicetree/bindings/dma/dma.txt
-- dma-names: DMA request names should include "tx" and "rx" if present.
 
-Obsolete properties:
-- fsl,spi-num-chipselects : Contains the number of the chipselect
+Recommended properties:
+- cs-gpios : GPIOs to use as chip selects, see spi-bus.txt.  While the native chip
+select lines can be used, they appear to always generate a pulse between each
+word of a transfer.  Most use cases will require GPIO based chip selects to
+generate a valid transaction.
 
 Optional properties:
+- num-cs :  Number of total chip selects, see spi-bus.txt.
+- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
+Documentation/devicetree/bindings/dma/dma.txt.
+- dma-names: DMA request names, if present, should include "tx" and "rx".
 - fsl,spi-rdy-drctl: Integer, representing the value of DRCTL, the register
 controlling the SPI_READY handling. Note that to enable the DRCTL consideration,
 the SPI_READY mode-flag needs to be set too.
 Valid values are: 0 (disabled), 1 (edge-triggered burst) and 2 (level-triggered burst).
 
+Obsolete properties:
+- fsl,spi-num-chipselects : Contains the number of the chipselect
+
 Example:
 
 ecspi@70010000 {
index 88b6ea1ad2903cadd73650ff6dbfa0eb54aac39f..44d7cb2cb2c023ec8f1bbcaa90c26f7d406f9abf 100644 (file)
@@ -239,7 +239,7 @@ cpus {
         * A simple fan controller which supports 10 speeds of operation
         * (represented as 0-9).
         */
-       fan0: fan@0x48 {
+       fan0: fan@48 {
                ...
                cooling-min-level = <0>;
                cooling-max-level = <9>;
@@ -252,7 +252,7 @@ ocp {
        /*
         * A simple IC with a single bandgap temperature sensor.
         */
-       bandgap0: bandgap@0x0000ED00 {
+       bandgap0: bandgap@0000ED00 {
                ...
                #thermal-sensor-cells = <0>;
        };
@@ -330,7 +330,7 @@ ocp {
        /*
         * A simple IC with several bandgap temperature sensors.
         */
-       bandgap0: bandgap@0x0000ED00 {
+       bandgap0: bandgap@0000ED00 {
                ...
                #thermal-sensor-cells = <1>;
        };
@@ -447,7 +447,7 @@ one thermal zone.
        /*
         * A simple IC with a single temperature sensor.
         */
-       adc: sensor@0x49 {
+       adc: sensor@49 {
                ...
                #thermal-sensor-cells = <0>;
        };
@@ -458,7 +458,7 @@ ocp {
        /*
         * A simple IC with a single bandgap temperature sensor.
         */
-       bandgap0: bandgap@0x0000ED00 {
+       bandgap0: bandgap@0000ED00 {
                ...
                #thermal-sensor-cells = <0>;
        };
@@ -516,7 +516,7 @@ with many sensors and many cooling devices.
        /*
         * An IC with several temperature sensor.
         */
-       adc_dummy: sensor@0x50 {
+       adc_dummy: sensor@50 {
                ...
                #thermal-sensor-cells = <1>; /* sensor internal ID */
        };
index 1f69ee1a61ea25b776671be1becb8e8f63a29eb1..21d9a93db2e970cde487151f667c93063a8ae5f9 100644 (file)
@@ -32,7 +32,7 @@ Optional properties:
 
 Example:
 
-       ufsphy1: ufsphy@0xfc597000 {
+       ufsphy1: ufsphy@fc597000 {
                compatible = "qcom,ufs-phy-qmp-20nm";
                reg = <0xfc597000 0x800>;
                reg-names = "phy_mem";
@@ -53,7 +53,7 @@ Example:
                        <&clock_gcc clk_gcc_ufs_rx_cfg_clk>;
        };
 
-       ufshc@0xfc598000 {
+       ufshc@fc598000 {
                ...
                phys = <&ufsphy1>;
                phy-names = "ufsphy";
index a99ed5565b26c406ef6e643c99d6c1eb08fedeac..c39dfef76a183ad80eb27b36a820b3ee6c8107b6 100644 (file)
@@ -46,7 +46,7 @@ Note: If above properties are not defined it can be assumed that the supply
 regulators or clocks are always on.
 
 Example:
-       ufshc@0xfc598000 {
+       ufshc@fc598000 {
                compatible = "jedec,ufs-1.1";
                reg = <0xfc598000 0x800>;
                interrupts = <0 28 0>;
index 7a33f22c815a76ca534bf16f5d158e6d9ae07033..7a198a30408abf3f8a164189d60248f1dfcddc8b 100644 (file)
@@ -95,6 +95,7 @@ usb: usb@47400000 {
                reg = <0x47401300 0x100>;
                reg-names = "phy";
                ti,ctrl_mod = <&ctrl_mod>;
+               #phy-cells = <0>;
        };
 
        usb0: usb@47401000 {
@@ -141,6 +142,7 @@ usb: usb@47400000 {
                reg = <0x47401b00 0x100>;
                reg-names = "phy";
                ti,ctrl_mod = <&ctrl_mod>;
+               #phy-cells = <0>;
        };
 
        usb1: usb@47401800 {
index 9feea6c3e4d998cba087de49f608cba8ce445242..065c91d955ad159a1ff4cbd75d26ff86f0e52c0e 100644 (file)
@@ -22,7 +22,7 @@ See: Documentation/devicetree/bindings/reset/reset.txt
 
 Example:
 
-       ehci1: usb@0xfe203e00 {
+       ehci1: usb@fe203e00 {
                compatible = "st,st-ehci-300x";
                reg = <0xfe203e00 0x100>;
                interrupts = <GIC_SPI 148 IRQ_TYPE_NONE>;
index d893ec9131c3bdec4cd71e632b932ffd56dfbc9f..44c998c16f85bfbfa4814fffb8dc3d344c14a3de 100644 (file)
@@ -20,7 +20,7 @@ See: Documentation/devicetree/bindings/reset/reset.txt
 
 Example:
 
-       ohci0: usb@0xfe1ffc00 {
+       ohci0: usb@fe1ffc00 {
                compatible = "st,st-ohci-300x";
                reg = <0xfe1ffc00 0x100>;
                interrupts = <GIC_SPI 149 IRQ_TYPE_NONE>;
index e27763ef00497d8645463d084a04e3f07d0c939f..3c7a1cd13b1011f37753f67905561843ce984370 100644 (file)
@@ -6,7 +6,7 @@ reg: Register address and length for watchdog registers
 
 Example:
 
-watchdog: jz4740-watchdog@0x10002000 {
+watchdog: jz4740-watchdog@10002000 {
        compatible = "ingenic,jz4740-watchdog";
        reg = <0x10002000 0x100>;
 };
index 6245c99af8c1157176fd1d22681bc221917d3d7f..fbbb2831f29f8c7f50675238289478c98f472606 100644 (file)
@@ -185,7 +185,7 @@ The details of these operations are:
       void dma_async_issue_pending(struct dma_chan *chan);
 
 Further APIs:
-------------
+-------------
 
 1. Terminate APIs
 
index 01a6c8b7d3a7b59dcee9a11d828968e464af9c1c..ca85e5e78b2c439b3279fbe797b67eeaea84b141 100644 (file)
@@ -25,9 +25,6 @@ PCI Support Library
 .. kernel-doc:: drivers/pci/irq.c
    :export:
 
-.. kernel-doc:: drivers/pci/htirq.c
-   :export:
-
 .. kernel-doc:: drivers/pci/probe.c
    :export:
 
index 8caa60734647f70a777b8a568ba9f2dd99182fb8..e6a5f4912b6d4a4910ed1d2fc494fff304ab47ff 100644 (file)
@@ -156,6 +156,40 @@ handle it in two different ways:
    root of the overlay.  Finally the directory is moved to the new
    location.
 
+There are several ways to tune the "redirect_dir" feature.
+
+Kernel config options:
+
+- OVERLAY_FS_REDIRECT_DIR:
+    If this is enabled, then redirect_dir is turned on by default.
+- OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW:
+    If this is enabled, then redirects are always followed by default. Enabling
+    this results in a less secure configuration.  Enable this option only when
+    worried about backward compatibility with kernels that have the redirect_dir
+    feature and follow redirects even if turned off.
+
+Module options (can also be changed through /sys/module/overlay/parameters/*):
+
+- "redirect_dir=BOOL":
+    See OVERLAY_FS_REDIRECT_DIR kernel config option above.
+- "redirect_always_follow=BOOL":
+    See OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW kernel config option above.
+- "redirect_max=NUM":
+    The maximum number of bytes in an absolute redirect (default is 256).
+
+Mount options:
+
+- "redirect_dir=on":
+    Redirects are enabled.
+- "redirect_dir=follow":
+    Redirects are not created, but followed.
+- "redirect_dir=off":
+    Redirects are not created and only followed if "redirect_always_follow"
+    feature is enabled in the kernel/module config.
+- "redirect_dir=nofollow":
+    Redirects are not created and not followed (equivalent to "redirect_dir=off"
+    if "redirect_always_follow" feature is not enabled).
+
 Non-directories
 ---------------
 
index c61a99f7c8bbeefa2d96ef6b9fe3ff5c67e135fb..a12c74ce27734eb78a54b77923e648aaceb03430 100644 (file)
@@ -41,7 +41,7 @@ Getting and Building Xen and Dom0
 
  5. make initrd for Dom0/DomU
     # make -C linux-2.6.18-xen.hg ARCH=ia64 modules_install \
-      O=$(/bin/pwd)/build-linux-2.6.18-xen_ia64
+      O=$(pwd)/build-linux-2.6.18-xen_ia64
     # mkinitrd -f /boot/efi/efi/redhat/initrd-2.6.18.8-xen.img \
       2.6.18.8-xen --builtin mptspi --builtin mptbase \
       --builtin mptscsih --builtin uhci-hcd --builtin ohci-hcd \
diff --git a/Documentation/locking/crossrelease.txt b/Documentation/locking/crossrelease.txt
deleted file mode 100644 (file)
index bdf1423..0000000
+++ /dev/null
@@ -1,874 +0,0 @@
-Crossrelease
-============
-
-Started by Byungchul Park <byungchul.park@lge.com>
-
-Contents:
-
- (*) Background
-
-     - What causes deadlock
-     - How lockdep works
-
- (*) Limitation
-
-     - Limit lockdep
-     - Pros from the limitation
-     - Cons from the limitation
-     - Relax the limitation
-
- (*) Crossrelease
-
-     - Introduce crossrelease
-     - Introduce commit
-
- (*) Implementation
-
-     - Data structures
-     - How crossrelease works
-
- (*) Optimizations
-
-     - Avoid duplication
-     - Lockless for hot paths
-
- (*) APPENDIX A: What lockdep does to work aggresively
-
- (*) APPENDIX B: How to avoid adding false dependencies
-
-
-==========
-Background
-==========
-
-What causes deadlock
---------------------
-
-A deadlock occurs when a context is waiting for an event to happen,
-which is impossible because another (or the) context who can trigger the
-event is also waiting for another (or the) event to happen, which is
-also impossible due to the same reason.
-
-For example:
-
-   A context going to trigger event C is waiting for event A to happen.
-   A context going to trigger event A is waiting for event B to happen.
-   A context going to trigger event B is waiting for event C to happen.
-
-A deadlock occurs when these three wait operations run at the same time,
-because event C cannot be triggered if event A does not happen, which in
-turn cannot be triggered if event B does not happen, which in turn
-cannot be triggered if event C does not happen. After all, no event can
-be triggered since any of them never meets its condition to wake up.
-
-A dependency might exist between two waiters and a deadlock might happen
-due to an incorrect releationship between dependencies. Thus, we must
-define what a dependency is first. A dependency exists between them if:
-
-   1. There are two waiters waiting for each event at a given time.
-   2. The only way to wake up each waiter is to trigger its event.
-   3. Whether one can be woken up depends on whether the other can.
-
-Each wait in the example creates its dependency like:
-
-   Event C depends on event A.
-   Event A depends on event B.
-   Event B depends on event C.
-
-   NOTE: Precisely speaking, a dependency is one between whether a
-   waiter for an event can be woken up and whether another waiter for
-   another event can be woken up. However from now on, we will describe
-   a dependency as if it's one between an event and another event for
-   simplicity.
-
-And they form circular dependencies like:
-
-    -> C -> A -> B -
-   /                \
-   \                /
-    ----------------
-
-   where 'A -> B' means that event A depends on event B.
-
-Such circular dependencies lead to a deadlock since no waiter can meet
-its condition to wake up as described.
-
-CONCLUSION
-
-Circular dependencies cause a deadlock.
-
-
-How lockdep works
------------------
-
-Lockdep tries to detect a deadlock by checking dependencies created by
-lock operations, acquire and release. Waiting for a lock corresponds to
-waiting for an event, and releasing a lock corresponds to triggering an
-event in the previous section.
-
-In short, lockdep does:
-
-   1. Detect a new dependency.
-   2. Add the dependency into a global graph.
-   3. Check if that makes dependencies circular.
-   4. Report a deadlock or its possibility if so.
-
-For example, consider a graph built by lockdep that looks like:
-
-   A -> B -
-           \
-            -> E
-           /
-   C -> D -
-
-   where A, B,..., E are different lock classes.
-
-Lockdep will add a dependency into the graph on detection of a new
-dependency. For example, it will add a dependency 'E -> C' when a new
-dependency between lock E and lock C is detected. Then the graph will be:
-
-       A -> B -
-               \
-                -> E -
-               /      \
-    -> C -> D -        \
-   /                   /
-   \                  /
-    ------------------
-
-   where A, B,..., E are different lock classes.
-
-This graph contains a subgraph which demonstrates circular dependencies:
-
-                -> E -
-               /      \
-    -> C -> D -        \
-   /                   /
-   \                  /
-    ------------------
-
-   where C, D and E are different lock classes.
-
-This is the condition under which a deadlock might occur. Lockdep
-reports it on detection after adding a new dependency. This is the way
-how lockdep works.
-
-CONCLUSION
-
-Lockdep detects a deadlock or its possibility by checking if circular
-dependencies were created after adding each new dependency.
-
-
-==========
-Limitation
-==========
-
-Limit lockdep
--------------
-
-Limiting lockdep to work on only typical locks e.g. spin locks and
-mutexes, which are released within the acquire context, the
-implementation becomes simple but its capacity for detection becomes
-limited. Let's check pros and cons in next section.
-
-
-Pros from the limitation
-------------------------
-
-Given the limitation, when acquiring a lock, locks in a held_locks
-cannot be released if the context cannot acquire it so has to wait to
-acquire it, which means all waiters for the locks in the held_locks are
-stuck. It's an exact case to create dependencies between each lock in
-the held_locks and the lock to acquire.
-
-For example:
-
-   CONTEXT X
-   ---------
-   acquire A
-   acquire B /* Add a dependency 'A -> B' */
-   release B
-   release A
-
-   where A and B are different lock classes.
-
-When acquiring lock A, the held_locks of CONTEXT X is empty thus no
-dependency is added. But when acquiring lock B, lockdep detects and adds
-a new dependency 'A -> B' between lock A in the held_locks and lock B.
-They can be simply added whenever acquiring each lock.
-
-And data required by lockdep exists in a local structure, held_locks
-embedded in task_struct. Forcing to access the data within the context,
-lockdep can avoid racy problems without explicit locks while handling
-the local data.
-
-Lastly, lockdep only needs to keep locks currently being held, to build
-a dependency graph. However, relaxing the limitation, it needs to keep
-even locks already released, because a decision whether they created
-dependencies might be long-deferred.
-
-To sum up, we can expect several advantages from the limitation:
-
-   1. Lockdep can easily identify a dependency when acquiring a lock.
-   2. Races are avoidable while accessing local locks in a held_locks.
-   3. Lockdep only needs to keep locks currently being held.
-
-CONCLUSION
-
-Given the limitation, the implementation becomes simple and efficient.
-
-
-Cons from the limitation
-------------------------
-
-Given the limitation, lockdep is applicable only to typical locks. For
-example, page locks for page access or completions for synchronization
-cannot work with lockdep.
-
-Can we detect deadlocks below, under the limitation?
-
-Example 1:
-
-   CONTEXT X      CONTEXT Y       CONTEXT Z
-   ---------      ---------       ----------
-                  mutex_lock A
-   lock_page B
-                  lock_page B
-                                  mutex_lock A /* DEADLOCK */
-                                  unlock_page B held by X
-                  unlock_page B
-                  mutex_unlock A
-                                  mutex_unlock A
-
-   where A and B are different lock classes.
-
-No, we cannot.
-
-Example 2:
-
-   CONTEXT X              CONTEXT Y
-   ---------              ---------
-                          mutex_lock A
-   mutex_lock A
-                          wait_for_complete B /* DEADLOCK */
-   complete B
-                          mutex_unlock A
-   mutex_unlock A
-
-   where A is a lock class and B is a completion variable.
-
-No, we cannot.
-
-CONCLUSION
-
-Given the limitation, lockdep cannot detect a deadlock or its
-possibility caused by page locks or completions.
-
-
-Relax the limitation
---------------------
-
-Under the limitation, things to create dependencies are limited to
-typical locks. However, synchronization primitives like page locks and
-completions, which are allowed to be released in any context, also
-create dependencies and can cause a deadlock. So lockdep should track
-these locks to do a better job. We have to relax the limitation for
-these locks to work with lockdep.
-
-Detecting dependencies is very important for lockdep to work because
-adding a dependency means adding an opportunity to check whether it
-causes a deadlock. The more lockdep adds dependencies, the more it
-thoroughly works. Thus Lockdep has to do its best to detect and add as
-many true dependencies into a graph as possible.
-
-For example, considering only typical locks, lockdep builds a graph like:
-
-   A -> B -
-           \
-            -> E
-           /
-   C -> D -
-
-   where A, B,..., E are different lock classes.
-
-On the other hand, under the relaxation, additional dependencies might
-be created and added. Assuming additional 'FX -> C' and 'E -> GX' are
-added thanks to the relaxation, the graph will be:
-
-         A -> B -
-                 \
-                  -> E -> GX
-                 /
-   FX -> C -> D -
-
-   where A, B,..., E, FX and GX are different lock classes, and a suffix
-   'X' is added on non-typical locks.
-
-The latter graph gives us more chances to check circular dependencies
-than the former. However, it might suffer performance degradation since
-relaxing the limitation, with which design and implementation of lockdep
-can be efficient, might introduce inefficiency inevitably. So lockdep
-should provide two options, strong detection and efficient detection.
-
-Choosing efficient detection:
-
-   Lockdep works with only locks restricted to be released within the
-   acquire context. However, lockdep works efficiently.
-
-Choosing strong detection:
-
-   Lockdep works with all synchronization primitives. However, lockdep
-   suffers performance degradation.
-
-CONCLUSION
-
-Relaxing the limitation, lockdep can add additional dependencies giving
-additional opportunities to check circular dependencies.
-
-
-============
-Crossrelease
-============
-
-Introduce crossrelease
-----------------------
-
-In order to allow lockdep to handle additional dependencies by what
-might be released in any context, namely 'crosslock', we have to be able
-to identify those created by crosslocks. The proposed 'crossrelease'
-feature provoides a way to do that.
-
-Crossrelease feature has to do:
-
-   1. Identify dependencies created by crosslocks.
-   2. Add the dependencies into a dependency graph.
-
-That's all. Once a meaningful dependency is added into graph, then
-lockdep would work with the graph as it did. The most important thing
-crossrelease feature has to do is to correctly identify and add true
-dependencies into the global graph.
-
-A dependency e.g. 'A -> B' can be identified only in the A's release
-context because a decision required to identify the dependency can be
-made only in the release context. That is to decide whether A can be
-released so that a waiter for A can be woken up. It cannot be made in
-other than the A's release context.
-
-It's no matter for typical locks because each acquire context is same as
-its release context, thus lockdep can decide whether a lock can be
-released in the acquire context. However for crosslocks, lockdep cannot
-make the decision in the acquire context but has to wait until the
-release context is identified.
-
-Therefore, deadlocks by crosslocks cannot be detected just when it
-happens, because those cannot be identified until the crosslocks are
-released. However, deadlock possibilities can be detected and it's very
-worth. See 'APPENDIX A' section to check why.
-
-CONCLUSION
-
-Using crossrelease feature, lockdep can work with what might be released
-in any context, namely crosslock.
-
-
-Introduce commit
-----------------
-
-Since crossrelease defers the work adding true dependencies of
-crosslocks until they are actually released, crossrelease has to queue
-all acquisitions which might create dependencies with the crosslocks.
-Then it identifies dependencies using the queued data in batches at a
-proper time. We call it 'commit'.
-
-There are four types of dependencies:
-
-1. TT type: 'typical lock A -> typical lock B'
-
-   Just when acquiring B, lockdep can see it's in the A's release
-   context. So the dependency between A and B can be identified
-   immediately. Commit is unnecessary.
-
-2. TC type: 'typical lock A -> crosslock BX'
-
-   Just when acquiring BX, lockdep can see it's in the A's release
-   context. So the dependency between A and BX can be identified
-   immediately. Commit is unnecessary, too.
-
-3. CT type: 'crosslock AX -> typical lock B'
-
-   When acquiring B, lockdep cannot identify the dependency because
-   there's no way to know if it's in the AX's release context. It has
-   to wait until the decision can be made. Commit is necessary.
-
-4. CC type: 'crosslock AX -> crosslock BX'
-
-   When acquiring BX, lockdep cannot identify the dependency because
-   there's no way to know if it's in the AX's release context. It has
-   to wait until the decision can be made. Commit is necessary.
-   But, handling CC type is not implemented yet. It's a future work.
-
-Lockdep can work without commit for typical locks, but commit step is
-necessary once crosslocks are involved. Introducing commit, lockdep
-performs three steps. What lockdep does in each step is:
-
-1. Acquisition: For typical locks, lockdep does what it originally did
-   and queues the lock so that CT type dependencies can be checked using
-   it at the commit step. For crosslocks, it saves data which will be
-   used at the commit step and increases a reference count for it.
-
-2. Commit: No action is reauired for typical locks. For crosslocks,
-   lockdep adds CT type dependencies using the data saved at the
-   acquisition step.
-
-3. Release: No changes are required for typical locks. When a crosslock
-   is released, it decreases a reference count for it.
-
-CONCLUSION
-
-Crossrelease introduces commit step to handle dependencies of crosslocks
-in batches at a proper time.
-
-
-==============
-Implementation
-==============
-
-Data structures
----------------
-
-Crossrelease introduces two main data structures.
-
-1. hist_lock
-
-   This is an array embedded in task_struct, for keeping lock history so
-   that dependencies can be added using them at the commit step. Since
-   it's local data, it can be accessed locklessly in the owner context.
-   The array is filled at the acquisition step and consumed at the
-   commit step. And it's managed in circular manner.
-
-2. cross_lock
-
-   One per lockdep_map exists. This is for keeping data of crosslocks
-   and used at the commit step.
-
-
-How crossrelease works
-----------------------
-
-It's the key of how crossrelease works, to defer necessary works to an
-appropriate point in time and perform in at once at the commit step.
-Let's take a look with examples step by step, starting from how lockdep
-works without crossrelease for typical locks.
-
-   acquire A /* Push A onto held_locks */
-   acquire B /* Push B onto held_locks and add 'A -> B' */
-   acquire C /* Push C onto held_locks and add 'B -> C' */
-   release C /* Pop C from held_locks */
-   release B /* Pop B from held_locks */
-   release A /* Pop A from held_locks */
-
-   where A, B and C are different lock classes.
-
-   NOTE: This document assumes that readers already understand how
-   lockdep works without crossrelease thus omits details. But there's
-   one thing to note. Lockdep pretends to pop a lock from held_locks
-   when releasing it. But it's subtly different from the original pop
-   operation because lockdep allows other than the top to be poped.
-
-In this case, lockdep adds 'the top of held_locks -> the lock to acquire'
-dependency every time acquiring a lock.
-
-After adding 'A -> B', a dependency graph will be:
-
-   A -> B
-
-   where A and B are different lock classes.
-
-And after adding 'B -> C', the graph will be:
-
-   A -> B -> C
-
-   where A, B and C are different lock classes.
-
-Let's performs commit step even for typical locks to add dependencies.
-Of course, commit step is not necessary for them, however, it would work
-well because this is a more general way.
-
-   acquire A
-   /*
-    * Queue A into hist_locks
-    *
-    * In hist_locks: A
-    * In graph: Empty
-    */
-
-   acquire B
-   /*
-    * Queue B into hist_locks
-    *
-    * In hist_locks: A, B
-    * In graph: Empty
-    */
-
-   acquire C
-   /*
-    * Queue C into hist_locks
-    *
-    * In hist_locks: A, B, C
-    * In graph: Empty
-    */
-
-   commit C
-   /*
-    * Add 'C -> ?'
-    * Answer the following to decide '?'
-    * What has been queued since acquire C: Nothing
-    *
-    * In hist_locks: A, B, C
-    * In graph: Empty
-    */
-
-   release C
-
-   commit B
-   /*
-    * Add 'B -> ?'
-    * Answer the following to decide '?'
-    * What has been queued since acquire B: C
-    *
-    * In hist_locks: A, B, C
-    * In graph: 'B -> C'
-    */
-
-   release B
-
-   commit A
-   /*
-    * Add 'A -> ?'
-    * Answer the following to decide '?'
-    * What has been queued since acquire A: B, C
-    *
-    * In hist_locks: A, B, C
-    * In graph: 'B -> C', 'A -> B', 'A -> C'
-    */
-
-   release A
-
-   where A, B and C are different lock classes.
-
-In this case, dependencies are added at the commit step as described.
-
-After commits for A, B and C, the graph will be:
-
-   A -> B -> C
-
-   where A, B and C are different lock classes.
-
-   NOTE: A dependency 'A -> C' is optimized out.
-
-We can see the former graph built without commit step is same as the
-latter graph built using commit steps. Of course the former way leads to
-earlier finish for building the graph, which means we can detect a
-deadlock or its possibility sooner. So the former way would be prefered
-when possible. But we cannot avoid using the latter way for crosslocks.
-
-Let's look at how commit steps work for crosslocks. In this case, the
-commit step is performed only on crosslock AX as real. And it assumes
-that the AX release context is different from the AX acquire context.
-
-   BX RELEASE CONTEXT             BX ACQUIRE CONTEXT
-   ------------------             ------------------
-                                  acquire A
-                                  /*
-                                   * Push A onto held_locks
-                                   * Queue A into hist_locks
-                                   *
-                                   * In held_locks: A
-                                   * In hist_locks: A
-                                   * In graph: Empty
-                                   */
-
-                                  acquire BX
-                                  /*
-                                   * Add 'the top of held_locks -> BX'
-                                   *
-                                   * In held_locks: A
-                                   * In hist_locks: A
-                                   * In graph: 'A -> BX'
-                                   */
-
-   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-   It must be guaranteed that the following operations are seen after
-   acquiring BX globally. It can be done by things like barrier.
-   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-   acquire C
-   /*
-    * Push C onto held_locks
-    * Queue C into hist_locks
-    *
-    * In held_locks: C
-    * In hist_locks: C
-    * In graph: 'A -> BX'
-    */
-
-   release C
-   /*
-    * Pop C from held_locks
-    *
-    * In held_locks: Empty
-    * In hist_locks: C
-    * In graph: 'A -> BX'
-    */
-                                  acquire D
-                                  /*
-                                   * Push D onto held_locks
-                                   * Queue D into hist_locks
-                                   * Add 'the top of held_locks -> D'
-                                   *
-                                   * In held_locks: A, D
-                                   * In hist_locks: A, D
-                                   * In graph: 'A -> BX', 'A -> D'
-                                   */
-   acquire E
-   /*
-    * Push E onto held_locks
-    * Queue E into hist_locks
-    *
-    * In held_locks: E
-    * In hist_locks: C, E
-    * In graph: 'A -> BX', 'A -> D'
-    */
-
-   release E
-   /*
-    * Pop E from held_locks
-    *
-    * In held_locks: Empty
-    * In hist_locks: D, E
-    * In graph: 'A -> BX', 'A -> D'
-    */
-                                  release D
-                                  /*
-                                   * Pop D from held_locks
-                                   *
-                                   * In held_locks: A
-                                   * In hist_locks: A, D
-                                   * In graph: 'A -> BX', 'A -> D'
-                                   */
-   commit BX
-   /*
-    * Add 'BX -> ?'
-    * What has been queued since acquire BX: C, E
-    *
-    * In held_locks: Empty
-    * In hist_locks: D, E
-    * In graph: 'A -> BX', 'A -> D',
-    *           'BX -> C', 'BX -> E'
-    */
-
-   release BX
-   /*
-    * In held_locks: Empty
-    * In hist_locks: D, E
-    * In graph: 'A -> BX', 'A -> D',
-    *           'BX -> C', 'BX -> E'
-    */
-                                  release A
-                                  /*
-                                   * Pop A from held_locks
-                                   *
-                                   * In held_locks: Empty
-                                   * In hist_locks: A, D
-                                   * In graph: 'A -> BX', 'A -> D',
-                                   *           'BX -> C', 'BX -> E'
-                                   */
-
-   where A, BX, C,..., E are different lock classes, and a suffix 'X' is
-   added on crosslocks.
-
-Crossrelease considers all acquisitions after acqiuring BX are
-candidates which might create dependencies with BX. True dependencies
-will be determined when identifying the release context of BX. Meanwhile,
-all typical locks are queued so that they can be used at the commit step.
-And then two dependencies 'BX -> C' and 'BX -> E' are added at the
-commit step when identifying the release context.
-
-The final graph will be, with crossrelease:
-
-               -> C
-              /
-       -> BX -
-      /       \
-   A -         -> E
-      \
-       -> D
-
-   where A, BX, C,..., E are different lock classes, and a suffix 'X' is
-   added on crosslocks.
-
-However, the final graph will be, without crossrelease:
-
-   A -> D
-
-   where A and D are different lock classes.
-
-The former graph has three more dependencies, 'A -> BX', 'BX -> C' and
-'BX -> E' giving additional opportunities to check if they cause
-deadlocks. This way lockdep can detect a deadlock or its possibility
-caused by crosslocks.
-
-CONCLUSION
-
-We checked how crossrelease works with several examples.
-
-
-=============
-Optimizations
-=============
-
-Avoid duplication
------------------
-
-Crossrelease feature uses a cache like what lockdep already uses for
-dependency chains, but this time it's for caching CT type dependencies.
-Once that dependency is cached, the same will never be added again.
-
-
-Lockless for hot paths
-----------------------
-
-To keep all locks for later use at the commit step, crossrelease adopts
-a local array embedded in task_struct, which makes access to the data
-lockless by forcing it to happen only within the owner context. It's
-like how lockdep handles held_locks. Lockless implmentation is important
-since typical locks are very frequently acquired and released.
-
-
-=================================================
-APPENDIX A: What lockdep does to work aggresively
-=================================================
-
-A deadlock actually occurs when all wait operations creating circular
-dependencies run at the same time. Even though they don't, a potential
-deadlock exists if the problematic dependencies exist. Thus it's
-meaningful to detect not only an actual deadlock but also its potential
-possibility. The latter is rather valuable. When a deadlock occurs
-actually, we can identify what happens in the system by some means or
-other even without lockdep. However, there's no way to detect possiblity
-without lockdep unless the whole code is parsed in head. It's terrible.
-Lockdep does the both, and crossrelease only focuses on the latter.
-
-Whether or not a deadlock actually occurs depends on several factors.
-For example, what order contexts are switched in is a factor. Assuming
-circular dependencies exist, a deadlock would occur when contexts are
-switched so that all wait operations creating the dependencies run
-simultaneously. Thus to detect a deadlock possibility even in the case
-that it has not occured yet, lockdep should consider all possible
-combinations of dependencies, trying to:
-
-1. Use a global dependency graph.
-
-   Lockdep combines all dependencies into one global graph and uses them,
-   regardless of which context generates them or what order contexts are
-   switched in. Aggregated dependencies are only considered so they are
-   prone to be circular if a problem exists.
-
-2. Check dependencies between classes instead of instances.
-
-   What actually causes a deadlock are instances of lock. However,
-   lockdep checks dependencies between classes instead of instances.
-   This way lockdep can detect a deadlock which has not happened but
-   might happen in future by others but the same class.
-
-3. Assume all acquisitions lead to waiting.
-
-   Although locks might be acquired without waiting which is essential
-   to create dependencies, lockdep assumes all acquisitions lead to
-   waiting since it might be true some time or another.
-
-CONCLUSION
-
-Lockdep detects not only an actual deadlock but also its possibility,
-and the latter is more valuable.
-
-
-==================================================
-APPENDIX B: How to avoid adding false dependencies
-==================================================
-
-Remind what a dependency is. A dependency exists if:
-
-   1. There are two waiters waiting for each event at a given time.
-   2. The only way to wake up each waiter is to trigger its event.
-   3. Whether one can be woken up depends on whether the other can.
-
-For example:
-
-   acquire A
-   acquire B /* A dependency 'A -> B' exists */
-   release B
-   release A
-
-   where A and B are different lock classes.
-
-A depedency 'A -> B' exists since:
-
-   1. A waiter for A and a waiter for B might exist when acquiring B.
-   2. Only way to wake up each is to release what it waits for.
-   3. Whether the waiter for A can be woken up depends on whether the
-      other can. IOW, TASK X cannot release A if it fails to acquire B.
-
-For another example:
-
-   TASK X                         TASK Y
-   ------                         ------
-                                  acquire AX
-   acquire B /* A dependency 'AX -> B' exists */
-   release B
-   release AX held by Y
-
-   where AX and B are different lock classes, and a suffix 'X' is added
-   on crosslocks.
-
-Even in this case involving crosslocks, the same rule can be applied. A
-depedency 'AX -> B' exists since:
-
-   1. A waiter for AX and a waiter for B might exist when acquiring B.
-   2. Only way to wake up each is to release what it waits for.
-   3. Whether the waiter for AX can be woken up depends on whether the
-      other can. IOW, TASK X cannot release AX if it fails to acquire B.
-
-Let's take a look at more complicated example:
-
-   TASK X                         TASK Y
-   ------                         ------
-   acquire B
-   release B
-   fork Y
-                                  acquire AX
-   acquire C /* A dependency 'AX -> C' exists */
-   release C
-   release AX held by Y
-
-   where AX, B and C are different lock classes, and a suffix 'X' is
-   added on crosslocks.
-
-Does a dependency 'AX -> B' exist? Nope.
-
-Two waiters are essential to create a dependency. However, waiters for
-AX and B to create 'AX -> B' cannot exist at the same time in this
-example. Thus the dependency 'AX -> B' cannot be created.
-
-It would be ideal if the full set of true ones can be considered. But
-we can ensure nothing but what actually happened. Relying on what
-actually happens at runtime, we can anyway add only true ones, though
-they might be a subset of true ones. It's similar to how lockdep works
-for typical locks. There might be more true dependencies than what
-lockdep has detected in runtime. Lockdep has no choice but to rely on
-what actually happens. Crossrelease also relies on it.
-
-CONCLUSION
-
-Relying on what actually happens, lockdep can avoid adding false
-dependencies.
diff --git a/Documentation/media/dvb-drivers/frontends.rst b/Documentation/media/dvb-drivers/frontends.rst
new file mode 100644 (file)
index 0000000..1f5f579
--- /dev/null
@@ -0,0 +1,30 @@
+****************
+Frontend drivers
+****************
+
+Frontend attach headers
+***********************
+
+.. Keep it in alphabetical order
+
+.. kernel-doc:: drivers/media/dvb-frontends/a8293.h
+.. kernel-doc:: drivers/media/dvb-frontends/af9013.h
+.. kernel-doc:: drivers/media/dvb-frontends/ascot2e.h
+.. kernel-doc:: drivers/media/dvb-frontends/cxd2820r.h
+.. kernel-doc:: drivers/media/dvb-frontends/drxk.h
+.. kernel-doc:: drivers/media/dvb-frontends/dvb-pll.h
+.. kernel-doc:: drivers/media/dvb-frontends/helene.h
+.. kernel-doc:: drivers/media/dvb-frontends/horus3a.h
+.. kernel-doc:: drivers/media/dvb-frontends/ix2505v.h
+.. kernel-doc:: drivers/media/dvb-frontends/m88ds3103.h
+.. kernel-doc:: drivers/media/dvb-frontends/mb86a20s.h
+.. kernel-doc:: drivers/media/dvb-frontends/mn88472.h
+.. kernel-doc:: drivers/media/dvb-frontends/rtl2830.h
+.. kernel-doc:: drivers/media/dvb-frontends/rtl2832.h
+.. kernel-doc:: drivers/media/dvb-frontends/rtl2832_sdr.h
+.. kernel-doc:: drivers/media/dvb-frontends/stb6000.h
+.. kernel-doc:: drivers/media/dvb-frontends/tda10071.h
+.. kernel-doc:: drivers/media/dvb-frontends/tda826x.h
+.. kernel-doc:: drivers/media/dvb-frontends/zd1301_demod.h
+.. kernel-doc:: drivers/media/dvb-frontends/zl10036.h
+
index 376141143ae91d46a953b49bc9338fd7c9d9b203..314e127d82e310e42f97a96a21dbf566db6b43ab 100644 (file)
@@ -41,4 +41,5 @@ For more details see the file COPYING in the source distribution of Linux.
        technisat
        ttusb-dec
        udev
+       frontends
        contributors
index 361789df51ecf58d5083436792588d2f12faa7f2..aa0a776c817a7ceabb217c3eecc31ecdb32f59c7 100644 (file)
@@ -5,7 +5,6 @@ How to get printk format specifiers right
 :Author: Randy Dunlap <rdunlap@infradead.org>
 :Author: Andrew Murray <amurray@mpc-data.co.uk>
 
-
 Integer types
 =============
 
@@ -45,6 +44,18 @@ return from vsnprintf.
 Raw pointer value SHOULD be printed with %p. The kernel supports
 the following extended format specifiers for pointer types:
 
+Pointer Types
+=============
+
+Pointers printed without a specifier extension (i.e. unadorned %p) are
+hashed to give a unique identifier without leaking kernel addresses to user
+space. On 64-bit machines the first 32 bits are zeroed. If you _really_
+want the address, see %px below.
+
+::
+
+       %p      abcdef12 or 00000000abcdef12
+
 Symbols/Function Pointers
 =========================
 
@@ -85,18 +96,32 @@ Examples::
        printk("Faulted at %pS\n", (void *)regs->ip);
        printk(" %s%pB\n", (reliable ? "" : "? "), (void *)*stack);
 
-
 Kernel Pointers
 ===============
 
 ::
 
-       %pK     0x01234567 or 0x0123456789abcdef
+       %pK     01234567 or 0123456789abcdef
 
 For printing kernel pointers which should be hidden from unprivileged
 users. The behaviour of ``%pK`` depends on the ``kptr_restrict sysctl`` - see
 Documentation/sysctl/kernel.txt for more details.
 
+Unmodified Addresses
+====================
+
+::
+
+       %px     01234567 or 0123456789abcdef
+
+For printing pointers when you _really_ want to print the address. Please
+consider whether or not you are leaking sensitive information about the
+kernel layout in memory before printing pointers with %px. %px is
+functionally equivalent to %lx. %px is preferred to %lx because it is more
+uniquely grep'able. If, in the future, we need to modify the way the kernel
+handles printing pointers, it will be nice to be able to find the call
+sites.
+
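As a quick illustration of the behaviour described above (a hedged sketch, not part of the patch; the helper name and allocation are invented for the example), the same pointer printed with the three specifiers from kernel code might look like:

	#include <linux/printk.h>
	#include <linux/slab.h>

	static void pointer_format_demo(void)	/* hypothetical helper */
	{
		void *obj = kmalloc(16, GFP_KERNEL);

		if (!obj)
			return;

		pr_info("hashed: %p\n", obj);	/* unique id, real address hidden */
		pr_info("kptr:   %pK\n", obj);	/* shown or zeroed per kptr_restrict */
		pr_info("raw:    %px\n", obj);	/* actual address, print sparingly */

		kfree(obj);
	}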
 Struct Resources
 ================
 
index e89e36ec15a5bf6453db0b77031d23a28c705e07..8ce78f82ae23a96d4301a1fd238f94b8ab3a0eac 100644 (file)
@@ -204,10 +204,17 @@ CONTENTS
  It does so by decrementing the runtime of the executing task Ti at a pace equal
  to
 
-           dq = -max{ Ui, (1 - Uinact) } dt
+           dq = -max{ Ui / Umax, (1 - Uinact - Uextra) } dt
 
- where Uinact is the inactive utilization, computed as (this_bq - running_bw),
- and Ui is the bandwidth of task Ti.
+ where:
+
+  - Ui is the bandwidth of task Ti;
+  - Umax is the maximum reclaimable utilization (subjected to RT throttling
+    limits);
+  - Uinact is the (per runqueue) inactive utilization, computed as
+    (this_bq - running_bw);
+  - Uextra is the (per runqueue) extra reclaimable utilization
+    (subjected to RT throttling limits).
 
 
  Let's now see a trivial example of two deadline tasks with runtime equal
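To make the reclaiming rule above concrete, here is a worked instance with purely illustrative numbers (not taken from the patch). Assume Ui = 0.25, Umax = 0.95, Uinact = 0.10 and Uextra = 0.05; then

           dq = -max{ 0.25 / 0.95, (1 - 0.10 - 0.05) } dt
              = -max{ 0.263, 0.85 } dt
              = -0.85 dt

so the running task's runtime is depleted at 85% of wall-clock speed, letting it reclaim part of the bandwidth left unused by inactive tasks.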
index 6338400eed73d7e2f8b90807186e15d54cd16cde..2c31d9ee6776ea7fef125d8fd00e7f2ef8614e36 100644 (file)
@@ -319,12 +319,12 @@ struct Scsi_Host:
         instance. If the reference count reaches 0 then the given instance
         is freed
 
-The Scsi_device structure has had reference counting infrastructure added.
-This effectively spreads the ownership of struct Scsi_device instances
+The scsi_device structure has had reference counting infrastructure added.
+This effectively spreads the ownership of struct scsi_device instances
 across the various SCSI layers which use them. Previously such instances
 were exclusively owned by the mid level. See the access functions declared
 towards the end of include/scsi/scsi_device.h . If an LLD wants to keep
-a copy of a pointer to a Scsi_device instance it should use scsi_device_get()
+a copy of a pointer to a scsi_device instance it should use scsi_device_get()
 to bump its reference count. When it is finished with the pointer it can
 use scsi_device_put() to decrement its reference count (and potentially
 delete it).
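For reference, a minimal sketch of the get/put pattern an LLD would follow when caching a scsi_device pointer (assumed names; only scsi_device_get()/scsi_device_put() come from the text above):

	#include <linux/errno.h>
	#include <scsi/scsi_device.h>

	static struct scsi_device *cached_sdev;	/* hypothetical long-lived pointer */

	static int lld_cache_sdev(struct scsi_device *sdev)
	{
		if (scsi_device_get(sdev))	/* bump the refcount; non-zero means failure */
			return -ENXIO;
		cached_sdev = sdev;		/* safe to keep while the reference is held */
		return 0;
	}

	static void lld_drop_sdev(void)
	{
		scsi_device_put(cached_sdev);	/* drop the reference when done */
		cached_sdev = NULL;
	}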
index b920423f88cbcb19e04a7838359ef04dace8f431..5025ff9307e66c590a4a72795f9e2f75f0fbddc8 100644 (file)
@@ -158,10 +158,6 @@ Note: the minimum value allowed for dirty_bytes is two pages (in bytes); any
 value lower than this limit will be ignored and the old configuration will be
 retained.
 
-Note: the value of dirty_bytes also must be set greater than
-dirty_background_bytes or the amount of memory corresponding to
-dirty_background_ratio.
-
 ==============================================================
 
 dirty_expire_centisecs
@@ -181,9 +177,6 @@ generating disk writes will itself start writing out dirty data.
 
 The total available memory is not equal to total system memory.
 
-Note: dirty_ratio must be set greater than dirty_background_ratio or
-ratio corresponding to dirty_background_bytes.
-
 ==============================================================
 
 dirty_writeback_centisecs
index f670e4b9e7f33fdbb186d9b1c9186c6a2fbf7324..57d3ee9e4bde2a799715ca75871fd61b27858b0a 100644 (file)
@@ -2901,14 +2901,19 @@ userspace buffer and its length:
 
 struct kvm_s390_irq_state {
        __u64 buf;
-       __u32 flags;
+       __u32 flags;        /* will stay unused for compatibility reasons */
        __u32 len;
-       __u32 reserved[4];
+       __u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
 Userspace passes in the above struct and for each pending interrupt a
 struct kvm_s390_irq is copied to the provided buffer.
 
+The structure contains flags and reserved fields for future extensions. As
+the kernel never checked for flags == 0 and QEMU never pre-zeroed flags and
+reserved, these fields cannot be used in the future without breaking
+compatibility.
+
 If -ENOBUFS is returned the buffer provided was too small and userspace
 may retry with a bigger buffer.
 
@@ -2932,10 +2937,14 @@ containing a struct kvm_s390_irq_state:
 
 struct kvm_s390_irq_state {
        __u64 buf;
+       __u32 flags;        /* will stay unused for compatibility reasons */
        __u32 len;
-       __u32 pad;
+       __u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
+The restrictions for flags and reserved apply as well.
+(see KVM_S390_GET_IRQ_STATE)
+
 The userspace memory referenced by buf contains a struct kvm_s390_irq
 for each interrupt to be injected into the guest.
 If one of the interrupts could not be injected for some reason the
index 8d5830eab26a6ba03c310719eb2cc6b76e64fa5a..4f0c9fc403656d2956fc70334d7d2a0151215f92 100644 (file)
@@ -64,6 +64,8 @@ Groups:
     -EINVAL: Inconsistent restored data
     -EFAULT: Invalid guest ram access
     -EBUSY:  One or more VCPUS are running
+    -EACCES: The virtual ITS is backed by a physical GICv4 ITS, and the
+            state is not available
 
   KVM_DEV_ARM_VGIC_GRP_ITS_REGS
   Attributes:
index 89fff7d611ccb533a5c3d375bc94fecf3c2e0687..0b3a1148f9f0414558ed0537b4219225162ccc3a 100644 (file)
@@ -98,5 +98,25 @@ request is made for a page in an old zpool, it is uncompressed using its
 original compressor.  Once all pages are removed from an old zpool, the zpool
 and its compressor are freed.
 
+Some of the pages in zswap are same-value filled pages (i.e. the contents of
+the page are a single repeated value or pattern). Such pages, which include
+zero-filled pages, are handled differently: during a store operation, a page
+is first checked for being same-value filled. If it is, the compressed length
+of the page is set to zero and only the pattern or same-filled value is
+stored.
+
+The same-value filled page identification feature is enabled by default and can
+be disabled at boot time by setting the "same_filled_pages_enabled" attribute to
+0, e.g. zswap.same_filled_pages_enabled=0. It can also be enabled and disabled
+at runtime using the sysfs "same_filled_pages_enabled" attribute, e.g.
+
+echo 1 > /sys/module/zswap/parameters/same_filled_pages_enabled
+
+When same-filled page identification is disabled at runtime, zswap stops
+checking for same-value filled pages during store operations. However, pages
+already marked as same-value filled remain stored unchanged in zswap until
+they are either loaded or invalidated.
+
 A debugfs interface is provided for various statistics about pool size, number
-of pages stored, and various counters for the reasons pages are rejected.
+of pages stored, same-value filled pages, and various counters for the reasons
+pages are rejected.
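
For completeness, a small sketch of reading the runtime knob back from a program. Only the module-parameter path quoted above is assumed; a boolean parameter is expected to read back as "Y" or "N".

#include <stdio.h>

int main(void)
{
        char val[8] = "";
        FILE *f = fopen("/sys/module/zswap/parameters/same_filled_pages_enabled", "r");

        if (f) {
                if (fgets(val, sizeof(val), f))
                        printf("same-filled page handling enabled: %s", val);
                fclose(f);
        }
        return 0;
}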
index fa46dcb347bc1d2ac60901c4621bd3bad81de601..ecb0d2dadfb769a83b2a3a3f4a20ce03df0aea79 100644 (file)
@@ -1,5 +1,10 @@
-Memory Protection Keys for Userspace (PKU aka PKEYs) is a CPU feature
-which will be found on future Intel CPUs.
+Memory Protection Keys for Userspace (PKU aka PKEYs) is a feature
+which is found on Intel's Skylake "Scalable Processor" Server CPUs.
+It will be available in future non-server parts.
+
+For anyone wishing to test or use this feature, it is available in
+Amazon's EC2 C5 instances and is known to work there using an Ubuntu
+17.04 image.
 
 Memory Protection Keys provides a mechanism for enforcing page-based
 protections, but without requiring modification of the page tables
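
To make that concrete, a minimal userspace sketch of allocating a key and tagging a mapping with it. Assumptions: an x86 kernel with pkeys support, headers that expose the pkey_* syscall numbers, and PKEY_DISABLE_WRITE mirroring the uapi value; error handling is trimmed.

#define _GNU_SOURCE
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef PKEY_DISABLE_WRITE
#define PKEY_DISABLE_WRITE 0x2                  /* assumed to match the uapi value */
#endif

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

        /* Allocate a key whose initial access rights forbid writes. */
        int pkey = syscall(SYS_pkey_alloc, 0, PKEY_DISABLE_WRITE);
        if (pkey < 0)
                return 1;

        /* Tag the mapping with the key. */
        syscall(SYS_pkey_mprotect, p, page, PROT_READ | PROT_WRITE, pkey);

        syscall(SYS_pkey_free, pkey);
        return 0;
}

Once a mapping is tagged, the access rights for its key can be changed through the PKRU register without further page-table updates, which is the point of the mechanism.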
index aa71ab52fd76d1607b36f476dde8e0060f500070..a6e86e20761e143ca976d4f8170e60b603bb5ed9 100644 (file)
@@ -554,13 +554,13 @@ S:        Orphan
 F:     Documentation/filesystems/affs.txt
 F:     fs/affs/
 
-AFS FILESYSTEM & AF_RXRPC SOCKET DOMAIN
+AFS FILESYSTEM
 M:     David Howells <dhowells@redhat.com>
 L:     linux-afs@lists.infradead.org
 S:     Supported
 F:     fs/afs/
-F:     include/net/af_rxrpc.h
-F:     net/rxrpc/af_rxrpc.c
+F:     include/trace/events/afs.h
+F:     Documentation/filesystems/afs.txt
 W:     https://www.infradead.org/~dhowells/kafs/
 
 AGPGART DRIVER
@@ -859,7 +859,8 @@ F:  kernel/configs/android*
 ANDROID DRIVERS
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:     Arve Hjønnevåg <arve@android.com>
-M:     Riley Andrews <riandrews@android.com>
+M:     Todd Kjos <tkjos@android.com>
+M:     Martijn Coenen <maco@android.com>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
 L:     devel@driverdev.osuosl.org
 S:     Supported
@@ -2046,7 +2047,7 @@ F:        arch/arm/boot/dts/uniphier*
 F:     arch/arm/include/asm/hardware/cache-uniphier.h
 F:     arch/arm/mach-uniphier/
 F:     arch/arm/mm/cache-uniphier.c
-F:     arch/arm64/boot/dts/socionext/
+F:     arch/arm64/boot/dts/socionext/uniphier*
 F:     drivers/bus/uniphier-system-bus.c
 F:     drivers/clk/uniphier/
 F:     drivers/gpio/gpio-uniphier.c
@@ -5430,7 +5431,7 @@ F:        drivers/media/tuners/fc2580*
 
 FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
 M:     Johannes Thumshirn <jth@kernel.org>
-L:     fcoe-devel@open-fcoe.org
+L:     linux-scsi@vger.kernel.org
 W:     www.Open-FCoE.org
 S:     Supported
 F:     drivers/scsi/libfc/
@@ -6174,7 +6175,6 @@ M:        Jean Delvare <jdelvare@suse.com>
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-hwmon@vger.kernel.org
 W:     http://hwmon.wiki.kernel.org/
-T:     quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:     Maintained
 F:     Documentation/hwmon/
@@ -7767,6 +7767,7 @@ F:        security/keys/
 
 KGDB / KDB /debug_core
 M:     Jason Wessel <jason.wessel@windriver.com>
+M:     Daniel Thompson <daniel.thompson@linaro.org>
 W:     http://kgdb.wiki.kernel.org/
 L:     kgdb-bugreport@lists.sourceforge.net
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git
@@ -9331,9 +9332,9 @@ F:        drivers/gpu/drm/mxsfb/
 F:     Documentation/devicetree/bindings/display/mxsfb-drm.txt
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M:     Hyong-Youb Kim <hykim@myri.com>
+M:     Chris Lee <christopher.lee@cspi.com>
 L:     netdev@vger.kernel.org
-W:     https://www.myricom.com/support/downloads/myri10ge.html
+W:     https://www.cspi.com/ethernet-products/support/downloads/
 S:     Supported
 F:     drivers/net/ethernet/myricom/myri10ge/
 
@@ -11777,6 +11778,18 @@ T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-deve
 S:     Maintained
 F:     drivers/net/wireless/realtek/rtl8xxxu/
 
+RXRPC SOCKETS (AF_RXRPC)
+M:     David Howells <dhowells@redhat.com>
+L:     linux-afs@lists.infradead.org
+S:     Supported
+F:     net/rxrpc/
+F:     include/keys/rxrpc-type.h
+F:     include/net/af_rxrpc.h
+F:     include/trace/events/rxrpc.h
+F:     include/uapi/linux/rxrpc.h
+F:     Documentation/networking/rxrpc.txt
+W:     https://www.infradead.org/~dhowells/kafs/
+
 S3 SAVAGE FRAMEBUFFER DRIVER
 M:     Antonino Daplas <adaplas@gmail.com>
 L:     linux-fbdev@vger.kernel.org
@@ -12630,6 +12643,14 @@ S:     Maintained
 F:     drivers/ssb/
 F:     include/linux/ssb/
 
+SONY IMX274 SENSOR DRIVER
+M:     Leon Luo <leonl@leopardimaging.com>
+L:     linux-media@vger.kernel.org
+T:     git git://linuxtv.org/media_tree.git
+S:     Maintained
+F:     drivers/media/i2c/imx274.c
+F:     Documentation/devicetree/bindings/media/i2c/imx274.txt
+
 SONY MEMORYSTICK CARD SUPPORT
 M:     Alex Dubov <oakad@yahoo.com>
 W:     http://tifmxx.berlios.de/
@@ -13096,6 +13117,7 @@ F:      drivers/dma/dw/
 
 SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER
 M:     Jie Deng <jiedeng@synopsys.com>
+M:     Jose Abreu <Jose.Abreu@synopsys.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/synopsys/
@@ -13648,10 +13670,8 @@ F:     drivers/net/wireless/ti/
 F:     include/linux/wl12xx.h
 
 TILE ARCHITECTURE
-M:     Chris Metcalf <cmetcalf@mellanox.com>
 W:     http://www.mellanox.com/repository/solutions/tile-scm/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile.git
-S:     Supported
+S:     Orphan
 F:     arch/tile/
 F:     drivers/char/tile-srom.c
 F:     drivers/edac/tile_edac.c
index efb942ad0b556c9fc3eb4636f39a351425a8cedc..7e02f951b284187d5354c2b7bd39b0ef1bf5d903 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
-PATCHLEVEL = 14
+PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc4
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -132,7 +132,7 @@ ifneq ($(KBUILD_OUTPUT),)
 # check that the output directory actually exists
 saved-output := $(KBUILD_OUTPUT)
 KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \
-                                                               && /bin/pwd)
+                                                               && pwd)
 $(if $(KBUILD_OUTPUT),, \
      $(error failed to create output directory "$(saved-output)"))
 
@@ -474,6 +474,38 @@ ifneq ($(KBUILD_SRC),)
            $(srctree) $(objtree) $(VERSION) $(PATCHLEVEL)
 endif
 
+ifeq ($(cc-name),clang)
+ifneq ($(CROSS_COMPILE),)
+CLANG_TARGET   := --target=$(notdir $(CROSS_COMPILE:%-=%))
+GCC_TOOLCHAIN  := $(realpath $(dir $(shell which $(LD)))/..)
+endif
+ifneq ($(GCC_TOOLCHAIN),)
+CLANG_GCC_TC   := --gcc-toolchain=$(GCC_TOOLCHAIN)
+endif
+KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
+KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+# Quiet clang warning: comparison of unsigned expression < 0 is always false
+KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
+# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
+# source of a reference will be _MergedGlobals and not one of the whitelisted names.
+# See modpost pattern 2
+KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
+KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+else
+
+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+endif
+
 ifeq ($(config-targets),1)
 # ===========================================================================
 # *config targets only - make sure prerequisites are updated, and descend
@@ -684,38 +716,6 @@ ifdef CONFIG_CC_STACKPROTECTOR
 endif
 KBUILD_CFLAGS += $(stackp-flag)
 
-ifeq ($(cc-name),clang)
-ifneq ($(CROSS_COMPILE),)
-CLANG_TARGET   := --target=$(notdir $(CROSS_COMPILE:%-=%))
-GCC_TOOLCHAIN  := $(realpath $(dir $(shell which $(LD)))/..)
-endif
-ifneq ($(GCC_TOOLCHAIN),)
-CLANG_GCC_TC   := --gcc-toolchain=$(GCC_TOOLCHAIN)
-endif
-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
-KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
-# Quiet clang warning: comparison of unsigned expression < 0 is always false
-KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
-# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
-# source of a reference will be _MergedGlobals and not on of the whitelisted names.
-# See modpost pattern 2
-KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
-KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
-else
-
-# These warnings generated too much noise in a regular build.
-# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
-endif
-
 ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS  += -fno-omit-frame-pointer -fno-optimize-sibling-calls
 else
@@ -1009,7 +1009,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 
 PHONY += $(vmlinux-dirs)
 $(vmlinux-dirs): prepare scripts
-       $(Q)$(MAKE) $(build)=$@
+       $(Q)$(MAKE) $(build)=$@ need-builtin=1
 
 define filechk_kernel.release
        echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
@@ -1337,8 +1337,9 @@ package-dir       := scripts/package
        $(Q)$(MAKE) $(build)=$(package-dir) $@
 %pkg: include/config/kernel.release FORCE
        $(Q)$(MAKE) $(build)=$(package-dir) $@
-rpm: include/config/kernel.release FORCE
-       $(Q)$(MAKE) $(build)=$(package-dir) $@
+rpm: rpm-pkg
+       @echo "  WARNING: \"rpm\" target will be removed after Linux 4.18"
+       @echo "           Please use \"rpm-pkg\" instead."
 
 
 # Brief documentation of the typical targets used
@@ -1546,9 +1547,9 @@ clean: $(clean-dirs)
        $(call cmd,rmdirs)
        $(call cmd,rmfiles)
        @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
-               \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
+               \( -name '*.[aios]' -o -name '*.ko' -o -name '.*.cmd' \
                -o -name '*.ko.*' -o -name '*.dtb' -o -name '*.dtb.S' \
-               -o -name '*.dwo'  \
+               -o -name '*.dwo' -o -name '*.lst' \
                -o -name '*.su'  \
                -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
                -o -name '*.symtypes' -o -name 'modules.order' \
index b15bf6bc0e94f46f035e8781ffa921060341fe91..14a2e9af97e9992d87821e8f11276ecfef8e57cf 100644 (file)
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += bpf_perf_event.h
index 5da0aec8ce904d1bacc7a4811bcb5b17f3276fcf..438b10c44d732355888e856668fc09c5a9685e85 100644 (file)
@@ -65,9 +65,9 @@ srmcons_do_receive_chars(struct tty_port *port)
 }
 
 static void
-srmcons_receive_chars(unsigned long data)
+srmcons_receive_chars(struct timer_list *t)
 {
-       struct srmcons_private *srmconsp = (struct srmcons_private *)data;
+       struct srmcons_private *srmconsp = from_timer(srmconsp, t, timer);
        struct tty_port *port = &srmconsp->port;
        unsigned long flags;
        int incr = 10;
@@ -206,8 +206,7 @@ static const struct tty_operations srmcons_ops = {
 static int __init
 srmcons_init(void)
 {
-       setup_timer(&srmcons_singleton.timer, srmcons_receive_chars,
-                       (unsigned long)&srmcons_singleton);
+       timer_setup(&srmcons_singleton.timer, srmcons_receive_chars, 0);
        if (srm_is_registered_console) {
                struct tty_driver *driver;
                int err;
index 5c7adf100a582ba1d1a4c3d2760d1d7103e0eb0d..9d5fd00d9e91bf0caa066bc0597475723a014328 100644 (file)
@@ -39,7 +39,7 @@ config ARC
        select OF
        select OF_EARLY_FLATTREE
        select OF_RESERVED_MEM
-       select PERF_USE_VMALLOC
+       select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
        select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_KERNEL_GZIP
index e114000a84f56c9e07ddd3a2e623c4dfeb3df6a2..74d070cd3c13a723fef1a2b3cd91cd2919392762 100644 (file)
                ranges = <0x00000000 0x0 0xe0000000 0x10000000>;
                interrupt-parent = <&mb_intc>;
 
+               creg_rst: reset-controller@11220 {
+                       compatible = "snps,axs10x-reset";
+                       #reset-cells = <1>;
+                       reg = <0x11220 0x4>;
+               };
+
                i2sclk: i2sclk@100a0 {
                        compatible = "snps,axs10x-i2s-pll-clock";
                        reg = <0x100a0 0x10>;
@@ -73,6 +79,8 @@
                        clocks = <&apbclk>;
                        clock-names = "stmmaceth";
                        max-speed = <100>;
+                       resets = <&creg_rst 5>;
+                       reset-names = "stmmaceth";
                };
 
                ehci@0x40000 {
index b1c56d35f2a938e59c9677454499aeecb1da4f7e..49bfbd879caa6ffa08553e9b0f49b542739bb95b 100644 (file)
 
 /* Build Configuration Registers */
 #define ARC_REG_AUX_DCCM       0x18    /* DCCM Base Addr ARCv2 */
+#define ARC_REG_ERP_CTRL       0x3F    /* ARCv2 Error protection control */
 #define ARC_REG_DCCM_BASE_BUILD        0x61    /* DCCM Base Addr ARCompact */
 #define ARC_REG_CRC_BCR                0x62
 #define ARC_REG_VECBASE_BCR    0x68
 #define ARC_REG_PERIBASE_BCR   0x69
 #define ARC_REG_FP_BCR         0x6B    /* ARCompact: Single-Precision FPU */
 #define ARC_REG_DPFP_BCR       0x6C    /* ARCompact: Dbl Precision FPU */
+#define ARC_REG_ERP_BUILD      0xc7    /* ARCv2 Error protection Build: ECC/Parity */
 #define ARC_REG_FP_V2_BCR      0xc8    /* ARCv2 FPU */
 #define ARC_REG_SLC_BCR                0xce
 #define ARC_REG_DCCM_BUILD     0x74    /* DCCM size (common) */
 #define ARC_REG_D_UNCACH_BCR   0x6A
 #define ARC_REG_BPU_BCR                0xc0
 #define ARC_REG_ISA_CFG_BCR    0xc1
+#define ARC_REG_LPB_BUILD      0xE9    /* ARCv2 Loop Buffer Build */
 #define ARC_REG_RTT_BCR                0xF2
 #define ARC_REG_IRQ_BCR                0xF3
+#define ARC_REG_MICRO_ARCH_BCR 0xF9    /* ARCv2 Product revision */
 #define ARC_REG_SMART_BCR      0xFF
 #define ARC_REG_CLUSTER_BCR    0xcf
 #define ARC_REG_AUX_ICCM       0x208   /* ICCM Base Addr (ARCv2) */
+#define ARC_REG_LPB_CTRL       0x488   /* ARCv2 Loop Buffer control */
 
 /* Common for ARCompact and ARCv2 status register */
 #define ARC_REG_STATUS32       0x0A
@@ -229,6 +234,32 @@ struct bcr_bpu_arcv2 {
 #endif
 };
 
+/* Error Protection Build: ECC/Parity */
+struct bcr_erp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad3:5, mmu:3, pad2:4, ic:3, dc:3, pad1:6, ver:8;
+#else
+       unsigned int ver:8, pad1:6, dc:3, ic:3, pad2:4, mmu:3, pad3:5;
+#endif
+};
+
+/* Error Protection Control */
+struct ctl_erp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad2:27, mpd:1, pad1:2, dpd:1, dpi:1;
+#else
+       unsigned int dpi:1, dpd:1, pad1:2, mpd:1, pad2:27;
+#endif
+};
+
+struct bcr_lpb {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad:16, entries:8, ver:8;
+#else
+       unsigned int ver:8, entries:8, pad:16;
+#endif
+};
+
 struct bcr_generic {
 #ifdef CONFIG_CPU_BIG_ENDIAN
        unsigned int info:24, ver:8;
@@ -270,7 +301,7 @@ struct cpuinfo_arc {
        struct cpuinfo_arc_ccm iccm, dccm;
        struct {
                unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
-                            fpu_sp:1, fpu_dp:1, dual_iss_enb:1, dual_iss_exist:1, pad2:4,
+                            fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
                             debug:1, ap:1, smart:1, rtt:1, pad3:4,
                             timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
        } extn;
index fa6d0ff4ff894be699616eefad77cd6a2347a3b7..170b5db64afeb7f74fb8279887a7cb75e0205c7b 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 2ce24e74f87956af0bba1d9430200196353bdff8..8aec462d90fbe8f0aa88847272d02004a863f2db 100644 (file)
@@ -336,15 +336,12 @@ static int arc_pmu_add(struct perf_event *event, int flags)
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
 
-       if (__test_and_set_bit(idx, pmu_cpu->used_mask)) {
-               idx = find_first_zero_bit(pmu_cpu->used_mask,
-                                         arc_pmu->n_counters);
-               if (idx == arc_pmu->n_counters)
-                       return -EAGAIN;
-
-               __set_bit(idx, pmu_cpu->used_mask);
-               hwc->idx = idx;
-       }
+       idx = ffz(pmu_cpu->used_mask[0]);
+       if (idx == arc_pmu->n_counters)
+               return -EAGAIN;
+
+       __set_bit(idx, pmu_cpu->used_mask);
+       hwc->idx = idx;
 
        write_aux_reg(ARC_REG_PCT_INDEX, idx);
 
@@ -377,21 +374,22 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
        struct perf_sample_data data;
        struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
        struct pt_regs *regs;
-       int active_ints;
+       unsigned int active_ints;
        int idx;
 
        arc_pmu_disable(&arc_pmu->pmu);
 
        active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT);
+       if (!active_ints)
+               goto done;
 
        regs = get_irq_regs();
 
-       for (idx = 0; idx < arc_pmu->n_counters; idx++) {
-               struct perf_event *event = pmu_cpu->act_counter[idx];
+       do {
+               struct perf_event *event;
                struct hw_perf_event *hwc;
 
-               if (!(active_ints & (1 << idx)))
-                       continue;
+               idx = __ffs(active_ints);
 
                /* Reset interrupt flag by writing of 1 */
                write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
@@ -404,19 +402,22 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
                write_aux_reg(ARC_REG_PCT_INT_CTRL,
                        read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
 
+               event = pmu_cpu->act_counter[idx];
                hwc = &event->hw;
 
                WARN_ON_ONCE(hwc->idx != idx);
 
                arc_perf_event_update(event, &event->hw, event->hw.idx);
                perf_sample_data_init(&data, 0, hwc->last_period);
-               if (!arc_pmu_event_set_period(event))
-                       continue;
+               if (arc_pmu_event_set_period(event)) {
+                       if (perf_event_overflow(event, &data, regs))
+                               arc_pmu_stop(event, 0);
+               }
 
-               if (perf_event_overflow(event, &data, regs))
-                       arc_pmu_stop(event, 0);
-       }
+               active_ints &= ~(1U << idx);
+       } while (active_ints);
 
+done:
        arc_pmu_enable(&arc_pmu->pmu);
 
        return IRQ_HANDLED;
@@ -461,6 +462,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
                pr_err("This core does not have performance counters!\n");
                return -ENODEV;
        }
+       BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
        BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);
 
        READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
index fb83844daeea3550aacd27de2525711a6aa5fddc..7ef7d9a8ff89231811e73a241a3a3c6d248e720b 100644 (file)
@@ -199,8 +199,10 @@ static void read_arc_build_cfg_regs(void)
                        unsigned int exec_ctrl;
 
                        READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
-                       cpu->extn.dual_iss_exist = 1;
-                       cpu->extn.dual_iss_enb = exec_ctrl & 1;
+                       cpu->extn.dual_enb = exec_ctrl & 1;
+
+                       /* dual issue always present for this core */
+                       cpu->extn.dual = 1;
                }
        }
 
@@ -253,7 +255,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                       cpu_id, cpu->name, cpu->details,
                       is_isa_arcompact() ? "ARCompact" : "ARCv2",
                       IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
-                      IS_AVAIL3(cpu->extn.dual_iss_exist, cpu->extn.dual_iss_enb, " Dual-Issue"));
+                      IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));
 
        n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
                       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
@@ -293,11 +295,26 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 
        if (cpu->bpu.ver)
                n += scnprintf(buf + n, len - n,
-                             "BPU\t\t: %s%s match, cache:%d, Predict Table:%d\n",
+                             "BPU\t\t: %s%s match, cache:%d, Predict Table:%d",
                              IS_AVAIL1(cpu->bpu.full, "full"),
                              IS_AVAIL1(!cpu->bpu.full, "partial"),
                              cpu->bpu.num_cache, cpu->bpu.num_pred);
 
+       if (is_isa_arcv2()) {
+               struct bcr_lpb lpb;
+
+               READ_BCR(ARC_REG_LPB_BUILD, lpb);
+               if (lpb.ver) {
+                       unsigned int ctl;
+                       ctl = read_aux_reg(ARC_REG_LPB_CTRL);
+
+                       n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
+                               lpb.entries,
+                               IS_DISABLED_RUN(!ctl));
+               }
+       }
+
+       n += scnprintf(buf + n, len - n, "\n");
        return buf;
 }
 
@@ -326,6 +343,24 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
                               cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
                               cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
 
+       if (is_isa_arcv2()) {
+
+               /* Error Protection: ECC/Parity */
+               struct bcr_erp erp;
+               READ_BCR(ARC_REG_ERP_BUILD, erp);
+
+               if (erp.ver) {
+                       struct  ctl_erp ctl;
+                       READ_BCR(ARC_REG_ERP_CTRL, ctl);
+
+                       /* inverted bits: 0 means enabled */
+                       n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
+                               IS_AVAIL3(erp.ic,  !ctl.dpi, "IC "),
+                               IS_AVAIL3(erp.dc,  !ctl.dpd, "DC "),
+                               IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
+               }
+       }
+
        n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
                        EF_ARC_OSABI_CURRENT >> 8,
                        EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
index 8ceefbf72fb0f8b0d1ce9ca1516bb7edd487cc9a..4097764fea23499a828a559f70a62a29daba14c8 100644 (file)
@@ -762,21 +762,23 @@ void read_decode_mmu_bcr(void)
        tmp = read_aux_reg(ARC_REG_MMU_BCR);
        mmu->ver = (tmp >> 24);
 
-       if (mmu->ver <= 2) {
-               mmu2 = (struct bcr_mmu_1_2 *)&tmp;
-               mmu->pg_sz_k = TO_KB(0x2000);
-               mmu->sets = 1 << mmu2->sets;
-               mmu->ways = 1 << mmu2->ways;
-               mmu->u_dtlb = mmu2->u_dtlb;
-               mmu->u_itlb = mmu2->u_itlb;
-       } else if (mmu->ver == 3) {
-               mmu3 = (struct bcr_mmu_3 *)&tmp;
-               mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
-               mmu->sets = 1 << mmu3->sets;
-               mmu->ways = 1 << mmu3->ways;
-               mmu->u_dtlb = mmu3->u_dtlb;
-               mmu->u_itlb = mmu3->u_itlb;
-               mmu->sasid = mmu3->sasid;
+       if (is_isa_arcompact()) {
+               if (mmu->ver <= 2) {
+                       mmu2 = (struct bcr_mmu_1_2 *)&tmp;
+                       mmu->pg_sz_k = TO_KB(0x2000);
+                       mmu->sets = 1 << mmu2->sets;
+                       mmu->ways = 1 << mmu2->ways;
+                       mmu->u_dtlb = mmu2->u_dtlb;
+                       mmu->u_itlb = mmu2->u_itlb;
+               } else {
+                       mmu3 = (struct bcr_mmu_3 *)&tmp;
+                       mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
+                       mmu->sets = 1 << mmu3->sets;
+                       mmu->ways = 1 << mmu3->ways;
+                       mmu->u_dtlb = mmu3->u_dtlb;
+                       mmu->u_itlb = mmu3->u_itlb;
+                       mmu->sasid = mmu3->sasid;
+               }
        } else {
                mmu4 = (struct bcr_mmu_4 *)&tmp;
                mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
@@ -818,8 +820,9 @@ int pae40_exist_but_not_enab(void)
 
 void arc_mmu_init(void)
 {
-       char str[256];
        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+       char str[256];
+       int compat = 0;
 
        pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
 
@@ -834,15 +837,21 @@ void arc_mmu_init(void)
         */
        BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
 
-       /* For efficiency sake, kernel is compile time built for a MMU ver
-        * This must match the hardware it is running on.
-        * Linux built for MMU V2, if run on MMU V1 will break down because V1
-        *  hardware doesn't understand cmds such as WriteNI, or IVUTLB
-        * On the other hand, Linux built for V1 if run on MMU V2 will do
-        *   un-needed workarounds to prevent memcpy thrashing.
-        * Similarly MMU V3 has new features which won't work on older MMU
+       /*
+        * Ensure that MMU features assumed by kernel exist in hardware.
+        * For older ARC700 cpus, it has to be an exact match, since the MMU
+        * revisions were not backwards compatible (MMUv3 TLB layout changed,
+        * so even if a kernel for v2 didn't use any new cmds of v3, it would
+        * still not work).
+        * For HS cpus, MMUv4 was baseline and v5 is backwards compatible
+        * (will run older software).
         */
-       if (mmu->ver != CONFIG_ARC_MMU_VER) {
+       if (is_isa_arcompact() && mmu->ver == CONFIG_ARC_MMU_VER)
+               compat = 1;
+       else if (is_isa_arcv2() && mmu->ver >= CONFIG_ARC_MMU_VER)
+               compat = 1;
+
+       if (!compat) {
                panic("MMU ver %d doesn't match kernel built for %d...\n",
                      mmu->ver, CONFIG_ARC_MMU_VER);
        }
index c54d1ae57fe0b3feffd4578387f11593c45308e4..4e0df7b7a248147af495948e95488a67e0b78499 100644 (file)
@@ -14,6 +14,8 @@ menuconfig ARC_PLAT_AXS10X
        select MIGHT_HAVE_PCI
        select GENERIC_IRQ_CHIP
        select GPIOLIB
+       select AXS101 if ISA_ARCOMPACT
+       select AXS103 if ISA_ARCV2
        help
          Support for the ARC AXS10x Software Development Platforms.
 
index cf14ebc36916a2a0eca39728c0cc0f315d58bbeb..f1ac6790da5fe64782b59b720bf3ea80d999bff1 100644 (file)
@@ -111,13 +111,6 @@ static void __init axs10x_early_init(void)
 
        axs10x_enable_gpio_intc_wire();
 
-       /*
-        * Reset ethernet IP core.
-        * TODO: get rid of this quirk after axs10x reset driver (or simple
-        * reset driver) will be available in upstream.
-        */
-       iowrite32((1 << 5), (void __iomem *) CREG_MB_SW_RESET);
-
        scnprintf(mb, 32, "MainBoard v%d", mb_rev);
        axs10x_print_board_ver(CREG_MB_VER, mb);
 }
index 12b8c8f8ec0708f24e5f953857044d45660d618f..17685e19aed8e4792699613eb2df95525b1aca47 100644 (file)
@@ -1776,9 +1776,9 @@ config DEBUG_UART_8250_FLOW_CONTROL
        default y if ARCH_EBSA110 || DEBUG_FOOTBRIDGE_COM1 || DEBUG_GEMINI || ARCH_RPC
 
 config DEBUG_UNCOMPRESS
-       bool
+       bool "Enable decompressor debugging via DEBUG_LL output"
        depends on ARCH_MULTIPLATFORM || PLAT_SAMSUNG || ARM_SINGLE_ARMV7M
-       default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
+       depends on DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
                     (!DEBUG_TEGRA_UART || !ZBOOT_ROM) && \
                     !DEBUG_BRCMSTB_UART
        help
index 1b81c4e757727d2abcc798fefaaa55e0b0fecfec..d37f95025807708a9c486d1e6f6365f9960286b5 100644 (file)
                                reg-names = "phy";
                                status = "disabled";
                                ti,ctrl_mod = <&usb_ctrl_mod>;
+                               #phy-cells = <0>;
                        };
 
                        usb0: usb@47401000 {
                                reg-names = "phy";
                                status = "disabled";
                                ti,ctrl_mod = <&usb_ctrl_mod>;
+                               #phy-cells = <0>;
                        };
 
                        usb1: usb@47401800 {
index e5b061469bf88a234ef2367ecb94a143afca793f..4714a59fd86df05a8715ee400f18e6a71041eac7 100644 (file)
                        reg = <0x48038000 0x2000>,
                              <0x46000000 0x400000>;
                        reg-names = "mpu", "dat";
-                       interrupts = <80>, <81>;
+                       interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 8 2>,
                        reg = <0x4803C000 0x2000>,
                              <0x46400000 0x400000>;
                        reg-names = "mpu", "dat";
-                       interrupts = <82>, <83>;
+                       interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 10 2>,
index 9e92d480576b04ae89f3e5aefc7c57f3a522070c..3b9a94c274a7b01ed29ead402a4b3538c122138d 100644 (file)
        status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&spi0_pins>;
-       dmas = <&edma 16
-               &edma 17>;
+       dmas = <&edma 16 0
+               &edma 17 0>;
        dma-names = "tx0", "rx0";
 
        flash: w25q64cvzpig@0 {
index 25d2d720dc0e2cacc98b402e609e0fe78eb01acb..678aa023335d885279d6ba3bcbc67f774c3b1408 100644 (file)
        usb3_phy: usb3_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&reg_xhci0_vbus>;
+               #phy-cells = <0>;
        };
 
        reg_xhci0_vbus: xhci0-vbus {
index e1f355ffc8f7e07cbbc3912b13e7436f9ac4f407..434dc9aaa5e4e2b46429e221201bf7839ae00a80 100644 (file)
@@ -66,6 +66,7 @@
        usb3_1_phy: usb3_1-phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&usb3_1_vbus>;
+               #phy-cells = <0>;
        };
 
        usb3_1_vbus: usb3_1-vbus {
index 36ad571e76f31c85aff97acc42d7991ddf184e50..0a3552ebda3b80c84588a3cdfd45fb1ae9286fd2 100644 (file)
        usb3_0_phy: usb3_0_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&reg_usb3_0_vbus>;
+               #phy-cells = <0>;
        };
 
        usb3_1_phy: usb3_1_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&reg_usb3_1_vbus>;
+               #phy-cells = <0>;
        };
 
        reg_usb3_0_vbus: usb3-vbus0 {
index f503955dbd3b810db157344e3f9cf48dadd4b1ca..51b4ee6df130188cb0fe839ce78aa5f84d5e3607 100644 (file)
        usb2_1_phy: usb2_1_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&reg_usb2_1_vbus>;
+               #phy-cells = <0>;
        };
 
        usb3_phy: usb3_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&reg_usb3_vbus>;
+               #phy-cells = <0>;
        };
 
        reg_usb3_vbus: usb3-vbus {
index 528b9e3bc1da146fd188b4e2c93ec75b7abb4fdd..dcc55aa84583cdd18f7ef6ecd780eb947be1ef1f 100644 (file)
@@ -85,7 +85,7 @@
                timer@20200 {
                        compatible = "arm,cortex-a9-global-timer";
                        reg = <0x20200 0x100>;
-                       interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
                        clocks = <&periph_clk>;
                };
 
@@ -93,7 +93,7 @@
                        compatible = "arm,cortex-a9-twd-timer";
                        reg = <0x20600 0x20>;
                        interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
-                                                 IRQ_TYPE_LEVEL_HIGH)>;
+                                                 IRQ_TYPE_EDGE_RISING)>;
                        clocks = <&periph_clk>;
                };
 
index 013431e3d7c3140d3a0645bdf4f130e9a860f984..dcde93c85c2d38e0f230ca21c700cb7755d9ec55 100644 (file)
 
        usbphy: phy {
                compatible = "usb-nop-xceiv";
+               #phy-cells = <0>;
        };
 };
index 3bc50849d013ff0442f559dce24274972202f5f2..b8bde13de90a571ea71aeec0515c7c8290ecb7ca 100644 (file)
        status = "okay";
 };
 
-&sata {
-       status = "okay";
-};
-
 &qspi {
        bspi-sel = <0>;
        flash: m25p80@0 {
index d94d14b3c745a0d01c031b6d9b9e6426e0e7b2ec..6a44b8021702176c63d09e55925ffd3d7e02994e 100644 (file)
        status = "okay";
 };
 
-&sata {
-       status = "okay";
-};
-
 &srab {
        compatible = "brcm,bcm58625-srab", "brcm,nsp-srab";
        status = "okay";
index 9708157f5daf6f7c84a76e521aae8d27e5684d68..681f5487406e39f7bdad2c793754149c0ab189d3 100644 (file)
@@ -75,6 +75,7 @@
                                reg = <0x47401300 0x100>;
                                reg-names = "phy";
                                ti,ctrl_mod = <&usb_ctrl_mod>;
+                               #phy-cells = <0>;
                        };
 
                        usb0: usb@47401000 {
                                        reg = <0x1b00 0x100>;
                                        reg-names = "phy";
                                        ti,ctrl_mod = <&usb_ctrl_mod>;
+                                       #phy-cells = <0>;
                                };
                        };
 
index 589a67c5f7969fb15b59e1910ea96db528dbf9e4..84f17f7abb7136104cf2265bb34c7e0bba0ebd12 100644 (file)
                                clock-names = "ipg", "per";
                        };
 
-                       srtc: srtc@53fa4000 {
-                               compatible = "fsl,imx53-rtc", "fsl,imx25-rtc";
-                               reg = <0x53fa4000 0x4000>;
-                               interrupts = <24>;
-                               interrupt-parent = <&tzic>;
-                               clocks = <&clks IMX5_CLK_SRTC_GATE>;
-                               clock-names = "ipg";
-                       };
-
                        iomuxc: iomuxc@53fa8000 {
                                compatible = "fsl,imx53-iomuxc";
                                reg = <0x53fa8000 0x4000>;
index 38faa90007d7f0c7e3042a90aef486321cc4bfa0..2fa5eb4bd4029facce1217e34f266b02daeb987f 100644 (file)
@@ -72,7 +72,8 @@
 };
 
 &gpmc {
-       ranges = <1 0 0x08000000 0x1000000>;    /* CS1: 16MB for LAN9221 */
+       ranges = <0 0 0x30000000 0x1000000      /* CS0: 16MB for NAND */
+                 1 0 0x2c000000 0x1000000>;    /* CS1: 16MB for LAN9221 */
 
        ethernet@gpmc {
                pinctrl-names = "default";
index 26cce4d18405d5c993377ed7a246d7c80b43dcea..29cb804d10cc7d8ad9c37b13296d31bdf0f83ffe 100644 (file)
        hsusb2_phy: hsusb2_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; /* gpio_4 */
+               #phy-cells = <0>;
        };
 };
 
 &gpmc {
-       ranges = <0 0 0x00000000 0x1000000>;    /* CS0: 16MB for NAND */
+       ranges = <0 0 0x30000000 0x1000000>;    /* CS0: 16MB for NAND */
 
        nand@0,0 {
                compatible = "ti,omap2-nand";
 
 &mmc3 {
        interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
-       pinctrl-0 = <&mmc3_pins>;
+       pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
        pinctrl-names = "default";
        vmmc-supply = <&wl12xx_vmmc>;
        non-removable;
        wlcore: wlcore@2 {
                compatible = "ti,wl1273";
                reg = <2>;
-               interrupt-parent = <&gpio5>;
-               interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */
+               interrupt-parent = <&gpio1>;
+               interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; /* gpio 2 */
                ref-clock-frequency = <26000000>;
        };
 };
                        OMAP3_CORE1_IOPAD(0x2166, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat5.sdmmc3_dat1 */
                        OMAP3_CORE1_IOPAD(0x2168, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat2 */
                        OMAP3_CORE1_IOPAD(0x216a, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat3 */
-                       OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT_PULLUP | MUX_MODE4) /* mcbsp4_clkx.gpio_152 */
-                       OMAP3_CORE1_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4)       /* sys_boot1.gpio_3 */
                        OMAP3_CORE1_IOPAD(0x21d0, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs1.sdmmc3_cmd */
                        OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs2.sdmmc_clk */
                >;
                        OMAP3_WKUP_IOPAD(0x2a0e, PIN_OUTPUT | MUX_MODE4)        /* sys_boot2.gpio_4 */
                >;
        };
+       wl127x_gpio: pinmux_wl127x_gpio_pin {
+               pinctrl-single,pins = <
+                       OMAP3_WKUP_IOPAD(0x2a0c, PIN_INPUT | MUX_MODE4)         /* sys_boot0.gpio_2 */
+                       OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4)        /* sys_boot1.gpio_3 */
+               >;
+       };
 };
 
 &omap3_pmx_core2 {
index 4926133077b3541165ccf989af0834a8362411b3..0d9faf1a51eac0cca63b54cb272bb1c57e8261d2 100644 (file)
                                reg = <0x7c00 0x200>;
                        };
 
-                       gpio_intc: interrupt-controller@9880 {
-                               compatible = "amlogic,meson-gpio-intc";
-                               reg = <0xc1109880 0x10>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                               amlogic,channel-interrupts = <64 65 66 67 68 69 70 71>;
-                               status = "disabled";
-                       };
-
                        hwrng: rng@8100 {
                                compatible = "amlogic,meson-rng";
                                reg = <0x8100 0x8>;
                                status = "disabled";
                        };
 
+                       gpio_intc: interrupt-controller@9880 {
+                               compatible = "amlogic,meson-gpio-intc";
+                               reg = <0x9880 0x10>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               amlogic,channel-interrupts = <64 65 66 67 68 69 70 71>;
+                               status = "disabled";
+                       };
+
                        wdt: watchdog@9900 {
                                compatible = "amlogic,meson6-wdt";
                                reg = <0x9900 0x8>;
index ec2283b1a638e028d28e57d53e8a7dd9fc2c2778..1a5ae4cd107f08294c4bddce41d7369a76fa9ecd 100644 (file)
@@ -56,6 +56,7 @@
 
        usb_phy: usb_phy {
                compatible = "usb-nop-xceiv";
+               #phy-cells = <0>;
        };
 
        vbus_reg: vbus_reg {
index 683b96a8f73e01248349440d1caab8fd7ffeb7cc..0349fcc9dc26ab8d80f568b1d5c8528a78a8a588 100644 (file)
@@ -90,6 +90,7 @@
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio5 19 GPIO_ACTIVE_LOW>; /* gpio_147 */
                vcc-supply = <&hsusb2_power>;
+               #phy-cells = <0>;
        };
 
        tfp410: encoder0 {
index 4d2eaf843fa960190514dffb06b24d46056adb3e..3ca8991a6c3e977e2a16bb30444db4478a61ca0d 100644 (file)
@@ -64,6 +64,7 @@
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio5 19 GPIO_ACTIVE_LOW>;      /* gpio_147 */
                vcc-supply = <&hsusb2_power>;
+               #phy-cells = <0>;
        };
 
        sound {
index 31d5ebf38892e77d04fc53fcd37922ffaca282e2..ab6003fe5a4341ded0b7668861e7ff1553a9ef57 100644 (file)
        hsusb1_phy: hsusb1_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&hsusb1_power>;
+               #phy-cells = <0>;
        };
 
        /* HS USB Host PHY on PORT 2 */
        hsusb2_phy: hsusb2_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&hsusb2_power>;
+               #phy-cells = <0>;
        };
 
        ads7846reg: ads7846-reg {
index dbc3f030a16c00cf8baf4e75cbd53fa5d21dc2a0..ee64191e41ca187b60eda4e36dc7aeb641c34896 100644 (file)
@@ -29,6 +29,7 @@
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio1 21 GPIO_ACTIVE_LOW>; /* gpio_21 */
                vcc-supply = <&hsusb2_power>;
+               #phy-cells = <0>;
        };
 
        leds {
index 4504908c23fe991ef2805548550ea4b091977522..3dc56fb156b7e88d9503cfea220a3c9067b326c2 100644 (file)
        hsusb2_phy: hsusb2_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio6 14 GPIO_ACTIVE_LOW>;
+               #phy-cells = <0>;
        };
 
        tv0: connector {
index 667f96245729cd7ef76c2a27dbce6fa2f6410f52..ecbec23af49f74a6ecccdd0ec99fc434422c9de3 100644 (file)
@@ -58,6 +58,7 @@
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>; /* gpio_24 */
                vcc-supply = <&hsusb1_power>;
+               #phy-cells = <0>;
        };
 
        tfp410: encoder {
index e94d9427450cafa9457be8f5b50d8bfa9a07d8b5..443f717074374274f7d82ffc385cb47e3d9cf8e7 100644 (file)
@@ -37,6 +37,7 @@
        hsusb2_phy: hsusb2_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio2 22 GPIO_ACTIVE_LOW>;              /* gpio_54 */
+               #phy-cells = <0>;
        };
 };
 
index 343a36d8031d8a2207d9b411ae71000b50232fa0..7ada1e93e166389eb4472cc39d61003b4cc68360 100644 (file)
@@ -51,6 +51,7 @@
        hsusb1_phy: hsusb1_phy {
                compatible = "usb-nop-xceiv";
                vcc-supply = <&reg_vcc3>;
+               #phy-cells = <0>;
        };
 };
 
index f25e158e7163b23201ca8e643f557b3965fc959d..ac141fcd1742e3e29a79c4eaacbf69884b171718 100644 (file)
@@ -51,6 +51,7 @@
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio6 23 GPIO_ACTIVE_LOW>;      /* gpio_183 */
                vcc-supply = <&hsusb2_power>;
+               #phy-cells = <0>;
        };
 
        /* Regulator to trigger the nPoweron signal of the Wifi module */
index 53e007abdc7159ee69fd35a120ba30edb0704e3c..cd53dc6c00516b4d9be3aa5f3023a7ae1a3ffb48 100644 (file)
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>; /* GPIO_16 */
                vcc-supply = <&vaux2>;
+               #phy-cells = <0>;
        };
 
        /* HS USB Host VBUS supply
index 9a601d15247bef5da1519db88b1680b119381b6d..6f5bd027b71753c8508acc93a56f211a95cd12be 100644 (file)
@@ -46,6 +46,7 @@
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio6 2 GPIO_ACTIVE_LOW>;       /* gpio_162 */
                vcc-supply = <&hsusb2_power>;
+               #phy-cells = <0>;
        };
 
        sound {
index 90b5c7148feb5a6c20763741628fd1e4c41e3909..bb33935df7b057eef7a9b237fc2d2f951f6ab375 100644 (file)
                                compatible = "ti,ohci-omap3";
                                reg = <0x48064400 0x400>;
                                interrupts = <76>;
+                               remote-wakeup-connected;
                        };
 
                        usbhsehci: ehci@48064800 {
index 8b93d37310f28ba4c10ad81d35f8419914ef4633..24a463f8641fe5b968cdb5d62bf15ed4e100b128 100644 (file)
@@ -73,6 +73,7 @@
        /* HS USB Host PHY on PORT 1 */
        hsusb1_phy: hsusb1_phy {
                compatible = "usb-nop-xceiv";
+               #phy-cells = <0>;
        };
 
        /* LCD regulator from sw5 source */
index 6e6810c258eb29c15ff8b4e87a6ce1a6eac93f5e..eb123b24c8e330141b7ece42c6dea74f0cc2a837 100644 (file)
@@ -43,6 +43,7 @@
        hsusb1_phy: hsusb1_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>;      /* gpio_62 */
+               #phy-cells = <0>;
 
                pinctrl-names = "default";
                pinctrl-0 = <&hsusb1phy_pins>;
index 22c1eee9b07a28e27cd1a2d66e89100065a3ce7b..5501d1b4e6cdfa152804b7cbb5b277fcdc0417f6 100644 (file)
@@ -89,6 +89,7 @@
        hsusb1_phy: hsusb1_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>;   /* gpio_62 */
+               #phy-cells = <0>;
                vcc-supply = <&hsusb1_power>;
                clocks = <&auxclk3_ck>;
                clock-names = "main_clk";
index 6500bfc8d1309a26198893583bb00861d5a69a1e..10fce28ceb5b7dc38477e60f0d87a86b6a384638 100644 (file)
@@ -44,6 +44,7 @@
 
                reset-gpios = <&gpio6 17 GPIO_ACTIVE_LOW>; /* gpio 177 */
                vcc-supply = <&vbat>;
+               #phy-cells = <0>;
 
                clocks = <&auxclk3_ck>;
                clock-names = "main_clk";
index 1dc5a76b3c7106c9532f47c71c8052813b2492f9..cc1a07a3620ff5daa9c32770fee918c45e9439cd 100644 (file)
                elm: elm@48078000 {
                        compatible = "ti,am3352-elm";
                        reg = <0x48078000 0x2000>;
-                       interrupts = <4>;
+                       interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
                        ti,hwmods = "elm";
                        status = "disabled";
                };
                        usbhsohci: ohci@4a064800 {
                                compatible = "ti,ohci-omap3";
                                reg = <0x4a064800 0x400>;
-                               interrupt-parent = <&gic>;
                                interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+                               remote-wakeup-connected;
                        };
 
                        usbhsehci: ehci@4a064c00 {
                                compatible = "ti,ehci-omap";
                                reg = <0x4a064c00 0x400>;
-                               interrupt-parent = <&gic>;
                                interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
                        };
                };
index 575ecffb0e9e47cfda8373c22071de12e8789302..1b20838bb9a42ed64b80b0943dd59acf8be0d06d 100644 (file)
                clocks = <&auxclk1_ck>;
                clock-names = "main_clk";
                clock-frequency = <19200000>;
+               #phy-cells = <0>;
        };
 
        /* HS USB Host PHY on PORT 3 */
        hsusb3_phy: hsusb3_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>; /* gpio3_79 ETH_NRESET */
+               #phy-cells = <0>;
        };
 
        tpd12s015: encoder {
index 5b172a04b6f1b14193f280228432adf19b00296d..5e21fb430a65daa8e29a1ca90a404389d9dc99a9 100644 (file)
        hsusb2_phy: hsusb2_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>; /* gpio3_76 HUB_RESET */
+               #phy-cells = <0>;
        };
 
        /* HS USB Host PHY on PORT 3 */
        hsusb3_phy: hsusb3_phy {
                compatible = "usb-nop-xceiv";
                reset-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>; /* gpio3_83 ETH_RESET */
+               #phy-cells = <0>;
        };
 
        leds {
index 4cd0005e462f7a0b88577a578d0baff8f2b13714..51a7fb3d7b9a019f5b76d5f72afaa0c0c857e050 100644 (file)
                                compatible = "ti,ohci-omap3";
                                reg = <0x4a064800 0x400>;
                                interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+                               remote-wakeup-connected;
                        };
 
                        usbhsehci: ehci@4a064c00 {
index 2f017fee4009a2242c3a808c5aa07e29eac36ee9..62baabd757b6ba010693b9b7d4cfc37a367819d8 100644 (file)
                clock-names = "extal", "usb_extal";
                #clock-cells = <2>;
                #power-domain-cells = <0>;
+               #reset-cells = <1>;
        };
 
        prr: chipid@ff000044 {
index 131f65b0426ea317ccfa4fa8fed0b015611bd48c..3d080e07374ca609d367d09642595ff11f9c6629 100644 (file)
                        clock-names = "extal";
                        #clock-cells = <2>;
                        #power-domain-cells = <0>;
+                       #reset-cells = <1>;
                };
        };
 
index 58eae569b4e0e3f0920f0e8d0cb4a19cd752d1fd..0cd1035de1a4bd4906eafae6586a596a184a3b84 100644 (file)
                clock-names = "extal", "usb_extal";
                #clock-cells = <2>;
                #power-domain-cells = <0>;
+               #reset-cells = <1>;
        };
 
        rst: reset-controller@e6160000 {
index 905e50c9b524d2c4ac40e516f8238e15f48a2a50..5643976c13569541b079dd5f9c9a4545dcf2ffc3 100644 (file)
                clock-names = "extal", "usb_extal";
                #clock-cells = <2>;
                #power-domain-cells = <0>;
+               #reset-cells = <1>;
        };
 
        rst: reset-controller@e6160000 {
index 02a6227c717ca6ce0bd515804887b74778a06ded..4b8edc8982cf156931177b0ecce0f6b9329afce3 100644 (file)
                                        switch0port10: port@10 {
                                                reg = <10>;
                                                label = "dsa";
-                                               phy-mode = "xgmii";
+                                               phy-mode = "xaui";
                                                link = <&switch1port10>;
                                        };
                                };
                                        switch1port10: port@10 {
                                                reg = <10>;
                                                label = "dsa";
-                                               phy-mode = "xgmii";
+                                               phy-mode = "xaui";
                                                link = <&switch0port10>;
                                        };
                                };
 };
 
 &i2c1 {
-       at24mac602@0 {
+       at24mac602@50 {
                compatible = "atmel,24c02";
                reg = <0x50>;
                read-only;
index ad301f107dd286cff4432b8cc0284ff5976eec54..bc8d4bbd82e27719a990c7972fd77bfca9dc7aef 100644 (file)
@@ -518,4 +518,22 @@ THUMB(     orr     \reg , \reg , #PSR_T_BIT        )
 #endif
        .endm
 
+       .macro  bug, msg, line
+#ifdef CONFIG_THUMB2_KERNEL
+1:     .inst   0xde02
+#else
+1:     .inst   0xe7f001f2
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+       .pushsection .rodata.str, "aMS", %progbits, 1
+2:     .asciz  "\msg"
+       .popsection
+       .pushsection __bug_table, "aw"
+       .align  2
+       .word   1b, 2b
+       .hword  \line
+       .popsection
+#endif
+       .endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
index c8781450905be94995e0df72aeb3cc722afd1b60..3ab8b3781bfeca7264989b813209a99d115d35f2 100644 (file)
 #else
 #define VTTBR_X                (5 - KVM_T0SZ)
 #endif
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  _AC(48, ULL)
 #define VTTBR_VMID_MASK(size)  (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
index 242151ea69087a4ec8c4b5fd963c210ff30a89fe..a9f7d3f47134a96536480275168247e309d78388 100644 (file)
@@ -285,6 +285,11 @@ static inline void kvm_arm_init_debug(void) {}
 static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+static inline bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu,
+                                            struct kvm_run *run)
+{
+       return false;
+}
 
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
index 2a029bceaf2f8593788dea27ec05f3664fc9733a..1a7a17b2a1bae97a21fca6a4920efd96540ac43d 100644 (file)
@@ -221,7 +221,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
 }
 #define        __HAVE_ARCH_PTE_SPECIAL
 
-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
 #define pmd_dirty(pmd)         (pmd_isset((pmd), L_PMD_SECT_DIRTY))
 #define pud_page(pud)          pmd_page(__pmd(pud_val(pud)))
index 1c462381c225eea31346ec4f19145e3fd449caab..150ece66ddf34506cf8d36963c2461a8188ebe91 100644 (file)
@@ -232,6 +232,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_valid_user(pte)    \
        (pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))
 
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+       pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
+       pteval_t needed = mask;
+
+       if (write)
+               mask |= L_PTE_RDONLY;
+
+       return (pte_val(pte) & mask) == needed;
+}
+#define pte_access_permitted pte_access_permitted
+
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
 {
index 4d53de308ee089a7b745926ab8da16caa825806e..4d1cc1847edf076dfb3ea03db6712803a851d28b 100644 (file)
@@ -7,6 +7,7 @@ generated-y += unistd-oabi.h
 generated-y += unistd-eabi.h
 
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
index d523cd8439a3df250ec514e2137ba5754c157ffa..0f07579af472c8ec869c5d87fd8d1e105a24dcba 100644 (file)
        mov     r2, sp
        ldr     r1, [r2, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [r2, #\offset + S_PC]!      @ get pc
+       tst     r1, #PSR_I_BIT | 0x0f
+       bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
                                                @ after ldm {}^
        add     sp, sp, #\offset + PT_REGS_SIZE
        movs    pc, lr                          @ return & move spsr_svc into cpsr
+1:     bug     "Returning to usermode but unexpected PSR bits set?", \@
 #elif defined(CONFIG_CPU_V7M)
        @ V7M restore.
        @ Note that we don't need to do clrex here as clearing the local
        ldr     r1, [sp, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [sp, #\offset + S_PC]       @ get pc
        add     sp, sp, #\offset + S_SP
+       tst     r1, #PSR_I_BIT | 0x0f
+       bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc
 
        @ We must avoid clrex due to Cortex-A15 erratum #830321
        .endif
        add     sp, sp, #PT_REGS_SIZE - S_SP
        movs    pc, lr                          @ return & move spsr_svc into cpsr
+1:     bug     "Returning to usermode but unexpected PSR bits set?", \@
 #endif /* !CONFIG_THUMB2_KERNEL */
        .endm
 
index f24628db540984bbff0d4274bd1b312746a6347c..e2bd35b6780cd6c859758a276cea0cf3b29eada2 100644 (file)
@@ -4,6 +4,7 @@
 #
 
 source "virt/kvm/Kconfig"
+source "virt/lib/Kconfig"
 
 menuconfig VIRTUALIZATION
        bool "Virtualization"
@@ -23,6 +24,8 @@ config KVM
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        select ARM_GIC
+       select ARM_GIC_V3
+       select ARM_GIC_V3_ITS
        select HAVE_KVM_CPU_RELAX_INTERCEPT
        select HAVE_KVM_ARCH_TLB_FLUSH_ALL
        select KVM_MMIO
@@ -36,6 +39,8 @@ config KVM
        select HAVE_KVM_IRQCHIP
        select HAVE_KVM_IRQ_ROUTING
        select HAVE_KVM_MSI
+       select IRQ_BYPASS_MANAGER
+       select HAVE_KVM_IRQ_BYPASS
        depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
        ---help---
          Support hosting virtualized guest machines.
index f550abd64a25df1f42de16547e6fd4a1aa092787..48de846f22464637be95c64e0a1ff9357b6e5a65 100644 (file)
@@ -32,6 +32,7 @@ obj-y += $(KVM)/arm/vgic/vgic-init.o
 obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
 obj-y += $(KVM)/arm/vgic/vgic-v2.o
 obj-y += $(KVM)/arm/vgic/vgic-v3.o
+obj-y += $(KVM)/arm/vgic/vgic-v4.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
index 1712f132b80d2402d94d72ea974a0c3326fa2f52..b83fdc06286a64ece150fb7e419bc587e47c3e34 100644 (file)
                .pushsection .text.fixup,"ax"
                .align  4
 9001:          mov     r4, #-EFAULT
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+               ldr     r5, [sp, #9*4]          @ *err_ptr
+#else
                ldr     r5, [sp, #8*4]          @ *err_ptr
+#endif
                str     r4, [r5]
                ldmia   sp, {r1, r2}            @ retrieve dst, len
                add     r2, r2, r1
index c1cd80ecc21992b2778c20f03f04ea9c328e0a4e..3b73813c6b0434f93c85bf4256ae70049de8ae2c 100644 (file)
@@ -305,7 +305,7 @@ static void n2100_restart(enum reboot_mode mode, const char *cmd)
 
 static struct timer_list power_button_poll_timer;
 
-static void power_button_poll(unsigned long dummy)
+static void power_button_poll(struct timer_list *unused)
 {
        if (gpio_get_value(N2100_POWER_BUTTON) == 0) {
                ctrl_alt_del();
@@ -336,8 +336,7 @@ static int __init n2100_request_gpios(void)
                        pr_err("could not set power GPIO as input\n");
        }
        /* Set up power button poll timer */
-       init_timer(&power_button_poll_timer);
-       power_button_poll_timer.function = power_button_poll;
+       timer_setup(&power_button_poll_timer, power_button_poll, 0);
        power_button_poll_timer.expires = jiffies + (HZ / 10);
        add_timer(&power_button_poll_timer);
        return 0;
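
The dsmg600, nas100d, and db88f5281 hunks below make the same conversion: the older callback took an opaque unsigned long cookie, while timer_setup() hands the callback the struct timer_list pointer itself. The sketch below is not taken from this patch; it only illustrates the converted pattern, using from_timer() to recover the enclosing structure where the old cookie used to carry context. The structure and function names are made up.

#include <linux/timer.h>
#include <linux/jiffies.h>

struct button_poll {
	struct timer_list timer;
	unsigned int polls;
};

static void button_poll_fn(struct timer_list *t)
{
	/* from_timer() is container_of() for the embedded timer_list */
	struct button_poll *bp = from_timer(bp, t, timer);

	bp->polls++;
	mod_timer(&bp->timer, jiffies + HZ / 10);	/* poll again in ~100 ms */
}

static void button_poll_start(struct button_poll *bp)
{
	/* replaces init_timer() plus a manual .function assignment */
	timer_setup(&bp->timer, button_poll_fn, 0);
	mod_timer(&bp->timer, jiffies + HZ / 10);
}
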
index ac97a459903454e0a88a36de7ab258f68d773c6f..0f5c99941a7d5b14e39663cad535a61698328e68 100644 (file)
@@ -179,10 +179,10 @@ static int power_button_countdown;
 /* Must hold the button down for at least this many counts to be processed */
 #define PBUTTON_HOLDDOWN_COUNT 4 /* 2 secs */
 
-static void dsmg600_power_handler(unsigned long data);
+static void dsmg600_power_handler(struct timer_list *unused);
 static DEFINE_TIMER(dsmg600_power_timer, dsmg600_power_handler);
 
-static void dsmg600_power_handler(unsigned long data)
+static void dsmg600_power_handler(struct timer_list *unused)
 {
        /* This routine is called twice per second to check the
         * state of the power button.
index 43560208540819ffb3b512d562b79a592ba02d10..76dfff03cb714e575cee08e608a32e8d5143cca4 100644 (file)
@@ -202,10 +202,10 @@ static int power_button_countdown;
 /* Must hold the button down for at least this many counts to be processed */
 #define PBUTTON_HOLDDOWN_COUNT 4 /* 2 secs */
 
-static void nas100d_power_handler(unsigned long data);
+static void nas100d_power_handler(struct timer_list *unused);
 static DEFINE_TIMER(nas100d_power_timer, nas100d_power_handler);
 
-static void nas100d_power_handler(unsigned long data)
+static void nas100d_power_handler(struct timer_list *unused)
 {
        /* This routine is called twice per second to check the
         * state of the power button.
index 2555f9056a339b491cdea8bf9305bf8614690c2b..cad7ee8f0d6b49e10fa2cebf094d754e88524e7d 100644 (file)
@@ -102,7 +102,7 @@ static void __init meson_smp_prepare_cpus(const char *scu_compatible,
 
        scu_base = of_iomap(node, 0);
        if (!scu_base) {
-               pr_err("Couln't map SCU registers\n");
+               pr_err("Couldn't map SCU registers\n");
                return;
        }
 
index d555791cf349dd160f49be49e58088e16900466b..83c6fa74cc31e41616f06495bc5b210decd6ac6e 100644 (file)
@@ -68,14 +68,17 @@ void __init omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2)
 int cm_split_idlest_reg(struct clk_omap_reg *idlest_reg, s16 *prcm_inst,
                        u8 *idlest_reg_id)
 {
+       int ret;
        if (!cm_ll_data->split_idlest_reg) {
                WARN_ONCE(1, "cm: %s: no low-level function defined\n",
                          __func__);
                return -EINVAL;
        }
 
-       return cm_ll_data->split_idlest_reg(idlest_reg, prcm_inst,
+       ret = cm_ll_data->split_idlest_reg(idlest_reg, prcm_inst,
                                           idlest_reg_id);
+       *prcm_inst -= cm_base.offset;
+       return ret;
 }
 
 /**
@@ -337,6 +340,7 @@ int __init omap2_cm_base_init(void)
                if (mem) {
                        mem->pa = res.start + data->offset;
                        mem->va = data->mem + data->offset;
+                       mem->offset = data->offset;
                }
 
                data->np = np;
index 5ac122e88f678b75d6c1060783738321d8b9c579..fa7f308c90279eb20ef89fdcdaafef31a1f8ec2c 100644 (file)
@@ -73,6 +73,27 @@ phys_addr_t omap_secure_ram_mempool_base(void)
        return omap_secure_memblock_base;
 }
 
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
+u32 omap3_save_secure_ram(void __iomem *addr, int size)
+{
+       u32 ret;
+       u32 param[5];
+
+       if (size != OMAP3_SAVE_SECURE_RAM_SZ)
+               return OMAP3_SAVE_SECURE_RAM_SZ;
+
+       param[0] = 4;           /* Number of arguments */
+       param[1] = __pa(addr);  /* Physical address for saving */
+       param[2] = 0;
+       param[3] = 1;
+       param[4] = 1;
+
+       ret = save_secure_ram_context(__pa(param));
+
+       return ret;
+}
+#endif
+
 /**
  * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls
  * @idx: The PPA API index
index bae263fba640af8c1ff7a3b1b98e71a35486e94c..c509cde71f931ab959cc751462d924a04fc0778f 100644 (file)
@@ -31,6 +31,8 @@
 /* Maximum Secure memory storage size */
 #define OMAP_SECURE_RAM_STORAGE        (88 * SZ_1K)
 
+#define OMAP3_SAVE_SECURE_RAM_SZ       0x803F
+
 /* Secure low power HAL API index */
 #define OMAP4_HAL_SAVESECURERAM_INDEX  0x1a
 #define OMAP4_HAL_SAVEHW_INDEX         0x1b
@@ -65,6 +67,8 @@ extern u32 omap_smc2(u32 id, u32 falg, u32 pargs);
 extern u32 omap_smc3(u32 id, u32 process, u32 flag, u32 pargs);
 extern phys_addr_t omap_secure_ram_mempool_base(void);
 extern int omap_secure_ram_reserve_memblock(void);
+extern u32 save_secure_ram_context(u32 args_pa);
+extern u32 omap3_save_secure_ram(void __iomem *save_regs, int size);
 
 extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
                                  u32 arg1, u32 arg2, u32 arg3, u32 arg4);
index d45cbfdb4be6838b7f698886a84f4ceb48fe35f8..f0388058b7da399d28d3819ab268d5d0a2a61073 100644 (file)
@@ -391,10 +391,8 @@ omap_device_copy_resources(struct omap_hwmod *oh,
        const char *name;
        int error, irq = 0;
 
-       if (!oh || !oh->od || !oh->od->pdev) {
-               error = -EINVAL;
-               goto error;
-       }
+       if (!oh || !oh->od || !oh->od->pdev)
+               return -EINVAL;
 
        np = oh->od->pdev->dev.of_node;
        if (!np) {
@@ -516,8 +514,10 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
                goto odbs_exit1;
 
        od = omap_device_alloc(pdev, &oh, 1);
-       if (IS_ERR(od))
+       if (IS_ERR(od)) {
+               ret = PTR_ERR(od);
                goto odbs_exit1;
+       }
 
        ret = platform_device_add_data(pdev, pdata, pdata_len);
        if (ret)
index d2106ae4410a23fb76c8593b6d123b3a94800ade..52c9d585b44d2607f4d0a4c5d0b1b1d01cb4bac3 100644 (file)
@@ -1646,6 +1646,7 @@ static struct omap_hwmod omap3xxx_mmc3_hwmod = {
        .main_clk       = "mmchs3_fck",
        .prcm           = {
                .omap2 = {
+                       .module_offs = CORE_MOD,
                        .prcm_reg_id = 1,
                        .module_bit = OMAP3430_EN_MMC3_SHIFT,
                        .idlest_reg_id = 1,
index b668719b9b25a7ada81229ba18ad2645777cf487..8e30772cfe325a35e52f2feaaa53c9fcc59357cd 100644 (file)
@@ -81,10 +81,6 @@ extern unsigned int omap3_do_wfi_sz;
 /* ... and its pointer from SRAM after copy */
 extern void (*omap3_do_wfi_sram)(void);
 
-/* save_secure_ram_context function pointer and size, for copy to SRAM */
-extern int save_secure_ram_context(u32 *addr);
-extern unsigned int save_secure_ram_context_sz;
-
 extern void omap3_save_scratchpad_contents(void);
 
 #define PM_RTA_ERRATUM_i608            (1 << 0)
index 841ba19d64a69b153a38ef594220d1a4054d7e59..36c55547137cb08b9b1b662cb5477e591590598d 100644 (file)
@@ -48,6 +48,7 @@
 #include "prm3xxx.h"
 #include "pm.h"
 #include "sdrc.h"
+#include "omap-secure.h"
 #include "sram.h"
 #include "control.h"
 #include "vc.h"
@@ -66,7 +67,6 @@ struct power_state {
 
 static LIST_HEAD(pwrst_list);
 
-static int (*_omap_save_secure_sram)(u32 *addr);
 void (*omap3_do_wfi_sram)(void);
 
 static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
@@ -121,8 +121,8 @@ static void omap3_save_secure_ram_context(void)
                 * will hang the system.
                 */
                pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
-               ret = _omap_save_secure_sram((u32 *)(unsigned long)
-                               __pa(omap3_secure_ram_storage));
+               ret = omap3_save_secure_ram(omap3_secure_ram_storage,
+                                           OMAP3_SAVE_SECURE_RAM_SZ);
                pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
                /* Following is for error tracking, it should not happen */
                if (ret) {
@@ -434,15 +434,10 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
  *
  * The minimum set of functions is pushed to SRAM for execution:
  * - omap3_do_wfi for erratum i581 WA,
- * - save_secure_ram_context for security extensions.
  */
 void omap_push_sram_idle(void)
 {
        omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
-
-       if (omap_type() != OMAP2_DEVICE_TYPE_GP)
-               _omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
-                               save_secure_ram_context_sz);
 }
 
 static void __init pm_errata_configure(void)
@@ -553,7 +548,7 @@ int __init omap3_pm_init(void)
        clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
        if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
                omap3_secure_ram_storage =
-                       kmalloc(0x803F, GFP_KERNEL);
+                       kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL);
                if (!omap3_secure_ram_storage)
                        pr_err("Memory allocation failed when allocating for secure sram context\n");
 
index 0592b23902c6885b03985085ec5c4d7415966d01..0977da0dab76077ee663ffa0dc0ba3f3c32f3a26 100644 (file)
@@ -528,6 +528,7 @@ struct omap_prcm_irq_setup {
 struct omap_domain_base {
        u32 pa;
        void __iomem *va;
+       s16 offset;
 };
 
 /**
index d2c5bcabdbebe03d9e05e4e551815ce85b9c4207..ebaf80d72a109fdaabb1fbeb21da6d401119ce8d 100644 (file)
@@ -176,17 +176,6 @@ static int am33xx_pwrdm_read_pwrst(struct powerdomain *pwrdm)
        return v;
 }
 
-static int am33xx_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
-{
-       u32 v;
-
-       v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs);
-       v &= AM33XX_LASTPOWERSTATEENTERED_MASK;
-       v >>= AM33XX_LASTPOWERSTATEENTERED_SHIFT;
-
-       return v;
-}
-
 static int am33xx_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm)
 {
        am33xx_prm_rmw_reg_bits(AM33XX_LOWPOWERSTATECHANGE_MASK,
@@ -357,7 +346,6 @@ struct pwrdm_ops am33xx_pwrdm_operations = {
        .pwrdm_set_next_pwrst           = am33xx_pwrdm_set_next_pwrst,
        .pwrdm_read_next_pwrst          = am33xx_pwrdm_read_next_pwrst,
        .pwrdm_read_pwrst               = am33xx_pwrdm_read_pwrst,
-       .pwrdm_read_prev_pwrst          = am33xx_pwrdm_read_prev_pwrst,
        .pwrdm_set_logic_retst          = am33xx_pwrdm_set_logic_retst,
        .pwrdm_read_logic_pwrst         = am33xx_pwrdm_read_logic_pwrst,
        .pwrdm_read_logic_retst         = am33xx_pwrdm_read_logic_retst,
index fa5fd24f524c5cb233dfb72d73c23929084d7caa..22daf4efed68b2b4b34ea4e5f1c6788f1033c562 100644 (file)
@@ -93,20 +93,13 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
 ENDPROC(enable_omap3630_toggle_l2_on_restore)
 
 /*
- * Function to call rom code to save secure ram context. This gets
- * relocated to SRAM, so it can be all in .data section. Otherwise
- * we need to initialize api_params separately.
+ * Function to call rom code to save secure ram context.
+ *
+ * r0 = physical address of the parameters
  */
-       .data
-       .align  3
 ENTRY(save_secure_ram_context)
        stmfd   sp!, {r4 - r11, lr}     @ save registers on stack
-       adr     r3, api_params          @ r3 points to parameters
-       str     r0, [r3,#0x4]           @ r0 has sdram address
-       ldr     r12, high_mask
-       and     r3, r3, r12
-       ldr     r12, sram_phy_addr_mask
-       orr     r3, r3, r12
+       mov     r3, r0                  @ physical address of parameters
        mov     r0, #25                 @ set service ID for PPA
        mov     r12, r0                 @ copy secure service ID in r12
        mov     r1, #0                  @ set task id for ROM code in r1
@@ -120,18 +113,7 @@ ENTRY(save_secure_ram_context)
        nop
        nop
        ldmfd   sp!, {r4 - r11, pc}
-       .align
-sram_phy_addr_mask:
-       .word   SRAM_BASE_P
-high_mask:
-       .word   0xffff
-api_params:
-       .word   0x4, 0x0, 0x0, 0x1, 0x1
 ENDPROC(save_secure_ram_context)
-ENTRY(save_secure_ram_context_sz)
-       .word   . - save_secure_ram_context
-
-       .text
 
 /*
  * ======================
index 3f5863de766acbdbe4189f4fe53a23f648710995..39eae10ac8defa76b2574cc907dc7451c5ccd38c 100644 (file)
@@ -172,7 +172,7 @@ static struct platform_device db88f5281_nand_flash = {
 static void __iomem *db88f5281_7seg;
 static struct timer_list db88f5281_timer;
 
-static void db88f5281_7seg_event(unsigned long data)
+static void db88f5281_7seg_event(struct timer_list *unused)
 {
        static int count = 0;
        writel(0, db88f5281_7seg + (count << 4));
@@ -189,7 +189,7 @@ static int __init db88f5281_7seg_init(void)
                        printk(KERN_ERR "Failed to ioremap db88f5281_7seg\n");
                        return -EIO;
                }
-               setup_timer(&db88f5281_timer, db88f5281_7seg_event, 0);
+               timer_setup(&db88f5281_timer, db88f5281_7seg_event, 0);
                mod_timer(&db88f5281_timer, jiffies + 2 * HZ);
        }
 
index 6bea3d3a2dd76c4129a4b99c793a11651747e214..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644 (file)
@@ -1 +0,0 @@
-obj- += dummy.o
index 35ff45470dbfd5bac025eb149294307ab6ef2b7d..fc3b44028cfb22fb140ad75b621cf690c94e5a43 100644 (file)
@@ -129,8 +129,8 @@ static const struct prot_bits section_bits[] = {
                .val    = PMD_SECT_USER,
                .set    = "USR",
        }, {
-               .mask   = L_PMD_SECT_RDONLY,
-               .val    = L_PMD_SECT_RDONLY,
+               .mask   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+               .val    = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
                .set    = "ro",
                .clear  = "RW",
 #elif __LINUX_ARM_ARCH__ >= 6
index 81d4482b6861ca2f3ea71d56b1a967db953fc94a..a1f11a7ee81b2f81a512a59ca7cbc1b163189b47 100644 (file)
@@ -629,8 +629,8 @@ static struct section_perm ro_perms[] = {
                .start  = (unsigned long)_stext,
                .end    = (unsigned long)__init_begin,
 #ifdef CONFIG_ARM_LPAE
-               .mask   = ~L_PMD_SECT_RDONLY,
-               .prot   = L_PMD_SECT_RDONLY,
+               .mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
+               .prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
 #else
                .mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
                .prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
index a93339f5178f2eff247144eb9244c077225094bc..c9a7e9e1414f344c9dfd515600e3e4378bf61d81 100644 (file)
@@ -557,7 +557,6 @@ config QCOM_QDF2400_ERRATUM_0065
 
          If unsure, say Y.
 
-
 config SOCIONEXT_SYNQUACER_PREITS
        bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
        default y
@@ -576,6 +575,17 @@ config HISILICON_ERRATUM_161600802
          a 128kB offset to be applied to the target address in these commands.
 
          If unsure, say Y.
+
+config QCOM_FALKOR_ERRATUM_E1041
+       bool "Falkor E1041: Speculative instruction fetches might cause errant memory access"
+       default y
+       help
+         Falkor CPU may speculatively fetch instructions from an improper
+         memory location when MMU translation is changed from SCTLR_ELn[M]=1
+         to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem.
+
+         If unsure, say Y.
+
 endmenu
 
 
index b35788c909f1ce826df3717e14841836a9398070..b481b4a7c0111472baece0c0f5bcc9b985fb928a 100644 (file)
@@ -83,9 +83,6 @@ endif
 
 ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
 KBUILD_LDFLAGS_MODULE  += -T $(srctree)/arch/arm64/kernel/module.lds
-ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
-KBUILD_LDFLAGS_MODULE  += $(objtree)/arch/arm64/kernel/ftrace-mod.o
-endif
 endif
 
 # Default value
index d7c22d51bc5073c13b17143d314e49728bcd3220..4aa50b9b26bca7b904ab3e2921dc844f0f46f9b9 100644 (file)
@@ -12,6 +12,7 @@ subdir-y += cavium
 subdir-y += exynos
 subdir-y += freescale
 subdir-y += hisilicon
+subdir-y += lg
 subdir-y += marvell
 subdir-y += mediatek
 subdir-y += nvidia
@@ -22,5 +23,4 @@ subdir-y += rockchip
 subdir-y += socionext
 subdir-y += sprd
 subdir-y += xilinx
-subdir-y += lg
 subdir-y += zte
index ead895a4e9a5c9fb6f663092288e8178b95cd6a1..1fb8b9d6cb4ea07c105d088d2ccc5c25ca1ea128 100644 (file)
 
 &uart_B {
        clocks = <&xtal>, <&clkc CLKID_UART1>, <&xtal>;
-       clock-names = "xtal", "core", "baud";
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_C {
        clocks = <&xtal>, <&clkc CLKID_UART2>, <&xtal>;
-       clock-names = "xtal", "core", "baud";
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &vpu {
index 8ed981f59e5ae5804da97c887193a32a73b53983..6524b89e7115b5e313834b5ab8565768eb06bd13 100644 (file)
 
 &uart_A {
        clocks = <&xtal>, <&clkc CLKID_UART0>, <&xtal>;
-       clock-names = "xtal", "core", "baud";
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_AO {
 
 &uart_B {
        clocks = <&xtal>, <&clkc CLKID_UART1>, <&xtal>;
-       clock-names = "xtal", "core", "baud";
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_C {
        clocks = <&xtal>, <&clkc CLKID_UART2>, <&xtal>;
-       clock-names = "xtal", "core", "baud";
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &vpu {
index dd7193acc7dfa54aebf712fe7bbb3264159565f0..6bdefb26b3296ae2dda562472bd26a8a8d8b23bd 100644 (file)
@@ -40,7 +40,6 @@
 };
 
 &ethsc {
-       interrupt-parent = <&gpio>;
        interrupts = <0 8>;
 };
 
index d99e3731358c4aed8f7933fb49a8ba4db7a290b0..254d6795c67e94802789038ad3b9ccc872ffb3f5 100644 (file)
@@ -40,7 +40,6 @@
 };
 
 &ethsc {
-       interrupt-parent = <&gpio>;
        interrupts = <0 8>;
 };
 
index 864feeb3518014f2f9670e3f82c5815c624d038a..f9f06fcfb94aa5e2c8552afc11ae2df8bf3c2f10 100644 (file)
@@ -38,8 +38,7 @@
 };
 
 &ethsc {
-       interrupt-parent = <&gpio>;
-       interrupts = <0 8>;
+       interrupts = <4 8>;
 };
 
 &serial0 {
index aef72d886677758c76d6b932c863893df7c67b53..8b168280976f25de43539ed1b4dbed9b952fcfde 100644 (file)
@@ -512,4 +512,14 @@ alternative_else_nop_endif
 #endif
        .endm
 
+/**
+ * Errata workaround prior to disabling the MMU. Insert an ISB immediately prior
+ * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
+ */
+       .macro pre_disable_mmu_workaround
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
+       isb
+#endif
+       .endm
+
 #endif /* __ASM_ASSEMBLER_H */
index 76d1cc85d5b115915aaa63138121e9ba286d8f4e..955130762a3c6acc09f3ee76574f7cefc5097b4c 100644 (file)
@@ -38,7 +38,7 @@
  *
  *     See Documentation/cachetlb.txt for more information. Please note that
  *     the implementation assumes non-aliasing VIPT D-cache and (aliasing)
- *     VIPT or ASID-tagged VIVT I-cache.
+ *     VIPT I-cache.
  *
  *     flush_cache_mm(mm)
  *
index ac67cfc2585a8af417405958779b792d60b06e6a..060e3a4008abd18e4a5aa48bb5b6fa1674a735c3 100644 (file)
@@ -60,6 +60,9 @@ enum ftr_type {
 #define FTR_VISIBLE    true    /* Feature visible to the user space */
 #define FTR_HIDDEN     false   /* Feature is hidden from the user */
 
+#define FTR_VISIBLE_IF_IS_ENABLED(config)              \
+       (IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)
+
 struct arm64_ftr_bits {
        bool            sign;   /* Value is signed ? */
        bool            visible;
index 235e77d982610a0114f62cc833278994cc2e76b2..cbf08d7cbf3089949bb4ada755a36383e6e5db2e 100644 (file)
@@ -91,6 +91,7 @@
 #define BRCM_CPU_PART_VULCAN           0x516
 
 #define QCOM_CPU_PART_FALKOR_V1                0x800
+#define QCOM_CPU_PART_FALKOR           0xC00
 
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
+#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
 
 #ifndef __ASSEMBLY__
 
index 650344d01124971d98819116b7e657c7fce7c6f4..c4cd5081d78bc4b66e34f3c283d9b1f6ba48518c 100644 (file)
@@ -132,11 +132,9 @@ static inline void efi_set_pgd(struct mm_struct *mm)
                         * Defer the switch to the current thread's TTBR0_EL1
                         * until uaccess_enable(). Restore the current
                         * thread's saved ttbr0 corresponding to its active_mm
-                        * (if different from init_mm).
                         */
                        cpu_set_reserved_ttbr0();
-                       if (current->active_mm != &init_mm)
-                               update_saved_ttbr0(current, current->active_mm);
+                       update_saved_ttbr0(current, current->active_mm);
                }
        }
 }
index 7f069ff37f06cff3a4e032bc5916be1b155ee1f8..715d395ef45bb23f072b34f88116b36940346e95 100644 (file)
 #define VTCR_EL2_FLAGS                 (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
 #define VTTBR_X                                (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
 
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  (UL(48))
 #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
index 674912d7a571942b956c980aa2096efff32e85f9..ea6cb5b24258be29f39507c39d526ea7e52c7c30 100644 (file)
@@ -370,6 +370,7 @@ void kvm_arm_init_debug(void);
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
 int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
index 3257895a9b5e413c7c69c9d3cdb2fa23ec030592..9d155fa9a50791af293916cfdc1ede087f850c6d 100644 (file)
@@ -156,29 +156,21 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
 
 #define init_new_context(tsk,mm)       ({ atomic64_set(&(mm)->context.id, 0); 0; })
 
-/*
- * This is called when "tsk" is about to enter lazy TLB mode.
- *
- * mm:  describes the currently active mm context
- * tsk: task which is entering lazy tlb
- * cpu: cpu number which is entering lazy tlb
- *
- * tsk->mm will be NULL
- */
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
 {
-       if (system_uses_ttbr0_pan()) {
-               BUG_ON(mm->pgd == swapper_pg_dir);
-               task_thread_info(tsk)->ttbr0 =
-                       virt_to_phys(mm->pgd) | ASID(mm) << 48;
-       }
+       u64 ttbr;
+
+       if (!system_uses_ttbr0_pan())
+               return;
+
+       if (mm == &init_mm)
+               ttbr = __pa_symbol(empty_zero_page);
+       else
+               ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+
+       task_thread_info(tsk)->ttbr0 = ttbr;
 }
 #else
 static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -187,6 +179,16 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
 }
 #endif
 
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+       /*
+        * We don't actually care about the ttbr0 mapping, so point it at the
+        * zero page.
+        */
+       update_saved_ttbr0(tsk, &init_mm);
+}
+
 static inline void __switch_mm(struct mm_struct *next)
 {
        unsigned int cpu = smp_processor_id();
@@ -214,11 +216,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
         * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
         * value may have not been initialised yet (activate_mm caller) or the
         * ASID has changed since the last run (following the context switch
-        * of another thread of the same process). Avoid setting the reserved
-        * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
+        * of another thread of the same process).
         */
-       if (next != &init_mm)
-               update_saved_ttbr0(tsk, next);
+       update_saved_ttbr0(tsk, next);
 }
 
 #define deactivate_mm(tsk,mm)  do { } while (0)
index 19bd97671bb8d4e78a2a3d46ef890fa06a8092fe..4f766178fa6ff3184963c1caaccaf646fd91a4a2 100644 (file)
@@ -32,7 +32,7 @@ struct mod_arch_specific {
        struct mod_plt_sec      init;
 
        /* for CONFIG_DYNAMIC_FTRACE */
-       void                    *ftrace_trampoline;
+       struct plt_entry        *ftrace_trampoline;
 };
 #endif
 
@@ -45,4 +45,48 @@ extern u64 module_alloc_base;
 #define module_alloc_base      ((u64)_etext - MODULES_VSIZE)
 #endif
 
+struct plt_entry {
+       /*
+        * A program that conforms to the AArch64 Procedure Call Standard
+        * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
+        * IP1 (x17) may be inserted at any branch instruction that is
+        * exposed to a relocation that supports long branches. Since that
+        * is exactly what we are dealing with here, we are free to use x16
+        * as a scratch register in the PLT veneers.
+        */
+       __le32  mov0;   /* movn x16, #0x....                    */
+       __le32  mov1;   /* movk x16, #0x...., lsl #16           */
+       __le32  mov2;   /* movk x16, #0x...., lsl #32           */
+       __le32  br;     /* br   x16                             */
+};
+
+static inline struct plt_entry get_plt_entry(u64 val)
+{
+       /*
+        * MOVK/MOVN/MOVZ opcode:
+        * +--------+------------+--------+-----------+-------------+---------+
+        * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
+        * +--------+------------+--------+-----------+-------------+---------+
+        *
+        * Rd     := 0x10 (x16)
+        * hw     := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
+        * opc    := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
+        * sf     := 1 (64-bit variant)
+        */
+       return (struct plt_entry){
+               cpu_to_le32(0x92800010 | (((~val      ) & 0xffff)) << 5),
+               cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
+               cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
+               cpu_to_le32(0xd61f0200)
+       };
+}
+
+static inline bool plt_entries_equal(const struct plt_entry *a,
+                                    const struct plt_entry *b)
+{
+       return a->mov0 == b->mov0 &&
+              a->mov1 == b->mov1 &&
+              a->mov2 == b->mov2;
+}
+
 #endif /* __ASM_MODULE_H */
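
As a sanity check of the MOVN/MOVK packing documented in get_plt_entry() above, the stand-alone user-space sketch below (not part of the patch) reproduces the three immediates for a sample address; the opcode constants mirror the ones in the header and the address is chosen purely for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0xffff000008123456ULL;	/* arbitrary example branch target */
	uint32_t mov0 = 0x92800010u | (uint32_t)(((~val      ) & 0xffff) << 5);	/* movn x16, #lo16 */
	uint32_t mov1 = 0xf2a00010u | (uint32_t)((( val >> 16) & 0xffff) << 5);	/* movk x16, #..., lsl #16 */
	uint32_t mov2 = 0xf2c00010u | (uint32_t)((( val >> 32) & 0xffff) << 5);	/* movk x16, #..., lsl #32 */

	printf("%08x %08x %08x %08x\n", (unsigned)mov0, (unsigned)mov1,
	       (unsigned)mov2, 0xd61f0200u);	/* final word is br x16 */
	return 0;
}
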
index 8d5cbec17d803e37556b5f4a7b25e7b4f1391b35..f9ccc36d3dc3cb2e29ad2cd47f7cdd5eed4cef8a 100644 (file)
@@ -18,6 +18,7 @@
 #define __ASM_PERF_EVENT_H
 
 #include <asm/stack_pointer.h>
+#include <asm/ptrace.h>
 
 #define        ARMV8_PMU_MAX_COUNTERS  32
 #define        ARMV8_PMU_COUNTER_MASK  (ARMV8_PMU_MAX_COUNTERS - 1)
@@ -79,6 +80,7 @@ struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 #define perf_misc_flags(regs)  perf_misc_flags(regs)
+#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
 #endif
 
 #define perf_arch_fetch_caller_regs(regs, __ip) { \
index c9530b5b5ca836cbe23216d664e3ea9939d3b126..bdcc7f1c9d069df3d95c6884b8071cdba530cfc8 100644 (file)
@@ -42,6 +42,8 @@
 #include <asm/cmpxchg.h>
 #include <asm/fixmap.h>
 #include <linux/mmdebug.h>
+#include <linux/mm_types.h>
+#include <linux/sched.h>
 
 extern void __pte_error(const char *file, int line, unsigned long val);
 extern void __pmd_error(const char *file, int line, unsigned long val);
@@ -149,12 +151,20 @@ static inline pte_t pte_mkwrite(pte_t pte)
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-       return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
+       pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
+       pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+
+       return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-       return set_pte_bit(pte, __pgprot(PTE_DIRTY));
+       pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
+
+       if (pte_write(pte))
+               pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
+
+       return pte;
 }
 
 static inline pte_t pte_mkold(pte_t pte)
@@ -207,9 +217,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
        }
 }
 
-struct mm_struct;
-struct vm_area_struct;
-
 extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 
 /*
@@ -238,7 +245,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
         * hardware updates of the pte (ptep_set_access_flags safely changes
         * valid ptes without going through an invalid entry).
         */
-       if (pte_valid(*ptep) && pte_valid(pte)) {
+       if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(*ptep) && pte_valid(pte) &&
+          (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
                VM_WARN_ONCE(!pte_young(pte),
                             "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
                             __func__, pte_val(*ptep), pte_val(pte));
@@ -345,7 +353,6 @@ static inline int pmd_protnone(pmd_t pmd)
 
 #define pmd_thp_or_huge(pmd)   (pmd_huge(pmd) || pmd_trans_huge(pmd))
 
-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         pte_write(pmd_pte(pmd))
 
 #define pmd_mkhuge(pmd)                (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
@@ -642,28 +649,23 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
- * ptep_set_wrprotect - mark read-only while preserving the hardware update of
- * the Access Flag.
+ * ptep_set_wrprotect - mark read-only while transferring potential hardware
+ * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
  */
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
 {
        pte_t old_pte, pte;
 
-       /*
-        * ptep_set_wrprotect() is only called on CoW mappings which are
-        * private (!VM_SHARED) with the pte either read-only (!PTE_WRITE &&
-        * PTE_RDONLY) or writable and software-dirty (PTE_WRITE &&
-        * !PTE_RDONLY && PTE_DIRTY); see is_cow_mapping() and
-        * protection_map[]. There is no race with the hardware update of the
-        * dirty state: clearing of PTE_RDONLY when PTE_WRITE (a.k.a. PTE_DBM)
-        * is set.
-        */
-       VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(*ptep),
-                    "%s: potential race with hardware DBM", __func__);
        pte = READ_ONCE(*ptep);
        do {
                old_pte = pte;
+               /*
+                * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
+                * clear), set the PTE_DIRTY bit.
+                */
+               if (pte_hw_dirty(pte))
+                       pte = pte_mkdirty(pte);
                pte = pte_wrprotect(pte);
                pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
                                               pte_val(old_pte), pte_val(pte));
diff --git a/arch/arm64/include/uapi/asm/bpf_perf_event.h b/arch/arm64/include/uapi/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..b551b74
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
index 8265dd79089522ec140ba8094da70f7f075d2884..067baace74a09b9474cc40cf9a4900c1486a8a2c 100644 (file)
@@ -61,6 +61,3 @@ extra-y                                       += $(head-y) vmlinux.lds
 ifeq ($(CONFIG_DEBUG_EFI),y)
 AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
 endif
-
-# will be included by each individual module but not by the core kernel itself
-extra-$(CONFIG_DYNAMIC_FTRACE) += ftrace-mod.o
index 65f42d2574142d4b37bebf49ed1fc3cdccbb56ae..2a752cb2a0f35a82f2a60e744d160af9b5f6c6a1 100644 (file)
@@ -37,6 +37,7 @@ ENTRY(__cpu_soft_restart)
        mrs     x12, sctlr_el1
        ldr     x13, =SCTLR_ELx_FLAGS
        bic     x12, x12, x13
+       pre_disable_mmu_workaround
        msr     sctlr_el1, x12
        isb
 
index d16978213c5b332b439205f7c582e1c9a2f65e4d..ea001241bdd470ab4a0a13ba4dad9bdb5a818bae 100644 (file)
@@ -31,13 +31,13 @@ extern const struct cpu_operations cpu_psci_ops;
 
 const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
 
-static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *const dt_supported_cpu_ops[] __initconst = {
        &smp_spin_table_ops,
        &cpu_psci_ops,
        NULL,
 };
 
-static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *const acpi_supported_cpu_ops[] __initconst = {
 #ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
        &acpi_parking_protocol_ops,
 #endif
@@ -47,7 +47,7 @@ static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
 
 static const struct cpu_operations * __init cpu_get_ops(const char *name)
 {
-       const struct cpu_operations **ops;
+       const struct cpu_operations *const *ops;
 
        ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops;
 
index c5ba0097887f93e9d30b37355b84e5750d74d04e..a73a5928f09b26ae7b2de7b3c2217e5c975de4a0 100644 (file)
@@ -145,7 +145,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                                  FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
        S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
        S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
index 4e6ad355bd058e6a4ab73a0f94832a7b1fe719a6..6b9736c3fb5630ab31c17b662b5c5cfe2b7d0832 100644 (file)
@@ -96,6 +96,7 @@ ENTRY(entry)
        mrs     x0, sctlr_el2
        bic     x0, x0, #1 << 0 // clear SCTLR.M
        bic     x0, x0, #1 << 2 // clear SCTLR.C
+       pre_disable_mmu_workaround
        msr     sctlr_el2, x0
        isb
        b       2f
@@ -103,6 +104,7 @@ ENTRY(entry)
        mrs     x0, sctlr_el1
        bic     x0, x0, #1 << 0 // clear SCTLR.M
        bic     x0, x0, #1 << 2 // clear SCTLR.C
+       pre_disable_mmu_workaround
        msr     sctlr_el1, x0
        isb
 2:
index 143b3e72c25e6c1b51c75582148c74ffe95a1a97..fae81f7964b4f226242961607cb087a20710e22b 100644 (file)
  *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
  *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
  */
-static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);
+struct fpsimd_last_state_struct {
+       struct fpsimd_state *st;
+       bool sve_in_use;
+};
+
+static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
 
 /* Default VL for tasks that don't set it explicitly: */
 static int sve_default_vl = -1;
@@ -905,7 +910,7 @@ void fpsimd_thread_switch(struct task_struct *next)
                 */
                struct fpsimd_state *st = &next->thread.fpsimd_state;
 
-               if (__this_cpu_read(fpsimd_last_state) == st
+               if (__this_cpu_read(fpsimd_last_state.st) == st
                    && st->cpu == smp_processor_id())
                        clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
                else
@@ -991,6 +996,21 @@ void fpsimd_signal_preserve_current_state(void)
                sve_to_fpsimd(current);
 }
 
+/*
+ * Associate current's FPSIMD context with this cpu
+ * Preemption must be disabled when calling this function.
+ */
+static void fpsimd_bind_to_cpu(void)
+{
+       struct fpsimd_last_state_struct *last =
+               this_cpu_ptr(&fpsimd_last_state);
+       struct fpsimd_state *st = &current->thread.fpsimd_state;
+
+       last->st = st;
+       last->sve_in_use = test_thread_flag(TIF_SVE);
+       st->cpu = smp_processor_id();
+}
+
 /*
  * Load the userland FPSIMD state of 'current' from memory, but only if the
  * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
@@ -1004,11 +1024,8 @@ void fpsimd_restore_current_state(void)
        local_bh_disable();
 
        if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
-               struct fpsimd_state *st = &current->thread.fpsimd_state;
-
                task_fpsimd_load();
-               __this_cpu_write(fpsimd_last_state, st);
-               st->cpu = smp_processor_id();
+               fpsimd_bind_to_cpu();
        }
 
        local_bh_enable();
@@ -1026,18 +1043,14 @@ void fpsimd_update_current_state(struct fpsimd_state *state)
 
        local_bh_disable();
 
-       if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
-               current->thread.fpsimd_state = *state;
+       current->thread.fpsimd_state.user_fpsimd = state->user_fpsimd;
+       if (system_supports_sve() && test_thread_flag(TIF_SVE))
                fpsimd_to_sve(current);
-       }
-       task_fpsimd_load();
 
-       if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
-               struct fpsimd_state *st = &current->thread.fpsimd_state;
+       task_fpsimd_load();
 
-               __this_cpu_write(fpsimd_last_state, st);
-               st->cpu = smp_processor_id();
-       }
+       if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE))
+               fpsimd_bind_to_cpu();
 
        local_bh_enable();
 }
@@ -1052,7 +1065,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
 
 static inline void fpsimd_flush_cpu_state(void)
 {
-       __this_cpu_write(fpsimd_last_state, NULL);
+       __this_cpu_write(fpsimd_last_state.st, NULL);
 }
 
 /*
@@ -1065,14 +1078,10 @@ static inline void fpsimd_flush_cpu_state(void)
 #ifdef CONFIG_ARM64_SVE
 void sve_flush_cpu_state(void)
 {
-       struct fpsimd_state *const fpstate = __this_cpu_read(fpsimd_last_state);
-       struct task_struct *tsk;
-
-       if (!fpstate)
-               return;
+       struct fpsimd_last_state_struct const *last =
+               this_cpu_ptr(&fpsimd_last_state);
 
-       tsk = container_of(fpstate, struct task_struct, thread.fpsimd_state);
-       if (test_tsk_thread_flag(tsk, TIF_SVE))
+       if (last->st && last->sve_in_use)
                fpsimd_flush_cpu_state();
 }
 #endif /* CONFIG_ARM64_SVE */
@@ -1267,7 +1276,7 @@ static inline void fpsimd_pm_init(void) { }
 #ifdef CONFIG_HOTPLUG_CPU
 static int fpsimd_cpu_dead(unsigned int cpu)
 {
-       per_cpu(fpsimd_last_state, cpu) = NULL;
+       per_cpu(fpsimd_last_state.st, cpu) = NULL;
        return 0;
 }
 
diff --git a/arch/arm64/kernel/ftrace-mod.S b/arch/arm64/kernel/ftrace-mod.S
deleted file mode 100644 (file)
index 00c4025..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
-       .section        ".text.ftrace_trampoline", "ax"
-       .align          3
-0:     .quad           0
-__ftrace_trampoline:
-       ldr             x16, 0b
-       br              x16
-ENDPROC(__ftrace_trampoline)
index c13b1fca0e5baff4f95c280b523dd3da01ec2533..50986e388d2b27e92f6984914af4ce756ea0ee46 100644 (file)
@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
        if (offset < -SZ_128M || offset >= SZ_128M) {
 #ifdef CONFIG_ARM64_MODULE_PLTS
-               unsigned long *trampoline;
+               struct plt_entry trampoline;
                struct module *mod;
 
                /*
@@ -104,22 +104,24 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
                 * is added in the future, but for now, the pr_err() below
                 * deals with a theoretical issue only.
                 */
-               trampoline = (unsigned long *)mod->arch.ftrace_trampoline;
-               if (trampoline[0] != addr) {
-                       if (trampoline[0] != 0) {
+               trampoline = get_plt_entry(addr);
+               if (!plt_entries_equal(mod->arch.ftrace_trampoline,
+                                      &trampoline)) {
+                       if (!plt_entries_equal(mod->arch.ftrace_trampoline,
+                                              &(struct plt_entry){})) {
                                pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
                                return -EINVAL;
                        }
 
                        /* point the trampoline to our ftrace entry point */
                        module_disable_ro(mod);
-                       trampoline[0] = addr;
+                       *mod->arch.ftrace_trampoline = trampoline;
                        module_enable_ro(mod, true);
 
                        /* update trampoline before patching in the branch */
                        smp_wmb();
                }
-               addr = (unsigned long)&trampoline[1];
+               addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
 #else /* CONFIG_ARM64_MODULE_PLTS */
                return -EINVAL;
 #endif /* CONFIG_ARM64_MODULE_PLTS */
index 67e86a0f57ac43edcee10d89bd5db2e050ae1621..e3cb9fbf96b66c3ba2d4327d4c1a4b3ca734ef1f 100644 (file)
@@ -750,6 +750,7 @@ __primary_switch:
         * to take into account by discarding the current kernel mapping and
         * creating a new one.
         */
+       pre_disable_mmu_workaround
        msr     sctlr_el1, x20                  // disable the MMU
        isb
        bl      __create_page_tables            // recreate kernel mapping
index 749f81779420c7bab2ead0d8f5b9a6cf108e6a45..74bb56f656eff024839df19897ba06512128e9bb 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/perf_event.h>
 #include <linux/ptrace.h>
 #include <linux/smp.h>
+#include <linux/uaccess.h>
 
 #include <asm/compat.h>
 #include <asm/current.h>
@@ -36,7 +37,6 @@
 #include <asm/traps.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
-#include <asm/uaccess.h>
 
 /* Breakpoint currently in use for each BRP. */
 static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
index d05dbe658409b251c9dd4c18348b77c1797e6973..ea640f92fe5adaf92526ee252fb8fbc73348d0b6 100644 (file)
 #include <linux/module.h>
 #include <linux/sort.h>
 
-struct plt_entry {
-       /*
-        * A program that conforms to the AArch64 Procedure Call Standard
-        * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
-        * IP1 (x17) may be inserted at any branch instruction that is
-        * exposed to a relocation that supports long branches. Since that
-        * is exactly what we are dealing with here, we are free to use x16
-        * as a scratch register in the PLT veneers.
-        */
-       __le32  mov0;   /* movn x16, #0x....                    */
-       __le32  mov1;   /* movk x16, #0x...., lsl #16           */
-       __le32  mov2;   /* movk x16, #0x...., lsl #32           */
-       __le32  br;     /* br   x16                             */
-};
-
 static bool in_init(const struct module *mod, void *loc)
 {
        return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
@@ -40,33 +25,14 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
        int i = pltsec->plt_num_entries;
        u64 val = sym->st_value + rela->r_addend;
 
-       /*
-        * MOVK/MOVN/MOVZ opcode:
-        * +--------+------------+--------+-----------+-------------+---------+
-        * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
-        * +--------+------------+--------+-----------+-------------+---------+
-        *
-        * Rd     := 0x10 (x16)
-        * hw     := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
-        * opc    := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
-        * sf     := 1 (64-bit variant)
-        */
-       plt[i] = (struct plt_entry){
-               cpu_to_le32(0x92800010 | (((~val      ) & 0xffff)) << 5),
-               cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
-               cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
-               cpu_to_le32(0xd61f0200)
-       };
+       plt[i] = get_plt_entry(val);
 
        /*
         * Check if the entry we just created is a duplicate. Given that the
         * relocations are sorted, this will be the last entry we allocated.
         * (if one exists).
         */
-       if (i > 0 &&
-           plt[i].mov0 == plt[i - 1].mov0 &&
-           plt[i].mov1 == plt[i - 1].mov1 &&
-           plt[i].mov2 == plt[i - 1].mov2)
+       if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
                return (u64)&plt[i - 1];
 
        pltsec->plt_num_entries++;
@@ -154,6 +120,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
        unsigned long core_plts = 0;
        unsigned long init_plts = 0;
        Elf64_Sym *syms = NULL;
+       Elf_Shdr *tramp = NULL;
        int i;
 
        /*
@@ -165,6 +132,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
                        mod->arch.core.plt = sechdrs + i;
                else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
                        mod->arch.init.plt = sechdrs + i;
+               else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
+                        !strcmp(secstrings + sechdrs[i].sh_name,
+                                ".text.ftrace_trampoline"))
+                       tramp = sechdrs + i;
                else if (sechdrs[i].sh_type == SHT_SYMTAB)
                        syms = (Elf64_Sym *)sechdrs[i].sh_addr;
        }
@@ -215,5 +186,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
        mod->arch.init.plt_num_entries = 0;
        mod->arch.init.plt_max_entries = init_plts;
 
+       if (tramp) {
+               tramp->sh_type = SHT_NOBITS;
+               tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+               tramp->sh_addralign = __alignof__(struct plt_entry);
+               tramp->sh_size = sizeof(struct plt_entry);
+       }
+
        return 0;
 }
index f7c9781a9d48b48396e61375da2e10f31c0b6296..22e36a21c1134576eb58a9209d75f2c6b2f09f85 100644 (file)
@@ -1,4 +1,5 @@
 SECTIONS {
        .plt (NOLOAD) : { BYTE(0) }
        .init.plt (NOLOAD) : { BYTE(0) }
+       .text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
 }
index 9eaef51f83ff8d0ad7c15f54e84db43b14d5fbb3..3affca3dd96a3ee8c3c7bbb14085b08a7b657a79 100644 (file)
@@ -262,12 +262,6 @@ static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
-
-       [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
-       [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
-
-       [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
-       [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
 };
 
 static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
index b2adcce7bc18e628c924d009ba38190cb1bff951..6b7dcf4310acf6fc04ff40cca4ccd10e27fabec9 100644 (file)
@@ -314,6 +314,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
        clear_tsk_thread_flag(p, TIF_SVE);
        p->thread.sve_state = NULL;
 
+       /*
+        * In case p was allocated the same task_struct pointer as some
+        * other recently-exited task, make sure p is disassociated from
+        * any cpu that may have run that now-exited task recently.
+        * Otherwise we could erroneously skip reloading the FPSIMD
+        * registers for p.
+        */
+       fpsimd_flush_task_state(p);
+
        if (likely(!(p->flags & PF_KTHREAD))) {
                *childregs = *current_pt_regs();
                childregs->regs[0] = 0;
index ce704a4aeadd438bf637472bb7037b89fba15087..f407e422a7200b86072349cc70e1e6d5e7e1753b 100644 (file)
@@ -45,6 +45,7 @@ ENTRY(arm64_relocate_new_kernel)
        mrs     x0, sctlr_el2
        ldr     x1, =SCTLR_ELx_FLAGS
        bic     x0, x0, x1
+       pre_disable_mmu_workaround
        msr     sctlr_el2, x0
        isb
 1:
index 13f81f97139088cdc870b7dcbc6b990355e10865..2257dfcc44cce003b54d139e00aebe1743ed5e35 100644 (file)
@@ -4,6 +4,7 @@
 #
 
 source "virt/kvm/Kconfig"
+source "virt/lib/Kconfig"
 
 menuconfig VIRTUALIZATION
        bool "Virtualization"
@@ -36,6 +37,8 @@ config KVM
        select HAVE_KVM_MSI
        select HAVE_KVM_IRQCHIP
        select HAVE_KVM_IRQ_ROUTING
+       select IRQ_BYPASS_MANAGER
+       select HAVE_KVM_IRQ_BYPASS
        ---help---
          Support hosting virtualized guest machines.
          We don't support KVM with 16K page tables yet, due to the multiple
index 861acbbac385626b0adcbc62ccd9107876e5ff6c..87c4f7ae24de238a354a97f9eff5216deb90c7ee 100644 (file)
@@ -27,6 +27,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-irqfd.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v2.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v4.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
index dbadfaf850a7d07e202684f293c072e5ed4ec722..fa63b28c65e08a37e269b8d55fa6197e77b42b40 100644 (file)
@@ -221,3 +221,24 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
                }
        }
 }
+
+
+/*
+ * After successfully emulating an instruction, we might want to
+ * return to user space with a KVM_EXIT_DEBUG. We can only do this
+ * once the emulation is complete, though, so for userspace emulations
+ * we have to wait until we have re-entered KVM before calling this
+ * helper.
+ *
+ * Return true (and set exit_reason) to return to userspace or false
+ * if no further action is required.
+ */
+bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+               run->exit_reason = KVM_EXIT_DEBUG;
+               run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT;
+               return true;
+       }
+       return false;
+}
index b712479954692f3efd3844e490b412f71c3a96aa..304203fa9e3307583748de10c6c1f7425a231c7b 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_psci.h>
+#include <asm/debug-monitors.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -186,6 +187,40 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
        return arm_exit_handlers[hsr_ec];
 }
 
+/*
+ * We may be single-stepping an emulated instruction. If the emulation
+ * has been completed in the kernel, we can return to userspace with a
+ * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
+ * emulation first.
+ */
+static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       int handled;
+
+       /*
+        * See ARM ARM B1.14.1: "Hyp traps on instructions
+        * that fail their condition code check"
+        */
+       if (!kvm_condition_valid(vcpu)) {
+               kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+               handled = 1;
+       } else {
+               exit_handle_fn exit_handler;
+
+               exit_handler = kvm_get_exit_handler(vcpu);
+               handled = exit_handler(vcpu, run);
+       }
+
+       /*
+        * kvm_arm_handle_step_debug() sets the exit_reason on the kvm_run
+        * structure if we need to return to userspace.
+        */
+       if (handled > 0 && kvm_arm_handle_step_debug(vcpu, run))
+               handled = 0;
+
+       return handled;
+}
+
 /*
  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
  * proper exit to userspace.
@@ -193,8 +228,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                       int exception_index)
 {
-       exit_handle_fn exit_handler;
-
        if (ARM_SERROR_PENDING(exception_index)) {
                u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
 
@@ -220,20 +253,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                return 1;
        case ARM_EXCEPTION_EL1_SERROR:
                kvm_inject_vabt(vcpu);
-               return 1;
-       case ARM_EXCEPTION_TRAP:
-               /*
-                * See ARM ARM B1.14.1: "Hyp traps on instructions
-                * that fail their condition code check"
-                */
-               if (!kvm_condition_valid(vcpu)) {
-                       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+               /* We may still need to return for single-step */
+               if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
+                       && kvm_arm_handle_step_debug(vcpu, run))
+                       return 0;
+               else
                        return 1;
-               }
-
-               exit_handler = kvm_get_exit_handler(vcpu);
-
-               return exit_handler(vcpu, run);
+       case ARM_EXCEPTION_TRAP:
+               return handle_trap_exceptions(vcpu, run);
        case ARM_EXCEPTION_HYP_GONE:
                /*
                 * EL2 has been reset to the hyp-stub. This happens when a guest
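
The ARM_EXCEPTION_TRAP path above is now factored into handle_trap_exceptions(): instructions that fail their condition check are skipped, everything else goes through the per-EC handler, and a positive result is downgraded to "return to userspace" when a single-step exit is pending. A minimal sketch of that dispatch shape, with hypothetical stub helpers standing in for the real kvm_* functions:

/* Sketch of the refactored trap dispatch; the stubs below are
 * placeholders for kvm_condition_valid(), kvm_skip_instr(),
 * kvm_get_exit_handler() and kvm_arm_handle_step_debug().
 */
#include <stdbool.h>
#include <stdio.h>

typedef int (*exit_handler_fn)(void *vcpu, void *run);

static bool condition_valid(void *vcpu)               { (void)vcpu; return true; }
static void skip_instr(void *vcpu)                    { (void)vcpu; }
static bool step_debug_pending(void *vcpu, void *run) { (void)vcpu; (void)run; return false; }
static int  default_handler(void *vcpu, void *run)    { (void)vcpu; (void)run; return 1; }
static exit_handler_fn get_exit_handler(void *vcpu)   { (void)vcpu; return default_handler; }

/* > 0: return to guest, 0: return to userspace, < 0: error */
static int handle_trap(void *vcpu, void *run)
{
        int handled;

        if (!condition_valid(vcpu)) {
                skip_instr(vcpu);       /* failed condition check: just skip */
                handled = 1;
        } else {
                handled = get_exit_handler(vcpu)(vcpu, run);
        }

        /* A pending single-step forces a trip out to userspace. */
        if (handled > 0 && step_debug_pending(vcpu, run))
                handled = 0;

        return handled;
}

int main(void)
{
        printf("handled = %d\n", handle_trap(NULL, NULL));
        return 0;
}
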
index 3f9615582377661a88fab8be6a12365d625d830a..870828c364c508f825eacc1c49c17886dc9c8cb2 100644 (file)
@@ -151,6 +151,7 @@ reset:
        mrs     x5, sctlr_el2
        ldr     x6, =SCTLR_ELx_FLAGS
        bic     x5, x5, x6              // Clear SCTL_M and etc
+       pre_disable_mmu_workaround
        msr     sctlr_el2, x5
        isb
 
index 321c9c05dd9e09fc0c745a4543a286b7628f00a4..f4363d40e2cd7fd62d40d826d5296c95f15cde9f 100644 (file)
@@ -74,6 +74,9 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
 {
        u64 reg;
 
+       /* Clear pmscr in case of early return */
+       *pmscr_el1 = 0;
+
        /* SPE present on this CPU? */
        if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
                                                  ID_AA64DFR0_PMSVER_SHIFT))
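
The SPE hunk clears *pmscr_el1 before the feature probe so the early-return path never leaves the output parameter uninitialised. A small standalone illustration of the same out-parameter pattern (feature_present() is invented for the example):

/* General pattern shown above: initialise an output parameter before
 * any early-return path, so the caller never reads an indeterminate
 * value. feature_present() is a made-up stand-in for the SPE probe.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool feature_present(void) { return false; }    /* assume absent */

static void save_feature_state(uint64_t *state)
{
        *state = 0;                     /* clear in case of early return */

        if (!feature_present())
                return;                 /* caller still sees a sane value */

        *state = 0x1234;                /* pretend to read hardware state */
}

int main(void)
{
        uint64_t state;

        save_feature_state(&state);
        printf("saved state: %#llx\n", (unsigned long long)state);
        return 0;
}
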
index 525c01f48867808b6efa257063daaa4c8207252e..f7c651f3a8c0e8001bb11b1a45216ac6d4a6b342 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 #include <asm/fpsimd.h>
+#include <asm/debug-monitors.h>
 
 static bool __hyp_text __fpsimd_enabled_nvhe(void)
 {
@@ -269,7 +270,11 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
        return true;
 }
 
-static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
+/* Skip an instruction which has been emulated. Returns true if
+ * execution can continue or false if we need to exit hyp mode because
+ * single-step was in effect.
+ */
+static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
 {
        *vcpu_pc(vcpu) = read_sysreg_el2(elr);
 
@@ -282,6 +287,14 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
        }
 
        write_sysreg_el2(*vcpu_pc(vcpu), elr);
+
+       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+               vcpu->arch.fault.esr_el2 =
+                       (ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
+               return false;
+       } else {
+               return true;
+       }
 }
 
 int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
@@ -342,13 +355,21 @@ again:
                        int ret = __vgic_v2_perform_cpuif_access(vcpu);
 
                        if (ret == 1) {
-                               __skip_instr(vcpu);
-                               goto again;
+                               if (__skip_instr(vcpu))
+                                       goto again;
+                               else
+                                       exit_code = ARM_EXCEPTION_TRAP;
                        }
 
                        if (ret == -1) {
-                               /* Promote an illegal access to an SError */
-                               __skip_instr(vcpu);
+                               /* Promote an illegal access to an
+                                * SError. If we would be returning
+                                * due to single-step, clear the SS
+                                * bit so handle_exit knows what to
+                                * do after dealing with the error.
+                                */
+                               if (!__skip_instr(vcpu))
+                                       *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
                                exit_code = ARM_EXCEPTION_EL1_SERROR;
                        }
 
@@ -363,8 +384,10 @@ again:
                int ret = __vgic_v3_perform_cpuif_access(vcpu);
 
                if (ret == 1) {
-                       __skip_instr(vcpu);
-                       goto again;
+                       if (__skip_instr(vcpu))
+                               goto again;
+                       else
+                               exit_code = ARM_EXCEPTION_TRAP;
                }
 
                /* 0 falls through to be handled out of EL2 */
index ab9f5f0fb2c7fc6ada0605e31d8f73b0024ae277..6f4017046323f874e832cb07f5863283877179ba 100644 (file)
@@ -96,12 +96,6 @@ static void flush_context(unsigned int cpu)
 
        set_reserved_asid_bits();
 
-       /*
-        * Ensure the generation bump is observed before we xchg the
-        * active_asids.
-        */
-       smp_wmb();
-
        for_each_possible_cpu(i) {
                asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
                /*
@@ -117,7 +111,10 @@ static void flush_context(unsigned int cpu)
                per_cpu(reserved_asids, i) = asid;
        }
 
-       /* Queue a TLB invalidate and flush the I-cache if necessary. */
+       /*
+        * Queue a TLB invalidation for each CPU to perform on next
+        * context-switch
+        */
        cpumask_setall(&tlb_flush_pending);
 }
 
@@ -202,11 +199,18 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
        asid = atomic64_read(&mm->context.id);
 
        /*
-        * The memory ordering here is subtle. We rely on the control
-        * dependency between the generation read and the update of
-        * active_asids to ensure that we are synchronised with a
-        * parallel rollover (i.e. this pairs with the smp_wmb() in
-        * flush_context).
+        * The memory ordering here is subtle.
+        * If our ASID matches the current generation, then we update
+        * our active_asids entry with a relaxed xchg. Racing with a
+        * concurrent rollover means that either:
+        *
+        * - We get a zero back from the xchg and end up waiting on the
+        *   lock. Taking the lock synchronises with the rollover and so
+        *   we are forced to see the updated generation.
+        *
+        * - We get a valid ASID back from the xchg, which means the
+        *   relaxed xchg in flush_context will treat us as reserved
+        *   because atomic RmWs are totally ordered for a given location.
         */
        if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
            && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
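
The rewritten comment explains why the smp_wmb() can go: both the rollover and the fast path exchange on active_asids, and atomic read-modify-writes to one location are totally ordered, so a racing task either gets zero back and falls through to the lock, or is observed by the rollover and treated as reserved. A userspace C11 sketch of that two-sided relaxed-xchg pattern (an illustration only, not the kernel algorithm):

/* Userspace C11 sketch of the two-sided relaxed xchg described above. */
#include <stdatomic.h>
#include <stdio.h>

#define ASID_BITS 16

static atomic_ulong active_asid;        /* one per CPU in the real code */
static atomic_ulong asid_generation;

/* Rollover path: steal the active ASID with a relaxed xchg. */
static unsigned long flush_one_cpu(void)
{
        return atomic_exchange_explicit(&active_asid, 0,
                                        memory_order_relaxed);
}

/* Fast path: returns 1 if the current ASID can keep being used. */
static int try_fast_path(unsigned long asid)
{
        unsigned long gen = atomic_load_explicit(&asid_generation,
                                                 memory_order_relaxed);

        if ((asid ^ gen) >> ASID_BITS)
                return 0;               /* stale generation: slow path */

        if (!atomic_exchange_explicit(&active_asid, asid,
                                      memory_order_relaxed))
                return 0;               /* rollover won the race: take lock */

        return 1;
}

int main(void)
{
        unsigned long gen = 1UL << ASID_BITS;

        atomic_store(&asid_generation, gen);
        atomic_store(&active_asid, gen | 42);

        printf("fast path ok: %d\n", try_fast_path(gen | 42));
        printf("rollover stole: %#lx\n", flush_one_cpu());
        return 0;
}
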
index ca74a2aace425b95ed95ecf0e70a78188621004e..7b60d62ac5939e83c8e153ec1c3a0447565f23eb 100644 (file)
@@ -389,7 +389,7 @@ void ptdump_check_wx(void)
                .check_wx = true,
        };
 
-       walk_pgd(&st, &init_mm, 0);
+       walk_pgd(&st, &init_mm, VA_START);
        note_page(&st, 0, 0, 0);
        if (st.wx_pages || st.uxn_pages)
                pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
index 22168cd0dde73e06698bc40b166867df17a00134..9b7f89df49dbfe108da2eadc59421ce99a4432b4 100644 (file)
@@ -574,7 +574,6 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 {
        struct siginfo info;
        const struct fault_info *inf;
-       int ret = 0;
 
        inf = esr_to_fault_info(esr);
        pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n",
@@ -589,7 +588,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
                if (interrupts_enabled(regs))
                        nmi_enter();
 
-               ret = ghes_notify_sea();
+               ghes_notify_sea();
 
                if (interrupts_enabled(regs))
                        nmi_exit();
@@ -604,7 +603,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
                info.si_addr  = (void __user *)addr;
        arm64_notify_die("", regs, &info, esr);
 
-       return ret;
+       return 0;
 }
 
 static const struct fault_info fault_info[] = {
index 5960bef0170df85916d0c1ac3b65f570f0af67ea..00e7b900ca4193e83dfa7de7dd506984afe90bce 100644 (file)
@@ -476,6 +476,8 @@ void __init arm64_memblock_init(void)
 
        reserve_elfcorehdr();
 
+       high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
+
        dma_contiguous_reserve(arm64_dma_phys_limit);
 
        memblock_allow_resize();
@@ -502,7 +504,6 @@ void __init bootmem_init(void)
        sparse_init();
        zone_sizes_init(min, max);
 
-       high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
        memblock_dump_all();
 }
 
index 371c5f03a1708c172e731abb07637e369681fec1..051e71ec3335edc316817602a51d8676678c1a0f 100644 (file)
@@ -26,7 +26,7 @@
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 
-static struct kmem_cache *pgd_cache;
+static struct kmem_cache *pgd_cache __ro_after_init;
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
index aa624b4ab6557c3e22d3660819f25688d605e3ed..2240b38c2915fa725cf2c5d1afc322edd1bb47c0 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
index 1e714329fe8a186f59f7a6bf25de7393a14fb403..8a211d95821f6d5b98992a65b820da20a7a60b21 100644 (file)
@@ -166,7 +166,7 @@ int check_nmi_wdt_touched(void)
        return 1;
 }
 
-static void nmi_wdt_timer(unsigned long data)
+static void nmi_wdt_timer(struct timer_list *unused)
 {
        if (check_nmi_wdt_touched())
                nmi_wdt_keepalive();
@@ -180,8 +180,7 @@ static int __init init_nmi_wdt(void)
        nmi_wdt_start();
        nmi_active = true;
 
-       init_timer(&ntimer);
-       ntimer.function = nmi_wdt_timer;
+       timer_setup(&ntimer, nmi_wdt_timer, 0);
        ntimer.expires = jiffies + NMI_CHECK_TIMEOUT;
        add_timer(&ntimer);
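
This and several later hunks in the series convert old-style timer callbacks taking an unsigned long cookie to the timer_setup() interface, where the callback receives a struct timer_list * and, when the timer is embedded in a larger object, recovers it with from_timer() (as the KVM booke watchdog hunk further down does). A kernel-style sketch of the new pattern, meant as an illustration rather than code from this series:

/* Kernel-style sketch of the timer_list conversion pattern; builds as
 * an out-of-tree module against a >= 4.15 kernel, and only illustrates
 * timer_setup()/from_timer() usage.
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_ctx {
        struct timer_list timer;
        int counter;
};

static struct demo_ctx demo;

static void demo_timer_fn(struct timer_list *t)
{
        /* Recover the containing structure instead of casting a cookie. */
        struct demo_ctx *ctx = from_timer(ctx, t, timer);

        ctx->counter++;
        mod_timer(&ctx->timer, jiffies + HZ);
}

static int __init demo_init(void)
{
        timer_setup(&demo.timer, demo_timer_fn, 0);
        mod_timer(&demo.timer, jiffies + HZ);
        return 0;
}

static void __exit demo_exit(void)
{
        del_timer_sync(&demo.timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
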
 
index 67ee896a76a7f2f0837436e054b8146dbe7dbac3..26644e15d8540fa43cf70e47acf9ce837dd18fd5 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 3687b54bb18ed1a0f36d512af627b8085f15987b..3470c6e9c7b9ba1ca3b5962d276b42a4c2d2e35f 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index b15bf6bc0e94f46f035e8781ffa921060341fe91..14a2e9af97e9992d87821e8f11276ecfef8e57cf 100644 (file)
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += bpf_perf_event.h
index 187aed820e71feac3ffd03e021387bc892bda5e9..2f65f78792cbe5cf7219bb2228fb84e05a0b9204 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index cb5df3aad3a848e27fdccfd2c36e848b1217bc27..41a176dbb53e4f16524157bae122180d572dd4f5 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 13a97aa2285f7418d18f1396f03bf6281b580a0f..f5c6967a93bb204ff41bfdee837d3ec4bdaa94dd 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += kvm_para.h
index 1c44d3b3eba03bac62b9a69e6e560aac61a0a3ff..451bf6071c6e28036f0da81dedb35c8b184f3b5a 100644 (file)
@@ -1,5 +1,6 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += kvm_para.h
 generic-y += siginfo.h
index a23f48181fd6a4c39fa672bc9c2757ffd18e384b..442bdeee6bd7920c9d6bb4e3edefc734951d072e 100644 (file)
@@ -65,7 +65,7 @@ void __init amiga_init_sound(void)
 #endif
 }
 
-static void nosound( unsigned long ignored );
+static void nosound(struct timer_list *unused);
 static DEFINE_TIMER(sound_timer, nosound);
 
 void amiga_mksound( unsigned int hz, unsigned int ticks )
@@ -107,7 +107,7 @@ void amiga_mksound( unsigned int hz, unsigned int ticks )
 }
 
 
-static void nosound( unsigned long ignored )
+static void nosound(struct timer_list *unused)
 {
        /* turn off DMA for audio channel 2 */
        custom.dmacon = DMAF_AUD2;
index 55e55dbc2fb66410f757f635a7cc46a7c28b1287..3d07b1de7eb0aa807d9aa637b3b0cec07e4ca1e2 100644 (file)
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_NAMESPACES=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../uClinux-dist/romfs"
 # CONFIG_RD_BZIP2 is not set
 # CONFIG_RD_LZMA is not set
 # CONFIG_RD_XZ is not set
index 3717b64a620df54a46495f07a3597188b9f727ec..c2e26a44c482da3a6d87d9b26173d64e7c9ca78f 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
index 3aa571a513b5dfa1f48bf2e17c120289f1a0d246..cf6edda389719535e25d505dfdc31d885995cffb 100644 (file)
@@ -45,6 +45,8 @@ SECTIONS {
        .text : {
                HEAD_TEXT
                TEXT_TEXT
+               IRQENTRY_TEXT
+               SOFTIRQENTRY_TEXT
                SCHED_TEXT
                CPUIDLE_TEXT
                LOCK_TEXT
index 89172b8974b95444f4def01ce4104b664391eaea..625a5785804faf8d706442150e6cdcac0ee117c3 100644 (file)
@@ -16,6 +16,8 @@ SECTIONS
   .text : {
        HEAD_TEXT
        TEXT_TEXT
+       IRQENTRY_TEXT
+       SOFTIRQENTRY_TEXT
        SCHED_TEXT
        CPUIDLE_TEXT
        LOCK_TEXT
index 293990efc9173288d38313e22c81e2c93aea0909..9868270b0984487c5a83100514cd88e8ddd743cd 100644 (file)
@@ -16,6 +16,8 @@ SECTIONS
   .text : {
        HEAD_TEXT
        TEXT_TEXT
+       IRQENTRY_TEXT
+       SOFTIRQENTRY_TEXT
        SCHED_TEXT
        CPUIDLE_TEXT
        LOCK_TEXT
index d176686496410a5131047d02d345b40b8e4065b2..388780797f7d2290fc6a6a9247272243a7ab756d 100644 (file)
@@ -48,9 +48,9 @@ static unsigned long mac_bell_phasepersample;
  * some function protos
  */
 static void mac_init_asc( void );
-static void mac_nosound( unsigned long );
+static void mac_nosound(struct timer_list *);
 static void mac_quadra_start_bell( unsigned int, unsigned int, unsigned int );
-static void mac_quadra_ring_bell( unsigned long );
+static void mac_quadra_ring_bell(struct timer_list *);
 static void mac_av_start_bell( unsigned int, unsigned int, unsigned int );
 static void ( *mac_special_bell )( unsigned int, unsigned int, unsigned int );
 
@@ -216,7 +216,7 @@ void mac_mksound( unsigned int freq, unsigned int length )
 /*
  * regular ASC: stop whining ..
  */
-static void mac_nosound( unsigned long ignored )
+static void mac_nosound(struct timer_list *unused)
 {
        mac_asc_regs[ ASC_ENABLE ] = 0;
 }
@@ -270,7 +270,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig
  * already load the wave table, or at least call this one...
  * This piece keeps reloading the wave table until done.
  */
-static void mac_quadra_ring_bell( unsigned long ignored )
+static void mac_quadra_ring_bell(struct timer_list *unused)
 {
        int     i, count = mac_asc_samplespersec / HZ;
        unsigned long flags;
index 6ac763d9a3e34e3cfb0caaf0072f78c26ec8b03f..f9eaf07d29f84ab1871d49b89c11a107fb5e1149 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 99472d2ca3404d44e43d1565ff7bef7fdb30d442..97559fe0b95364f78e3eca81f951641bfbe09c26 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/atomic.h>
 #include <linux/mm_types.h>
+#include <linux/sched.h>
 
 #include <asm/bitops.h>
 #include <asm/mmu.h>
index 06609ca361150ab77529bf0999d8e258ad25d62c..2c6a6bffea3265d3f3ef1b4d04f6c64347e395e4 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 09ba7e894bad006471cf39a9924efaa41084c80f..d8787c9a499e4713a7e5092f540fe4d3ba49255b 100644 (file)
@@ -35,6 +35,3 @@ dtb-$(CONFIG_DT_NONE) += \
        bcm97435svmb.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index f5d01b31df50139eaff92f1e5e01f286a925d016..24a8efcd7b038760db523802c4fbca851a740cf4 100644 (file)
@@ -2,6 +2,3 @@
 dtb-$(CONFIG_CAVIUM_OCTEON_SOC)        += octeon_3xxx.dtb octeon_68xx.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 3eb2597a4d6c5b884c34360aa03bf6b71d5eec9c..441a3c16efb0d97842ad2326622a85563b5f7129 100644 (file)
@@ -3,6 +3,3 @@ dtb-$(CONFIG_FIT_IMAGE_FDT_BOSTON)      += boston.dtb
 
 dtb-$(CONFIG_MACH_PISTACHIO)   += pistachio_marduk.dtb
 obj-$(CONFIG_MACH_PISTACHIO)   += pistachio_marduk.dtb.o
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 035769269cbc412ba32b61a5c6488d4012a2851c..6a31759839b415d008b35e5159b2a81335de92c9 100644 (file)
@@ -3,6 +3,3 @@ dtb-$(CONFIG_JZ4740_QI_LB60)    += qi_lb60.dtb
 dtb-$(CONFIG_JZ4780_CI20)      += ci20.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 00e2e540ed3f2d230ea6615dffe12a8c3e5a5bd9..51ab9c1dff42a2553acc2ce559f5629bd41c4ed5 100644 (file)
@@ -2,6 +2,3 @@
 dtb-$(CONFIG_DT_EASY50712)     += easy50712.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 480af498a9ddba4fa68afe919722d531cb1e9e7e..3508720cb6d9e1da773e52c7ca21b2ac5cb57c30 100644 (file)
@@ -3,6 +3,3 @@ dtb-$(CONFIG_MIPS_MALTA)        += malta.dtb
 dtb-$(CONFIG_LEGACY_BOARD_SEAD3)       += sead3.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 2b99450d743344b4e2be402bba1ea6544e05a680..d630b27950f0605d58d9047cc7ab76090206dafe 100644 (file)
@@ -6,6 +6,3 @@ dtb-$(CONFIG_DT_XLP_GVP)        += xlp_gvp.dtb
 dtb-$(CONFIG_DT_XLP_RVP)       += xlp_rvp.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 6cd9c606f0255cb6678c282487cd02a85a9b68be..9e2c9faede4739853e0bb35442cb51cd2e4cca1a 100644 (file)
@@ -1,4 +1 @@
 dtb-$(CONFIG_FIT_IMAGE_FDT_NI169445)   += 169445.dtb
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                                   += dummy.o
index a139a0fbd7b794095d92ddf513875e61687d2f0a..ba9bcef8fde91dfaa03a75f61cda7c5edb92e464 100644 (file)
@@ -5,6 +5,3 @@ dtb-$(CONFIG_DTB_PIC32_NONE)            += \
                                        pic32mzda_sk.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 639adeac90af8cd68389ec168ba6b29000197c55..4451cf45b0ad04532dd108677a3b8329093c4756 100644 (file)
@@ -5,6 +5,3 @@ dtb-$(CONFIG_ATH79)                     += ar9331_dpt_module.dtb
 dtb-$(CONFIG_ATH79)                    += ar9331_dragino_ms14.dtb
 dtb-$(CONFIG_ATH79)                    += ar9331_omega.dtb
 dtb-$(CONFIG_ATH79)                    += ar9331_tl_mr3020.dtb
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 323c8bcfb602b9072d12c0cafceec8cf69da50ca..94bee5b38b53b3701072002e8d5c5939bbfb6dd9 100644 (file)
@@ -7,6 +7,3 @@ dtb-$(CONFIG_DTB_OMEGA2P)       += omega2p.dtb
 dtb-$(CONFIG_DTB_VOCORE2)      += vocore2.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 616322405ade7a1e2bc13fd7738534bceeb10d17..9987e0e378c50c6f19eb0457eae914f827f9688b 100644 (file)
@@ -2,6 +2,3 @@
 dtb-$(CONFIG_FIT_IMAGE_FDT_XILFPGA)    += nexys4ddr.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 7c8aab23bce8da59f727ff8725250a8f0d385b12..b1f66699677dbaa31931e84f702c88ad0d2feb8f 100644 (file)
@@ -16,7 +16,6 @@ generic-y += qrwlock.h
 generic-y += qspinlock.h
 generic-y += sections.h
 generic-y += segment.h
-generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
 generic-y += user.h
index 9e9e94415d08f13db779b0d63ea42692f0705c64..1a508a74d48d3f70595a2c5981b114f3e37d1061 100644 (file)
@@ -552,7 +552,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                       pmd_t *pmdp, pmd_t pmd);
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        return !!(pmd_val(pmd) & _PAGE_WRITE);
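
The MIPS hunk drops the old __HAVE_ARCH_PMD_WRITE opt-in in favour of the "#define pmd_write pmd_write" convention, which lets generic headers test #ifndef pmd_write before supplying a fallback. A standalone sketch of that convention (the _PAGE_WRITE bit value below is invented for the demo):

/* Standalone sketch of the "#define foo foo" override convention. */
#include <stdio.h>

/* What the arch header now provides: the function plus a same-name
 * macro so generic code can detect the override with #ifndef.
 */
static inline int pmd_write(unsigned long pmd)
{
        return !!(pmd & 0x4UL);         /* pretend bit 2 is _PAGE_WRITE */
}
#define pmd_write pmd_write

/* What a generic header can then do: */
#ifndef pmd_write
static inline int pmd_write(unsigned long pmd) { return 0; }    /* fallback */
#endif

int main(void)
{
        printf("pmd_write(0x4) = %d\n", pmd_write(0x4UL));
        printf("pmd_write(0x1) = %d\n", pmd_write(0x1UL));
        return 0;
}
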
diff --git a/arch/mips/include/asm/serial.h b/arch/mips/include/asm/serial.h
new file mode 100644 (file)
index 0000000..1d830c6
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 MIPS Tech, LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef __ASM__SERIAL_H
+#define __ASM__SERIAL_H
+
+#ifdef CONFIG_MIPS_GENERIC
+/*
+ * Generic kernels cannot know a correct value for all platforms at
+ * compile time. Set it to 0 to prevent 8250_early from using it.
+ */
+#define BASE_BAUD 0
+#else
+#include <asm-generic/serial.h>
+#endif
+
+#endif /* __ASM__SERIAL_H */
index a0266feba9e6d996d5469ed18fd23df081a2ab38..7a4becd8963a219331632203849998a0a3f56e03 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += ipcbuf.h
index d535edc01434117a8809fc21fb152226e0b46521..75fdeaa8c62f21a5420c963968c0188bbb459f49 100644 (file)
@@ -445,10 +445,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        int r = -EINTR;
-       sigset_t sigsaved;
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
@@ -480,8 +478,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        local_irq_enable();
 
 out:
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        return r;
 }
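
Here and in the powerpc vcpu-run hunk further down, the open-coded sigprocmask() save/restore is replaced by the kvm_sigset_activate()/kvm_sigset_deactivate() helpers. The underlying pattern is the POSIX one of applying a caller-supplied mask around a blocking region and restoring the old mask afterwards; a minimal userspace sketch of that pattern (not the KVM helpers themselves):

/* Userspace sketch of the save/apply/restore signal-mask pattern the
 * kvm_sigset_activate()/kvm_sigset_deactivate() helpers centralise.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void run_with_sigset(const sigset_t *wanted)
{
        sigset_t saved;

        if (sigprocmask(SIG_SETMASK, wanted, &saved) != 0) {
                perror("sigprocmask");
                return;
        }

        /* ... blocking work runs with the caller's mask in place ... */
        sleep(0);

        /* Always restore the original mask on the way out. */
        sigprocmask(SIG_SETMASK, &saved, NULL);
}

int main(void)
{
        sigset_t mask;

        sigemptyset(&mask);
        sigaddset(&mask, SIGUSR1);      /* block SIGUSR1 while "running" */
        run_with_sigset(&mask);
        puts("mask restored");
        return 0;
}
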
index a8103f6972cd43a63163eec228faa5b917685f83..5d89e1ec5fcc3f31feb8181f1c635284036c6ea2 100644 (file)
@@ -156,7 +156,7 @@ static const struct file_operations pvc_scroll_proc_fops = {
        .write          = pvc_scroll_proc_write,
 };
 
-void pvc_proc_timerfunc(unsigned long data)
+void pvc_proc_timerfunc(struct timer_list *unused)
 {
        if (scroll_dir < 0)
                pvc_move(DISPLAY|RIGHT);
@@ -197,7 +197,7 @@ static int __init pvc_proc_init(void)
        if (proc_entry == NULL)
                goto error;
 
-       setup_timer(&timer, pvc_proc_timerfunc, 0UL);
+       timer_setup(&timer, pvc_proc_timerfunc, 0);
 
        return 0;
 error:
index 063de44675cefbd256dde16690f6070fac0725f7..ee0bd50f754bfb0f3d81f534d599d2aada55cfb3 100644 (file)
@@ -36,10 +36,10 @@ void mips_display_message(const char *str)
        }
 }
 
-static void scroll_display_message(unsigned long unused);
+static void scroll_display_message(struct timer_list *unused);
 static DEFINE_TIMER(mips_scroll_timer, scroll_display_message);
 
-static void scroll_display_message(unsigned long unused)
+static void scroll_display_message(struct timer_list *unused)
 {
        mips_display_message(&display_string[display_count++]);
        if (display_count == max_display_count)
index c94ee54210bc489efd469493800596cd2b7061a3..81271d3af47cb1000ebfab2539efa92080a1eccc 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y      += bpf_perf_event.h
 generic-y      += siginfo.h
index ffca24da7647b80e0f45728dff8da4e9474dc65f..13a3d77b4d7bdc487b814ae2933940638b62c759 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 62286dbeb9043c6ff6eecbd5859c23fd21ce1baa..130c16ccba0a0abb135e31275eb34c564d6cd700 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 9345b44b86f036572e33721eb80e9bbbe4493aa4..f57118e1f6b4265257799ae2cf8ea356077e20b9 100644 (file)
@@ -123,8 +123,8 @@ int puts(const char *s)
        while ((nuline = strchr(s, '\n')) != NULL) {
                if (nuline != s)
                        pdc_iodc_print(s, nuline - s);
-                       pdc_iodc_print("\r\n", 2);
-                       s = nuline + 1;
+               pdc_iodc_print("\r\n", 2);
+               s = nuline + 1;
        }
        if (*s != '\0')
                pdc_iodc_print(s, strlen(s));
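
The puts() hunk above is purely an indentation fix: the two statements were never inside the if, they were only formatted as though they were. A tiny example of the same trap, which modern GCC and Clang flag with -Wmisleading-indentation:

/* Without braces only the first statement is conditional, however the
 * block is indented; build with -Wmisleading-indentation to get a
 * warning for this shape.
 */
#include <stdio.h>

int main(void)
{
        int flag = 0;

        if (flag)
                printf("conditional\n");
                printf("runs unconditionally despite the indentation\n");

        return 0;
}
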
index c980a02a52bc0dda0a23b205f59d1d86438553f2..598c8d60fa5e602cc9303e1986ada9680d64feb3 100644 (file)
@@ -35,7 +35,12 @@ struct thread_info {
 
 /* thread information allocation */
 
+#ifdef CONFIG_IRQSTACKS
+#define THREAD_SIZE_ORDER      2 /* PA-RISC requires at least 16k stack */
+#else
 #define THREAD_SIZE_ORDER      3 /* PA-RISC requires at least 32k stack */
+#endif
+
 /* Be sure to hunt all references to this down when you change the size of
  * the kernel stack */
 #define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)
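
Since THREAD_SIZE is PAGE_SIZE << THREAD_SIZE_ORDER, the new order of 2 gives a 16 KiB task stack when CONFIG_IRQSTACKS moves interrupt handling onto separate stacks, while order 3 keeps the 32 KiB stack otherwise (assuming 4 KiB pages). A quick arithmetic check:

/* Arithmetic check for the THREAD_SIZE_ORDER change, assuming 4 KiB pages. */
#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096;

        printf("order 2: %lu KiB\n", (page_size << 2) / 1024);  /* 16 KiB */
        printf("order 3: %lu KiB\n", (page_size << 3) / 1024);  /* 32 KiB */
        return 0;
}
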
index 196d2a4efb312be6d830fe2de80538d1f7aaf8f4..286ef5a5904b02d5f346dd783dbea9fdb6b35e70 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
+generic-y += bpf_perf_event.h
 generic-y += kvm_para.h
 generic-y += param.h
 generic-y += poll.h
index a4fd296c958e8e14f13a913aca50510b11eb49b7..f3cecf5117cf8ab14724f0ea3535220c3224d569 100644 (file)
@@ -878,9 +878,6 @@ ENTRY_CFI(syscall_exit_rfi)
        STREG   %r19,PT_SR7(%r16)
 
 intr_return:
-       /* NOTE: Need to enable interrupts incase we schedule. */
-       ssm     PSW_SM_I, %r0
-
        /* check for reschedule */
        mfctl   %cr30,%r1
        LDREG   TI_FLAGS(%r1),%r19      /* sched.h: TIF_NEED_RESCHED */
@@ -907,6 +904,11 @@ intr_check_sig:
        LDREG   PT_IASQ1(%r16), %r20
        cmpib,COND(=),n 0,%r20,intr_restore /* backward */
 
+       /* NOTE: We need to enable interrupts if we have to deliver
+        * signals. We used to do this earlier but it caused kernel
+        * stack overflows. */
+       ssm     PSW_SM_I, %r0
+
        copy    %r0, %r25                       /* long in_syscall = 0 */
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29                  /* Reference param save area */
@@ -958,6 +960,10 @@ intr_do_resched:
        cmpib,COND(=)   0, %r20, intr_do_preempt
        nop
 
+       /* NOTE: We need to enable interrupts if we schedule.  We used
+        * to do this earlier but it caused kernel stack overflows. */
+       ssm     PSW_SM_I, %r0
+
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29          /* Reference param save area */
 #endif
index e3a8e5e4d5de75897adcea4134f87c7246f60646..8d072c44f300c16d45ba8f4ee0c2eee6435e4ddd 100644 (file)
@@ -305,6 +305,7 @@ ENDPROC_CFI(os_hpmc)
 
 
        __INITRODATA
+       .align 4
        .export os_hpmc_size
 os_hpmc_size:
        .word .os_hpmc_end-.os_hpmc
index 27a2dd616a7d1732dc27dc332fcbd162375a7caf..c46bf29ae412f8007c8cd1e6a118a4730d4f1ff8 100644 (file)
@@ -91,7 +91,7 @@ static int pdc_console_setup(struct console *co, char *options)
 
 #define PDC_CONS_POLL_DELAY (30 * HZ / 1000)
 
-static void pdc_console_poll(unsigned long unused);
+static void pdc_console_poll(struct timer_list *unused);
 static DEFINE_TIMER(pdc_console_timer, pdc_console_poll);
 static struct tty_port tty_port;
 
@@ -135,7 +135,7 @@ static const struct tty_operations pdc_console_tty_ops = {
        .chars_in_buffer = pdc_console_tty_chars_in_buffer,
 };
 
-static void pdc_console_poll(unsigned long unused)
+static void pdc_console_poll(struct timer_list *unused)
 {
        int data, count = 0;
 
index 5a657986ebbf4bef7beff4e8c8d20f1343872347..143f90e2f9f3c631616d4af52f0fe3fa08f44af9 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/kallsyms.h>
 #include <linux/sort.h>
-#include <linux/sched.h>
 
 #include <linux/uaccess.h>
 #include <asm/assembly.h>
index 7eab4bb8abe630b14c54c3b457285b4228607dc6..66e506520505d8a3245d49d492831df5e3bbb42a 100644 (file)
@@ -16,9 +16,7 @@
 #include <linux/preempt.h>
 #include <linux/init.h>
 
-#include <asm/processor.h>
 #include <asm/delay.h>
-
 #include <asm/special_insns.h>    /* for mfctl() */
 #include <asm/processor.h> /* for boot_cpu_data */
 
index 9a677cd5997f9a891c2ecc8f6e3bbd08c5c41dbe..44697817ccc6ddc13406dc30388d06d7e8795335 100644 (file)
@@ -1005,7 +1005,6 @@ static inline int pmd_protnone(pmd_t pmd)
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         pte_write(pmd_pte(pmd))
 #define __pmd_write(pmd)       __pte_write(pmd_pte(pmd))
 #define pmd_savedwrite(pmd)    pte_savedwrite(pmd_pte(pmd))
index 7f74c282710f4c232c873119ebd2118c2307eb29..fad0e6ff460f22398b8487083cb34cb7abcb1de9 100644 (file)
 #include <linux/io.h>
 #include <asm/opal.h>
 
-/*
- * For static allocation of some of the structures.
- */
-#define IMC_MAX_PMUS                   32
-
 /*
  * Compatibility macros for IMC devices
  */
@@ -125,4 +120,5 @@ enum {
 extern int init_imc_pmu(struct device_node *parent,
                                struct imc_pmu *pmu_ptr, int pmu_id);
 extern void thread_imc_disable(void);
+extern int get_max_nest_dev(void);
 #endif /* __ASM_POWERPC_IMC_PMU_H */
index 96753f3aac6dd7e753ba9b2f9ad5e4ebba7f50aa..941c2a3f231b90686481b6e711dbded50f72eaf6 100644 (file)
@@ -180,6 +180,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp);
 extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
 extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
+extern void kvmppc_setup_partition_table(struct kvm *kvm);
 
 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                struct kvm_create_spapr_tce_64 *args);
index 73b92017b6d7b5231cd8c9d46b7986bb9131f12c..cd2fc1cc1cc7c056255b6f49effefb1e6d0cc1e6 100644 (file)
@@ -76,6 +76,7 @@ struct machdep_calls {
 
        void __noreturn (*restart)(char *cmd);
        void __noreturn (*halt)(void);
+       void            (*panic)(char *str);
        void            (*cpu_die)(void);
 
        long            (*time_init)(void); /* Optional, may be NULL */
index 257d23dbf55dce902644334280e9d72b3ff91ba8..cf00ec26303aef1415c7d5fcd7425f2149ac0295 100644 (file)
@@ -24,6 +24,7 @@ extern void reloc_got2(unsigned long);
 
 void check_for_initrd(void);
 void initmem_init(void);
+void setup_panic(void);
 #define ARCH_PANIC_TIMEOUT 180
 
 #ifdef CONFIG_PPC_PSERIES
index 0d960ef78a9a95a682fe17d8e5050e3803a57323..1a6ed5919ffdb13878ab7f4a8c2f958c8a41f166 100644 (file)
@@ -1,6 +1,7 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += param.h
 generic-y += poll.h
 generic-y += resource.h
index 610955fe8b81c528420cc43fec3c92cf80924763..679bbe714e8561b8c259f37bbfc9db8264555554 100644 (file)
@@ -102,6 +102,7 @@ _GLOBAL(__setup_cpu_power9)
        li      r0,0
        mtspr   SPRN_PSSCR,r0
        mtspr   SPRN_LPID,r0
+       mtspr   SPRN_PID,r0
        mfspr   r3,SPRN_LPCR
        LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE  | LPCR_HEIC)
        or      r3, r3, r4
@@ -126,6 +127,7 @@ _GLOBAL(__restore_cpu_power9)
        li      r0,0
        mtspr   SPRN_PSSCR,r0
        mtspr   SPRN_LPID,r0
+       mtspr   SPRN_PID,r0
        mfspr   r3,SPRN_LPCR
        LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
        or      r3, r3, r4
index 602e0fde19b4a28ca505162dc07546bb2422b988..8bdc2f96c5d6a7a29b0fd299f2e121dc6a993911 100644 (file)
@@ -735,8 +735,8 @@ static __init void cpufeatures_cpu_quirks(void)
         */
        if ((version & 0xffffff00) == 0x004e0100)
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
-       else if ((version & 0xffffefff) == 0x004e0200)
-               cur_cpu_spec->cpu_features &= ~CPU_FTR_POWER9_DD2_1;
+       else if ((version & 0xffffefff) == 0x004e0201)
+               cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
 }
 
 static void __init cpufeatures_setup_finished(void)
index 04ea5c04fd24825a6c6affc4d7d6956b30321b84..3c2c2688918fffdcfbdbb64e2ec2f1b0dfe1ccf9 100644 (file)
@@ -1462,25 +1462,6 @@ static void fadump_init_files(void)
        return;
 }
 
-static int fadump_panic_event(struct notifier_block *this,
-                             unsigned long event, void *ptr)
-{
-       /*
-        * If firmware-assisted dump has been registered then trigger
-        * firmware-assisted dump and let firmware handle everything
-        * else. If this returns, then fadump was not registered, so
-        * go through the rest of the panic path.
-        */
-       crash_fadump(NULL, ptr);
-
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block fadump_panic_block = {
-       .notifier_call = fadump_panic_event,
-       .priority = INT_MIN /* may not return; must be done last */
-};
-
 /*
  * Prepare for firmware-assisted dump.
  */
@@ -1513,9 +1494,6 @@ int __init setup_fadump(void)
                init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start);
        fadump_init_files();
 
-       atomic_notifier_chain_register(&panic_notifier_list,
-                                       &fadump_panic_block);
-
        return 1;
 }
 subsys_initcall(setup_fadump);
index 8ac0bd2bddb0c93b95dccfd61807807219913b5e..3280953a82cf63c4372735762a09804fd9c21677 100644 (file)
@@ -623,7 +623,9 @@ BEGIN_FTR_SECTION
         * NOTE, we rely on r0 being 0 from above.
         */
        mtspr   SPRN_IAMR,r0
+BEGIN_FTR_SECTION_NESTED(42)
        mtspr   SPRN_AMOR,r0
+END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
        /* save regs for local vars on new stack.
index bfdd783e39166143e205fd2424bae692fdbe5aa8..5acb5a176dbe5c8bffe6ddb7458b7d3ac2b7019f 100644 (file)
@@ -1569,16 +1569,22 @@ void arch_release_task_struct(struct task_struct *t)
  */
 int set_thread_tidr(struct task_struct *t)
 {
+       int rc;
+
        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                return -EINVAL;
 
        if (t != current)
                return -EINVAL;
 
-       t->thread.tidr = assign_thread_tidr();
-       if (t->thread.tidr < 0)
-               return t->thread.tidr;
+       if (t->thread.tidr)
+               return 0;
+
+       rc = assign_thread_tidr();
+       if (rc < 0)
+               return rc;
 
+       t->thread.tidr = rc;
        mtspr(SPRN_TIDR, t->thread.tidr);
 
        return 0;
index 2075322cd22522edf7de78e32ef2679fae8e9913..9d213542a48bb91360b7b540ef429473494ebe98 100644 (file)
@@ -704,6 +704,30 @@ int check_legacy_ioport(unsigned long base_port)
 }
 EXPORT_SYMBOL(check_legacy_ioport);
 
+static int ppc_panic_event(struct notifier_block *this,
+                             unsigned long event, void *ptr)
+{
+       /*
+        * If firmware-assisted dump has been registered then trigger
+        * firmware-assisted dump and let firmware handle everything else.
+        */
+       crash_fadump(NULL, ptr);
+       ppc_md.panic(ptr);  /* May not return */
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block ppc_panic_block = {
+       .notifier_call = ppc_panic_event,
+       .priority = INT_MIN /* may not return; must be done last */
+};
+
+void __init setup_panic(void)
+{
+       if (!ppc_md.panic)
+               return;
+       atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
+}
+
 #ifdef CONFIG_CHECK_CACHE_COHERENCY
 /*
  * For platforms that have configurable cache-coherency.  This function
@@ -848,6 +872,9 @@ void __init setup_arch(char **cmdline_p)
        /* Probe the machine type, establish ppc_md. */
        probe_machine();
 
+       /* Setup panic notifier if requested by the platform. */
+       setup_panic();
+
        /*
         * Configure ppc_md.power_save (ppc32 only, 64-bit machines do
         * it from their respective probe() function.
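
The fadump-specific notifier removed earlier in this series is replaced by a generic ppc_panic_event() that setup_panic() registers only when the platform supplies ppc_md.panic; the INT_MIN priority keeps it last on the chain. A kernel-style sketch of registering such a low-priority panic notifier (illustration only; header locations for panic_notifier_list differ between kernel versions):

/* Kernel-style sketch of a low-priority panic notifier, mirroring the
 * setup_panic() hunk above. Newer kernels declare panic_notifier_list
 * in <linux/panic_notifier.h>; in this era it came via <linux/kernel.h>.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int demo_panic_event(struct notifier_block *nb,
                            unsigned long event, void *ptr)
{
        pr_emerg("demo panic notifier: %s\n", (char *)ptr);
        return NOTIFY_DONE;
}

static struct notifier_block demo_panic_block = {
        .notifier_call = demo_panic_event,
        .priority = INT_MIN,    /* run after every other panic notifier */
};

static int __init demo_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &demo_panic_block);
        return 0;
}

static void __exit demo_exit(void)
{
        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &demo_panic_block);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
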
index e3c5f75d137c51ec94e5d73e5e4d10f9d2944b99..8cdd852aedd1e86193b036b765b6c5b6d27ac643 100644 (file)
@@ -188,7 +188,7 @@ static void tau_timeout(void * info)
        local_irq_restore(flags);
 }
 
-static void tau_timeout_smp(unsigned long unused)
+static void tau_timeout_smp(struct timer_list *unused)
 {
 
        /* schedule ourselves to be run again */
@@ -230,7 +230,7 @@ int __init TAU_init(void)
 
 
        /* first, set up the window shrinking timer */
-       setup_timer(&tau_timer, tau_timeout_smp, 0UL);
+       timer_setup(&tau_timer, tau_timeout_smp, 0);
        tau_timer.expires = jiffies + shrink_timer;
        add_timer(&tau_timer);
 
index 235319c2574e07f03c3473d66e160e6e900204e2..966097232d2147bbcd79df41354a5f973fb9b7c1 100644 (file)
@@ -1238,8 +1238,9 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
        unsigned long vpte, rpte, guest_rpte;
        int ret;
        struct revmap_entry *rev;
-       unsigned long apsize, psize, avpn, pteg, hash;
+       unsigned long apsize, avpn, pteg, hash;
        unsigned long new_idx, new_pteg, replace_vpte;
+       int pshift;
 
        hptep = (__be64 *)(old->virt + (idx << 4));
 
@@ -1298,8 +1299,8 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
                goto out;
 
        rpte = be64_to_cpu(hptep[1]);
-       psize = hpte_base_page_size(vpte, rpte);
-       avpn = HPTE_V_AVPN_VAL(vpte) & ~((psize - 1) >> 23);
+       pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
+       avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
        pteg = idx / HPTES_PER_GROUP;
        if (vpte & HPTE_V_SECONDARY)
                pteg = ~pteg;
@@ -1311,20 +1312,20 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
                offset = (avpn & 0x1f) << 23;
                vsid = avpn >> 5;
                /* We can find more bits from the pteg value */
-               if (psize < (1ULL << 23))
-                       offset |= ((vsid ^ pteg) & old_hash_mask) * psize;
+               if (pshift < 23)
+                       offset |= ((vsid ^ pteg) & old_hash_mask) << pshift;
 
-               hash = vsid ^ (offset / psize);
+               hash = vsid ^ (offset >> pshift);
        } else {
                unsigned long offset, vsid;
 
                /* We only have 40 - 23 bits of seg_off in avpn */
                offset = (avpn & 0x1ffff) << 23;
                vsid = avpn >> 17;
-               if (psize < (1ULL << 23))
-                       offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) * psize;
+               if (pshift < 23)
+                       offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift;
 
-               hash = vsid ^ (vsid << 25) ^ (offset / psize);
+               hash = vsid ^ (vsid << 25) ^ (offset >> pshift);
        }
 
        new_pteg = hash & new_hash_mask;
@@ -1801,6 +1802,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
        ssize_t nb;
        long int err, ret;
        int mmu_ready;
+       int pshift;
 
        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
@@ -1855,6 +1857,9 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                        err = -EINVAL;
                        if (!(v & HPTE_V_VALID))
                                goto out;
+                       pshift = kvmppc_hpte_base_page_shift(v, r);
+                       if (pshift <= 0)
+                               goto out;
                        lbuf += 2;
                        nb += HPTE_SIZE;
 
@@ -1869,14 +1874,18 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                                goto out;
                        }
                        if (!mmu_ready && is_vrma_hpte(v)) {
-                               unsigned long psize = hpte_base_page_size(v, r);
-                               unsigned long senc = slb_pgsize_encoding(psize);
-                               unsigned long lpcr;
+                               unsigned long senc, lpcr;
 
+                               senc = slb_pgsize_encoding(1ul << pshift);
                                kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
                                        (VRMA_VSID << SLB_VSID_SHIFT_1T);
-                               lpcr = senc << (LPCR_VRMASD_SH - 4);
-                               kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
+                               if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+                                       lpcr = senc << (LPCR_VRMASD_SH - 4);
+                                       kvmppc_update_lpcr(kvm, lpcr,
+                                                          LPCR_VRMASD);
+                               } else {
+                                       kvmppc_setup_partition_table(kvm);
+                               }
                                mmu_ready = 1;
                        }
                        ++i;
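
The rehash code now carries the base page shift (pshift) instead of the page size, so divides and multiplies by psize become shifts and a non-positive shift can flag an invalid HPTE. When psize == 1UL << pshift the two forms are identical, as the small check below confirms:

/* Sanity check for the psize -> pshift conversion: with
 * psize == 1UL << pshift, divides and multiplies by psize are exactly
 * the corresponding shifts.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned long pshift = 16;              /* e.g. a 64 KiB base page */
        unsigned long psize = 1UL << pshift;
        unsigned long offset = 0x123456789abUL;
        unsigned long vsid = 0xdeadUL, pteg = 0x77UL, mask = 0x3ffffUL;

        assert(offset / psize == offset >> pshift);
        assert(((vsid ^ pteg) & mask) * psize ==
               (((vsid ^ pteg) & mask) << pshift));

        printf("hash old: %#lx new: %#lx\n",
               vsid ^ (offset / psize), vsid ^ (offset >> pshift));
        return 0;
}
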
index 79ea3d9269dbf568904e504d78cc56850c77860d..2d46037ce93664199adee27806b8972d9130368d 100644 (file)
@@ -120,7 +120,6 @@ MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
 
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
-static void kvmppc_setup_partition_table(struct kvm *kvm);
 
 static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
                int *ip)
@@ -3574,7 +3573,7 @@ static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
        return;
 }
 
-static void kvmppc_setup_partition_table(struct kvm *kvm)
+void kvmppc_setup_partition_table(struct kvm *kvm)
 {
        unsigned long dw0, dw1;
 
index 071b87ee682f8ea85e37ae42cbb227714695c284..83b485810aea2fbfccc01718d1823f30b18e9398 100644 (file)
@@ -599,9 +599,9 @@ static void arm_next_watchdog(struct kvm_vcpu *vcpu)
        spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
 }
 
-void kvmppc_watchdog_func(unsigned long data)
+void kvmppc_watchdog_func(struct timer_list *t)
 {
-       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+       struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
        u32 tsr, new_tsr;
        int final;
 
@@ -1412,8 +1412,7 @@ int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
 {
        /* setup watchdog timer once */
        spin_lock_init(&vcpu->arch.wdt_lock);
-       setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
-                   (unsigned long)vcpu);
+       timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);
 
        /*
         * Clear DBSR.MRR to avoid guest debug interrupt as
index 6b6c53c42ac9455f2a8c4f157f402da772163336..1915e86cef6f8fc2e05852ddc7a0867eca1c560b 100644 (file)
@@ -1407,7 +1407,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        int r;
-       sigset_t sigsaved;
 
        if (vcpu->mmio_needed) {
                vcpu->mmio_needed = 0;
@@ -1448,16 +1447,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 #endif
        }
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        if (run->immediate_exit)
                r = -EINTR;
        else
                r = kvmppc_vcpu_run(run, vcpu);
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        return r;
 }
index c9de03e0c1f123de531d376d8b4d330d6f61bcdc..d469224c4ada8c23b923dc077b6249fa79ae0583 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <asm/code-patching.h>
+#include <asm/setup.h>
 
 static int __patch_instruction(unsigned int *addr, unsigned int instr)
 {
@@ -146,11 +147,8 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
         * During early early boot patch_instruction is called
         * when text_poke_area is not ready, but we still need
         * to allow patching. We just do the plain old patching
-        * We use slab_is_available and per cpu read * via this_cpu_read
-        * of text_poke_area. Per-CPU areas might not be up early
-        * this can create problems with just using this_cpu_read()
         */
-       if (!slab_is_available() || !this_cpu_read(text_poke_area))
+       if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
                return __patch_instruction(addr, instr);
 
        local_irq_save(flags);
index 3848af167df9de4b183d7cb8472b5d20ce97d0a4..640cf566e98653ab43c06744b6cf9ef76622fa1c 100644 (file)
@@ -47,7 +47,8 @@
 
 DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+static inline unsigned long  ___tlbie(unsigned long vpn, int psize,
+                                               int apsize, int ssize)
 {
        unsigned long va;
        unsigned int penc;
@@ -100,7 +101,15 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
                             : "memory");
                break;
        }
-       trace_tlbie(0, 0, va, 0, 0, 0, 0);
+       return va;
+}
+
+static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+{
+       unsigned long rb;
+
+       rb = ___tlbie(vpn, psize, apsize, ssize);
+       trace_tlbie(0, 0, rb, 0, 0, 0, 0);
 }
 
 static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
@@ -652,7 +661,7 @@ static void native_hpte_clear(void)
                if (hpte_v & HPTE_V_VALID) {
                        hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
                        hptep->v = 0;
-                       __tlbie(vpn, psize, apsize, ssize);
+                       ___tlbie(vpn, psize, apsize, ssize);
                }
        }
 
index 564fff06f5c11ed32bd1ef97b1b9ad521e9cd9a3..23ec2c5e3b782412f8b10717cee352e56cc31217 100644 (file)
@@ -122,7 +122,8 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
        return !slice_area_is_free(mm, start, end - start);
 }
 
-static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
+static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
+                               unsigned long high_limit)
 {
        unsigned long i;
 
@@ -133,15 +134,16 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
                if (!slice_low_has_vma(mm, i))
                        ret->low_slices |= 1u << i;
 
-       if (mm->context.slb_addr_limit <= SLICE_LOW_TOP)
+       if (high_limit <= SLICE_LOW_TOP)
                return;
 
-       for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++)
+       for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
                if (!slice_high_has_vma(mm, i))
                        __set_bit(i, ret->high_slices);
 }
 
-static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
+static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret,
+                               unsigned long high_limit)
 {
        unsigned char *hpsizes;
        int index, mask_index;
@@ -156,8 +158,11 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
                if (((lpsizes >> (i * 4)) & 0xf) == psize)
                        ret->low_slices |= 1u << i;
 
+       if (high_limit <= SLICE_LOW_TOP)
+               return;
+
        hpsizes = mm->context.high_slices_psize;
-       for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
+       for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
@@ -169,6 +174,10 @@ static int slice_check_fit(struct mm_struct *mm,
                           struct slice_mask mask, struct slice_mask available)
 {
        DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+       /*
+        * Make sure we only compare bits up to the max addr
+        * limit and not the full bitmap size.
+        */
        unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
 
        bitmap_and(result, mask.high_slices,
@@ -472,7 +481,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
        /* First make up a "good" mask of slices that have the right size
         * already
         */
-       slice_mask_for_size(mm, psize, &good_mask);
+       slice_mask_for_size(mm, psize, &good_mask, high_limit);
        slice_print_mask(" good_mask", good_mask);
 
        /*
@@ -497,7 +506,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 #ifdef CONFIG_PPC_64K_PAGES
        /* If we support combo pages, we can allow 64k pages in 4k slices */
        if (psize == MMU_PAGE_64K) {
-               slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+               slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
                if (fixed)
                        slice_or_mask(&good_mask, &compat_mask);
        }
@@ -530,11 +539,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
                        return newaddr;
                }
        }
-
-       /* We don't fit in the good mask, check what other slices are
+       /*
+        * We don't fit in the good mask, check what other slices are
         * empty and thus can be converted
         */
-       slice_mask_for_free(mm, &potential_mask);
+       slice_mask_for_free(mm, &potential_mask, high_limit);
        slice_or_mask(&potential_mask, &good_mask);
        slice_print_mask(" potential", potential_mask);
 
@@ -744,17 +753,18 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 {
        struct slice_mask mask, available;
        unsigned int psize = mm->context.user_psize;
+       unsigned long high_limit = mm->context.slb_addr_limit;
 
        if (radix_enabled())
                return 0;
 
        slice_range_to_mask(addr, len, &mask);
-       slice_mask_for_size(mm, psize, &available);
+       slice_mask_for_size(mm, psize, &available, high_limit);
 #ifdef CONFIG_PPC_64K_PAGES
        /* We need to account for 4k slices too */
        if (psize == MMU_PAGE_64K) {
                struct slice_mask compat_mask;
-               slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+               slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
                slice_or_mask(&available, &compat_mask);
        }
 #endif
index 46d74e81aff1b4caad4769e7686fa0a800695cd4..d183b4801bdbded832b90d2aa1a18e713f70695b 100644 (file)
@@ -763,7 +763,8 @@ emit_clear:
                        func = (u8 *) __bpf_call_base + imm;
 
                        /* Save skb pointer if we need to re-cache skb data */
-                       if (bpf_helper_changes_pkt_data(func))
+                       if ((ctx->seen & SEEN_SKB) &&
+                           bpf_helper_changes_pkt_data(func))
                                PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
 
                        bpf_jit_emit_func_call(image, ctx, (u64)func);
@@ -772,7 +773,8 @@ emit_clear:
                        PPC_MR(b2p[BPF_REG_0], 3);
 
                        /* refresh skb cache */
-                       if (bpf_helper_changes_pkt_data(func)) {
+                       if ((ctx->seen & SEEN_SKB) &&
+                           bpf_helper_changes_pkt_data(func)) {
                                /* reload skb pointer to r3 */
                                PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
                                bpf_jit_emit_skb_loads(image, ctx);
index 264b6ab11978dc7a84cd0fbb58fb2245003c1b3d..b90a21bc2f3faeb2e14384c0df62b7c53a6acda7 100644 (file)
@@ -451,7 +451,7 @@ static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
  * This routine will alternate loading the virtual counters for
  * virtual CPUs
  */
-static void cell_virtual_cntr(unsigned long data)
+static void cell_virtual_cntr(struct timer_list *unused)
 {
        int i, prev_hdw_thread, next_hdw_thread;
        u32 cpu;
@@ -555,7 +555,7 @@ static void cell_virtual_cntr(unsigned long data)
 
 static void start_virt_cntrs(void)
 {
-       setup_timer(&timer_virt_cntr, cell_virtual_cntr, 0UL);
+       timer_setup(&timer_virt_cntr, cell_virtual_cntr, 0);
        timer_virt_cntr.expires = jiffies + HZ / 10;
        add_timer(&timer_virt_cntr);
 }
@@ -587,7 +587,7 @@ static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
  * periodically based on kernel timer to switch which SPU is
  * being monitored in a round robin fashion.
  */
-static void spu_evnt_swap(unsigned long data)
+static void spu_evnt_swap(struct timer_list *unused)
 {
        int node;
        int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
@@ -677,7 +677,7 @@ static void spu_evnt_swap(unsigned long data)
 
 static void start_spu_event_swap(void)
 {
-       setup_timer(&timer_spu_event_swap, spu_evnt_swap, 0UL);
+       timer_setup(&timer_spu_event_swap, spu_evnt_swap, 0);
        timer_spu_event_swap.expires = jiffies + HZ / 25;
        add_timer(&timer_spu_event_swap);
 }
index 9e3da168d54cdcd36e3911ff040ff2ad187c92b7..1538129663658381b6b1a425dcbf582b1ed09531 100644 (file)
@@ -1415,7 +1415,7 @@ static int collect_events(struct perf_event *group, int max_count,
        int n = 0;
        struct perf_event *event;
 
-       if (!is_software_event(group)) {
+       if (group->pmu->task_ctx_nr == perf_hw_context) {
                if (n >= max_count)
                        return -1;
                ctrs[n] = group;
@@ -1423,7 +1423,7 @@ static int collect_events(struct perf_event *group, int max_count,
                events[n++] = group->hw.config;
        }
        list_for_each_entry(event, &group->sibling_list, group_entry) {
-               if (!is_software_event(event) &&
+               if (event->pmu->task_ctx_nr == perf_hw_context &&
                    event->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
index 36344117c680b9e0500b345c18f98d11cb246134..0ead3cd73caa2f8816e8c04f47cca691efba0560 100644 (file)
@@ -26,7 +26,7 @@
  */
 static DEFINE_MUTEX(nest_init_lock);
 static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
-static struct imc_pmu *per_nest_pmu_arr[IMC_MAX_PMUS];
+static struct imc_pmu **per_nest_pmu_arr;
 static cpumask_t nest_imc_cpumask;
 struct imc_pmu_ref *nest_imc_refc;
 static int nest_pmus;
@@ -286,13 +286,14 @@ static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
 static void nest_change_cpu_context(int old_cpu, int new_cpu)
 {
        struct imc_pmu **pn = per_nest_pmu_arr;
-       int i;
 
        if (old_cpu < 0 || new_cpu < 0)
                return;
 
-       for (i = 0; *pn && i < IMC_MAX_PMUS; i++, pn++)
+       while (*pn) {
                perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
+               pn++;
+       }
 }
 
 static int ppc_nest_imc_cpu_offline(unsigned int cpu)
@@ -467,7 +468,7 @@ static int nest_imc_event_init(struct perf_event *event)
         * Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
         * Get the base memory address for this cpu.
         */
-       chip_id = topology_physical_package_id(event->cpu);
+       chip_id = cpu_to_chip_id(event->cpu);
        pcni = pmu->mem_info;
        do {
                if (pcni->id == chip_id) {
@@ -524,19 +525,19 @@ static int nest_imc_event_init(struct perf_event *event)
  */
 static int core_imc_mem_init(int cpu, int size)
 {
-       int phys_id, rc = 0, core_id = (cpu / threads_per_core);
+       int nid, rc = 0, core_id = (cpu / threads_per_core);
        struct imc_mem_info *mem_info;
 
        /*
         * alloc_pages_node() will allocate memory for core in the
         * local node only.
         */
-       phys_id = topology_physical_package_id(cpu);
+       nid = cpu_to_node(cpu);
        mem_info = &core_imc_pmu->mem_info[core_id];
        mem_info->id = core_id;
 
        /* We need only vbase for core counters */
-       mem_info->vbase = page_address(alloc_pages_node(phys_id,
+       mem_info->vbase = page_address(alloc_pages_node(nid,
                                          GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
                                          __GFP_NOWARN, get_order(size)));
        if (!mem_info->vbase)
@@ -797,14 +798,14 @@ static int core_imc_event_init(struct perf_event *event)
 static int thread_imc_mem_alloc(int cpu_id, int size)
 {
        u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
-       int phys_id = topology_physical_package_id(cpu_id);
+       int nid = cpu_to_node(cpu_id);
 
        if (!local_mem) {
                /*
                 * This case could happen only once at start, since we don't
                 * free the memory in cpu offline path.
                 */
-               local_mem = page_address(alloc_pages_node(phys_id,
+               local_mem = page_address(alloc_pages_node(nid,
                                  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
                                  __GFP_NOWARN, get_order(size)));
                if (!local_mem)
@@ -1194,6 +1195,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
                kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
        kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
        kfree(pmu_ptr);
+       kfree(per_nest_pmu_arr);
        return;
 }
 
@@ -1218,6 +1220,13 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
                        return -ENOMEM;
 
                /* Needed for hotplug/migration */
+               if (!per_nest_pmu_arr) {
+                       per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
+                                               sizeof(struct imc_pmu *),
+                                               GFP_KERNEL);
+                       if (!per_nest_pmu_arr)
+                               return -ENOMEM;
+               }
                per_nest_pmu_arr[pmu_index] = pmu_ptr;
                break;
        case IMC_DOMAIN_CORE:
index e47761cdcb98fe4f13bdb3fd395b439ab9b5405a..9033c8194eda5d7d39db99af0af9e6f33811a272 100644 (file)
@@ -992,13 +992,13 @@ static void spu_calc_load(void)
        CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
 }
 
-static void spusched_wake(unsigned long data)
+static void spusched_wake(struct timer_list *unused)
 {
        mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
        wake_up_process(spusched_task);
 }
 
-static void spuloadavg_wake(unsigned long data)
+static void spuloadavg_wake(struct timer_list *unused)
 {
        mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
        spu_calc_load();
@@ -1124,8 +1124,8 @@ int __init spu_sched_init(void)
        }
        spin_lock_init(&spu_prio->runq_lock);
 
-       setup_timer(&spusched_timer, spusched_wake, 0);
-       setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);
+       timer_setup(&spusched_timer, spusched_wake, 0);
+       timer_setup(&spuloadavg_timer, spuloadavg_wake, 0);
 
        spusched_task = kthread_run(spusched_thread, NULL, "spusched");
        if (IS_ERR(spusched_task)) {
index 39a1d4225e0f7c114701e8550b073eb3aabd647d..3408f315ef48ed238a43f81c82e63a9bcb652e00 100644 (file)
@@ -361,9 +361,9 @@ static irqreturn_t kw_i2c_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static void kw_i2c_timeout(unsigned long data)
+static void kw_i2c_timeout(struct timer_list *t)
 {
-       struct pmac_i2c_host_kw *host = (struct pmac_i2c_host_kw *)data;
+       struct pmac_i2c_host_kw *host = from_timer(host, t, timeout_timer);
        unsigned long flags;
 
        spin_lock_irqsave(&host->lock, flags);
@@ -513,7 +513,7 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
        mutex_init(&host->mutex);
        init_completion(&host->complete);
        spin_lock_init(&host->lock);
-       setup_timer(&host->timeout_timer, kw_i2c_timeout, (unsigned long)host);
+       timer_setup(&host->timeout_timer, kw_i2c_timeout, 0);
 
        psteps = of_get_property(np, "AAPL,address-step", NULL);
        steps = psteps ? (*psteps) : 0x10;
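
For reference, the timer conversions in these hunks all follow the same 4.15 timer API pattern: the callback now takes a struct timer_list * and, when it needs its containing object, recovers it with from_timer() instead of casting an unsigned long cookie. A minimal sketch of that pattern, not taken from this patch (my_host, my_timeout and my_host_init are illustrative names):

    #include <linux/timer.h>
    #include <linux/spinlock.h>

    struct my_host {
            spinlock_t lock;
            struct timer_list timeout_timer;
    };

    static void my_timeout(struct timer_list *t)
    {
            /* Recover the enclosing object from the embedded timer. */
            struct my_host *host = from_timer(host, t, timeout_timer);

            spin_lock(&host->lock);
            /* ... handle the timeout ... */
            spin_unlock(&host->lock);
    }

    static void my_host_init(struct my_host *host)
    {
            spin_lock_init(&host->lock);
            /* No callback argument: the timer itself carries the context. */
            timer_setup(&host->timeout_timer, my_timeout, 0);
    }

Callbacks that need no context, such as cell_virtual_cntr() or spusched_wake() above, simply ignore the timer pointer, which is why those conversions pass 0 for the flags and drop the old 0UL data argument.
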
index 21f6531fae20fc1e010f499923cde964540b895a..465ea105b7710ecf0ff7320dcec8fd2b9775cf2e 100644 (file)
@@ -153,6 +153,22 @@ static void disable_core_pmu_counters(void)
        put_online_cpus();
 }
 
+int get_max_nest_dev(void)
+{
+       struct device_node *node;
+       u32 pmu_units = 0, type;
+
+       for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) {
+               if (of_property_read_u32(node, "type", &type))
+                       continue;
+
+               if (type == IMC_TYPE_CHIP)
+                       pmu_units++;
+       }
+
+       return pmu_units;
+}
+
 static int opal_imc_counters_probe(struct platform_device *pdev)
 {
        struct device_node *imc_dev = pdev->dev.of_node;
@@ -191,8 +207,10 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
                        break;
                }
 
-               if (!imc_pmu_create(imc_dev, pmu_count, domain))
-                       pmu_count++;
+               if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
+                       if (domain == IMC_DOMAIN_NEST)
+                               pmu_count++;
+               }
        }
 
        return 0;
index c488621dbec30f66db91ed8713212181820d7e25..aebbe95c9230bc4b85016a945303307aa96c369e 100644 (file)
@@ -135,6 +135,7 @@ int chip_to_vas_id(int chipid)
        }
        return -1;
 }
+EXPORT_SYMBOL(chip_to_vas_id);
 
 static int vas_probe(struct platform_device *pdev)
 {
index 9dabea6e14439647b726d007a63de63a853ac3d0..6244bc849469e33af7dcc5a4ac2b21dd970ac1c6 100644 (file)
@@ -104,6 +104,20 @@ static void __noreturn ps3_halt(void)
        ps3_sys_manager_halt(); /* never returns */
 }
 
+static void ps3_panic(char *str)
+{
+       DBG("%s:%d %s\n", __func__, __LINE__, str);
+
+       smp_send_stop();
+       printk("\n");
+       printk("   System does not reboot automatically.\n");
+       printk("   Please press POWER button.\n");
+       printk("\n");
+
+       while(1)
+               lv1_pause(1);
+}
+
 #if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \
     defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
 static void __init prealloc(struct ps3_prealloc *p)
@@ -255,6 +269,7 @@ define_machine(ps3) {
        .probe                          = ps3_probe,
        .setup_arch                     = ps3_setup_arch,
        .init_IRQ                       = ps3_init_IRQ,
+       .panic                          = ps3_panic,
        .get_boot_time                  = ps3_get_boot_time,
        .set_dabr                       = ps3_set_dabr,
        .calibrate_decr                 = ps3_calibrate_decr,
index 5f1beb8367acaa4562dbc647ad06a1f4c12a8f72..a8531e01265842f39e759bc59aa67f3afd461486 100644 (file)
@@ -726,6 +726,7 @@ define_machine(pseries) {
        .pcibios_fixup          = pSeries_final_fixup,
        .restart                = rtas_restart,
        .halt                   = rtas_halt,
+       .panic                  = rtas_os_term,
        .get_boot_time          = rtas_get_boot_time,
        .get_rtc_time           = rtas_get_rtc_time,
        .set_rtc_time           = rtas_set_rtc_time,
index 1b2d8cb49abb2e8ea209a511bd239dd58e710661..cab24f549e7cbdc8786c5fa5b7450f0dcda8d87c 100644 (file)
@@ -1590,7 +1590,7 @@ static void print_bug_trap(struct pt_regs *regs)
        printf("kernel BUG at %s:%u!\n",
               bug->file, bug->line);
 #else
-       printf("kernel BUG at %p!\n", (void *)bug->bug_addr);
+       printf("kernel BUG at %px!\n", (void *)bug->bug_addr);
 #endif
 #endif /* CONFIG_BUG */
 }
@@ -2329,7 +2329,7 @@ static void dump_one_paca(int cpu)
 
        p = &paca[cpu];
 
-       printf("paca for cpu 0x%x @ %p:\n", cpu, p);
+       printf("paca for cpu 0x%x @ %px:\n", cpu, p);
 
        printf(" %-*s = %s\n", 20, "possible", cpu_possible(cpu) ? "yes" : "no");
        printf(" %-*s = %s\n", 20, "present", cpu_present(cpu) ? "yes" : "no");
@@ -2945,7 +2945,7 @@ static void show_task(struct task_struct *tsk)
                (tsk->exit_state & EXIT_DEAD) ? 'E' :
                (tsk->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
 
-       printf("%p %016lx %6d %6d %c %2d %s\n", tsk,
+       printf("%px %016lx %6d %6d %c %2d %s\n", tsk,
                tsk->thread.ksp,
                tsk->pid, tsk->parent->pid,
                state, task_thread_info(tsk)->cpu,
@@ -2988,7 +2988,7 @@ static void show_pte(unsigned long addr)
 
        if (setjmp(bus_error_jmp) != 0) {
                catch_memory_errors = 0;
-               printf("*** Error dumping pte for task %p\n", tsk);
+               printf("*** Error dumping pte for task %px\n", tsk);
                return;
        }
 
@@ -3074,7 +3074,7 @@ static void show_tasks(void)
 
        if (setjmp(bus_error_jmp) != 0) {
                catch_memory_errors = 0;
-               printf("*** Error dumping task %p\n", tsk);
+               printf("*** Error dumping task %px\n", tsk);
                return;
        }
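
The %p to %px conversions in these xmon hunks track the recent change that hashes %p output in the kernel's vsnprintf(); %px prints the raw address, which a kernel debugger actually needs. A minimal sketch of the difference, with ptr standing in for any kernel pointer (illustrative only, not from this patch):

    /* ptr: placeholder for any kernel pointer inspected from xmon */
    printf("hashed, leak-resistant form: %p\n", ptr);
    printf("raw address (debug only):    %px\n", ptr);
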
 
index 18158be62a2bfbb9f82250f889b61ea4d91f2da7..970460a0b492efe9fcf6bc126e06ab11074641ce 100644 (file)
@@ -40,6 +40,7 @@ generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += sembuf.h
+generic-y += serial.h
 generic-y += setup.h
 generic-y += shmbuf.h
 generic-y += shmparam.h
index 6cbbb6a68d76c2ba846fbcdeb65d91cf27e04f05..5ad4cb622bedf02f8b77bc27e7867c3cec6c7aca 100644 (file)
 #endif
 
 #if (__SIZEOF_INT__ == 4)
-#define INT            __ASM_STR(.word)
-#define SZINT          __ASM_STR(4)
-#define LGINT          __ASM_STR(2)
+#define RISCV_INT              __ASM_STR(.word)
+#define RISCV_SZINT            __ASM_STR(4)
+#define RISCV_LGINT            __ASM_STR(2)
 #else
 #error "Unexpected __SIZEOF_INT__"
 #endif
 
 #if (__SIZEOF_SHORT__ == 2)
-#define SHORT          __ASM_STR(.half)
-#define SZSHORT                __ASM_STR(2)
-#define LGSHORT                __ASM_STR(1)
+#define RISCV_SHORT            __ASM_STR(.half)
+#define RISCV_SZSHORT          __ASM_STR(2)
+#define RISCV_LGSHORT          __ASM_STR(1)
 #else
 #error "Unexpected __SIZEOF_SHORT__"
 #endif
index e2e37c57cbeb24e19c6c48422818abb8f32af130..e65d1cd89e28bb5ae52291e8fa2d13937f28e1a9 100644 (file)
@@ -50,30 +50,30 @@ static __always_inline void atomic64_set(atomic64_t *v, long i)
  * have the AQ or RL bits set.  These don't return anything, so there's only
  * one version to worry about.
  */
-#define ATOMIC_OP(op, asm_op, c_op, I, asm_type, c_type, prefix)                               \
-static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)             \
-{                                                                                              \
-       __asm__ __volatile__ (                                                                  \
-               "amo" #asm_op "." #asm_type " zero, %1, %0"                                     \
-               : "+A" (v->counter)                                                             \
-               : "r" (I)                                                                       \
-               : "memory");                                                                    \
+#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)                             \
+static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)     \
+{                                                                                      \
+       __asm__ __volatile__ (                                                          \
+               "amo" #asm_op "." #asm_type " zero, %1, %0"                             \
+               : "+A" (v->counter)                                                     \
+               : "r" (I)                                                               \
+               : "memory");                                                            \
 }
 
 #ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I)                        \
-        ATOMIC_OP (op, asm_op, c_op, I, w,  int,   )
+#define ATOMIC_OPS(op, asm_op, I)                      \
+        ATOMIC_OP (op, asm_op, I, w,  int,   )
 #else
-#define ATOMIC_OPS(op, asm_op, c_op, I)                        \
-        ATOMIC_OP (op, asm_op, c_op, I, w,  int,   )   \
-        ATOMIC_OP (op, asm_op, c_op, I, d, long, 64)
+#define ATOMIC_OPS(op, asm_op, I)                      \
+        ATOMIC_OP (op, asm_op, I, w,  int,   ) \
+        ATOMIC_OP (op, asm_op, I, d, long, 64)
 #endif
 
-ATOMIC_OPS(add, add, +,  i)
-ATOMIC_OPS(sub, add, +, -i)
-ATOMIC_OPS(and, and, &,  i)
-ATOMIC_OPS( or,  or, |,  i)
-ATOMIC_OPS(xor, xor, ^,  i)
+ATOMIC_OPS(add, add,  i)
+ATOMIC_OPS(sub, add, -i)
+ATOMIC_OPS(and, and,  i)
+ATOMIC_OPS( or,  or,  i)
+ATOMIC_OPS(xor, xor,  i)
 
 #undef ATOMIC_OP
 #undef ATOMIC_OPS
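
To make the macro plumbing concrete: after this cleanup, the 32-bit instantiation generated by ATOMIC_OPS(add, add, i) expands (approximately, written out by hand for illustration) to:

    static __always_inline void atomic_add(int i, atomic_t *v)
    {
            __asm__ __volatile__ (
                    "amoadd.w zero, %1, %0"
                    : "+A" (v->counter)
                    : "r" (i)
                    : "memory");
    }

The dropped c_op argument never appeared in this body, which is why it can be removed from ATOMIC_OP and from the fetch variants below without changing the generated code.
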
@@ -83,7 +83,7 @@ ATOMIC_OPS(xor, xor, ^,  i)
  * There are two flavors of these: the arithmetic ops have both fetch and return
  * versions, while the logical ops only have fetch versions.
  */
-#define ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix)                   \
+#define ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, asm_type, c_type, prefix)                         \
 static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v)       \
 {                                                                                                      \
        register c_type ret;                                                                            \
@@ -103,13 +103,13 @@ static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, ato
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)                          \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w,  int,   )       \
+        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, w,  int,   )       \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)                          \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w,  int,   )       \
+        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, w,  int,   )       \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )       \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64)       \
+        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, d, long, 64)       \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
 #endif
 
@@ -126,28 +126,28 @@ ATOMIC_OPS(sub, add, +, -i, .aqrl,         )
 #undef ATOMIC_OPS
 
 #ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)                          \
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )
+#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)                                \
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w,  int,   )
 #else
-#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)                          \
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )                \
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
+#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)                                \
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w,  int,   )      \
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, d, long, 64)
 #endif
 
-ATOMIC_OPS(and, and, &,  i,      , _relaxed)
-ATOMIC_OPS(and, and, &,  i, .aq  , _acquire)
-ATOMIC_OPS(and, and, &,  i, .rl  , _release)
-ATOMIC_OPS(and, and, &,  i, .aqrl,         )
+ATOMIC_OPS(and, and, i,      , _relaxed)
+ATOMIC_OPS(and, and, i, .aq  , _acquire)
+ATOMIC_OPS(and, and, i, .rl  , _release)
+ATOMIC_OPS(and, and, i, .aqrl,         )
 
-ATOMIC_OPS( or,  or, |,  i,      , _relaxed)
-ATOMIC_OPS( or,  or, |,  i, .aq  , _acquire)
-ATOMIC_OPS( or,  or, |,  i, .rl  , _release)
-ATOMIC_OPS( or,  or, |,  i, .aqrl,         )
+ATOMIC_OPS( or,  or, i,      , _relaxed)
+ATOMIC_OPS( or,  or, i, .aq  , _acquire)
+ATOMIC_OPS( or,  or, i, .rl  , _release)
+ATOMIC_OPS( or,  or, i, .aqrl,         )
 
-ATOMIC_OPS(xor, xor, ^,  i,      , _relaxed)
-ATOMIC_OPS(xor, xor, ^,  i, .aq  , _acquire)
-ATOMIC_OPS(xor, xor, ^,  i, .rl  , _release)
-ATOMIC_OPS(xor, xor, ^,  i, .aqrl,         )
+ATOMIC_OPS(xor, xor, i,      , _relaxed)
+ATOMIC_OPS(xor, xor, i, .aq  , _acquire)
+ATOMIC_OPS(xor, xor, i, .rl  , _release)
+ATOMIC_OPS(xor, xor, i, .aqrl,         )
 
 #undef ATOMIC_OPS
 
@@ -182,13 +182,13 @@ ATOMIC_OPS(add_negative, add,  <, 0)
 #undef ATOMIC_OP
 #undef ATOMIC_OPS
 
-#define ATOMIC_OP(op, func_op, c_op, I, c_type, prefix)                                \
+#define ATOMIC_OP(op, func_op, I, c_type, prefix)                              \
 static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v)       \
 {                                                                              \
        atomic##prefix##_##func_op(I, v);                                       \
 }
 
-#define ATOMIC_FETCH_OP(op, func_op, c_op, I, c_type, prefix)                          \
+#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix)                                        \
 static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v)       \
 {                                                                                      \
        return atomic##prefix##_fetch_##func_op(I, v);                                  \
@@ -202,16 +202,16 @@ static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I)                                                \
-        ATOMIC_OP       (op, asm_op, c_op, I,  int,   )                                \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I,  int,   )                                \
+        ATOMIC_OP       (op, asm_op,       I,  int,   )                                \
+        ATOMIC_FETCH_OP (op, asm_op,       I,  int,   )                                \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I)                                                \
-        ATOMIC_OP       (op, asm_op, c_op, I,  int,   )                                \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I,  int,   )                                \
+        ATOMIC_OP       (op, asm_op,       I,  int,   )                                \
+        ATOMIC_FETCH_OP (op, asm_op,       I,  int,   )                                \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )                                \
-        ATOMIC_OP       (op, asm_op, c_op, I, long, 64)                                \
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, long, 64)                                \
+        ATOMIC_OP       (op, asm_op,       I, long, 64)                                \
+        ATOMIC_FETCH_OP (op, asm_op,       I, long, 64)                                \
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
 #endif
 
@@ -300,8 +300,13 @@ static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
 
 /*
  * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
- * {cmp,}xchg and the operations that return, so they need a barrier.  We just
- * use the other implementations directly.
+ * {cmp,}xchg and the operations that return, so they need a barrier.
+ */
+/*
+ * FIXME: atomic_cmpxchg_{acquire,release,relaxed} are all implemented by
+ * assigning the same barrier to both the LR and SC operations, but that might
+ * not make any sense.  We're waiting on a memory model specification to
+ * determine exactly what the right thing to do is here.
  */
 #define ATOMIC_OP(c_t, prefix, c_or, size, asm_or)                                             \
 static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n)         \
index 183534b7c39b7d663ea757b2d0ff346e58bae15c..c0319cbf1eec58d7ea8960259838b865c23f49a1 100644 (file)
 #define smp_wmb()      RISCV_FENCE(w,w)
 
 /*
- * These fences exist to enforce ordering around the relaxed AMOs.  The
- * documentation defines that
- * "
- *     atomic_fetch_add();
- *   is equivalent to:
- *     smp_mb__before_atomic();
- *     atomic_fetch_add_relaxed();
- *     smp_mb__after_atomic();
- * "
- * So we emit full fences on both sides.
- */
-#define __smb_mb__before_atomic()      smp_mb()
-#define __smb_mb__after_atomic()       smp_mb()
-
-/*
- * These barriers prevent accesses performed outside a spinlock from being moved
- * inside a spinlock.  Since RISC-V sets the aq/rl bits on our spinlock only
- * enforce release consistency, we need full fences here.
+ * This is a very specific barrier: it's currently only used in two places in
+ * the kernel, both in the scheduler.  See include/linux/spinlock.h for the two
+ * orderings it guarantees, but the "critical section is RCsc" guarantee
+ * mandates a barrier on RISC-V.  The sequence looks like:
+ *
+ *    lr.aq lock
+ *    sc    lock <= LOCKED
+ *    smp_mb__after_spinlock()
+ *    // critical section
+ *    lr    lock
+ *    sc.rl lock <= UNLOCKED
+ *
+ * The AQ/RL pair provides a RCpc critical section, but there's not really any
+ * way we can take advantage of that here because the ordering is only enforced
+ * on that one lock.  Thus, we're just doing a full fence.
  */
-#define smb_mb__before_spinlock()      smp_mb()
-#define smb_mb__after_spinlock()       smp_mb()
+#define smp_mb__after_spinlock()       RISCV_FENCE(rw,rw)
 
 #include <asm-generic/barrier.h>
 
index 7c281ef1d58320d24caf33d4d1af2c57f4c75d9b..f30daf26f08f44bbb1028640c0d8e2539220ab0f 100644 (file)
@@ -67,7 +67,7 @@
                : "memory");
 
 #define __test_and_op_bit(op, mod, nr, addr)                   \
-       __test_and_op_bit_ord(op, mod, nr, addr, )
+       __test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
 #define __op_bit(op, mod, nr, addr)                            \
        __op_bit_ord(op, mod, nr, addr, )
 
index c3e13764a943c6f66b80dfe54f92abb10da87962..bfc7f099ab1fea28981d2a3a1ffce8aa25e4ea4d 100644 (file)
@@ -27,8 +27,8 @@
 typedef u32 bug_insn_t;
 
 #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
-#define __BUG_ENTRY_ADDR       INT " 1b - 2b"
-#define __BUG_ENTRY_FILE       INT " %0 - 2b"
+#define __BUG_ENTRY_ADDR       RISCV_INT " 1b - 2b"
+#define __BUG_ENTRY_FILE       RISCV_INT " %0 - 2b"
 #else
 #define __BUG_ENTRY_ADDR       RISCV_PTR " 1b"
 #define __BUG_ENTRY_FILE       RISCV_PTR " %0"
@@ -38,7 +38,7 @@ typedef u32 bug_insn_t;
 #define __BUG_ENTRY                    \
        __BUG_ENTRY_ADDR "\n\t"         \
        __BUG_ENTRY_FILE "\n\t"         \
-       SHORT " %1"
+       RISCV_SHORT " %1"
 #else
 #define __BUG_ENTRY                    \
        __BUG_ENTRY_ADDR
index 0595585013b07d899bccbe98598f3750bdd39bca..efd89a88d2d0e9b2bcd639a436a6143440a24d77 100644 (file)
 
 #undef flush_icache_range
 #undef flush_icache_user_range
+#undef flush_dcache_page
 
 static inline void local_flush_icache_all(void)
 {
        asm volatile ("fence.i" ::: "memory");
 }
 
+#define PG_dcache_clean PG_arch_1
+
+static inline void flush_dcache_page(struct page *page)
+{
+       if (test_bit(PG_dcache_clean, &page->flags))
+               clear_bit(PG_dcache_clean, &page->flags);
+}
+
+/*
+ * RISC-V doesn't have an instruction to flush parts of the instruction cache,
+ * so instead we just flush the whole thing.
+ */
+#define flush_icache_range(start, end) flush_icache_all()
+#define flush_icache_user_range(vma, pg, addr, len) flush_icache_all()
+
 #ifndef CONFIG_SMP
 
-#define flush_icache_range(start, end) local_flush_icache_all()
-#define flush_icache_user_range(vma, pg, addr, len) local_flush_icache_all()
+#define flush_icache_all() local_flush_icache_all()
+#define flush_icache_mm(mm, local) flush_icache_all()
 
 #else /* CONFIG_SMP */
 
-#define flush_icache_range(start, end) sbi_remote_fence_i(0)
-#define flush_icache_user_range(vma, pg, addr, len) sbi_remote_fence_i(0)
+#define flush_icache_all() sbi_remote_fence_i(0)
+void flush_icache_mm(struct mm_struct *mm, bool local);
 
 #endif /* CONFIG_SMP */
 
+/*
+ * Bits in sys_riscv_flush_icache()'s flags argument.
+ */
+#define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
+#define SYS_RISCV_FLUSH_ICACHE_ALL   (SYS_RISCV_FLUSH_ICACHE_LOCAL)
+
 #endif /* _ASM_RISCV_CACHEFLUSH_H */
index c1f32cfcc79bbb7f35786e3ab9b9ab5b8f683df8..a82ce599b639813c9ed3ad697f217cb09a6538e1 100644 (file)
@@ -19,6 +19,8 @@
 #ifndef _ASM_RISCV_IO_H
 #define _ASM_RISCV_IO_H
 
+#include <linux/types.h>
+
 #ifdef CONFIG_MMU
 
 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
@@ -32,7 +34,7 @@ extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 #define ioremap_wc(addr, size) ioremap((addr), (size))
 #define ioremap_wt(addr, size) ioremap((addr), (size))
 
-extern void iounmap(void __iomem *addr);
+extern void iounmap(volatile void __iomem *addr);
 
 #endif /* CONFIG_MMU */
 
@@ -250,7 +252,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
                        const ctype *buf = buffer;                              \
                                                                                \
                        do {                                                    \
-                               __raw_writeq(*buf++, addr);                     \
+                               __raw_write ## len(*buf++, addr);               \
                        } while (--count);                                      \
                }                                                               \
                afence;                                                         \
@@ -266,9 +268,9 @@ __io_reads_ins(reads, u32, l, __io_br(), __io_ar())
 __io_reads_ins(ins,  u8, b, __io_pbr(), __io_par())
 __io_reads_ins(ins, u16, w, __io_pbr(), __io_par())
 __io_reads_ins(ins, u32, l, __io_pbr(), __io_par())
-#define insb(addr, buffer, count) __insb((void __iomem *)addr, buffer, count)
-#define insw(addr, buffer, count) __insw((void __iomem *)addr, buffer, count)
-#define insl(addr, buffer, count) __insl((void __iomem *)addr, buffer, count)
+#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
+#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
+#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)
 
 __io_writes_outs(writes,  u8, b, __io_bw(), __io_aw())
 __io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
@@ -280,9 +282,9 @@ __io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
 __io_writes_outs(outs,  u8, b, __io_pbw(), __io_paw())
 __io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
 __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
-#define outsb(addr, buffer, count) __outsb((void __iomem *)addr, buffer, count)
-#define outsw(addr, buffer, count) __outsw((void __iomem *)addr, buffer, count)
-#define outsl(addr, buffer, count) __outsl((void __iomem *)addr, buffer, count)
+#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count)
+#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count)
+#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)
 
 #ifdef CONFIG_64BIT
 __io_reads_ins(reads, u64, q, __io_br(), __io_ar())
index 66805cba9a27ad819a8cfc938e93f705fe432943..5df2dccdba122c4d5d02ffff7d82e8727b735ba6 100644 (file)
 
 typedef struct {
        void *vdso;
+#ifdef CONFIG_SMP
+       /* A local icache flush is needed before user execution can resume. */
+       cpumask_t icache_stale_mask;
+#endif
 } mm_context_t;
 
 #endif /* __ASSEMBLY__ */
index de1fc1631fc4367b439ce96ff21e2137ee8aa797..97424834dce2a7706bccebe505602d21fa78834b 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
 #ifndef _ASM_RISCV_MMU_CONTEXT_H
 #define _ASM_RISCV_MMU_CONTEXT_H
 
+#include <linux/mm_types.h>
 #include <asm-generic/mm_hooks.h>
 
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm,
        struct task_struct *task)
@@ -46,12 +49,54 @@ static inline void set_pgdir(pgd_t *pgd)
        csr_write(sptbr, virt_to_pfn(pgd) | SPTBR_MODE);
 }
 
+/*
+ * When necessary, performs a deferred icache flush for the given MM context,
+ * on the local CPU.  RISC-V has no direct mechanism for instruction cache
+ * shoot downs, so instead we send an IPI that informs the remote harts they
+ * need to flush their local instruction caches.  To avoid pathologically slow
+ * behavior in a common case (a bunch of single-hart processes on a many-hart
+ * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+ * executing a MM context and instead schedule a deferred local instruction
+ * cache flush to be performed before execution resumes on each hart.  This
+ * actually performs that local instruction cache flush, which implicitly only
+ * refers to the current hart.
+ */
+static inline void flush_icache_deferred(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+       unsigned int cpu = smp_processor_id();
+       cpumask_t *mask = &mm->context.icache_stale_mask;
+
+       if (cpumask_test_cpu(cpu, mask)) {
+               cpumask_clear_cpu(cpu, mask);
+               /*
+                * Ensure the remote hart's writes are visible to this hart.
+                * This pairs with a barrier in flush_icache_mm.
+                */
+               smp_mb();
+               local_flush_icache_all();
+       }
+#endif
+}
+
 static inline void switch_mm(struct mm_struct *prev,
        struct mm_struct *next, struct task_struct *task)
 {
        if (likely(prev != next)) {
+               /*
+                * Mark the current MM context as inactive, and the next as
+                * active.  This is at least used by the icache flushing
+                * routines in order to determine who should
+                * routines in order to determine who should be flushed.
+               unsigned int cpu = smp_processor_id();
+
+               cpumask_clear_cpu(cpu, mm_cpumask(prev));
+               cpumask_set_cpu(cpu, mm_cpumask(next));
+
                set_pgdir(next->pgd);
                local_flush_tlb_all();
+
+               flush_icache_deferred(next);
        }
 }
 
index 3399257780b2cc219b7c6a29a48e54f67d342865..2cbd92ed1629c00df42b8ceaffd2250b6de7413a 100644 (file)
@@ -178,28 +178,6 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
 #define pte_offset_map(dir, addr)      pte_offset_kernel((dir), (addr))
 #define pte_unmap(pte)                 ((void)(pte))
 
-/*
- * Certain architectures need to do special things when PTEs within
- * a page table are directly modified.  Thus, the following hook is
- * made available.
- */
-static inline void set_pte(pte_t *ptep, pte_t pteval)
-{
-       *ptep = pteval;
-}
-
-static inline void set_pte_at(struct mm_struct *mm,
-       unsigned long addr, pte_t *ptep, pte_t pteval)
-{
-       set_pte(ptep, pteval);
-}
-
-static inline void pte_clear(struct mm_struct *mm,
-       unsigned long addr, pte_t *ptep)
-{
-       set_pte_at(mm, addr, ptep, __pte(0));
-}
-
 static inline int pte_present(pte_t pte)
 {
        return (pte_val(pte) & _PAGE_PRESENT);
@@ -210,21 +188,22 @@ static inline int pte_none(pte_t pte)
        return (pte_val(pte) == 0);
 }
 
-/* static inline int pte_read(pte_t pte) */
-
 static inline int pte_write(pte_t pte)
 {
        return pte_val(pte) & _PAGE_WRITE;
 }
 
+static inline int pte_exec(pte_t pte)
+{
+       return pte_val(pte) & _PAGE_EXEC;
+}
+
 static inline int pte_huge(pte_t pte)
 {
        return pte_present(pte)
                && (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
 }
 
-/* static inline int pte_exec(pte_t pte) */
-
 static inline int pte_dirty(pte_t pte)
 {
        return pte_val(pte) & _PAGE_DIRTY;
@@ -311,6 +290,33 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
        return pte_val(pte_a) == pte_val(pte_b);
 }
 
+/*
+ * Certain architectures need to do special things when PTEs within
+ * a page table are directly modified.  Thus, the following hook is
+ * made available.
+ */
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+       *ptep = pteval;
+}
+
+void flush_icache_pte(pte_t pte);
+
+static inline void set_pte_at(struct mm_struct *mm,
+       unsigned long addr, pte_t *ptep, pte_t pteval)
+{
+       if (pte_present(pteval) && pte_exec(pteval))
+               flush_icache_pte(pteval);
+
+       set_pte(ptep, pteval);
+}
+
+static inline void pte_clear(struct mm_struct *mm,
+       unsigned long addr, pte_t *ptep)
+{
+       set_pte_at(mm, addr, ptep, __pte(0));
+}
+
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 static inline int ptep_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pte_t *ptep,
index 04c71d938afdbf40f93d0d5d9d89229fa5f72c56..2fd27e8ef1fd686d8cf234143174a538223acc79 100644 (file)
@@ -24,7 +24,7 @@
 
 /* FIXME: Replace this with a ticket lock, like MIPS. */
 
-#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
@@ -58,15 +58,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        }
 }
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_rmb();
-       do {
-               cpu_relax();
-       } while (arch_spin_is_locked(lock));
-       smp_acquire__after_ctrl_dep();
-}
-
 /***********************************************************/
 
 static inline void arch_read_lock(arch_rwlock_t *lock)
index 3df4932d8964faad964d0d3a3894ee0f0425c9d7..2f26989cb864bedaa1eb0bd53456ae33fcdcaabe 100644 (file)
@@ -18,7 +18,7 @@
 
 typedef unsigned long cycles_t;
 
-static inline cycles_t get_cycles(void)
+static inline cycles_t get_cycles_inline(void)
 {
        cycles_t n;
 
@@ -27,6 +27,7 @@ static inline cycles_t get_cycles(void)
                : "=r" (n));
        return n;
 }
+#define get_cycles get_cycles_inline
 
 #ifdef CONFIG_64BIT
 static inline uint64_t get_cycles64(void)
index 5ee4ae370b5e54d49f3c062d1a19c30ca3ddfec2..715b0f10af580811dfca3ba067819e07fe1e7374 100644 (file)
 
 #ifdef CONFIG_MMU
 
-/* Flush entire local TLB */
+#include <linux/mm_types.h>
+
+/*
+ * Flush entire local TLB.  'sfence.vma' implicitly fences with the instruction
+ * cache as well, so a 'fence.i' is not necessary.
+ */
 static inline void local_flush_tlb_all(void)
 {
        __asm__ __volatile__ ("sfence.vma" : : : "memory");
diff --git a/arch/riscv/include/asm/vdso-syscalls.h b/arch/riscv/include/asm/vdso-syscalls.h
new file mode 100644 (file)
index 0000000..a2ccf18
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_RISCV_VDSO_SYSCALLS_H
+#define _ASM_RISCV_VDSO_SYSCALLS_H
+
+#ifdef CONFIG_SMP
+
+/* These syscalls are only used by the vDSO and are not in the uapi. */
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
+
+#endif
+
+#endif /* _ASM_RISCV_VDSO_SYSCALLS_H */
index 602f61257553727ff87a9b077702a6407e240c30..541544d64c33b2f95a0d2f3a1f7f3e80498ea006 100644 (file)
@@ -38,4 +38,8 @@ struct vdso_data {
        (void __user *)((unsigned long)(base) + __vdso_##name);                 \
 })
 
+#ifdef CONFIG_SMP
+asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+#endif
+
 #endif /* _ASM_RISCV_VDSO_H */
index 5ded96b063526e0073e3d79a41ba8b62e21e040c..7e91f485047576b559b3a8549ec7b5fc80827ac3 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += setup.h
 generic-y += unistd.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 76af908f87c18b76502c71d31ed12925790ce816..78f670d701339055c11c8cbeef409f9edf08ba3a 100644 (file)
@@ -152,6 +152,3 @@ END(_start)
 __PAGE_ALIGNED_BSS
        /* Empty zero page */
        .balign PAGE_SIZE
-ENTRY(empty_zero_page)
-       .fill (empty_zero_page + PAGE_SIZE) - ., 1, 0x00
-END(empty_zero_page)
index 23cc81ec9e9444be312a2714b8aed2a06c5eee68..5517342487489b6ee35c4a95bbfa3d5c4a31f2aa 100644 (file)
@@ -12,4 +12,7 @@
 /*
  * Assembly functions that may be used (directly or indirectly) by modules
  */
+EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
index de7db114c31531ba244cb45c765b3fb2e1c559eb..cb7b0c63014ecbc61c8d9a2b8263a65dd1775d11 100644 (file)
 #include <asm/tlbflush.h>
 #include <asm/thread_info.h>
 
-#ifdef CONFIG_HVC_RISCV_SBI
-#include <asm/hvc_riscv_sbi.h>
-#endif
-
 #ifdef CONFIG_DUMMY_CONSOLE
 struct screen_info screen_info = {
        .orig_video_lines       = 30,
@@ -58,7 +54,12 @@ static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
 #endif /* CONFIG_CMDLINE_BOOL */
 
 unsigned long va_pa_offset;
+EXPORT_SYMBOL(va_pa_offset);
 unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
+
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
 
 /* The lucky hart to first increment this variable will boot the other cores */
 atomic_t hart_lottery;
@@ -207,13 +208,6 @@ static void __init setup_bootmem(void)
 
 void __init setup_arch(char **cmdline_p)
 {
-#if defined(CONFIG_HVC_RISCV_SBI)
-       if (likely(early_console == NULL)) {
-               early_console = &riscv_sbi_early_console_dev;
-               register_console(early_console);
-       }
-#endif
-
 #ifdef CONFIG_CMDLINE_BOOL
 #ifdef CONFIG_CMDLINE_OVERRIDE
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
index b4a71ec5906f64089e7ec613daaade6bd3fef95a..6d3962435720d1ee7fe84bf74d50ffbe16d49219 100644 (file)
@@ -38,6 +38,13 @@ enum ipi_message_type {
        IPI_MAX
 };
 
+
+/* Unsupported */
+int setup_profiling_timer(unsigned int multiplier)
+{
+       return -EINVAL;
+}
+
 irqreturn_t handle_ipi(void)
 {
        unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
@@ -108,3 +115,51 @@ void smp_send_reschedule(int cpu)
 {
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
+
+/*
+ * Performs an icache flush for the given MM context.  RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+       unsigned int cpu;
+       cpumask_t others, *mask;
+
+       preempt_disable();
+
+       /* Mark every hart's icache as needing a flush for this MM. */
+       mask = &mm->context.icache_stale_mask;
+       cpumask_setall(mask);
+       /* Flush this hart's I$ now, and mark it as flushed. */
+       cpu = smp_processor_id();
+       cpumask_clear_cpu(cpu, mask);
+       local_flush_icache_all();
+
+       /*
+        * Flush the I$ of other harts concurrently executing, and mark them as
+        * flushed.
+        */
+       cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+       local |= cpumask_empty(&others);
+       if (mm != current->active_mm || !local)
+               sbi_remote_fence_i(others.bits);
+       else {
+               /*
+                * It's assumed that at least one strongly ordered operation is
+                * performed on this hart between setting a hart's cpumask bit
+                * and scheduling this MM context on that hart.  Sending an SBI
+                * remote message will do this, but in the case where no
+                * messages are sent we still need to order this hart's writes
+                * with flush_icache_deferred().
+                */
+               smp_mb();
+       }
+
+       preempt_enable();
+}
index 4351be7d0533a6e3857169fd1de5d6cfdbd1bafc..79c78668258ede202086c072c63352ca14585903 100644 (file)
@@ -14,8 +14,8 @@
  */
 
 #include <linux/syscalls.h>
-#include <asm/cmpxchg.h>
 #include <asm/unistd.h>
+#include <asm/cacheflush.h>
 
 static long riscv_sys_mmap(unsigned long addr, unsigned long len,
                           unsigned long prot, unsigned long flags,
@@ -47,3 +47,34 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
        return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
 }
 #endif /* !CONFIG_64BIT */
+
+#ifdef CONFIG_SMP
+/*
+ * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart.  There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * sys_riscv_flush_icache() is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller.  We don't currently do anything with the address range; that's just
+ * in there for forwards compatibility.
+ */
+SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
+       uintptr_t, flags)
+{
+       struct mm_struct *mm = current->mm;
+       bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
+
+       /* Check the reserved flags. */
+       if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
+               return -EINVAL;
+
+       flush_icache_mm(mm, local);
+
+       return 0;
+}
+#endif
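
As a usage illustration (not part of the patch), a process that has just written out JIT code could reach this system call roughly as follows, assuming a RISC-V uapi that exposes __NR_arch_specific_syscall; the syscall number mirrors the definition in vdso-syscalls.h above, and a flags value of 0 asks for a flush visible to every thread. In practice the __vdso_flush_icache entry added below is the preferred path, since it can avoid the trap entirely on !SMP kernels.

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/unistd.h>

    #ifndef __NR_riscv_flush_icache
    /* Mirrors vdso-syscalls.h: arch-specific syscall slot 15. */
    #define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
    #endif

    /* Make [start, end) coherent in the instruction cache for all threads. */
    static int flush_jit_region(void *start, void *end)
    {
            return syscall(__NR_riscv_flush_icache, start, end, 0UL);
    }
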
index 4e30dc5fb593580e67eef62f8ea61c797c498cf6..a5bd6401f95e6988a376406f02b7e39cb7c1352e 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/linkage.h>
 #include <linux/syscalls.h>
 #include <asm-generic/syscalls.h>
+#include <asm/vdso.h>
 
 #undef __SYSCALL
 #define __SYSCALL(nr, call)    [nr] = (call),
@@ -22,4 +23,5 @@
 void *sys_call_table[__NR_syscalls] = {
        [0 ... __NR_syscalls - 1] = sys_ni_syscall,
 #include <asm/unistd.h>
+#include <asm/vdso-syscalls.h>
 };
index 523d0a8ac8db7f37750cf45496132d1156aceb38..324568d3392130fe5beba7bd03928a762848d377 100644 (file)
@@ -1,7 +1,12 @@
 # Copied from arch/tile/kernel/vdso/Makefile
 
 # Symbols present in the vdso
-vdso-syms = rt_sigreturn
+vdso-syms  = rt_sigreturn
+vdso-syms += gettimeofday
+vdso-syms += clock_gettime
+vdso-syms += clock_getres
+vdso-syms += getcpu
+vdso-syms += flush_icache
 
 # Files to link into the vdso
 obj-vdso = $(patsubst %, %.o, $(vdso-syms))
diff --git a/arch/riscv/kernel/vdso/clock_getres.S b/arch/riscv/kernel/vdso/clock_getres.S
new file mode 100644 (file)
index 0000000..edf7e23
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+       .text
+/* int __vdso_clock_getres(clockid_t clock_id, struct timespec *res); */
+ENTRY(__vdso_clock_getres)
+       .cfi_startproc
+       /* For now, just do the syscall. */
+       li a7, __NR_clock_getres
+       ecall
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_clock_getres)
diff --git a/arch/riscv/kernel/vdso/clock_gettime.S b/arch/riscv/kernel/vdso/clock_gettime.S
new file mode 100644 (file)
index 0000000..aac6567
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+       .text
+/* int __vdso_clock_gettime(clockid_t clock_id, struct timespec *tp); */
+ENTRY(__vdso_clock_gettime)
+       .cfi_startproc
+       /* For now, just do the syscall. */
+       li a7, __NR_clock_gettime
+       ecall
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_clock_gettime)
diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S
new file mode 100644 (file)
index 0000000..b0fbad7
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+#include <asm/vdso-syscalls.h>
+
+       .text
+/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
+ENTRY(__vdso_flush_icache)
+       .cfi_startproc
+#ifdef CONFIG_SMP
+       li a7, __NR_riscv_flush_icache
+       ecall
+#else
+       fence.i
+       li a0, 0
+#endif
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_flush_icache)
diff --git a/arch/riscv/kernel/vdso/getcpu.S b/arch/riscv/kernel/vdso/getcpu.S
new file mode 100644 (file)
index 0000000..cc7e989
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+       .text
+/* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
+ENTRY(__vdso_getcpu)
+       .cfi_startproc
+       /* For now, just do the syscall. */
+       li a7, __NR_getcpu
+       ecall
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_getcpu)
diff --git a/arch/riscv/kernel/vdso/gettimeofday.S b/arch/riscv/kernel/vdso/gettimeofday.S
new file mode 100644 (file)
index 0000000..da85d33
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+       .text
+/* int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); */
+ENTRY(__vdso_gettimeofday)
+       .cfi_startproc
+       /* For now, just do the syscall. */
+       li a7, __NR_gettimeofday
+       ecall
+       ret
+       .cfi_endproc
+ENDPROC(__vdso_gettimeofday)
index 8c9dce95c11d4f472c4b555c644c8caa63294345..cd1d47e0724ba0bd811101e0fa0eb59f7841bd5c 100644 (file)
@@ -70,8 +70,11 @@ VERSION
        LINUX_4.15 {
        global:
                __vdso_rt_sigreturn;
-               __vdso_cmpxchg32;
-               __vdso_cmpxchg64;
+               __vdso_gettimeofday;
+               __vdso_clock_gettime;
+               __vdso_clock_getres;
+               __vdso_getcpu;
+               __vdso_flush_icache;
        local: *;
        };
 }
index 1cc4ac3964b4c124864b72f3c807560d28df1461..dce8ae24c6d33b4ca3ebd0c19c3650f1ce890bde 100644 (file)
@@ -84,6 +84,7 @@ void __delay(unsigned long cycles)
        while ((unsigned long)(get_cycles() - t0) < cycles)
                cpu_relax();
 }
+EXPORT_SYMBOL(__delay);
 
 void udelay(unsigned long usecs)
 {
index 81f7d9ce6d881df0da157b0913275f49517495d4..eb22ab49b3e008ec4ab677778302d5dbbea358b1 100644 (file)
@@ -2,3 +2,4 @@ obj-y += init.o
 obj-y += fault.o
 obj-y += extable.o
 obj-y += ioremap.o
+obj-y += cacheflush.o
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
new file mode 100644 (file)
index 0000000..498c0a0
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+void flush_icache_pte(pte_t pte)
+{
+       struct page *page = pte_page(pte);
+
+       if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+               flush_icache_all();
+}
index e99194a4077ec2397aa978732e8554a2b303a6b4..70ef2724cdf61e5b2001f0ec6243b7f5e9c6bfaa 100644 (file)
@@ -85,7 +85,7 @@ EXPORT_SYMBOL(ioremap);
  *
  * Caller must ensure there is only one unmapping for the same pointer.
  */
-void iounmap(void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
        vunmap((void *)((unsigned long)addr & PAGE_MASK));
 }
index eae2c64cf69d1cc8d6d4d920d8ae4ec18b520bb4..9fdff3fe1a42aac7414c06ccea3018399906fd42 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 obj-y                          += kernel/
 obj-y                          += mm/
 obj-$(CONFIG_KVM)              += kvm/
index 6b3f41985f28e17763b0f71973d56186ed28c7dc..de54cfc6109d833017b29b6e115549aad2601d53 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # s390/Makefile
 #
@@ -6,10 +7,6 @@
 # for "archclean" and "archdep" for cleaning up and making dependencies for
 # this architecture
 #
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
 # Copyright (C) 1994 by Linus Torvalds
 #
 
index 99f1cf071304bd7686d1c26c5a7416207c802b13..b06def4a4f2f9955d9bb31c7950eeca0fce92af1 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the Linux - z/VM Monitor Stream.
 #
index ef3fb1b9201f0331d333dc991af47c84597292a7..cb6e8066b1ad64b1a65ee441e3c1fb53e5599a8b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
  * Exports appldata_register_ops() and appldata_unregister_ops() for the
index 598df5708501734307565d3d7ba7c2e8b3472f6e..e68136c3c23aa467a9e10b362888023abe9559d4 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects data related to memory management.
index 66037d2622b4075c708068a414aa4b346f8740c1..8bc14b0d1def0a6847437f7ade9a0abc4217b770 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects accumulated network statistics (Packets received/transmitted,
index 45b3178200abc184ef790458d7d9c44d717379df..433a994b1a89ef30861e86559d1849d8f6d01a16 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects misc. OS related data (CPU utilization, running processes).
index f02382ae5c48b1cd319a4cb1e755fa060f03cfdc..42a242597f346345c50d9be5422e74db79ad8496 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 SECTIONS
 {
   .rodata.compressed : {
index aed3069699bd5abf94ffbeb71a7db756b210b12f..bed227f267ae52aab3a02ec5e8a0f01767896a10 100644 (file)
@@ -1,11 +1,8 @@
 #!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
 #
 # arch/s390x/boot/install.sh
 #
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
 # Copyright (C) 1995 by Linus Torvalds
 #
 # Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
index b48e20dd94e96a52f845782c2dd134b015794004..d60798737d8669ea447ae0905b98abf64e85aea6 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
  *             Harald Freudenberger <freude@de.ibm.com>
  *
  * Derived from "crypto/aes_generic.c"
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 
 #define KMSG_COMPONENT "aes_s390"
index 36aefc07d10cda9e9b28705d057952558d86e8d3..8720e9203ecfb07beac7c878aedfc925e2b0db1b 100644 (file)
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * s390 arch random implementation.
  *
  * Copyright IBM Corp. 2017
  * Author(s): Harald Freudenberger <freude@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  */
 
 #include <linux/kernel.h>
index 992e630c227b58febc6e489f25e439979a2df4ca..436865926c26e00b0652c5330f45dd7365962703 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Crypto-API module for CRC-32 algorithms implemented with the
  * z/Architecture Vector Extension Facility.
index 0d296662bbf0aba03dbaa79de182114e315b6184..5346b5a80bb6c1bfd805b421e7fcf05e86157a12 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -6,12 +7,6 @@
  * Copyright IBM Corp. 2003, 2011
  * Author(s): Thomas Spatzier
  *           Jan Glauber (jan.glauber@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
  */
 
 #include <linux/init.h>
index 564616d48d8bd885ce31c232843f02bbed1d2b3f..3b7f96c9eead8f994e979603216fd0d190fc9e42 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Cryptographic API.
  *
index a4e903ed7e21c0ccc7ceb66944ac6b43662003ea..003932db8d12d04bfc479d17d9a6677068059293 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Cryptographic API.
  *
@@ -7,11 +8,6 @@
  *   Copyright IBM Corp. 2017
  *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *             Harald Freudenberger <freude@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  */
 
 #define KMSG_COMPONENT "paes_s390"
index 3e47c4a0f18b346f4ce9eb58ddb4928bcfe517a3..a97a1802cfb4d37d6e0ef5b9a9305aaf45febff7 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2006, 2015
  * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
index 10f2007900790919f41ac6f3d3bd425c9decebb6..d6f8258b44df381943b8c570883ace6484363edb 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * Cryptographic API.
  *
@@ -5,12 +6,6 @@
  *
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 #ifndef _CRYPTO_ARCH_S390_SHA_H
 #define _CRYPTO_ARCH_S390_SHA_H
index c7de53d8da7553d58c797d7d43de6b23101aadf6..a00c17f761c190269058d2052cc1cf40758bb0c8 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
  *   Copyright (c) Alan Smithee.
  *   Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
  *   Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 #include <crypto/internal/hash.h>
 #include <linux/init.h>
index 53c277999a2866b3aea6e9cc412ab6fc55aeb124..944aa6b237cd828fa17356b2eb961be87b57bbab 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -6,12 +7,6 @@
  * s390 Version:
  *   Copyright IBM Corp. 2005, 2011
  *   Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 #include <crypto/internal/hash.h>
 #include <linux/init.h>
index 2f4caa1ef123d1e6ce1bbf332ba045510e12ae07..b17eded532b121a76ddab6a8e9227a92fcbbfef6 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -5,12 +6,6 @@
  *
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 #include <crypto/internal/hash.h>
 #include <crypto/sha.h>
index c740f77285b2a6cf9d468b84a4c357cc8b6eba17..cf0718d121bcbb02f035f39474481947e447cf0e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -5,12 +6,6 @@
  *
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 
 #include <crypto/internal/hash.h>
index 2ee25ba252d68ab98efdcd7a525ae0b29778950d..06f601509ce983bf250ac58cf2e15c902f75b7b8 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the linux hypfs filesystem routines.
 #
index cf8a2d92467f363a6a2195f77a6d07dff2b5d3d0..43bbe63e2992c1ef525597bf4475b5f4c26d134b 100644 (file)
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  *    Hypervisor filesystem for Linux on s390.
  *
  *    Copyright IBM Corp. 2006, 2008
  *    Author(s): Michael Holzheu <holzheu@de.ibm.com>
- *    License: GPL
  */
 
 #define KMSG_COMPONENT "hypfs"
index 41c211a4d8b17023ff2fde819d8c103527c09878..0484508693287feebdaf10ffb289161e1c9f2f50 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 generic-y += asm-offsets.h
 generic-y += cacheflush.h
 generic-y += clkdev.h
index a72002056b54848103fc626338fee956c145bfc9..c2cf7bcdef9b7491a6b0c3dbc0998f0686625a51 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_S390_ALTERNATIVE_H
 #define _ASM_S390_ALTERNATIVE_H
 
index c02f4aba88a6220bfc1bc7714632d52676aa5bef..cfce6835b109fd88e74d24af3022afdc01ddc5d3 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Adjunct processor (AP) interfaces
  *
  * Copyright IBM Corp. 2017
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Author(s): Tony Krowiak <akrowia@linux.vnet.ibm.com>
  *           Martin Schwidefsky <schwidefsky@de.ibm.com>
  *           Harald Freudenberger <freude@de.ibm.com>
index 0f5bd894f4dcfcbac666e3c5521c52c960d60623..aa42a179be33a5f92592c4099f38dc951e7a7abf 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  *  S390 version
  *    Copyright IBM Corp. 1999
index 792cda339af1ae3ad25ce8fc0457f4db961a4b8a..dd08db491b89e149fc7894a5f77578db8fe950d6 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * CPU-measurement facilities
  *
  *  Copyright IBM Corp. 2012
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  *            Jan Glauber <jang@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #ifndef _ASM_S390_CPU_MF_H
 #define _ASM_S390_CPU_MF_H
index 9a3cb3983c0140110f791c14ea0dbdb8cd84159d..1a61b1b997f2a0da08882411a8b701780fffba9a 100644 (file)
@@ -194,13 +194,14 @@ struct arch_elf_state {
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE      PAGE_SIZE
 
-/*
- * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
- * space open for things that want to use the area for 32-bit pointers.
- */
-#define ELF_ET_DYN_BASE                (is_compat_task() ? 0x000400000UL : \
-                                                   0x100000000UL)
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk. 64-bit
+   tasks are aligned to 4GB. */
+#define ELF_ET_DYN_BASE (is_compat_task() ? \
+                               (STACK_TOP / 3 * 2) : \
+                               (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
index 921391f2341eb8c4e886ee1bb4b67554f2d35d91..13de80cf741c09d94a4996a5ca18d883cbe0eba7 100644 (file)
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 #ifndef _ASM_S390_KPROBES_H
 #define _ASM_S390_KPROBES_H
 /*
  *  Kernel Probes (KProbes)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright IBM Corp. 2002, 2006
  *
  * 2002-Oct    Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
index f3a9b5a445b64382c1020099b6dbcec5d9f30ede..e14f381757f67b6c0111c78c491c2c1078a7f177 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for kernel virtual machines on s390
  *
  * Copyright IBM Corp. 2008, 2009
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  */
 
index 41393052ac57e1966ca735295be5f20f58ac3c55..74eeec9c0a809bffecbb26f3875ffc5be62e7e3b 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for paravirtual devices on s390
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  */
 /*
@@ -20,8 +17,6 @@
  *
  * Copyright IBM Corp. 2007,2008
  * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
  */
 #ifndef __S390_KVM_PARA_H
 #define __S390_KVM_PARA_H
index 6de5c6cb0061a337d251fbabfe6d8a1c3769682d..672f95b12d4065b4fa023444dbd5575ca71d767e 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * livepatch.h - s390-specific Kernel Live Patching Core
  *
@@ -7,13 +8,6 @@
  *           Jiri Slaby
  */
 
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
 #ifndef ASM_LIVEPATCH_H
 #define ASM_LIVEPATCH_H
 
index f4a07f788f78b3160f9ae312e699805b47f67ebe..65154eaa3714a4e9182cb87654e7b896e7be3e2f 100644 (file)
@@ -28,7 +28,7 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_PGSTE
        mm->context.alloc_pgste = page_table_allocate_pgste ||
                test_thread_flag(TIF_PGSTE) ||
-               current->mm->context.alloc_pgste;
+               (current->mm && current->mm->context.alloc_pgste);
        mm->context.has_pgste = 0;
        mm->context.use_skey = 0;
        mm->context.use_cmma = 0;
index d6c9d1e0dc2d4bc0fe36a46109211f93682e5cfd..b9c0e361748bb46eb5dddb60f79e5de27eef51b1 100644 (file)
@@ -40,6 +40,7 @@ struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 #define perf_misc_flags(regs) perf_misc_flags(regs)
+#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
 
 /* Perf pt_regs extension for sample-data-entry indicators */
 struct perf_sf_sde_regs {
index d7fe9838084d3b2df31b26d16d6d3f4074d9a35d..0a6b0286c32e9e0a7283d9cfb66dc357fe2e36fa 100644 (file)
@@ -709,7 +709,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
        return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
 }
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
index a3788dafc0e1f2272abd0ba9c455b775e81f627a..6f70d81c40f239fde907795707c2e8ebc2bb9d47 100644 (file)
@@ -74,9 +74,14 @@ enum {
  */
 struct pt_regs 
 {
-       unsigned long args[1];
-       psw_t psw;
-       unsigned long gprs[NUM_GPRS];
+       union {
+               user_pt_regs user_regs;
+               struct {
+                       unsigned long args[1];
+                       psw_t psw;
+                       unsigned long gprs[NUM_GPRS];
+               };
+       };
        unsigned long orig_gpr2;
        unsigned int int_code;
        unsigned int int_parm;
index 8bfce3475b1c46715d6b82f1d911e82054269b86..97a0582b8d0f175942034895789c8988f24934a5 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_SEGMENT_H
 #define _ASM_SEGMENT_H
 
index ec7b476c1ac571b82e219a786ad2525a0ff9ceb2..c61b2cc1a8a86a9508fe093a607d106ff0118be3 100644 (file)
@@ -30,21 +30,20 @@ static inline void restore_access_regs(unsigned int *acrs)
        asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
 }
 
-#define switch_to(prev,next,last) do {                                 \
-       if (prev->mm) {                                                 \
-               save_fpu_regs();                                        \
-               save_access_regs(&prev->thread.acrs[0]);                \
-               save_ri_cb(prev->thread.ri_cb);                         \
-               save_gs_cb(prev->thread.gs_cb);                         \
-       }                                                               \
+#define switch_to(prev, next, last) do {                               \
+       /* save_fpu_regs() sets the CIF_FPU flag, which enforces        \
+        * a restore of the floating point / vector registers as        \
+        * soon as the next task returns to user space                  \
+        */                                                             \
+       save_fpu_regs();                                                \
+       save_access_regs(&prev->thread.acrs[0]);                        \
+       save_ri_cb(prev->thread.ri_cb);                                 \
+       save_gs_cb(prev->thread.gs_cb);                                 \
        update_cr_regs(next);                                           \
-       if (next->mm) {                                                 \
-               set_cpu_flag(CIF_FPU);                                  \
-               restore_access_regs(&next->thread.acrs[0]);             \
-               restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);  \
-               restore_gs_cb(next->thread.gs_cb);                      \
-       }                                                               \
-       prev = __switch_to(prev,next);                                  \
+       restore_access_regs(&next->thread.acrs[0]);                     \
+       restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);          \
+       restore_gs_cb(next->thread.gs_cb);                              \
+       prev = __switch_to(prev, next);                                 \
 } while (0)
 
 #endif /* __ASM_SWITCH_TO_H */
index 6bc941be6921773f566efd701a213ef44793a793..96f9a9151fde02fc6f76633d76d292f47512d364 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Access to user system call parameters and results
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #ifndef _ASM_SYSCALL_H
index a702cb9d4269240c462764878b50e02971f5d8ae..25057c118d563d46f9b45a637670c1d124a88509 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for store system information stsi
  *
  * Copyright IBM Corp. 2001, 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Ulrich Weigand <weigand@de.ibm.com>
  *              Christian Borntraeger <borntraeger@de.ibm.com>
  */
index 1807229b292f005a4a0658ac221c3cb0481a39bf..cca406fdbe51fcf9985320c10988a02025b099bc 100644 (file)
@@ -53,6 +53,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
 static inline void topology_init_early(void) { }
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline int topology_cpu_dedicated(int cpu_nr) { return 0; }
 static inline void topology_expect_change(void) { }
 
 #endif /* CONFIG_SCHED_TOPOLOGY */
index d375526c261f19a99053e7e3e06029ecde7c1983..605dc46bac5e0d182268e33246983e12b48bc41a 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_S390_VGA_H
 #define _ASM_S390_VGA_H
 
index 098f28778a13408e220e7d3b0a8a657e7e508c69..92b7c9b3e6417b50515f2cf4cdc3b4e3f3b7042e 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
diff --git a/arch/s390/include/uapi/asm/bpf_perf_event.h b/arch/s390/include/uapi/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..cefe7c7
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
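
This new uapi header ties BPF's generic bpf_user_pt_regs_t to the user_pt_regs layout exported further down in uapi/asm/ptrace.h, matching the perf_arch_bpf_user_pt_regs() define and the pt_regs union added earlier in this diff. A rough sketch of the consumer side, written from memory rather than taken from this patch (the exact field set may differ by kernel version):

    /* include/uapi/linux/bpf_perf_event.h wraps the per-arch register
     * type, so a BPF perf-event program's context carries the s390
     * user_pt_regs layout without exposing the in-kernel pt_regs.
     */
    #include <asm/bpf_perf_event.h>		/* bpf_user_pt_regs_t */

    struct bpf_perf_event_data {
    	bpf_user_pt_regs_t regs;	/* == user_pt_regs on s390 */
    	__u64 sample_period;
    };
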
index 9ad172dcd912d5763b0bf954617c9e398ad31aa8..38535a57fef8327c3b08bf20e1f8621fd93c776a 100644 (file)
@@ -6,10 +6,6 @@
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
index 0dc86b3a7cb0d6340d9de5bee6032f6bfcc0c85c..b9ab584adf43d71232ce44414e51413423fbf673 100644 (file)
@@ -4,9 +4,5 @@
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  */
index c36c97ffdc6fa24f246c41bfe993790410e4e032..84606b8cc49e47c794fb3b16ef5681de098bfc6f 100644 (file)
@@ -4,10 +4,6 @@
  *
  * Copyright 2014 IBM Corp.
  * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #ifndef __LINUX_KVM_PERF_S390_H
index 7c8564f98205a4b65163865ee1743c0c09bc3a92..d17dd9e5d51638f08ee44ad3e94d840ea0fc568d 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _ASM_S390_PERF_REGS_H
 #define _ASM_S390_PERF_REGS_H
 
index 0d23c8ff290085b43745fdc23cd18a2911624aca..543dd70e12c81d59a9987c437f1895a216c2525b 100644 (file)
 #define GPR_SIZE       8
 #define CR_SIZE                8
 
-#define STACK_FRAME_OVERHEAD    160      /* size of minimum stack frame */
+#define STACK_FRAME_OVERHEAD   160      /* size of minimum stack frame */
 
 #endif /* __s390x__ */
 
 #define ACR_SIZE       4
 
 
-#define PTRACE_OLDSETOPTIONS         21
+#define PTRACE_OLDSETOPTIONS        21
 
 #ifndef __ASSEMBLY__
 #include <linux/stddef.h>
 #include <linux/types.h>
 
-typedef union
-{
-       float   f;
-       double  d;
-        __u64   ui;
+typedef union {
+       float   f;
+       double  d;
+       __u64   ui;
        struct
        {
                __u32 hi;
@@ -197,23 +196,21 @@ typedef union
        } fp;
 } freg_t;
 
-typedef struct
-{
-       __u32   fpc;
+typedef struct {
+       __u32   fpc;
        __u32   pad;
-       freg_t  fprs[NUM_FPRS];              
+       freg_t  fprs[NUM_FPRS];
 } s390_fp_regs;
 
-#define FPC_EXCEPTION_MASK      0xF8000000
-#define FPC_FLAGS_MASK          0x00F80000
-#define FPC_DXC_MASK            0x0000FF00
-#define FPC_RM_MASK             0x00000003
+#define FPC_EXCEPTION_MASK     0xF8000000
+#define FPC_FLAGS_MASK         0x00F80000
+#define FPC_DXC_MASK           0x0000FF00
+#define FPC_RM_MASK            0x00000003
 
 /* this typedef defines how a Program Status Word looks like */
-typedef struct 
-{
-        unsigned long mask;
-        unsigned long addr;
+typedef struct {
+       unsigned long mask;
+       unsigned long addr;
 } __attribute__ ((aligned(8))) psw_t;
 
 #ifndef __s390x__
@@ -282,33 +279,40 @@ typedef struct
 /*
  * The s390_regs structure is used to define the elf_gregset_t.
  */
-typedef struct
-{
+typedef struct {
        psw_t psw;
        unsigned long gprs[NUM_GPRS];
        unsigned int  acrs[NUM_ACRS];
        unsigned long orig_gpr2;
 } s390_regs;
 
+/*
+ * The user_pt_regs structure exports the beginning of
+ * the in-kernel pt_regs structure to user space.
+ */
+typedef struct {
+       unsigned long args[1];
+       psw_t psw;
+       unsigned long gprs[NUM_GPRS];
+} user_pt_regs;
+
 /*
  * Now for the user space program event recording (trace) definitions.
  * The following structures are used only for the ptrace interface, don't
  * touch or even look at it if you don't want to modify the user-space
  * ptrace interface. In particular stay away from it for in-kernel PER.
  */
-typedef struct
-{
+typedef struct {
        unsigned long cr[NUM_CR_WORDS];
 } per_cr_words;
 
 #define PER_EM_MASK 0xE8000000UL
 
-typedef        struct
-{
+typedef struct {
 #ifdef __s390x__
-       unsigned                       : 32;
+       unsigned                       : 32;
 #endif /* __s390x__ */
-       unsigned em_branching          : 1;
+       unsigned em_branching          : 1;
        unsigned em_instruction_fetch  : 1;
        /*
         * Switching on storage alteration automatically fixes
@@ -317,44 +321,41 @@ typedef   struct
        unsigned em_storage_alteration : 1;
        unsigned em_gpr_alt_unused     : 1;
        unsigned em_store_real_address : 1;
-       unsigned                       : 3;
+       unsigned                       : 3;
        unsigned branch_addr_ctl       : 1;
-       unsigned                       : 1;
+       unsigned                       : 1;
        unsigned storage_alt_space_ctl : 1;
-       unsigned                       : 21;
+       unsigned                       : 21;
        unsigned long starting_addr;
        unsigned long ending_addr;
 } per_cr_bits;
 
-typedef struct
-{
+typedef struct {
        unsigned short perc_atmid;
        unsigned long address;
        unsigned char access_id;
 } per_lowcore_words;
 
-typedef struct
-{
-       unsigned perc_branching          : 1;
+typedef struct {
+       unsigned perc_branching          : 1;
        unsigned perc_instruction_fetch  : 1;
        unsigned perc_storage_alteration : 1;
-       unsigned perc_gpr_alt_unused     : 1;
+       unsigned perc_gpr_alt_unused     : 1;
        unsigned perc_store_real_address : 1;
-       unsigned                         : 3;
-       unsigned atmid_psw_bit_31        : 1;
-       unsigned atmid_validity_bit      : 1;
-       unsigned atmid_psw_bit_32        : 1;
-       unsigned atmid_psw_bit_5         : 1;
-       unsigned atmid_psw_bit_16        : 1;
-       unsigned atmid_psw_bit_17        : 1;
-       unsigned si                      : 2;
+       unsigned                         : 3;
+       unsigned atmid_psw_bit_31        : 1;
+       unsigned atmid_validity_bit      : 1;
+       unsigned atmid_psw_bit_32        : 1;
+       unsigned atmid_psw_bit_5         : 1;
+       unsigned atmid_psw_bit_16        : 1;
+       unsigned atmid_psw_bit_17        : 1;
+       unsigned si                      : 2;
        unsigned long address;
-       unsigned                         : 4;
-       unsigned access_id               : 4;
+       unsigned                         : 4;
+       unsigned access_id               : 4;
 } per_lowcore_bits;
 
-typedef struct
-{
+typedef struct {
        union {
                per_cr_words   words;
                per_cr_bits    bits;
@@ -364,9 +365,9 @@ typedef struct
         * the kernel always sets them to zero. To enable single
         * stepping use ptrace(PTRACE_SINGLESTEP) instead.
         */
-       unsigned  single_step       : 1;
+       unsigned  single_step       : 1;
        unsigned  instruction_fetch : 1;
-       unsigned                    : 30;
+       unsigned                    : 30;
        /*
         * These addresses are copied into cr10 & cr11 if single
         * stepping is switched off
@@ -376,11 +377,10 @@ typedef struct
        union {
                per_lowcore_words words;
                per_lowcore_bits  bits;
-       } lowcore; 
+       } lowcore;
 } per_struct;
 
-typedef struct
-{
+typedef struct {
        unsigned int  len;
        unsigned long kernel_addr;
        unsigned long process_addr;
@@ -390,12 +390,12 @@ typedef struct
  * S/390 specific non posix ptrace requests. I chose unusual values so
  * they are unlikely to clash with future ptrace definitions.
  */
-#define PTRACE_PEEKUSR_AREA           0x5000
-#define PTRACE_POKEUSR_AREA           0x5001
+#define PTRACE_PEEKUSR_AREA          0x5000
+#define PTRACE_POKEUSR_AREA          0x5001
 #define PTRACE_PEEKTEXT_AREA         0x5002
 #define PTRACE_PEEKDATA_AREA         0x5003
 #define PTRACE_POKETEXT_AREA         0x5004
-#define PTRACE_POKEDATA_AREA         0x5005
+#define PTRACE_POKEDATA_AREA         0x5005
 #define PTRACE_GET_LAST_BREAK        0x5006
 #define PTRACE_PEEK_SYSTEM_CALL       0x5007
 #define PTRACE_POKE_SYSTEM_CALL              0x5008
@@ -413,21 +413,19 @@ typedef struct
  * PT_PROT definition is loosely based on hppa bsd definition in
  * gdb/hppab-nat.c
  */
-#define PTRACE_PROT                       21
+#define PTRACE_PROT                      21
 
-typedef enum
-{
+typedef enum {
        ptprot_set_access_watchpoint,
        ptprot_set_write_watchpoint,
        ptprot_disable_watchpoint
 } ptprot_flags;
 
-typedef struct
-{
+typedef struct {
        unsigned long lowaddr;
        unsigned long hiaddr;
        ptprot_flags prot;
-} ptprot_area;                     
+} ptprot_area;
 
 /* Sequence of bytes for breakpoint illegal instruction.  */
 #define S390_BREAKPOINT     {0x0,0x1}
@@ -439,8 +437,7 @@ typedef struct
  * The user_regs_struct defines the way the user registers are
  * store on the stack for signal handling.
  */
-struct user_regs_struct
-{
+struct user_regs_struct {
        psw_t psw;
        unsigned long gprs[NUM_GPRS];
        unsigned int  acrs[NUM_ACRS];
index ec113db4eb7e2658909f80f35bc5679cf53b3189..b1b0223169839da11aeb00884d7dc901e4ab5329 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _UAPI_ASM_STHYI_H
 #define _UAPI_ASM_STHYI_H
 
index 967aad39010515cd9614e51529517712630bbf4e..2b605f7e8483675cde7cdf2e553e068afde0f950 100644 (file)
@@ -1,13 +1,9 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
 /*
  * Definitions for virtio-ccw devices.
  *
  * Copyright IBM Corp. 2013
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *  Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  */
 #ifndef __KVM_VIRTIO_CCW_H
index 4caf71714a552332a169cb6799288442984ca088..aeaaa030030e6c9869e0d93b51232176b4845ef2 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  * Copyright IBM Corp. 2004, 2005
  * Interface implementation for communication with the z/VM control program
index 137ef473584ee5e2c6e42f70857cadd4e90df3b6..d568307321fcc54f51c1b3609dffc365cdc3a65d 100644 (file)
@@ -9,20 +9,6 @@
  *            Eric Rossman (edrossma@us.ibm.com)
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef __ASM_S390_ZCRYPT_H
index 315986a06cf57f2c768b4c1a549a794af7952b46..574e77622c049c68e557d1a7413e6c161b457fb8 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 #include <linux/module.h>
 #include <asm/alternative.h>
 #include <asm/facility.h>
index f04db3779b34507f9dd38791fc89131505d5f0c3..59eea9c65d3e9e8595d509001b1c794420060887 100644 (file)
@@ -263,6 +263,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis
                return retval;
        }
 
+       groups_sort(group_info);
        retval = set_current_groups(group_info);
        put_group_info(group_info);
 
index 58b9e127b61517c3f1cbe41a76757f7db156df76..80e974adb9e8be39d4ab558495eb69d110a2b3a3 100644 (file)
@@ -1392,7 +1392,7 @@ int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
        else
                except_str = "-";
        caller = (unsigned long) entry->caller;
-       rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %p  ",
+       rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %pK  ",
                      area, sec, usec, level, except_str,
                      entry->id.fields.cpuid, (void *)caller);
        return rc;
index 3be829721cf948adc5349a342f04073a90dcf20e..b2c68fbf26346a3e9d6e626333a34c7c374bef31 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Disassemble s390 instructions.
  *
@@ -396,9 +397,14 @@ struct s390_insn *find_insn(unsigned char *code)
        unsigned char opfrag;
        int i;
 
+       /* Search the opcode offset table to find an entry which
+        * matches the beginning of the opcode. If there is no match
+        * the last entry will be used, which is the default entry for
+        * unknown instructions as well as 1-byte opcode instructions.
+        */
        for (i = 0; i < ARRAY_SIZE(opcode_offset); i++) {
                entry = &opcode_offset[i];
-               if (entry->opcode == code[0] || entry->opcode == 0)
+               if (entry->opcode == code[0])
                        break;
        }
 
@@ -543,7 +549,7 @@ void show_code(struct pt_regs *regs)
                start += opsize;
                pr_cont("%s", buffer);
                ptr = buffer;
-               ptr += sprintf(ptr, "\n\t  ");
+               ptr += sprintf(ptr, "\n          ");
                hops++;
        }
        pr_cont("\n");
index 2aa545dca4d53c1c5b2bb7c6bdc18db10fd37930..5b23c4f6e50cd452177477105b914774d67898ad 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Stack dumping functions
  *
index a316cd6999ad9712defdf46db85e16eb429aebcb..9e5f6cd8e4c2e443a2c7fb46792157933447d170 100644 (file)
@@ -180,18 +180,17 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
  */
 ENTRY(__switch_to)
        stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
-       lgr     %r1,%r2
-       aghi    %r1,__TASK_thread               # thread_struct of prev task
-       lg      %r5,__TASK_stack(%r3)           # start of kernel stack of next
-       stg     %r15,__THREAD_ksp(%r1)          # store kernel stack of prev
-       lgr     %r1,%r3
-       aghi    %r1,__TASK_thread               # thread_struct of next task
+       lghi    %r4,__TASK_stack
+       lghi    %r1,__TASK_thread
+       lg      %r5,0(%r4,%r3)                  # start of kernel stack of next
+       stg     %r15,__THREAD_ksp(%r1,%r2)      # store kernel stack of prev
        lgr     %r15,%r5
        aghi    %r15,STACK_INIT                 # end of kernel stack of next
        stg     %r3,__LC_CURRENT                # store task struct of next
        stg     %r15,__LC_KERNEL_STACK          # store end of kernel stack
-       lg      %r15,__THREAD_ksp(%r1)          # load kernel stack of next
-       mvc     __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
+       lg      %r15,__THREAD_ksp(%r1,%r3)      # load kernel stack of next
+       aghi    %r3,__TASK_pid
+       mvc     __LC_CURRENT_PID(4,%r0),0(%r3)  # store pid of next
        lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
        TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
        bzr     %r14
index 310e59e6eb4b20bb7f17debb8ec412546289baf8..8ecb8726ac4762582a6ced188444ed14ea48cf46 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    ipl/reipl/dump support for Linux on s390.
  *
index 1a6521af17514a722c02e1b032fe3514d6857a15..af3722c28fd961283ff31c578d5329b4ee74fa60 100644 (file)
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  Kernel Probes (KProbes)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright IBM Corp. 2002, 2006
  *
  * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
index bf9622f0e6b16aa291037d880f8ffe3ec75c1046..452502f9a0d986d4f8f97295143146cc26569c30 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Linux Guest Relocation (LGR) detection
  *
index 7b87991416fd6d882e7edf3f52b6f4af6fc45ad3..b7abfad4fd7df5583b19867dda1f81d22b81dcaa 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  Kernel module help for s390.
  *
@@ -8,20 +9,6 @@
  *
  *  based on i386 version
  *    Copyright (C) 2001 Rusty Russell.
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/module.h>
 #include <linux/elf.h>
index 6ff169253caeea0be88da521b84b796a72accb6e..c7a627620e5ebc4a7050167f15201abcad3eaf6e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *   Machine check handler
  *
index 746d034233336f3804923a9340288d0f595f0211..cc085e2d2ce9907690fbe0912dd301ab44e8171d 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance event support for s390x - CPU-measurement Counter Facility
  *
  *  Copyright IBM Corp. 2012, 2017
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #define KMSG_COMPONENT "cpum_cf"
 #define pr_fmt(fmt)    KMSG_COMPONENT ": " fmt
index 227b38bd82c94f211392348ec03dd146549d19c4..1c9ddd7aa5ec8fd32ee626d036a3c3ea6ed79362 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance event support for the System z CPU-measurement Sampling Facility
  *
  * Copyright IBM Corp. 2013
  * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #define KMSG_COMPONENT "cpum_sf"
 #define pr_fmt(fmt)    KMSG_COMPONENT ": " fmt
index 93a386f4a3b5a4533e9b0d52c1db2548a72e20ef..0d770e513abf404ff2cea26f7c01931b4d60cf4b 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance event support for s390x
  *
  *  Copyright IBM Corp. 2012, 2013
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #define KMSG_COMPONENT "perf"
 #define pr_fmt(fmt)    KMSG_COMPONENT ": " fmt
index f8603ebed669b6501de788e5c2c6cbb902023f88..54e2d634b849e128c1ec1d1ccc15c600978ed50b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 #include <linux/perf_event.h>
 #include <linux/perf_regs.h>
 #include <linux/kernel.h>
index 26c0523c14882d1b967ef8cb81a5cb6b387017a2..cd3df5514552cc262dee1d1b99260d0ee089668b 100644 (file)
@@ -1650,6 +1650,14 @@ static const struct user_regset s390_compat_regsets[] = {
                .get = s390_gs_cb_get,
                .set = s390_gs_cb_set,
        },
+       {
+               .core_note_type = NT_S390_GS_BC,
+               .n = sizeof(struct gs_cb) / sizeof(__u64),
+               .size = sizeof(__u64),
+               .align = sizeof(__u64),
+               .get = s390_gs_bc_get,
+               .set = s390_gs_bc_set,
+       },
        {
                .core_note_type = NT_S390_RI_CB,
                .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
index 090053cf279bb1d7077082dcc1a90ababb716478..793da97f9a6e53716e415bbc5e68cf844ce4cb1d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  S390 version
  *    Copyright IBM Corp. 1999, 2012
index cd4334e80b64cdb7908842f44015183be17c62f2..b8c1a85bcf2de75eccba0b5b86eb2870bd4b71b6 100644 (file)
@@ -55,6 +55,7 @@
 #include <asm/sigp.h>
 #include <asm/idle.h>
 #include <asm/nmi.h>
+#include <asm/topology.h>
 #include "entry.h"
 
 enum {
index e66687dc61446dc929c4450a7c887740dca11595..460dcfba7d4ec08db7de61942ea387ef38579a99 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Stack trace management functions
  *
index 12981e197f0125dcaea9f0bcb13f49207be00cec..80b862e9c53c6b108e611935ab8dd7c9b794ff13 100644 (file)
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * store hypervisor information instruction emulation functions.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Copyright IBM Corp. 2016
  * Author(s): Janosch Frank <frankja@linux.vnet.ibm.com>
  */
index 308a7b63348b3f1950c979dd947d22466f89059f..f7fc633855534cbc0ab53c0d00f26aed59c1a1ad 100644 (file)
@@ -370,10 +370,10 @@ SYSCALL(sys_recvmmsg,compat_sys_recvmmsg)
 SYSCALL(sys_sendmmsg,compat_sys_sendmmsg)
 SYSCALL(sys_socket,sys_socket)
 SYSCALL(sys_socketpair,compat_sys_socketpair)          /* 360 */
-SYSCALL(sys_bind,sys_bind)
-SYSCALL(sys_connect,sys_connect)
+SYSCALL(sys_bind,compat_sys_bind)
+SYSCALL(sys_connect,compat_sys_connect)
 SYSCALL(sys_listen,sys_listen)
-SYSCALL(sys_accept4,sys_accept4)
+SYSCALL(sys_accept4,compat_sys_accept4)
 SYSCALL(sys_getsockopt,compat_sys_getsockopt)          /* 365 */
 SYSCALL(sys_setsockopt,compat_sys_setsockopt)
 SYSCALL(sys_getsockname,compat_sys_getsockname)
index 5cbd52169348faf9c09bc258c2cd0072e0089ee0..cf561160ea887f9b6395e1d5ec30f9ee02aba77d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Time of day based timer functions.
  *
@@ -523,7 +524,7 @@ static void __init stp_reset(void)
        }
 }
 
-static void stp_timeout(unsigned long dummy)
+static void stp_timeout(struct timer_list *unused)
 {
        queue_work(time_sync_wq, &stp_work);
 }
@@ -532,7 +533,7 @@ static int __init stp_init(void)
 {
        if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
                return 0;
-       setup_timer(&stp_timer, stp_timeout, 0UL);
+       timer_setup(&stp_timer, stp_timeout, 0);
        time_init_wq();
        if (!stp_online)
                return 0;
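
This hunk is part of the tree-wide timer API conversion: setup_timer() with an "unsigned long" cookie becomes timer_setup(), and the callback receives the timer_list pointer itself (the same conversion shows up again in arch/s390/mm/cmm.c further down). The stp_timer case needs no container lookup because it is a free-standing static timer; a minimal sketch of the general pattern with an embedded timer, using illustrative names that are not from this patch:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    struct my_dev {				/* hypothetical container */
    	struct timer_list timer;
    	int pending;
    };

    /* New-style callback: gets the timer_list pointer instead of an
     * "unsigned long data" cookie; from_timer() recovers the container.
     */
    static void my_timer_fn(struct timer_list *t)
    {
    	struct my_dev *dev = from_timer(dev, t, timer);

    	dev->pending = 0;
    }

    static void my_dev_start(struct my_dev *dev)
    {
    	/* was: setup_timer(&dev->timer, my_timer_fn, (unsigned long)dev); */
    	timer_setup(&dev->timer, my_timer_fn, 0);
    	mod_timer(&dev->timer, jiffies + HZ);
    }
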
index f9b393d4a078365ff8f83db8feaae839a20e0274..4d5b65e527b5495f17598ea20eb22bfb2c5ff754 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2011
  *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
index 39a218703c50add3defe9aa2ce5a85f88188e776..f3a1c7c6824ef0da8933fbb64864fdf5b97bd99a 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * vdso setup for s390
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #include <linux/init.h>
index eca3f001f081309c88372de459a6dfcccd754e7d..f61df5253c23c55ed26b357a645b16f4ab2e26a4 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_getres() for 32 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index a5769b83d90e687f08175af96ded9a4893ebd2d2..2d6ec3abe095ea30bac898e85fd4cf4faf06bbe5 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_gettime() for 32 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index 63b86dceb0bfec0f72886d96230e5248620f8728..aa8bf13a2edb1f77c861ed6de0acc2a2836e26fa 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of gettimeofday() for 32 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index c8513deb8c663f51fa4f4bc2b2299ea24f1aaefd..faf5213b15dfae9536f2583b601c1b4610558249 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_getres() for 64 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index 5d7b56b49458d03ba885f105a9c07bcdecea019a..6046b3bfca4622ea87bd1759bfa36e3f2d5b6d89 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_gettime() for 64 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index b02e62f3bc12d4ffb3850aff587d20ad944f4688..cc9dbc27da6fbcd22e865dd502be7f8bd515a11c 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of gettimeofday() for 64 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index 79a071e4357e4bc51f8813427d0f0ffa4c5ba56c..db19d0680a0afdf34b85eddf418a7e822dcad0a7 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
  * Here we can supply some information useful to userland.
index dd7178fbb4f3bd3f32955eeacf6e640f371e06a3..f24395a0191828ec7a638042632c69d4f4fcb939 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Virtual cpu timer based timer functions.
  *
index 6048b1c6e58062bac1258b003a4ebc3814896712..05ee90a5ea08bdb50eb3b9f2b77e70349f7e40bf 100644 (file)
@@ -1,10 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
 # Makefile for kernel virtual machines on s390
 #
 # Copyright IBM Corp. 2008
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (version 2 only)
-# as published by the Free Software Foundation.
 
 KVM := ../../../virt/kvm
 common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o  $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o
index d93a2c0474bf67e45698882ecb5f36f0d8688a7d..89aa114a2cbada0989cec25757ec93daed36d064 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling diagnose instructions
  *
  * Copyright IBM Corp. 2008, 2011
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
index bec42b852246f0c44f88acf20ae36c51a36c3d20..f4c51756c46239cb4246ae7bd2e0aeb166d46383 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * access guest memory
  *
  * Copyright IBM Corp. 2008, 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  */
 
index bcbd86621d018085d036f43d815ae1c97791951c..b5f3e82006d0b2e2d8806e8a7e5e58e559255e5b 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * kvm guest debug support
  *
  * Copyright IBM Corp. 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
  */
 #include <linux/kvm_host.h>
index 8fe034beb623217f42bc990c58dea9984426ef30..9c7d707158622e7f0743570db60599fa95a9de3b 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * in-kernel handling for sie intercepts
  *
  * Copyright IBM Corp. 2008, 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
index fa557372d600a0283663635ff198895cfad91709..024ad8bcc51655e98ffed300817bc0e06e051cf7 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling kvm guest interrupts
  *
  * Copyright IBM Corp. 2008, 2015
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  */
 
index d98e4159643df457a4dfeb50e93ab77d53291698..484608c71dd01185b88f9db1a11e3f523bcefebf 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * s390 irqchip routines
  *
  * Copyright IBM Corp. 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  */
 #ifndef __KVM_IRQ_H
index 98ad8b9e036093c8a784cfc0dfd3887e925c6357..ec8b68e97d3cd4755074463e467a82474471e7c6 100644 (file)
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * hosting zSeries kernel virtual machines
+ * hosting IBM Z kernel virtual machines (s390x)
  *
- * Copyright IBM Corp. 2008, 2009
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
+ * Copyright IBM Corp. 2008, 2017
  *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
@@ -3372,7 +3369,6 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int rc;
-       sigset_t sigsaved;
 
        if (kvm_run->immediate_exit)
                return -EINTR;
@@ -3382,8 +3378,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                return 0;
        }
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
                kvm_s390_vcpu_start(vcpu);
@@ -3417,8 +3412,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        disable_cpu_timer_accounting(vcpu);
        store_regs(vcpu, kvm_run);
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        vcpu->stat.exit_userspace++;
        return rc;
@@ -3811,6 +3805,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                        r = -EINVAL;
                        break;
                }
+               /* do not use irq_state.flags, it will break old QEMUs */
                r = kvm_s390_set_irq_state(vcpu,
                                           (void __user *) irq_state.buf,
                                           irq_state.len);
@@ -3826,6 +3821,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                        r = -EINVAL;
                        break;
                }
+               /* do not use irq_state.flags, it will break old QEMUs */
                r = kvm_s390_get_irq_state(vcpu,
                                           (__u8 __user *)  irq_state.buf,
                                           irq_state.len);
index 10d65dfbc306736bc31e1a7f363c2b48516c52e7..5e46ba429bcb4dfe4345f531339557b6af71ef40 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for kvm on s390
  *
  * Copyright IBM Corp. 2008, 2009
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
index c954ac49eee47158ac27bd1d16ba9dbcab7e25a3..572496c688cc0c647bd220310bfdc6e9635d4723 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling privileged instructions
  *
  * Copyright IBM Corp. 2008, 2013
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
@@ -235,8 +232,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
                VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
                return -EAGAIN;
        }
-       if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
-               return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        return 0;
 }
 
@@ -247,6 +242,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
        int reg1, reg2;
        int rc;
 
+       if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+               return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;
@@ -276,6 +274,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
        int reg1, reg2;
        int rc;
 
+       if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+               return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;
@@ -311,6 +312,9 @@ static int handle_sske(struct kvm_vcpu *vcpu)
        int reg1, reg2;
        int rc;
 
+       if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+               return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;
index 9d592ef4104b0416029944fed5770a8ea74a47cb..c1f5cde2c878e63e32d44a47a10e3c77ccfc32ae 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling interprocessor communication
  *
  * Copyright IBM Corp. 2008, 2013
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
index a311938b63b3b4eea0dd57e80bd02c4708dfcd92..5d6ae0326d9e8fa2707e8d066488c8313dfef879 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * kvm nested virtualization support for s390x
  *
  * Copyright IBM Corp. 2016
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
  */
 #include <linux/vmalloc.h>
index 2dbdcd85b68f200762846ce0c59dc39438d0f9e0..6cf024eb2085d86e6a729e3cfa94e80a50f02a38 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  Collaborative memory management interface.
  *
@@ -56,10 +57,10 @@ static DEFINE_SPINLOCK(cmm_lock);
 
 static struct task_struct *cmm_thread_ptr;
 static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
-static DEFINE_TIMER(cmm_timer, NULL);
 
-static void cmm_timer_fn(unsigned long);
+static void cmm_timer_fn(struct timer_list *);
 static void cmm_set_timer(void);
+static DEFINE_TIMER(cmm_timer, cmm_timer_fn);
 
 static long cmm_alloc_pages(long nr, long *counter,
                            struct cmm_page_array **list)
@@ -194,13 +195,11 @@ static void cmm_set_timer(void)
                if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
                        return;
        }
-       cmm_timer.function = cmm_timer_fn;
-       cmm_timer.data = 0;
        cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
        add_timer(&cmm_timer);
 }
 
-static void cmm_timer_fn(unsigned long ignored)
+static void cmm_timer_fn(struct timer_list *unused)
 {
        long nr;
 
index b2c140193b0af72273ffcbafd78a4ee38417ca42..05d459b638f55d563d479eb978a5d60f0e421e1b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  KVM guest address space mapping code
  *
index 5bea139517a2edc21dc50074d2c2e9a94dabb19e..831bdcf407bbc1d2d76edc78e6a9f50aae1406cb 100644 (file)
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  flexible mmap layout support
  *
  * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
  * All Rights Reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- *
  * Started by Ingo Molnar <mingo@elte.hu>
  */
 
index 434a9564917beceadeffd0f34d041683b717af1c..cb364153c43ce204b8736fe1758e7bac8fda2a69 100644 (file)
@@ -83,8 +83,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
 
        /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
        VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
-       if (end >= TASK_SIZE_MAX)
-               return -ENOMEM;
        rc = 0;
        notify = 0;
        while (mm->context.asce_limit < end) {
index ae677f814bc07a406f7f996a81ed2db65718f5ff..4f2b65d01a70418c802d6ce33713101ec0e2909b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2011
  *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
index 90568c33ddb0ef72aaed02bcf4bcc15da9afef37..e0d5f245e42bc713443d5c6d09d9034850adbec6 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Arch-specific network modules
 #
index e81c16838b90f1bc9a5418bc1b4e5365e9cb0aef..9557d8b516df5a689dda995cd7fd501ddd6cf54c 100644 (file)
@@ -55,8 +55,7 @@ struct bpf_jit {
 #define SEEN_LITERAL   8       /* code uses literals */
 #define SEEN_FUNC      16      /* calls C functions */
 #define SEEN_TAIL_CALL 32      /* code uses tail calls */
-#define SEEN_SKB_CHANGE        64      /* code changes skb data */
-#define SEEN_REG_AX    128     /* code uses constant blinding */
+#define SEEN_REG_AX    64      /* code uses constant blinding */
 #define SEEN_STACK     (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
 
 /*
@@ -448,12 +447,12 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
                        EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
                                      REG_15, 152);
        }
-       if (jit->seen & SEEN_SKB)
+       if (jit->seen & SEEN_SKB) {
                emit_load_skb_data_hlen(jit);
-       if (jit->seen & SEEN_SKB_CHANGE)
                /* stg %b1,ST_OFF_SKBP(%r0,%r15) */
                EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
                              STK_OFF_SKBP);
+       }
 }
 
 /*
@@ -983,8 +982,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                EMIT2(0x0d00, REG_14, REG_W1);
                /* lgr %b0,%r2: load return value into %b0 */
                EMIT4(0xb9040000, BPF_REG_0, REG_2);
-               if (bpf_helper_changes_pkt_data((void *)func)) {
-                       jit->seen |= SEEN_SKB_CHANGE;
+               if ((jit->seen & SEEN_SKB) &&
+                   bpf_helper_changes_pkt_data((void *)func)) {
                        /* lg %b1,ST_OFF_SKBP(%r15) */
                        EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
                                      REG_15, STK_OFF_SKBP);
index f94ecaffa71bb543f0d90c9340d7bee23739fe85..66c2dff74895f8ef5924e3a19606e18b7029a882 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 obj-y                  += numa.o
 obj-y                  += toptree.o
 obj-$(CONFIG_NUMA_EMU) += mode_emu.o
index 805d8b29193a5964b2d3d6edb617f94a9b37e905..22d0871291eef12438398207cb06e964b14ad73d 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the s390 PCI subsystem.
 #
index 0fe649c0d5423a2ed51fcff4dc7d011204fdf4e9..4902fed221c0effa59ff21fedabdda152641d112 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2012
  *
index c2f786f0ea0688c5fb9c36659ff66fb8140e6f2b..b482e95b6249e380dfb39d89253789c61dedb1e3 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  Copyright IBM Corp. 2012,2015
  *
index 0d300ee00f4e95b987884bbeb0fa1fce1bee7b59..f7aa5a77827ec17d893d59834a0d33082bb8fd82 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2012
  *
index 81b840bc6e4e733064d20f309bb57f24820f01cb..19bcb3b45a70fc12fa426d636fd4482c570c6654 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * s390 specific pci instructions
  *
index 01d4c5a4bfe9781fdba3ccfd4e727e518892815f..357d42681cefae0e8c7e40d500bdb5b452d9173e 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Generate opcode table initializers for the in-kernel disassembler.
  *
index c94ee54210bc489efd469493800596cd2b7061a3..81271d3af47cb1000ebfab2539efa92080a1eccc 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y      += bpf_perf_event.h
 generic-y      += siginfo.h
index c6d96049a0bb07a91e3515fdc5d3261489d60fbe..e8af2ff29bc3b06ca974a5e7a5139a4b2ba52a29 100644 (file)
@@ -59,9 +59,9 @@ static inline void heartbeat_toggle_bit(struct heartbeat_data *hd,
        }
 }
 
-static void heartbeat_timer(unsigned long data)
+static void heartbeat_timer(struct timer_list *t)
 {
-       struct heartbeat_data *hd = (struct heartbeat_data *)data;
+       struct heartbeat_data *hd = from_timer(hd, t, timer);
        static unsigned bit = 0, up = 1;
 
        heartbeat_toggle_bit(hd, bit, hd->flags & HEARTBEAT_INVERTED);
@@ -133,7 +133,7 @@ static int heartbeat_drv_probe(struct platform_device *pdev)
                }
        }
 
-       setup_timer(&hd->timer, heartbeat_timer, (unsigned long)hd);
+       timer_setup(&hd->timer, heartbeat_timer, 0);
        platform_set_drvdata(pdev, hd);
 
        return mod_timer(&hd->timer, jiffies + 1);
index cae707f3472dc59fbc79d9fe8ecfeeb75806e666..fe163ecd071970737faf086d47cffe8605d3c046 100644 (file)
@@ -85,18 +85,18 @@ int __init pci_is_66mhz_capable(struct pci_channel *hose,
        return cap66 > 0;
 }
 
-static void pcibios_enable_err(unsigned long __data)
+static void pcibios_enable_err(struct timer_list *t)
 {
-       struct pci_channel *hose = (struct pci_channel *)__data;
+       struct pci_channel *hose = from_timer(hose, t, err_timer);
 
        del_timer(&hose->err_timer);
        printk(KERN_DEBUG "PCI: re-enabling error IRQ.\n");
        enable_irq(hose->err_irq);
 }
 
-static void pcibios_enable_serr(unsigned long __data)
+static void pcibios_enable_serr(struct timer_list *t)
 {
-       struct pci_channel *hose = (struct pci_channel *)__data;
+       struct pci_channel *hose = from_timer(hose, t, serr_timer);
 
        del_timer(&hose->serr_timer);
        printk(KERN_DEBUG "PCI: re-enabling system error IRQ.\n");
@@ -106,15 +106,11 @@ static void pcibios_enable_serr(unsigned long __data)
 void pcibios_enable_timers(struct pci_channel *hose)
 {
        if (hose->err_irq) {
-               init_timer(&hose->err_timer);
-               hose->err_timer.data = (unsigned long)hose;
-               hose->err_timer.function = pcibios_enable_err;
+               timer_setup(&hose->err_timer, pcibios_enable_err, 0);
        }
 
        if (hose->serr_irq) {
-               init_timer(&hose->serr_timer);
-               hose->serr_timer.data = (unsigned long)hose;
-               hose->serr_timer.function = pcibios_enable_serr;
+               timer_setup(&hose->serr_timer, pcibios_enable_serr, 0);
        }
 }
 
index 5bfb341cc5c4ad71eefe29a8f5175f715d07c59b..a171811602337e01c46230e546dd8855ba717eb3 100644 (file)
@@ -26,9 +26,9 @@ static ssize_t switch_show(struct device *dev,
 }
 static DEVICE_ATTR(switch, S_IRUGO, switch_show, NULL);
 
-static void switch_timer(unsigned long data)
+static void switch_timer(struct timer_list *t)
 {
-       struct push_switch *psw = (struct push_switch *)data;
+       struct push_switch *psw = from_timer(psw, t, debounce);
 
        schedule_work(&psw->work);
 }
@@ -78,10 +78,7 @@ static int switch_drv_probe(struct platform_device *pdev)
        }
 
        INIT_WORK(&psw->work, switch_work_handler);
-       init_timer(&psw->debounce);
-
-       psw->debounce.function = switch_timer;
-       psw->debounce.data = (unsigned long)psw;
+       timer_setup(&psw->debounce, switch_timer, 0);
 
        /* Workqueue API brain-damage */
        psw->pdev = pdev;
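
The timer conversions above (the cmm driver, the heartbeat LED driver, the PCI error timers, and this push-switch driver) all follow the same timer API migration: the callback now receives a struct timer_list * and recovers its container with from_timer(), while timer_setup() replaces init_timer()/setup_timer() plus the .function/.data assignments (DEFINE_TIMER() likewise takes the callback directly). A minimal sketch of the new pattern; the struct and function names below are invented purely for illustration:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_device {
	struct timer_list poll_timer;	/* embedded timer */
	int ticks;
};

/* New-style callback: takes the timer itself, not an opaque cookie. */
static void my_poll(struct timer_list *t)
{
	struct my_device *dev = from_timer(dev, t, poll_timer);

	dev->ticks++;
	mod_timer(&dev->poll_timer, jiffies + HZ);	/* re-arm in 1s */
}

static int my_start(struct my_device *dev)
{
	/* Replaces init_timer() + .function/.data (or setup_timer()). */
	timer_setup(&dev->poll_timer, my_poll, 0);
	return mod_timer(&dev->poll_timer, jiffies + HZ);
}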
index e28531333efa96d7195e1e9771d574c83caa040d..ba4d39cb321d0608d96f0b0c5ea45ddadea7cc45 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 5a9e96be16652bc13bb4e6cd0f298b0e613d5883..9937c5ff94a9fe9eaeb8744ee2786ad7a5a2afa2 100644 (file)
@@ -715,7 +715,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
        return pte_pfn(pte);
 }
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline unsigned long pmd_write(pmd_t pmd)
 {
        pte_t pte = __pte(pmd_val(pmd));
index 2178c78c7c1a6336d4a11c9619de521519245c0e..4680ba246b554708aec94287f7974adcca4c8c97 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += types.h
index 0f0f76b4f6cd634d1391a028910e393b16858d64..063556fe2cb1d8877c0a6028621658e6dd041d07 100644 (file)
@@ -19,7 +19,7 @@ lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
 lib-$(CONFIG_SPARC64) += multi3.o
 lib-$(CONFIG_SPARC64) += fls.o
 lib-$(CONFIG_SPARC64) += fls64.o
-obj-$(CONFIG_SPARC64) += NG4fls.o
+lib-$(CONFIG_SPARC64) += NG4fls.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
index be3136f142a9993e0c6c8cfa1d651b1685654a73..a8103a84b4ac4a2ec84c44c302862b3aed8b7e7f 100644 (file)
@@ -113,7 +113,7 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
+       printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
index 815c03d7a765524424b92866b1567ea2a43695d4..41363f46797bf9f74dd922fadbd2a3f190e8c9bb 100644 (file)
@@ -154,7 +154,7 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
+       printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
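
The printk format changes above switch from %p to %px. Since %p started hashing pointer values to avoid leaking kernel addresses, %px is the explicit opt-in for printing the raw address, which these segfault reports want. A one-function sketch of the distinction (not part of the patch):

#include <linux/printk.h>

static void dump_obj(const void *obj)
{
	/* %p prints a hashed value; %px deliberately prints the raw address. */
	printk(KERN_DEBUG "obj at %p (raw %px)\n", obj, obj);
}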
index 5765e7e711f78248d2bff70f9c57ca48a4514355..ff5f9cb3039af1f91c8701915f08c051c21d0d81 100644 (file)
@@ -1245,14 +1245,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                u8 *func = ((u8 *)__bpf_call_base) + imm;
 
                ctx->saw_call = true;
+               if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
+                       emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx);
 
                emit_call((u32 *)func, ctx);
                emit_nop(ctx);
 
                emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);
 
-               if (bpf_helper_changes_pkt_data(func) && ctx->saw_ld_abs_ind)
-                       load_skb_regs(ctx, bpf2sparc[BPF_REG_6]);
+               if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
+                       load_skb_regs(ctx, L7);
                break;
        }
 
index 2a26cc4fefc27fda65d15be3b3a0b719231ad609..adfa21b18488f215b7b56cfc43daa52877c77dd3 100644 (file)
@@ -475,7 +475,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 #define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_huge_page(pmd)     pte_huge(pmd_pte(pmd))
 #define pmd_mkhuge(pmd)                pte_pmd(pte_mkhuge(pmd_pte(pmd)))
-#define __HAVE_ARCH_PMD_WRITE
 
 #define pfn_pmd(pfn, pgprot)   pte_pmd(pfn_pte((pfn), (pgprot)))
 #define pmd_pfn(pmd)           pte_pfn(pmd_pte(pmd))
index 5711de0a1b5efc92519e152462a0ef1516612add..cc439612bcd52fee78256802f5aac1ded0c37ec8 100644 (file)
@@ -1,6 +1,7 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 50a32c33d729ba2a570eadaf77cff69925218c42..73c57f614c9e0600a5b4df7b28a7fa55f4abe471 100644 (file)
@@ -1,4 +1,5 @@
 generic-y += barrier.h
+generic-y += bpf_perf_event.h
 generic-y += bug.h
 generic-y += clkdev.h
 generic-y += current.h
index 4e6fcb32620ffb2125f648622499e5bf7c950e72..428644175956231aad112a0ce221452913736635 100644 (file)
@@ -150,7 +150,7 @@ static void show_segv_info(struct uml_pt_regs *regs)
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p sp %p error %x",
+       printk("%s%s[%d]: segfault at %lx ip %px sp %px error %x",
                task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
                (void *)UPT_IP(regs), (void *)UPT_SP(regs),
index 759a71411169f4df318e3eb3e97e0f7602a04bb0..8611ef980554c2ef81378a087d15d42f1e3f3acb 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += auxvec.h
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index df3276d6bfe33b48ff30c0d672c3029ed650678b..8eed3f94bfc774de5e3f344590f8889a999dea9c 100644 (file)
@@ -1804,14 +1804,20 @@ config X86_SMAP
          If unsure, say Y.
 
 config X86_INTEL_UMIP
-       def_bool n
+       def_bool y
        depends on CPU_SUP_INTEL
        prompt "Intel User Mode Instruction Prevention" if EXPERT
        ---help---
          The User Mode Instruction Prevention (UMIP) is a security
          feature in newer Intel processors. If enabled, a general
-         protection fault is issued if the instructions SGDT, SLDT,
-         SIDT, SMSW and STR are executed in user mode.
+         protection fault is issued if the SGDT, SLDT, SIDT, SMSW
+         or STR instructions are executed in user mode. These instructions
+         unnecessarily expose information about the hardware state.
+
+         The vast majority of applications do not use these instructions.
+         For the very few that do, software emulation is provided in
+         specific cases in protected and virtual-8086 modes. The emulated
+         results are dummy values.
 
 config X86_INTEL_MPX
        prompt "Intel MPX (Memory Protection Extensions)"
index 6293a8768a9123038eeced9e8dc2c4874feed275..672441c008c73ae3d949b974b128ef167d7d486a 100644 (file)
@@ -400,6 +400,7 @@ config UNWINDER_FRAME_POINTER
 config UNWINDER_GUESS
        bool "Guess unwinder"
        depends on EXPERT
+       depends on !STACKDEPOT
        ---help---
          This option enables the "guess" unwinder for unwinding kernel stack
          traces.  It scans the stack and reports every kernel text address it
index 1e9c322e973af0e1024dc3751b99d16eb76574f7..f25e1530e0644c83d8c69b1a90436c54ce823ae4 100644 (file)
@@ -80,6 +80,7 @@ vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
 ifdef CONFIG_X86_64
        vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o
        vmlinux-objs-y += $(obj)/mem_encrypt.o
+       vmlinux-objs-y += $(obj)/pgtable_64.o
 endif
 
 $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
index 20919b4f31330fbc36528e47cf7dd010e516c31c..fc313e29fe2c4be637ff7d41bfafdc7a29144ba4 100644 (file)
@@ -305,10 +305,18 @@ ENTRY(startup_64)
        leaq    boot_stack_end(%rbx), %rsp
 
 #ifdef CONFIG_X86_5LEVEL
-       /* Check if 5-level paging has already enabled */
-       movq    %cr4, %rax
-       testl   $X86_CR4_LA57, %eax
-       jnz     lvl5
+       /*
+        * Check if we need to enable 5-level paging.
+        * RSI holds real mode data and needs to be preserved across
+        * a function call.
+        */
+       pushq   %rsi
+       call    l5_paging_required
+       popq    %rsi
+
+       /* If l5_paging_required() returned zero, we're done here. */
+       cmpq    $0, %rax
+       je      lvl5
 
        /*
         * At this point we are in long mode with 4-level paging enabled,
index a63fbc25ce84bd4ddd43cd1867ade39a98df83df..8199a6187251d0179276c96b5260a2226397ef06 100644 (file)
@@ -171,7 +171,6 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
 static void mem_avoid_memmap(char *str)
 {
        static int i;
-       int rc;
 
        if (i >= MAX_MEMMAP_REGIONS)
                return;
@@ -219,7 +218,7 @@ static int handle_mem_memmap(void)
                return 0;
 
        tmp_cmdline = malloc(len + 1);
-       if (!tmp_cmdline )
+       if (!tmp_cmdline)
                error("Failed to allocate space for tmp_cmdline");
 
        memcpy(tmp_cmdline, args, len);
@@ -363,7 +362,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
        cmd_line |= boot_params->hdr.cmd_line_ptr;
        /* Calculate size of cmd_line. */
        ptr = (char *)(unsigned long)cmd_line;
-       for (cmd_line_size = 0; ptr[cmd_line_size++]; )
+       for (cmd_line_size = 0; ptr[cmd_line_size++];)
                ;
        mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
        mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
index b50c42455e25257bff89dd2d9d5f23534340076e..98761a1576ceb5c21b2d8c7e98c1217fd48abb26 100644 (file)
@@ -169,6 +169,16 @@ void __puthex(unsigned long value)
        }
 }
 
+static bool l5_supported(void)
+{
+       /* Check if leaf 7 is supported. */
+       if (native_cpuid_eax(0) < 7)
+               return 0;
+
+       /* Check if la57 is supported. */
+       return native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31));
+}
+
 #if CONFIG_X86_NEED_RELOCS
 static void handle_relocations(void *output, unsigned long output_len,
                               unsigned long virt_addr)
@@ -362,6 +372,12 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
        console_init();
        debug_putstr("early console in extract_kernel\n");
 
+       if (IS_ENABLED(CONFIG_X86_5LEVEL) && !l5_supported()) {
+               error("This linux kernel as configured requires 5-level paging\n"
+                       "This CPU does not support the required 'cr4.la57' feature\n"
+                       "Unable to boot - please use a kernel appropriate for your CPU\n");
+       }
+
        free_mem_ptr     = heap;        /* Heap */
        free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
 
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
new file mode 100644 (file)
index 0000000..b4469a3
--- /dev/null
@@ -0,0 +1,28 @@
+#include <asm/processor.h>
+
+/*
+ * __force_order is used by special_insns.h asm code to force instruction
+ * serialization.
+ *
+ * It is not referenced from the code, but GCC < 5 with -fPIE would fail
+ * due to an undefined symbol. Define it to make these ancient GCCs work.
+ */
+unsigned long __force_order;
+
+int l5_paging_required(void)
+{
+       /* Check if leaf 7 is supported. */
+
+       if (native_cpuid_eax(0) < 7)
+               return 0;
+
+       /* Check if la57 is supported. */
+       if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31))))
+               return 0;
+
+       /* Check if 5-level paging has already been enabled. */
+       if (native_read_cr4() & X86_CR4_LA57)
+               return 0;
+
+       return 1;
+}
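
l5_paging_required() in the new pgtable_64.c repeats the probe used by the l5_supported() helper added just above: CPUID leaf 7, sub-leaf 0, advertises LA57 (5-level paging) in ECX bit 16. For reference, the same check can be made from user space; the program below is an illustrative sketch and not part of the patch:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Make sure CPUID leaf 7 exists before querying it. */
	if (__get_cpuid_max(0, NULL) < 7)
		return 1;

	/* Leaf 7, sub-leaf 0: structured extended feature flags. */
	__cpuid_count(7, 0, eax, ebx, ecx, edx);

	/* LA57 (5-level paging / 57-bit virtual addresses) is ECX bit 16. */
	printf("la57: %s\n", (ecx & (1u << 16)) ? "supported" : "not supported");
	return 0;
}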
index 49f4970f693b3bddacad0b2965acad0ae112cb03..c9e8499fbfe75c0a98d0223247fa6cb0746198bb 100644 (file)
@@ -44,9 +44,9 @@ FDINITRD=$6
 
 # Make sure the files actually exist
 verify "$FBZIMAGE"
-verify "$MTOOLSRC"
 
 genbzdisk() {
+       verify "$MTOOLSRC"
        mformat a:
        syslinux $FIMAGE
        echo "$KCMDLINE" | mcopy - a:syslinux.cfg
@@ -57,6 +57,7 @@ genbzdisk() {
 }
 
 genfdimage144() {
+       verify "$MTOOLSRC"
        dd if=/dev/zero of=$FIMAGE bs=1024 count=1440 2> /dev/null
        mformat v:
        syslinux $FIMAGE
@@ -68,6 +69,7 @@ genfdimage144() {
 }
 
 genfdimage288() {
+       verify "$MTOOLSRC"
        dd if=/dev/zero of=$FIMAGE bs=1024 count=2880 2> /dev/null
        mformat w:
        syslinux $FIMAGE
index 399a29d067d6367603714633fb8c4de6ab77275a..cb91a64a99e7cdbc0422227383611378fb6b076a 100644 (file)
@@ -59,13 +59,6 @@ static int encrypt(struct blkcipher_desc *desc,
 
        salsa20_ivsetup(ctx, walk.iv);
 
-       if (likely(walk.nbytes == nbytes))
-       {
-               salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
-                                     walk.dst.virt.addr, nbytes);
-               return blkcipher_walk_done(desc, &walk, 0);
-       }
-
        while (walk.nbytes >= 64) {
                salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
                                      walk.dst.virt.addr,
index 4838037f97f6edffda62b5b045c837fcc29402f0..bd8b57a5c874bc37ab23fb0489f840ba0b17168e 100644 (file)
@@ -941,7 +941,8 @@ ENTRY(debug)
        movl    %esp, %eax                      # pt_regs pointer
 
        /* Are we currently on the SYSENTER stack? */
-       PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
+       movl    PER_CPU_VAR(cpu_entry_area), %ecx
+       addl    $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
        subl    %eax, %ecx      /* ecx = (end of SYSENTER_stack) - esp */
        cmpl    $SIZEOF_SYSENTER_stack, %ecx
        jb      .Ldebug_from_sysenter_stack
@@ -984,7 +985,8 @@ ENTRY(nmi)
        movl    %esp, %eax                      # pt_regs pointer
 
        /* Are we currently on the SYSENTER stack? */
-       PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
+       movl    PER_CPU_VAR(cpu_entry_area), %ecx
+       addl    $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
        subl    %eax, %ecx      /* ecx = (end of SYSENTER_stack) - esp */
        cmpl    $SIZEOF_SYSENTER_stack, %ecx
        jb      .Lnmi_from_sysenter_stack
index a2b30ec69497277f39ef56e0dfad604170f78c19..423885bee398c6c9cb80f3bd4a2ec8317e41062a 100644 (file)
@@ -51,15 +51,19 @@ ENTRY(native_usergs_sysret64)
 END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
-.macro TRACE_IRQS_IRETQ
+.macro TRACE_IRQS_FLAGS flags:req
 #ifdef CONFIG_TRACE_IRQFLAGS
-       bt      $9, EFLAGS(%rsp)                /* interrupts off? */
+       bt      $9, \flags              /* interrupts off? */
        jnc     1f
        TRACE_IRQS_ON
 1:
 #endif
 .endm
 
+.macro TRACE_IRQS_IRETQ
+       TRACE_IRQS_FLAGS EFLAGS(%rsp)
+.endm
+
 /*
  * When dynamic function tracer is enabled it will add a breakpoint
  * to all locations that it is about to modify, sync CPUs, update
@@ -136,6 +140,64 @@ END(native_usergs_sysret64)
  * with them due to bugs in both AMD and Intel CPUs.
  */
 
+       .pushsection .entry_trampoline, "ax"
+
+/*
+ * The code in here gets remapped into cpu_entry_area's trampoline.  This means
+ * that the assembler and linker have the wrong idea as to where this code
+ * lives (and, in fact, it's mapped more than once, so it's not even at a
+ * fixed address).  So we can't reference any symbols outside the entry
+ * trampoline and expect it to work.
+ *
+ * Instead, we carefully abuse %rip-relative addressing.
+ * _entry_trampoline(%rip) refers to the start of the remapped entry
+ * trampoline.  We can thus find cpu_entry_area with this macro:
+ */
+
+#define CPU_ENTRY_AREA \
+       _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
+
+/* The top word of the SYSENTER stack is hot and is usable as scratch space. */
+#define RSP_SCRATCH    CPU_ENTRY_AREA_SYSENTER_stack + \
+                       SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA
+
+ENTRY(entry_SYSCALL_64_trampoline)
+       UNWIND_HINT_EMPTY
+       swapgs
+
+       /* Stash the user RSP. */
+       movq    %rsp, RSP_SCRATCH
+
+       /* Load the top of the task stack into RSP */
+       movq    CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp
+
+       /* Start building the simulated IRET frame. */
+       pushq   $__USER_DS                      /* pt_regs->ss */
+       pushq   RSP_SCRATCH                     /* pt_regs->sp */
+       pushq   %r11                            /* pt_regs->flags */
+       pushq   $__USER_CS                      /* pt_regs->cs */
+       pushq   %rcx                            /* pt_regs->ip */
+
+       /*
+        * x86 lacks a near absolute jump, and we can't jump to the real
+        * entry text with a relative jump.  We could push the target
+        * address and then use retq, but this destroys the pipeline on
+        * many CPUs (wasting over 20 cycles on Sandy Bridge).  Instead,
+        * spill RDI and restore it in a second-stage trampoline.
+        */
+       pushq   %rdi
+       movq    $entry_SYSCALL_64_stage2, %rdi
+       jmp     *%rdi
+END(entry_SYSCALL_64_trampoline)
+
+       .popsection
+
+ENTRY(entry_SYSCALL_64_stage2)
+       UNWIND_HINT_EMPTY
+       popq    %rdi
+       jmp     entry_SYSCALL_64_after_hwframe
+END(entry_SYSCALL_64_stage2)
+
 ENTRY(entry_SYSCALL_64)
        UNWIND_HINT_EMPTY
        /*
@@ -148,8 +210,6 @@ ENTRY(entry_SYSCALL_64)
        movq    %rsp, PER_CPU_VAR(rsp_scratch)
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
-       TRACE_IRQS_OFF
-
        /* Construct struct pt_regs on stack */
        pushq   $__USER_DS                      /* pt_regs->ss */
        pushq   PER_CPU_VAR(rsp_scratch)        /* pt_regs->sp */
@@ -170,6 +230,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
        sub     $(6*8), %rsp                    /* pt_regs->bp, bx, r12-15 not saved */
        UNWIND_HINT_REGS extra=0
 
+       TRACE_IRQS_OFF
+
        /*
         * If we need to do entry work or if we guess we'll need to do
         * exit work, go straight to the slow path.
@@ -326,8 +388,24 @@ syscall_return_via_sysret:
        popq    %rsi    /* skip rcx */
        popq    %rdx
        popq    %rsi
+
+       /*
+        * Now all regs are restored except RSP and RDI.
+        * Save old stack pointer and switch to trampoline stack.
+        */
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+
+       pushq   RSP-RDI(%rdi)   /* RSP */
+       pushq   (%rdi)          /* RDI */
+
+       /*
+        * We are on the trampoline stack.  All regs except RDI are live.
+        * We can do future final exit work right here.
+        */
+
        popq    %rdi
-       movq    RSP-ORIG_RAX(%rsp), %rsp
+       popq    %rsp
        USERGS_SYSRET64
 END(entry_SYSCALL_64)
 
@@ -462,12 +540,13 @@ END(irq_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
-       pushfq
-       testl $X86_EFLAGS_IF, (%rsp)
+       pushq %rax
+       SAVE_FLAGS(CLBR_RAX)
+       testl $X86_EFLAGS_IF, %eax
        jz .Lokay_\@
        ud2
 .Lokay_\@:
-       addq $8, %rsp
+       popq %rax
 #endif
 .endm
 
@@ -559,6 +638,13 @@ END(irq_entries_start)
 /* 0(%rsp): ~(interrupt number) */
        .macro interrupt func
        cld
+
+       testb   $3, CS-ORIG_RAX(%rsp)
+       jz      1f
+       SWAPGS
+       call    switch_to_thread_stack
+1:
+
        ALLOC_PT_GPREGS_ON_STACK
        SAVE_C_REGS
        SAVE_EXTRA_REGS
@@ -568,12 +654,8 @@ END(irq_entries_start)
        jz      1f
 
        /*
-        * IRQ from user mode.  Switch to kernel gsbase and inform context
-        * tracking that we're in kernel mode.
-        */
-       SWAPGS
-
-       /*
+        * IRQ from user mode.
+        *
         * We need to tell lockdep that IRQs are off.  We can't do this until
         * we fix gsbase, and we should do it before enter_from_user_mode
         * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
@@ -626,10 +708,41 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
        ud2
 1:
 #endif
-       SWAPGS
        POP_EXTRA_REGS
-       POP_C_REGS
-       addq    $8, %rsp        /* skip regs->orig_ax */
+       popq    %r11
+       popq    %r10
+       popq    %r9
+       popq    %r8
+       popq    %rax
+       popq    %rcx
+       popq    %rdx
+       popq    %rsi
+
+       /*
+        * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
+        * Save old stack pointer and switch to trampoline stack.
+        */
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+
+       /* Copy the IRET frame to the trampoline stack. */
+       pushq   6*8(%rdi)       /* SS */
+       pushq   5*8(%rdi)       /* RSP */
+       pushq   4*8(%rdi)       /* EFLAGS */
+       pushq   3*8(%rdi)       /* CS */
+       pushq   2*8(%rdi)       /* RIP */
+
+       /* Push user RDI on the trampoline stack. */
+       pushq   (%rdi)
+
+       /*
+        * We are on the trampoline stack.  All regs except RDI are live.
+        * We can do future final exit work right here.
+        */
+
+       /* Restore RDI. */
+       popq    %rdi
+       SWAPGS
        INTERRUPT_RETURN
 
 
@@ -825,7 +938,33 @@ apicinterrupt IRQ_WORK_VECTOR                      irq_work_interrupt              smp_irq_work_interrupt
 /*
  * Exception entry points.
  */
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
+#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
+
+/*
+ * Switch to the thread stack.  This is called with the IRET frame and
+ * orig_ax on the stack.  (That is, RDI..R12 are not on the stack and
+ * space has not been allocated for them.)
+ */
+ENTRY(switch_to_thread_stack)
+       UNWIND_HINT_FUNC
+
+       pushq   %rdi
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+       UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
+
+       pushq   7*8(%rdi)               /* regs->ss */
+       pushq   6*8(%rdi)               /* regs->rsp */
+       pushq   5*8(%rdi)               /* regs->eflags */
+       pushq   4*8(%rdi)               /* regs->cs */
+       pushq   3*8(%rdi)               /* regs->ip */
+       pushq   2*8(%rdi)               /* regs->orig_ax */
+       pushq   8(%rdi)                 /* return address */
+       UNWIND_HINT_FUNC
+
+       movq    (%rdi), %rdi
+       ret
+END(switch_to_thread_stack)
 
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 ENTRY(\sym)
@@ -844,11 +983,12 @@ ENTRY(\sym)
 
        ALLOC_PT_GPREGS_ON_STACK
 
-       .if \paranoid
-       .if \paranoid == 1
+       .if \paranoid < 2
        testb   $3, CS(%rsp)                    /* If coming from userspace, switch stacks */
-       jnz     1f
+       jnz     .Lfrom_usermode_switch_stack_\@
        .endif
+
+       .if \paranoid
        call    paranoid_entry
        .else
        call    error_entry
@@ -890,20 +1030,15 @@ ENTRY(\sym)
        jmp     error_exit
        .endif
 
-       .if \paranoid == 1
+       .if \paranoid < 2
        /*
-        * Paranoid entry from userspace.  Switch stacks and treat it
+        * Entry from userspace.  Switch stacks and treat it
         * as a normal entry.  This means that paranoid handlers
         * run in real process context if user_mode(regs).
         */
-1:
+.Lfrom_usermode_switch_stack_\@:
        call    error_entry
 
-
-       movq    %rsp, %rdi                      /* pt_regs pointer */
-       call    sync_regs
-       movq    %rax, %rsp                      /* switch stack */
-
        movq    %rsp, %rdi                      /* pt_regs pointer */
 
        .if \has_error_code
@@ -943,11 +1078,13 @@ ENTRY(native_load_gs_index)
        FRAME_BEGIN
        pushfq
        DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
+       TRACE_IRQS_OFF
        SWAPGS
 .Lgs_change:
        movl    %edi, %gs
 2:     ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
        SWAPGS
+       TRACE_IRQS_FLAGS (%rsp)
        popfq
        FRAME_END
        ret
@@ -1164,6 +1301,14 @@ ENTRY(error_entry)
        SWAPGS
 
 .Lerror_entry_from_usermode_after_swapgs:
+       /* Put us onto the real thread stack. */
+       popq    %r12                            /* save return addr in %r12 */
+       movq    %rsp, %rdi                      /* arg0 = pt_regs pointer */
+       call    sync_regs
+       movq    %rax, %rsp                      /* switch stack */
+       ENCODE_FRAME_POINTER
+       pushq   %r12
+
        /*
         * We need to tell lockdep that IRQs are off.  We can't do this until
         * we fix gsbase, and we should do it before enter_from_user_mode
index 568e130d932cd2a7d44393e5fc52408cffe64f34..95ad40eb7effbdb6f605285df62d1e0bd33a6cac 100644 (file)
@@ -48,7 +48,7 @@
  */
 ENTRY(entry_SYSENTER_compat)
        /* Interrupts are off on entry. */
-       SWAPGS_UNSAFE_STACK
+       SWAPGS
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
        /*
@@ -306,8 +306,11 @@ ENTRY(entry_INT80_compat)
         */
        movl    %eax, %eax
 
-       /* Construct struct pt_regs on stack (iret frame is already on stack) */
        pushq   %rax                    /* pt_regs->orig_ax */
+
+       /* switch to thread stack expects orig_ax to be pushed */
+       call    switch_to_thread_stack
+
        pushq   %rdi                    /* pt_regs->di */
        pushq   %rsi                    /* pt_regs->si */
        pushq   %rdx                    /* pt_regs->dx */
index 11b13c4b43d55f8d6c8b239f478ecb302d4cfd07..f19856d95c60919c92d1679e0037d9339c4c2a65 100644 (file)
@@ -324,5 +324,5 @@ notrace time_t __vdso_time(time_t *t)
                *t = result;
        return result;
 }
-int time(time_t *t)
+time_t time(time_t *t)
        __attribute__((weak, alias("__vdso_time")));
index 43445da30ceab12323772e81c95f0dfb3ba8cfa3..09c26a4f139c125e000675689ebc983acd8ab91a 100644 (file)
@@ -3734,6 +3734,19 @@ EVENT_ATTR_STR(cycles-t, cycles_t,       "event=0x3c,in_tx=1");
 EVENT_ATTR_STR(cycles-ct,      cycles_ct,      "event=0x3c,in_tx=1,in_tx_cp=1");
 
 static struct attribute *hsw_events_attrs[] = {
+       EVENT_PTR(mem_ld_hsw),
+       EVENT_PTR(mem_st_hsw),
+       EVENT_PTR(td_slots_issued),
+       EVENT_PTR(td_slots_retired),
+       EVENT_PTR(td_fetch_bubbles),
+       EVENT_PTR(td_total_slots),
+       EVENT_PTR(td_total_slots_scale),
+       EVENT_PTR(td_recovery_bubbles),
+       EVENT_PTR(td_recovery_bubbles_scale),
+       NULL
+};
+
+static struct attribute *hsw_tsx_events_attrs[] = {
        EVENT_PTR(tx_start),
        EVENT_PTR(tx_commit),
        EVENT_PTR(tx_abort),
@@ -3746,18 +3759,16 @@ static struct attribute *hsw_events_attrs[] = {
        EVENT_PTR(el_conflict),
        EVENT_PTR(cycles_t),
        EVENT_PTR(cycles_ct),
-       EVENT_PTR(mem_ld_hsw),
-       EVENT_PTR(mem_st_hsw),
-       EVENT_PTR(td_slots_issued),
-       EVENT_PTR(td_slots_retired),
-       EVENT_PTR(td_fetch_bubbles),
-       EVENT_PTR(td_total_slots),
-       EVENT_PTR(td_total_slots_scale),
-       EVENT_PTR(td_recovery_bubbles),
-       EVENT_PTR(td_recovery_bubbles_scale),
        NULL
 };
 
+static __init struct attribute **get_hsw_events_attrs(void)
+{
+       return boot_cpu_has(X86_FEATURE_RTM) ?
+               merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) :
+               hsw_events_attrs;
+}
+
 static ssize_t freeze_on_smi_show(struct device *cdev,
                                  struct device_attribute *attr,
                                  char *buf)
@@ -4186,7 +4197,7 @@ __init int intel_pmu_init(void)
 
                x86_pmu.hw_config = hsw_hw_config;
                x86_pmu.get_event_constraints = hsw_get_event_constraints;
-               x86_pmu.cpu_events = hsw_events_attrs;
+               x86_pmu.cpu_events = get_hsw_events_attrs();
                x86_pmu.lbr_double_abort = true;
                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
                        hsw_format_attr : nhm_format_attr;
@@ -4225,7 +4236,7 @@ __init int intel_pmu_init(void)
 
                x86_pmu.hw_config = hsw_hw_config;
                x86_pmu.get_event_constraints = hsw_get_event_constraints;
-               x86_pmu.cpu_events = hsw_events_attrs;
+               x86_pmu.cpu_events = get_hsw_events_attrs();
                x86_pmu.limit_period = bdw_limit_period;
                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
                        hsw_format_attr : nhm_format_attr;
@@ -4283,7 +4294,7 @@ __init int intel_pmu_init(void)
                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
                        hsw_format_attr : nhm_format_attr;
                extra_attr = merge_attr(extra_attr, skl_format_attr);
-               x86_pmu.cpu_events = hsw_events_attrs;
+               x86_pmu.cpu_events = get_hsw_events_attrs();
                intel_pmu_pebs_data_source_skl(
                        boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
                pr_cont("Skylake events, ");
index d45e06346f14d8636f1b4348a84a6e503012c686..7874c980d56921961328fa0212da9dae61458789 100644 (file)
@@ -975,10 +975,10 @@ static void uncore_pci_remove(struct pci_dev *pdev)
        int i, phys_id, pkg;
 
        phys_id = uncore_pcibus_to_physid(pdev->bus);
-       pkg = topology_phys_to_logical_pkg(phys_id);
 
        box = pci_get_drvdata(pdev);
        if (!box) {
+               pkg = topology_phys_to_logical_pkg(phys_id);
                for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
                        if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
                                uncore_extra_pci_dev[pkg].dev[i] = NULL;
@@ -994,7 +994,7 @@ static void uncore_pci_remove(struct pci_dev *pdev)
                return;
 
        pci_set_drvdata(pdev, NULL);
-       pmu->boxes[pkg] = NULL;
+       pmu->boxes[box->pkgid] = NULL;
        if (atomic_dec_return(&pmu->activeboxes) == 0)
                uncore_pmu_unregister(pmu);
        uncore_box_exit(box);
index 4364191e7c6b2904a443bf2a522ca4445b7f0a12..414dc7e7c950c6cf7279b51f4161b04e6e868aad 100644 (file)
@@ -100,7 +100,7 @@ struct intel_uncore_extra_reg {
 
 struct intel_uncore_box {
        int pci_phys_id;
-       int pkgid;
+       int pkgid;      /* Logical package ID */
        int n_active;   /* number of active events */
        int n_events;
        int cpu;        /* cpu to collect events */
index 95cb19f4e06f03376b03a5a27c59f7e6612167e1..6d8044ab10607b6c668bfee0d6366266401e7e2f 100644 (file)
@@ -1057,7 +1057,7 @@ static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_eve
 
        if (reg1->idx != EXTRA_REG_NONE) {
                int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
-               int pkg = topology_phys_to_logical_pkg(box->pci_phys_id);
+               int pkg = box->pkgid;
                struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];
 
                if (filter_pdev) {
@@ -3035,11 +3035,19 @@ static struct intel_uncore_type *bdx_msr_uncores[] = {
        NULL,
 };
 
+/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
+static struct event_constraint bdx_uncore_pcu_constraints[] = {
+       EVENT_CONSTRAINT(0x80, 0xe, 0x80),
+       EVENT_CONSTRAINT_END
+};
+
 void bdx_uncore_cpu_init(void)
 {
        if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
        uncore_msr_uncores = bdx_msr_uncores;
+
+       hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
 }
 
 static struct intel_uncore_type bdx_uncore_ha = {
index bf6a76202a779ee131b4df8c89449ab52abd0a79..ea9a7dde62e5c4d551ba89e429f911fb5c6603fd 100644 (file)
@@ -135,6 +135,8 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
        set_bit(bit, (unsigned long *)cpu_caps_set);    \
 } while (0)
 
+#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
+
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
index c0b0e9e8aa66eb645eba71784e80aa93b0f0df79..800104c8a3edfee7f4f52a33b8451a51ee0ed90a 100644 (file)
 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
 #define X86_FEATURE_CLZERO             (13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF             (13*32+ 1) /* Instructions Retired Count */
+#define X86_FEATURE_XSAVEERPTR         (13*32+ 2) /* Always save/restore FP error pointers */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM             (14*32+ 0) /* Digital Thermal Sensor */
index 4011cb03ef08e52db15f52779ce366c26359a34b..aab4fe9f49f868a03a5c2da5eeb788a6bb80c24d 100644 (file)
@@ -60,17 +60,10 @@ static inline struct desc_struct *get_current_gdt_rw(void)
        return this_cpu_ptr(&gdt_page)->gdt;
 }
 
-/* Get the fixmap index for a specific processor */
-static inline unsigned int get_cpu_gdt_ro_index(int cpu)
-{
-       return FIX_GDT_REMAP_BEGIN + cpu;
-}
-
 /* Provide the fixmap address of the remapped GDT */
 static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
 {
-       unsigned int idx = get_cpu_gdt_ro_index(cpu);
-       return (struct desc_struct *)__fix_to_virt(idx);
+       return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt;
 }
 
 /* Provide the current read-only GDT */
@@ -185,7 +178,7 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr,
 #endif
 }
 
-static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
+static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct x86_hw_tss *addr)
 {
        struct desc_struct *d = get_cpu_gdt_rw(cpu);
        tss_desc tss;
index 3a091cea36c5a118d953fd25c897989270c6f0e4..0d157d2a1e2aef98b1e69c452f27d330a5fd7179 100644 (file)
@@ -309,6 +309,7 @@ static inline int mmap_is_ia32(void)
 extern unsigned long task_size_32bit(void);
 extern unsigned long task_size_64bit(int full_addr_space);
 extern unsigned long get_mmap_base(int is_legacy);
+extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);
 
 #ifdef CONFIG_X86_32
 
index b0c505fe9a958c701fef6d96f281bb8ab1a773de..94fc4fa141275bcdd6eb43505fc8e5f20352aca4 100644 (file)
@@ -44,6 +44,45 @@ extern unsigned long __FIXADDR_TOP;
                         PAGE_SIZE)
 #endif
 
+/*
+ * cpu_entry_area is a percpu region in the fixmap that contains things
+ * needed by the CPU and early entry/exit code.  Real types aren't used
+ * for all fields here to avoid circular header dependencies.
+ *
+ * Every field is a virtual alias of some other allocated backing store.
+ * There is no direct allocation of a struct cpu_entry_area.
+ */
+struct cpu_entry_area {
+       char gdt[PAGE_SIZE];
+
+       /*
+        * The GDT is just below SYSENTER_stack and thus serves (on x86_64) as
+        * a read-only guard page.
+        */
+       struct SYSENTER_stack_page SYSENTER_stack_page;
+
+       /*
+        * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
+        * we need task switches to work, and task switches write to the TSS.
+        */
+       struct tss_struct tss;
+
+       char entry_trampoline[PAGE_SIZE];
+
+#ifdef CONFIG_X86_64
+       /*
+        * Exception stacks used for IST entries.
+        *
+        * In the future, this should have a separate slot for each stack
+        * with guard pages between them.
+        */
+       char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
+#endif
+};
+
+#define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE)
+
+extern void setup_cpu_entry_areas(void);
 
 /*
  * Here we define all the compile-time 'special' virtual
@@ -101,8 +140,8 @@ enum fixed_addresses {
        FIX_LNW_VRTC,
 #endif
        /* Fixmap entries to remap the GDTs, one per processor. */
-       FIX_GDT_REMAP_BEGIN,
-       FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1,
+       FIX_CPU_ENTRY_AREA_TOP,
+       FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1,
 
 #ifdef CONFIG_ACPI_APEI_GHES
        /* Used for GHES mapping from assorted contexts */
@@ -191,5 +230,30 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
 void __early_set_fixmap(enum fixed_addresses idx,
                        phys_addr_t phys, pgprot_t flags);
 
+static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page)
+{
+       BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
+
+       return FIX_CPU_ENTRY_AREA_BOTTOM - cpu*CPU_ENTRY_AREA_PAGES - page;
+}
+
+#define __get_cpu_entry_area_offset_index(cpu, offset) ({              \
+       BUILD_BUG_ON(offset % PAGE_SIZE != 0);                          \
+       __get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE);       \
+       })
+
+#define get_cpu_entry_area_index(cpu, field)                           \
+       __get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field))
+
+static inline struct cpu_entry_area *get_cpu_entry_area(int cpu)
+{
+       return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0));
+}
+
+static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu)
+{
+       return &get_cpu_entry_area(cpu)->SYSENTER_stack_page.stack;
+}
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_X86_FIXMAP_H */
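
The helpers added to fixmap.h map a CPU number to its cpu_entry_area pages top-down in the fixmap and expose the per-CPU SYSENTER stack. A hedged usage sketch; the function below is invented and only exercises the helpers defined in the hunk above:

#include <asm/fixmap.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void show_entry_area(void)
{
	int cpu = raw_smp_processor_id();

	/* Both helpers come from the fixmap.h hunk above. */
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
	struct SYSENTER_stack *ss = cpu_SYSENTER_stack(cpu);

	pr_info("cpu%d: entry area %px, SYSENTER stack %px\n", cpu, cea, ss);
}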
index b80e46733909c981aa9125c055d7e1f9f22b2409..2851077b6051b257e710dd437e2cdba7b1516e56 100644 (file)
@@ -99,14 +99,6 @@ struct irq_alloc_info {
                        void            *dmar_data;
                };
 #endif
-#ifdef CONFIG_HT_IRQ
-               struct {
-                       int             ht_pos;
-                       int             ht_idx;
-                       struct pci_dev  *ht_dev;
-                       void            *ht_update;
-               };
-#endif
 #ifdef CONFIG_X86_UV
                struct {
                        int             uv_limit;
diff --git a/arch/x86/include/asm/hypertransport.h b/arch/x86/include/asm/hypertransport.h
deleted file mode 100644 (file)
index 5d55df3..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_HYPERTRANSPORT_H
-#define _ASM_X86_HYPERTRANSPORT_H
-
-/*
- * Constants for x86 Hypertransport Interrupts.
- */
-
-#define HT_IRQ_LOW_BASE                        0xf8000000
-
-#define HT_IRQ_LOW_VECTOR_SHIFT                16
-#define HT_IRQ_LOW_VECTOR_MASK         0x00ff0000
-#define HT_IRQ_LOW_VECTOR(v)                                           \
-       (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
-
-#define HT_IRQ_LOW_DEST_ID_SHIFT       8
-#define HT_IRQ_LOW_DEST_ID_MASK                0x0000ff00
-#define HT_IRQ_LOW_DEST_ID(v)                                          \
-       (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
-
-#define HT_IRQ_LOW_DM_PHYSICAL         0x0000000
-#define HT_IRQ_LOW_DM_LOGICAL          0x0000040
-
-#define HT_IRQ_LOW_RQEOI_EDGE          0x0000000
-#define HT_IRQ_LOW_RQEOI_LEVEL         0x0000020
-
-
-#define HT_IRQ_LOW_MT_FIXED            0x0000000
-#define HT_IRQ_LOW_MT_ARBITRATED       0x0000004
-#define HT_IRQ_LOW_MT_SMI              0x0000008
-#define HT_IRQ_LOW_MT_NMI              0x000000c
-#define HT_IRQ_LOW_MT_INIT             0x0000010
-#define HT_IRQ_LOW_MT_STARTUP          0x0000014
-#define HT_IRQ_LOW_MT_EXTINT           0x0000018
-#define HT_IRQ_LOW_MT_LINT1            0x000008c
-#define HT_IRQ_LOW_MT_LINT0            0x0000098
-
-#define HT_IRQ_LOW_IRQ_MASKED          0x0000001
-
-
-#define HT_IRQ_HIGH_DEST_ID_SHIFT      0
-#define HT_IRQ_HIGH_DEST_ID_MASK       0x00ffffff
-#define HT_IRQ_HIGH_DEST_ID(v)                                         \
-       ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
-
-#endif /* _ASM_X86_HYPERTRANSPORT_H */
index 1b0a5abcd8aeb6e700013c5434aaeb0bba7a152f..96aa6b9884dc5b3bc8d54c9ef1c6258eea13a0d0 100644 (file)
 #ifndef _ASM_X86_HYPERVISOR_H
 #define _ASM_X86_HYPERVISOR_H
 
-#ifdef CONFIG_HYPERVISOR_GUEST
-
-#include <asm/kvm_para.h>
-#include <asm/x86_init.h>
-#include <asm/xen/hypervisor.h>
-
-/*
- * x86 hypervisor information
- */
-
+/* x86 hypervisor types  */
 enum x86_hypervisor_type {
        X86_HYPER_NATIVE = 0,
        X86_HYPER_VMWARE,
@@ -39,6 +30,12 @@ enum x86_hypervisor_type {
        X86_HYPER_KVM,
 };
 
+#ifdef CONFIG_HYPERVISOR_GUEST
+
+#include <asm/kvm_para.h>
+#include <asm/x86_init.h>
+#include <asm/xen/hypervisor.h>
+
 struct hypervisor_x86 {
        /* Hypervisor name */
        const char      *name;
@@ -58,7 +55,15 @@ struct hypervisor_x86 {
 
 extern enum x86_hypervisor_type x86_hyper_type;
 extern void init_hypervisor_platform(void);
+static inline bool hypervisor_is_type(enum x86_hypervisor_type type)
+{
+       return x86_hyper_type == type;
+}
 #else
 static inline void init_hypervisor_platform(void) { }
+static inline bool hypervisor_is_type(enum x86_hypervisor_type type)
+{
+       return type == X86_HYPER_NATIVE;
+}
 #endif /* CONFIG_HYPERVISOR_GUEST */
 #endif /* _ASM_X86_HYPERVISOR_H */
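
Moving the enum outside the CONFIG_HYPERVISOR_GUEST guard and adding hypervisor_is_type() lets callers test the detected hypervisor without pulling in the guest-only headers; with the option disabled the helper degenerates to a compile-time comparison against X86_HYPER_NATIVE. A small usage sketch (the caller and messages are invented):

#include <asm/hypervisor.h>
#include <linux/printk.h>

static void report_hypervisor(void)
{
	if (hypervisor_is_type(X86_HYPER_KVM))
		pr_info("running as a KVM guest\n");
	else if (hypervisor_is_type(X86_HYPER_NATIVE))
		pr_info("running on bare metal\n");
	else
		pr_info("running under some other hypervisor\n");
}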
index e1d3b4ce8a925350df55f898dbdb9cf0f416881f..2b6ccf2c49f11c9c54fbe448ea7d33d997b99ca6 100644 (file)
@@ -18,6 +18,6 @@
 void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs);
 int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs);
 unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx);
-char insn_get_code_seg_params(struct pt_regs *regs);
+int insn_get_code_seg_params(struct pt_regs *regs);
 
 #endif /* _ASM_X86_INSN_EVAL_H */
index 93ae8aee178075da0110e026c546ab2db8674775..95e948627fd04878883041543c3b5f13703ceaa9 100644 (file)
@@ -111,6 +111,10 @@ build_mmio_write(__writeq, "q", unsigned long, "r", )
 
 #endif
 
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
+extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+
 /**
  *     virt_to_phys    -       map virtual addresses to physical
  *     @address: address to remap
index f695cc6b8e1f4476263d4477063aec247accde0d..139feef467f7e298c6f9db57c43facc64f5468b6 100644 (file)
@@ -56,10 +56,4 @@ extern void arch_init_msi_domain(struct irq_domain *domain);
 static inline void arch_init_msi_domain(struct irq_domain *domain) { }
 #endif
 
-#ifdef CONFIG_HT_IRQ
-extern void arch_init_htirq_domain(struct irq_domain *domain);
-#else
-static inline void arch_init_htirq_domain(struct irq_domain *domain) { }
-#endif
-
 #endif
index c8ef23f2c28f17c59308b9c41179c47f85e075ad..89f08955fff733c688a5ce4f4a0b8d74050ee617 100644 (file)
@@ -142,6 +142,9 @@ static inline notrace unsigned long arch_local_irq_save(void)
        swapgs;                                 \
        sysretl
 
+#ifdef CONFIG_DEBUG_ENTRY
+#define SAVE_FLAGS(x)          pushfq; popq %rax
+#endif
 #else
 #define INTERRUPT_RETURN               iret
 #define ENABLE_INTERRUPTS_SYSEXIT      sti; sysexit
index f86a8caa561e8873c3f34e6e8b8cd509ebadd819..395c9631e000a3a17aa574c1b25fcc2cafd5b5fb 100644 (file)
@@ -26,6 +26,7 @@ extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_stack_regs(struct pt_regs *regs);
 extern void __show_regs(struct pt_regs *regs, int all);
+extern void show_iret_regs(struct pt_regs *regs);
 extern unsigned long oops_begin(void);
 extern void oops_end(unsigned long, struct pt_regs *, int signr);
 
diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
index 034caa1a084e360ff74c77e84116bb0de6e28dcd..b24b1c8b397989304ec88d9979c66b1b5415f78e 100644 (file)
@@ -214,8 +214,6 @@ struct x86_emulate_ops {
        void (*halt)(struct x86_emulate_ctxt *ctxt);
        void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
        int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
-       void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
-       void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
        int (*intercept)(struct x86_emulate_ctxt *ctxt,
                         struct x86_instruction_info *info,
                         enum x86_intercept_stage stage);
index 1bfb99770c34197b6c0627897753d282b3e5c378..51679843132829e38ed204eda60d50379a4fabb5 100644 (file)
@@ -536,7 +536,20 @@ struct kvm_vcpu_arch {
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;
 
+       /*
+        * QEMU userspace and the guest each have their own FPU state.
+        * In vcpu_run, we switch between the user and guest FPU contexts.
+        * While running a VCPU, the VCPU thread will have the guest FPU
+        * context.
+        *
+        * Note that while the PKRU state lives inside the fpu registers,
+        * it is switched out separately at VMENTER and VMEXIT time. The
+        * "guest_fpu" state here contains the guest FPU context, with the
+        * host PRKU bits.
+        * host PKRU bits.
+       struct fpu user_fpu;
        struct fpu guest_fpu;
+
        u64 xcr0;
        u64 guest_supported_xcr0;
        u32 guest_xstate_size;
@@ -1161,7 +1174,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
 static inline int emulate_instruction(struct kvm_vcpu *vcpu,
                        int emulation_type)
 {
-       return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+       return x86_emulate_instruction(vcpu, 0,
+                       emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
 }
 
 void kvm_enable_efer_bits(u64);
@@ -1434,4 +1448,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #define put_smstate(type, buf, offset, val)                      \
        *(type *)((buf) + (offset) - 0x7e00) = val
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+               unsigned long start, unsigned long end);
+
 #endif /* _ASM_X86_KVM_HOST_H */
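
The new user_fpu/guest_fpu pair documented above lets KVM swap FPU contexts once around vcpu_run rather than on every emulated instruction. The sketch below only illustrates that idea; it is not the actual KVM code, and the helper names (copy_fpregs_to_fpstate(), copy_kernel_to_fpregs()) are assumed from the x86 FPU code of this period:

#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <asm/fpu/internal.h>

/* Illustrative only: stash the userspace (QEMU) FPU, install the guest's. */
static void load_guest_fpu_sketch(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);		/* save user FPU  */
	copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);	/* load guest FPU */
	preempt_enable();
}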
index 283efcaac8aff86f2c004bc23e4b8642cbf3d527..892df375b6155a51f584760efb9f9e77c3f732e8 100644 (file)
@@ -927,6 +927,15 @@ extern void default_banner(void);
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
+
+#ifdef CONFIG_DEBUG_ENTRY
+#define SAVE_FLAGS(clobbers)                                        \
+       PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
+                 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);        \
+                 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl);    \
+                 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+#endif
+
 #endif /* CONFIG_X86_32 */
 
 #endif /* __ASSEMBLY__ */
index 09f9e1e00e3bd30b5869b126f2ab11be49388f05..95e2dfd755218ccfaf6417b44c822b545a35568e 100644 (file)
@@ -1061,7 +1061,7 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);
 
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        return pmd_flags(pmd) & _PAGE_RW;
@@ -1088,6 +1088,12 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
        clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
 }
 
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+       return pud_flags(pud) & _PAGE_RW;
+}
+
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *
index 2db7cf720b04b2d067df4f3138f2914de0e5cc0f..1f2434ee9f806c4355a38599ab4485140a8cd1df 100644 (file)
@@ -132,6 +132,7 @@ struct cpuinfo_x86 {
        /* Index into per_cpu list: */
        u16                     cpu_index;
        u32                     microcode;
+       unsigned                initialized : 1;
 } __randomize_layout;
 
 struct cpuid_regs {
@@ -162,9 +163,9 @@ enum cpuid_regs_idx {
 extern struct cpuinfo_x86      boot_cpu_data;
 extern struct cpuinfo_x86      new_cpu_data;
 
-extern struct tss_struct       doublefault_tss;
-extern __u32                   cpu_caps_cleared[NCAPINTS];
-extern __u32                   cpu_caps_set[NCAPINTS];
+extern struct x86_hw_tss       doublefault_tss;
+extern __u32                   cpu_caps_cleared[NCAPINTS + NBUGINTS];
+extern __u32                   cpu_caps_set[NCAPINTS + NBUGINTS];
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
@@ -252,6 +253,11 @@ static inline void load_cr3(pgd_t *pgdir)
        write_cr3(__sme_pa(pgdir));
 }
 
+/*
+ * Note that while the legacy 'TSS' name comes from 'Task State Segment',
+ * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
+ * unrelated to the task-switch mechanism:
+ */
 #ifdef CONFIG_X86_32
 /* This is the TSS defined by the hardware. */
 struct x86_hw_tss {
@@ -304,7 +310,13 @@ struct x86_hw_tss {
 struct x86_hw_tss {
        u32                     reserved1;
        u64                     sp0;
+
+       /*
+        * We store cpu_current_top_of_stack in sp1 so it's always accessible.
+        * Linux does not use ring 1, so sp1 is not otherwise needed.
+        */
        u64                     sp1;
+
        u64                     sp2;
        u64                     reserved2;
        u64                     ist[7];
@@ -322,12 +334,22 @@ struct x86_hw_tss {
 #define IO_BITMAP_BITS                 65536
 #define IO_BITMAP_BYTES                        (IO_BITMAP_BITS/8)
 #define IO_BITMAP_LONGS                        (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET               offsetof(struct tss_struct, io_bitmap)
+#define IO_BITMAP_OFFSET               (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss))
 #define INVALID_IO_BITMAP_OFFSET       0x8000
 
+struct SYSENTER_stack {
+       unsigned long           words[64];
+};
+
+struct SYSENTER_stack_page {
+       struct SYSENTER_stack stack;
+} __aligned(PAGE_SIZE);
+
 struct tss_struct {
        /*
-        * The hardware state:
+        * The fixed hardware portion.  This must not cross a page boundary
+        * at risk of violating the SDM's advice and potentially triggering
+        * errata.
         */
        struct x86_hw_tss       x86_tss;
 
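
Since the TSS descriptor base will now point at the embedded x86_hw_tss rather
than at the start of struct tss_struct, the I/O bitmap offset has to be
expressed relative to that inner member, which is what the new IO_BITMAP_OFFSET
above does. A stand-alone sketch of the same arithmetic, using a toy structure
rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

struct hw_tss {
	unsigned long	sp0, sp1, sp2;
	unsigned short	io_bitmap_base;
};

struct tss {
	struct hw_tss	x86_tss;	/* this address goes into the descriptor */
	unsigned long	io_bitmap[1024 + 1];
};

int main(void)
{
	/* The CPU adds io_bitmap_base to the descriptor base (&tss.x86_tss). */
	size_t io_bitmap_offset = offsetof(struct tss, io_bitmap) -
				  offsetof(struct tss, x86_tss);

	printf("I/O bitmap offset relative to the hardware TSS: %zu\n",
	       io_bitmap_offset);
	return 0;
}
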
@@ -338,18 +360,9 @@ struct tss_struct {
         * be within the limit.
         */
        unsigned long           io_bitmap[IO_BITMAP_LONGS + 1];
+} __aligned(PAGE_SIZE);
 
-#ifdef CONFIG_X86_32
-       /*
-        * Space for the temporary SYSENTER stack.
-        */
-       unsigned long           SYSENTER_stack_canary;
-       unsigned long           SYSENTER_stack[64];
-#endif
-
-} ____cacheline_aligned;
-
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
 
 /*
  * sizeof(unsigned long) coming from an extra "long" at the end
@@ -363,6 +376,9 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
 
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
+#else
+/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
+#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
 #endif
 
 /*
@@ -522,7 +538,7 @@ static inline void native_set_iopl_mask(unsigned mask)
 static inline void
 native_load_sp0(unsigned long sp0)
 {
-       this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+       this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 static inline void native_swapgs(void)
@@ -534,12 +550,12 @@ static inline void native_swapgs(void)
 
 static inline unsigned long current_top_of_stack(void)
 {
-#ifdef CONFIG_X86_64
-       return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
-#else
-       /* sp0 on x86_32 is special in and around vm86 mode. */
+       /*
+        *  We can't read directly from tss.sp0: sp0 on x86_32 is special in
+        *  and around vm86 mode and sp0 on x86_64 is special because of the
+        *  entry trampoline.
+        */
        return this_cpu_read_stable(cpu_current_top_of_stack);
-#endif
 }
 
 static inline bool on_thread_stack(void)
index b20f9d623f9c639fd212c5e313bd5e450747b1eb..8f09012b92e779d7aabf4ad663b8eb10b2379c37 100644 (file)
  */
 #define EARLY_IDT_HANDLER_SIZE 9
 
+/*
+ * xen_early_idt_handler_array is for Xen pv guests: for each entry in
+ * early_idt_handler_array it contains a prequel in the form of
+ * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to
+ * max 8 bytes.
+ */
+#define XEN_EARLY_IDT_HANDLER_SIZE 8
+
 #ifndef __ASSEMBLY__
 
 extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
 extern void early_ignore_irq(void);
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];
+#endif
+
 /*
  * Load a segment. Fall back on loading the zero segment if something goes
  * wrong.  This variant assumes that loading zero fully clears the segment.
index 8da111b3c342bbb61a9e630e101c8a83422a15ea..f8062bfd43a072dc950b23f242893b6ca3310652 100644 (file)
@@ -16,6 +16,7 @@ enum stack_type {
        STACK_TYPE_TASK,
        STACK_TYPE_IRQ,
        STACK_TYPE_SOFTIRQ,
+       STACK_TYPE_SYSENTER,
        STACK_TYPE_EXCEPTION,
        STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1,
 };
@@ -28,6 +29,8 @@ struct stack_info {
 bool in_task_stack(unsigned long *stack, struct task_struct *task,
                   struct stack_info *info);
 
+bool in_sysenter_stack(unsigned long *stack, struct stack_info *info);
+
 int get_stack_info(unsigned long *stack, struct task_struct *task,
                   struct stack_info *info, unsigned long *visit_mask);
 
index 982c325dad3377a4f0d80a5808f4d731a87926a5..8be6afb584715dc8d5a50d1bbd989bf053366294 100644 (file)
 
 /* image of the saved processor state */
 struct saved_context {
-       u16 es, fs, gs, ss;
+       /*
+        * On x86_32, all segment registers, with the possible exception of
+        * gs, are saved at kernel entry in pt_regs.
+        */
+#ifdef CONFIG_X86_32_LAZY_GS
+       u16 gs;
+#endif
        unsigned long cr0, cr2, cr3, cr4;
        u64 misc_enable;
        bool misc_enable_saved;
index 7306e911faee20694908bdf482166546776a7e92..a7af9f53c0cb773d05fd84ebe94525a1971e3ebb 100644 (file)
  */
 struct saved_context {
        struct pt_regs regs;
-       u16 ds, es, fs, gs, ss;
-       unsigned long gs_base, gs_kernel_base, fs_base;
+
+       /*
+        * User CS and SS are saved in current_pt_regs().  The rest of the
+        * segment selectors need to be saved and restored here.
+        */
+       u16 ds, es, fs, gs;
+
+       /*
+        * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
+        * so we save them separately.  We save the kernelmode GSBASE to
+        * restore percpu access after resume.
+        */
+       unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;
+
        unsigned long cr0, cr2, cr3, cr4, cr8;
        u64 misc_enable;
        bool misc_enable_saved;
@@ -30,8 +42,7 @@ struct saved_context {
        u16 gdt_pad; /* Unused */
        struct desc_ptr gdt_desc;
        u16 idt_pad;
-       u16 idt_limit;
-       unsigned long idt_base;
+       struct desc_ptr idt;
        u16 ldt;
        u16 tss;
        unsigned long tr;
index 8c6bd6863db9d6b737cd0649324c154f9b9798a3..9b6df68d8fd1eba26f3651faa5c8b8f4dcf223f1 100644 (file)
@@ -79,10 +79,10 @@ do {                                                                        \
 static inline void refresh_sysenter_cs(struct thread_struct *thread)
 {
        /* Only happens when SEP is enabled, no need to test "SEP"arately: */
-       if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
+       if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
                return;
 
-       this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
+       this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
        wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 }
 #endif
@@ -90,10 +90,12 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
 /* This is used when switching tasks or entering/exiting vm86 mode. */
 static inline void update_sp0(struct task_struct *task)
 {
+       /* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
 #ifdef CONFIG_X86_32
        load_sp0(task->thread.sp0);
 #else
-       load_sp0(task_top_of_stack(task));
+       if (static_cpu_has(X86_FEATURE_XENPV))
+               load_sp0(task_top_of_stack(task));
 #endif
 }
 
index 70f425947dc50f3e99ca639c0ead0d7e1cce636d..00223333821a96616647a9cbb6fe729c4a18b7b6 100644 (file)
@@ -207,7 +207,7 @@ static inline int arch_within_stack_frames(const void * const stack,
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
-# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
+# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
 #endif
 
 #endif
index 509046cfa5ce893357366348468a5c5ff8e86a09..877b5c1a1b1247116e20e7272dbade77e1874fc4 100644 (file)
@@ -173,40 +173,43 @@ static inline void cr4_init_shadow(void)
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
 }
 
+static inline void __cr4_set(unsigned long cr4)
+{
+       lockdep_assert_irqs_disabled();
+       this_cpu_write(cpu_tlbstate.cr4, cr4);
+       __write_cr4(cr4);
+}
+
 /* Set in this cpu's CR4. */
 static inline void cr4_set_bits(unsigned long mask)
 {
-       unsigned long cr4;
+       unsigned long cr4, flags;
 
+       local_irq_save(flags);
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       if ((cr4 | mask) != cr4) {
-               cr4 |= mask;
-               this_cpu_write(cpu_tlbstate.cr4, cr4);
-               __write_cr4(cr4);
-       }
+       if ((cr4 | mask) != cr4)
+               __cr4_set(cr4 | mask);
+       local_irq_restore(flags);
 }
 
 /* Clear in this cpu's CR4. */
 static inline void cr4_clear_bits(unsigned long mask)
 {
-       unsigned long cr4;
+       unsigned long cr4, flags;
 
+       local_irq_save(flags);
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       if ((cr4 & ~mask) != cr4) {
-               cr4 &= ~mask;
-               this_cpu_write(cpu_tlbstate.cr4, cr4);
-               __write_cr4(cr4);
-       }
+       if ((cr4 & ~mask) != cr4)
+               __cr4_set(cr4 & ~mask);
+       local_irq_restore(flags);
 }
 
-static inline void cr4_toggle_bits(unsigned long mask)
+static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
 {
        unsigned long cr4;
 
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       cr4 ^= mask;
-       this_cpu_write(cpu_tlbstate.cr4, cr4);
-       __write_cr4(cr4);
+       __cr4_set(cr4 ^ mask);
 }
 
 /* Read the CR4 shadow. */
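
The rewritten helpers above follow a single pattern: read the per-CPU shadow,
compute the new value, and let __cr4_set() update shadow and register together
with interrupts off so the two cannot diverge. A user-space model of that
pattern, where a plain variable stands in for CR4 and a flag stands in for the
IRQ state (illustration only, not kernel code):

#include <assert.h>
#include <stdio.h>

static unsigned long hw_cr4;		/* stands in for the real CR4 */
static unsigned long shadow_cr4;	/* per-CPU cached copy */
static int irqs_disabled;

static void local_irq_save(unsigned long *flags)
{
	*flags = irqs_disabled;
	irqs_disabled = 1;
}

static void local_irq_restore(unsigned long flags)
{
	irqs_disabled = flags;
}

/* Must run with interrupts off so shadow and register stay in sync. */
static void __cr4_set(unsigned long cr4)
{
	assert(irqs_disabled);
	shadow_cr4 = cr4;
	hw_cr4 = cr4;
}

static void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4, flags;

	local_irq_save(&flags);
	cr4 = shadow_cr4;
	if ((cr4 | mask) != cr4)
		__cr4_set(cr4 | mask);
	local_irq_restore(flags);
}

int main(void)
{
	cr4_set_bits(1UL << 3);		/* some arbitrary feature bit */
	printf("cr4=%#lx shadow=%#lx\n", hw_cr4, shadow_cr4);
	return 0;
}
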
index 1fadd310ff680ece697fa65a8db410c380a8547e..31051f35cbb768e452c4f76a60c5415a45f572e7 100644 (file)
@@ -75,7 +75,6 @@ dotraplinkage void do_segment_not_present(struct pt_regs *, long);
 dotraplinkage void do_stack_segment(struct pt_regs *, long);
 #ifdef CONFIG_X86_64
 dotraplinkage void do_double_fault(struct pt_regs *, long);
-asmlinkage struct pt_regs *sync_regs(struct pt_regs *);
 #endif
 dotraplinkage void do_general_protection(struct pt_regs *, long);
 dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
index e9cc6fe1fc6f953c38ddcc61fcf06fd90d72ab04..c1688c2d0a128f063053697dc60bcbfbca509765 100644 (file)
@@ -7,6 +7,9 @@
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
+#define IRET_FRAME_OFFSET (offsetof(struct pt_regs, ip))
+#define IRET_FRAME_SIZE   (sizeof(struct pt_regs) - IRET_FRAME_OFFSET)
+
 struct unwind_state {
        struct stack_info stack_info;
        unsigned long stack_mask;
@@ -52,6 +55,10 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
 }
 
 #if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER)
+/*
+ * WARNING: The entire pt_regs may not be safe to dereference.  In some cases,
+ * only the iret frame registers are accessible.  Use with caution!
+ */
 static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
 {
        if (unwind_done(state))
index da1489cb64dce5fcec622f2869cd26d2b58d00ac..1e901e421f2db09d4a0c2c543f47b0e5bbc8be62 100644 (file)
@@ -1,6 +1,7 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generated-y += unistd_32.h
 generated-y += unistd_64.h
 generated-y += unistd_x32.h
index ef9e02e614d0691ac0c5cdba8fb2d878a59e416e..f4c463df8b0886816c5b32a989396fe5e0f714f0 100644 (file)
@@ -342,13 +342,12 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e
 #ifdef CONFIG_X86_IO_APIC
 #define MP_ISA_BUS             0
 
+static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
+                                               u8 trigger, u32 gsi);
+
 static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
                                          u32 gsi)
 {
-       int ioapic;
-       int pin;
-       struct mpc_intsrc mp_irq;
-
        /*
         * Check bus_irq boundary.
         */
@@ -357,14 +356,6 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
                return;
        }
 
-       /*
-        * Convert 'gsi' to 'ioapic.pin'.
-        */
-       ioapic = mp_find_ioapic(gsi);
-       if (ioapic < 0)
-               return;
-       pin = mp_find_ioapic_pin(ioapic, gsi);
-
        /*
         * TBD: This check is for faulty timer entries, where the override
         *      erroneously sets the trigger to level, resulting in a HUGE
@@ -373,16 +364,8 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
        if ((bus_irq == 0) && (trigger == 3))
                trigger = 1;
 
-       mp_irq.type = MP_INTSRC;
-       mp_irq.irqtype = mp_INT;
-       mp_irq.irqflag = (trigger << 2) | polarity;
-       mp_irq.srcbus = MP_ISA_BUS;
-       mp_irq.srcbusirq = bus_irq;     /* IRQ */
-       mp_irq.dstapic = mpc_ioapic_id(ioapic); /* APIC ID */
-       mp_irq.dstirq = pin;    /* INTIN# */
-
-       mp_save_irq(&mp_irq);
-
+       if (mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi) < 0)
+               return;
        /*
         * Reset default identity mapping if gsi is also a legacy IRQ,
         * otherwise there will be more than one entry with the same GSI
@@ -429,6 +412,34 @@ static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
        return 0;
 }
 
+static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
+                                               u8 trigger, u32 gsi)
+{
+       struct mpc_intsrc mp_irq;
+       int ioapic, pin;
+
+       /* Convert 'gsi' to 'ioapic.pin'(INTIN#) */
+       ioapic = mp_find_ioapic(gsi);
+       if (ioapic < 0) {
+               pr_warn("Failed to find ioapic for gsi: %u\n", gsi);
+               return ioapic;
+       }
+
+       pin = mp_find_ioapic_pin(ioapic, gsi);
+
+       mp_irq.type = MP_INTSRC;
+       mp_irq.irqtype = mp_INT;
+       mp_irq.irqflag = (trigger << 2) | polarity;
+       mp_irq.srcbus = MP_ISA_BUS;
+       mp_irq.srcbusirq = bus_irq;
+       mp_irq.dstapic = mpc_ioapic_id(ioapic);
+       mp_irq.dstirq = pin;
+
+       mp_save_irq(&mp_irq);
+
+       return 0;
+}
+
 static int __init
 acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
 {
@@ -473,7 +484,11 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
        if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
                polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
 
-       mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+       if (bus_irq < NR_IRQS_LEGACY)
+               mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+       else
+               mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi);
+
        acpi_penalize_sci_irq(bus_irq, trigger, polarity);
 
        /*
index a9e08924927ef6da6f4620942b3ceba654d1578d..a6fcaf16cdbf9b26a36378bf764fb955bf6ab861 100644 (file)
@@ -12,7 +12,6 @@ obj-y                         += hw_nmi.o
 
 obj-$(CONFIG_X86_IO_APIC)      += io_apic.o
 obj-$(CONFIG_PCI_MSI)          += msi.o
-obj-$(CONFIG_HT_IRQ)           += htirq.o
 obj-$(CONFIG_SMP)              += ipi.o
 
 ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c
deleted file mode 100644 (file)
index b07075d..0000000
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Support Hypertransport IRQ
- *
- * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
- *     Moved from arch/x86/kernel/apic/io_apic.c.
- * Jiang Liu <jiang.liu@linux.intel.com>
- *     Add support of hierarchical irqdomain
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/htirq.h>
-#include <asm/irqdomain.h>
-#include <asm/hw_irq.h>
-#include <asm/apic.h>
-#include <asm/hypertransport.h>
-
-static struct irq_domain *htirq_domain;
-
-/*
- * Hypertransport interrupt support
- */
-static int
-ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
-{
-       struct irq_data *parent = data->parent_data;
-       int ret;
-
-       ret = parent->chip->irq_set_affinity(parent, mask, force);
-       if (ret >= 0) {
-               struct ht_irq_msg msg;
-               struct irq_cfg *cfg = irqd_cfg(data);
-
-               fetch_ht_irq_msg(data->irq, &msg);
-               msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK |
-                                   HT_IRQ_LOW_DEST_ID_MASK);
-               msg.address_lo |= HT_IRQ_LOW_VECTOR(cfg->vector) |
-                                 HT_IRQ_LOW_DEST_ID(cfg->dest_apicid);
-               msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
-               msg.address_hi |= HT_IRQ_HIGH_DEST_ID(cfg->dest_apicid);
-               write_ht_irq_msg(data->irq, &msg);
-       }
-
-       return ret;
-}
-
-static struct irq_chip ht_irq_chip = {
-       .name                   = "PCI-HT",
-       .irq_mask               = mask_ht_irq,
-       .irq_unmask             = unmask_ht_irq,
-       .irq_ack                = irq_chip_ack_parent,
-       .irq_set_affinity       = ht_set_affinity,
-       .irq_retrigger          = irq_chip_retrigger_hierarchy,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
-};
-
-static int htirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
-                             unsigned int nr_irqs, void *arg)
-{
-       struct ht_irq_cfg *ht_cfg;
-       struct irq_alloc_info *info = arg;
-       struct pci_dev *dev;
-       irq_hw_number_t hwirq;
-       int ret;
-
-       if (nr_irqs > 1 || !info)
-               return -EINVAL;
-
-       dev = info->ht_dev;
-       hwirq = (info->ht_idx & 0xFF) |
-               PCI_DEVID(dev->bus->number, dev->devfn) << 8 |
-               (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 24;
-       if (irq_find_mapping(domain, hwirq) > 0)
-               return -EEXIST;
-
-       ht_cfg = kmalloc(sizeof(*ht_cfg), GFP_KERNEL);
-       if (!ht_cfg)
-               return -ENOMEM;
-
-       ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
-       if (ret < 0) {
-               kfree(ht_cfg);
-               return ret;
-       }
-
-       /* Initialize msg to a value that will never match the first write. */
-       ht_cfg->msg.address_lo = 0xffffffff;
-       ht_cfg->msg.address_hi = 0xffffffff;
-       ht_cfg->dev = info->ht_dev;
-       ht_cfg->update = info->ht_update;
-       ht_cfg->pos = info->ht_pos;
-       ht_cfg->idx = 0x10 + (info->ht_idx * 2);
-       irq_domain_set_info(domain, virq, hwirq, &ht_irq_chip, ht_cfg,
-                           handle_edge_irq, ht_cfg, "edge");
-
-       return 0;
-}
-
-static void htirq_domain_free(struct irq_domain *domain, unsigned int virq,
-                             unsigned int nr_irqs)
-{
-       struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
-
-       BUG_ON(nr_irqs != 1);
-       kfree(irq_data->chip_data);
-       irq_domain_free_irqs_top(domain, virq, nr_irqs);
-}
-
-static int htirq_domain_activate(struct irq_domain *domain,
-                                struct irq_data *irq_data, bool early)
-{
-       struct ht_irq_msg msg;
-       struct irq_cfg *cfg = irqd_cfg(irq_data);
-
-       msg.address_hi = HT_IRQ_HIGH_DEST_ID(cfg->dest_apicid);
-       msg.address_lo =
-               HT_IRQ_LOW_BASE |
-               HT_IRQ_LOW_DEST_ID(cfg->dest_apicid) |
-               HT_IRQ_LOW_VECTOR(cfg->vector) |
-               ((apic->irq_dest_mode == 0) ?
-                       HT_IRQ_LOW_DM_PHYSICAL :
-                       HT_IRQ_LOW_DM_LOGICAL) |
-               HT_IRQ_LOW_RQEOI_EDGE |
-               ((apic->irq_delivery_mode != dest_LowestPrio) ?
-                       HT_IRQ_LOW_MT_FIXED :
-                       HT_IRQ_LOW_MT_ARBITRATED) |
-               HT_IRQ_LOW_IRQ_MASKED;
-       write_ht_irq_msg(irq_data->irq, &msg);
-       return 0;
-}
-
-static void htirq_domain_deactivate(struct irq_domain *domain,
-                                   struct irq_data *irq_data)
-{
-       struct ht_irq_msg msg;
-
-       memset(&msg, 0, sizeof(msg));
-       write_ht_irq_msg(irq_data->irq, &msg);
-}
-
-static const struct irq_domain_ops htirq_domain_ops = {
-       .alloc          = htirq_domain_alloc,
-       .free           = htirq_domain_free,
-       .activate       = htirq_domain_activate,
-       .deactivate     = htirq_domain_deactivate,
-};
-
-void __init arch_init_htirq_domain(struct irq_domain *parent)
-{
-       struct fwnode_handle *fn;
-
-       if (disable_apic)
-               return;
-
-       fn = irq_domain_alloc_named_fwnode("PCI-HT");
-       if (!fn)
-               goto warn;
-
-       htirq_domain = irq_domain_create_tree(fn, &htirq_domain_ops, NULL);
-       irq_domain_free_fwnode(fn);
-       if (!htirq_domain)
-               goto warn;
-
-       htirq_domain->parent = parent;
-       return;
-
-warn:
-       pr_warn("Failed to initialize irqdomain for HTIRQ.\n");
-}
-
-int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
-                     ht_irq_update_t *update)
-{
-       struct irq_alloc_info info;
-
-       if (!htirq_domain)
-               return -ENOSYS;
-
-       init_irq_alloc_info(&info, NULL);
-       info.ht_idx = idx;
-       info.ht_pos = pos;
-       info.ht_dev = dev;
-       info.ht_update = update;
-
-       return irq_domain_alloc_irqs(htirq_domain, 1, dev_to_node(&dev->dev),
-                                    &info);
-}
-
-void arch_teardown_ht_irq(unsigned int irq)
-{
-       irq_domain_free_irqs(irq, 1);
-}
index 05c85e693a5d4c9d9e17da19337f2318f29aa621..750449152b04b4feed3c98ddc772db91f003f818 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
+ * Local APIC related interfaces to support IOAPIC, MSI, etc.
  *
  * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
  *     Moved from arch/x86/kernel/apic/io_apic.c.
@@ -542,8 +542,8 @@ error:
 }
 
 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
-void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
-                          struct irq_data *irqd, int ind)
+static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
+                                 struct irq_data *irqd, int ind)
 {
        unsigned int cpu, vector, prev_cpu, prev_vector;
        struct apic_chip_data *apicd;
@@ -601,7 +601,7 @@ int __init arch_probe_nr_irqs(void)
                nr_irqs = NR_VECTORS * nr_cpu_ids;
 
        nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
-#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
+#if defined(CONFIG_PCI_MSI)
        /*
         * for MSI and HT dyn irq
         */
@@ -663,7 +663,6 @@ int __init arch_early_irq_init(void)
        irq_set_default_host(x86_vector_domain);
 
        arch_init_msi_domain(x86_vector_domain);
-       arch_init_htirq_domain(x86_vector_domain);
 
        BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
 
index 8ea78275480dafeb702e11ba73364cd9e7c52f21..cd360a5e0dca30f2f1ad052b197606e55f701db0 100644 (file)
@@ -93,4 +93,10 @@ void common(void) {
 
        BLANK();
        DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+
+       /* Layout info for cpu_entry_area */
+       OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
+       OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
+       OFFSET(CPU_ENTRY_AREA_SYSENTER_stack, cpu_entry_area, SYSENTER_stack_page);
+       DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack));
 }
index dedf428b20b68b0a4748fc1ac3032193c9121362..7d20d9c0b3d69cfaa717233a868218fe9d2cb694 100644 (file)
@@ -47,13 +47,8 @@ void foo(void)
        BLANK();
 
        /* Offset from the sysenter stack to tss.sp0 */
-       DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
-              offsetofend(struct tss_struct, SYSENTER_stack));
-
-       /* Offset from cpu_tss to SYSENTER_stack */
-       OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack);
-       /* Size of SYSENTER_stack */
-       DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack));
+       DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
+              offsetofend(struct cpu_entry_area, SYSENTER_stack_page.stack));
 
 #ifdef CONFIG_CC_STACKPROTECTOR
        BLANK();
index 630212fa9b9da3f0498fc30d4c193c5926c43abb..bf51e51d808dd8914abd3b4bca69b37ce3ec023b 100644 (file)
@@ -23,6 +23,9 @@ int main(void)
 #ifdef CONFIG_PARAVIRT
        OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
        OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
+#ifdef CONFIG_DEBUG_ENTRY
+       OFFSET(PV_IRQ_save_fl, pv_irq_ops, save_fl);
+#endif
        BLANK();
 #endif
 
@@ -63,6 +66,7 @@ int main(void)
 
        OFFSET(TSS_ist, tss_struct, x86_tss.ist);
        OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
+       OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
        BLANK();
 
 #ifdef CONFIG_CC_STACKPROTECTOR
index d58184b7cd4438144e2d0ac3f4744d19ff4ffb31..bcb75dc97d44075d2eecb3137b91f934072352b0 100644 (file)
@@ -804,8 +804,11 @@ static void init_amd(struct cpuinfo_x86 *c)
        case 0x17: init_amd_zn(c); break;
        }
 
-       /* Enable workaround for FXSAVE leak */
-       if (c->x86 >= 6)
+       /*
+        * Enable workaround for FXSAVE leak on CPUs
+        * without the XSaveErPtr feature
+        */
+       if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
                set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
 
        cpu_detect_cache_sizes(c);
index 13ae9e5eec2f5c8e40f89f2ecd077fc852d8d32c..7416da3ec4dfa0b0f275dd10a5f9bfa12b884022 100644 (file)
@@ -341,6 +341,8 @@ static __always_inline void setup_umip(struct cpuinfo_x86 *c)
 
        cr4_set_bits(X86_CR4_UMIP);
 
+       pr_info("x86/cpu: Activated the Intel User Mode Instruction Prevention (UMIP) CPU feature\n");
+
        return;
 
 out:
@@ -474,8 +476,8 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
        return NULL;            /* Not found */
 }
 
-__u32 cpu_caps_cleared[NCAPINTS];
-__u32 cpu_caps_set[NCAPINTS];
+__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
+__u32 cpu_caps_set[NCAPINTS + NBUGINTS];
 
 void load_percpu_segment(int cpu)
 {
@@ -488,27 +490,116 @@ void load_percpu_segment(int cpu)
        load_stack_canary_segment();
 }
 
-/* Setup the fixmap mapping only once per-processor */
-static inline void setup_fixmap_gdt(int cpu)
+#ifdef CONFIG_X86_32
+/* The 32-bit entry code needs to find cpu_entry_area. */
+DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+#endif
+
+#ifdef CONFIG_X86_64
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+         [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
+         [DEBUG_STACK - 1]                     = DEBUG_STKSZ
+};
+
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+       [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+#endif
+
+static DEFINE_PER_CPU_PAGE_ALIGNED(struct SYSENTER_stack_page,
+                                  SYSENTER_stack_storage);
+
+static void __init
+set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
+{
+       for ( ; pages; pages--, idx--, ptr += PAGE_SIZE)
+               __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot);
+}
+
+/* Setup the fixmap mappings only once per-processor */
+static void __init setup_cpu_entry_area(int cpu)
 {
 #ifdef CONFIG_X86_64
-       /* On 64-bit systems, we use a read-only fixmap GDT. */
-       pgprot_t prot = PAGE_KERNEL_RO;
+       extern char _entry_trampoline[];
+
+       /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
+       pgprot_t gdt_prot = PAGE_KERNEL_RO;
+       pgprot_t tss_prot = PAGE_KERNEL_RO;
 #else
        /*
         * On native 32-bit systems, the GDT cannot be read-only because
         * our double fault handler uses a task gate, and entering through
-        * a task gate needs to change an available TSS to busy.  If the GDT
-        * is read-only, that will triple fault.
+        * a task gate needs to change an available TSS to busy.  If the
+        * GDT is read-only, that will triple fault.  The TSS cannot be
+        * read-only because the CPU writes to it on task switches.
         *
-        * On Xen PV, the GDT must be read-only because the hypervisor requires
-        * it.
+        * On Xen PV, the GDT must be read-only because the hypervisor
+        * requires it.
         */
-       pgprot_t prot = boot_cpu_has(X86_FEATURE_XENPV) ?
+       pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
                PAGE_KERNEL_RO : PAGE_KERNEL;
+       pgprot_t tss_prot = PAGE_KERNEL;
+#endif
+
+       __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
+       set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, SYSENTER_stack_page),
+                               per_cpu_ptr(&SYSENTER_stack_storage, cpu), 1,
+                               PAGE_KERNEL);
+
+       /*
+        * The Intel SDM says (Volume 3, 7.2.1):
+        *
+        *  Avoid placing a page boundary in the part of the TSS that the
+        *  processor reads during a task switch (the first 104 bytes). The
+        *  processor may not correctly perform address translations if a
+        *  boundary occurs in this area. During a task switch, the processor
+        *  reads and writes into the first 104 bytes of each TSS (using
+        *  contiguous physical addresses beginning with the physical address
+        *  of the first byte of the TSS). So, after TSS access begins, if
+        *  part of the 104 bytes is not physically contiguous, the processor
+        *  will access incorrect information without generating a page-fault
+        *  exception.
+        *
+        * There are also a lot of errata involving the TSS spanning a page
+        * boundary.  Assert that we're not doing that.
+        */
+       BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
+                     offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
+       BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
+       set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
+                               &per_cpu(cpu_tss_rw, cpu),
+                               sizeof(struct tss_struct) / PAGE_SIZE,
+                               tss_prot);
+
+#ifdef CONFIG_X86_32
+       per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
+#endif
+
+#ifdef CONFIG_X86_64
+       BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+       BUILD_BUG_ON(sizeof(exception_stacks) !=
+                    sizeof(((struct cpu_entry_area *)0)->exception_stacks));
+       set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks),
+                               &per_cpu(exception_stacks, cpu),
+                               sizeof(exception_stacks) / PAGE_SIZE,
+                               PAGE_KERNEL);
+
+       __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline),
+                    __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
 #endif
+}
+
+void __init setup_cpu_entry_areas(void)
+{
+       unsigned int cpu;
 
-       __set_fixmap(get_cpu_gdt_ro_index(cpu), get_cpu_gdt_paddr(cpu), prot);
+       for_each_possible_cpu(cpu)
+               setup_cpu_entry_area(cpu);
 }
 
 /* Load the original GDT from the per-cpu structure */
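
The BUILD_BUG_ON()s in setup_cpu_entry_area() above lean on a small trick:
XOR-ing the offsets of the first byte and of the end of x86_tss, then masking
with PAGE_MASK, yields a non-zero value whenever the two offsets land on
different pages, i.e. whenever the hardware TSS would straddle a page boundary.
The same check as a stand-alone C11 static assertion with toy definitions
(PAGE_SIZE, offsetofend and the structs are local stand-ins, not the kernel's):

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

#define offsetofend(type, member) \
	(offsetof(type, member) + sizeof(((type *)0)->member))

/* The part the CPU reads during a task switch (first 104 bytes). */
struct hw_tss { char bytes[104]; };

struct tss_like {
	struct hw_tss	x86_tss;
	unsigned long	io_bitmap[1024];
} __attribute__((aligned(4096)));

/* Refuses to build if x86_tss starts on one page and ends on another. */
static_assert(((offsetof(struct tss_like, x86_tss) ^
		offsetofend(struct tss_like, x86_tss)) & PAGE_MASK) == 0,
	      "hardware TSS must not cross a page boundary");

int main(void)
{
	return 0;
}
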
@@ -745,7 +836,7 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
 {
        int i;
 
-       for (i = 0; i < NCAPINTS; i++) {
+       for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
                c->x86_capability[i] &= ~cpu_caps_cleared[i];
                c->x86_capability[i] |= cpu_caps_set[i];
        }
@@ -1248,7 +1339,7 @@ void enable_sep_cpu(void)
                return;
 
        cpu = get_cpu();
-       tss = &per_cpu(cpu_tss, cpu);
+       tss = &per_cpu(cpu_tss_rw, cpu);
 
        /*
         * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
@@ -1257,11 +1348,7 @@ void enable_sep_cpu(void)
 
        tss->x86_tss.ss1 = __KERNEL_CS;
        wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
-
-       wrmsr(MSR_IA32_SYSENTER_ESP,
-             (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
-             0);
-
+       wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_SYSENTER_stack(cpu) + 1), 0);
        wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
 
        put_cpu();
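
The cpu_SYSENTER_stack(cpu) + 1 expression used above for MSR_IA32_SYSENTER_ESP
relies on ordinary C pointer arithmetic: adding 1 to a pointer to the stack
structure yields the address one past the end of the object, which is what a
downward-growing stack wants as its initial stack pointer. A minimal
illustration (the struct here is a made-up stand-in):

#include <stddef.h>
#include <stdio.h>

struct small_stack { unsigned long words[64]; };

int main(void)
{
	static struct small_stack stack;

	void *base = &stack;		/* lowest address of the stack area */
	void *top  = &stack + 1;	/* one past the end: the initial SP */

	printf("base=%p top=%p size=%zu bytes\n",
	       base, top, (size_t)((char *)top - (char *)base));
	return 0;
}
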
@@ -1355,25 +1442,19 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
-/*
- * Special IST stacks which the CPU switches to when it calls
- * an IST-marked descriptor entry. Up to 7 stacks (hardware
- * limit), all of them are 4K, except the debug stack which
- * is 8K.
- */
-static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
-         [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
-         [DEBUG_STACK - 1]                     = DEBUG_STKSZ
-};
-
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
-       [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
-
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
+       extern char _entry_trampoline[];
+       extern char entry_SYSCALL_64_trampoline[];
+
+       int cpu = smp_processor_id();
+       unsigned long SYSCALL64_entry_trampoline =
+               (unsigned long)get_cpu_entry_area(cpu)->entry_trampoline +
+               (entry_SYSCALL_64_trampoline - _entry_trampoline);
+
        wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
-       wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
+       wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
 
 #ifdef CONFIG_IA32_EMULATION
        wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
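
The MSR_LSTAR value above is derived with simple relocation arithmetic: take
the symbol's distance from the start of the trampoline section
(entry_SYSCALL_64_trampoline - _entry_trampoline) and add it to the per-CPU
alias of that section in the cpu_entry_area. The same idea in a toy user-space
form, where both "sections" and the offset are made up for illustration:

#include <stdio.h>

int main(void)
{
	/* Pretend section as laid out in the kernel image. */
	unsigned char  image_section[256];
	unsigned char *sym_in_image = image_section + 0x40;

	/* Pretend per-CPU alias mapping of the same bytes. */
	unsigned char  alias_section[256];

	/* Relocate the symbol: same offset, different base address. */
	unsigned char *sym_in_alias =
		alias_section + (sym_in_image - image_section);

	printf("offset within section: %#lx\n",
	       (unsigned long)(sym_in_image - image_section));
	printf("alias points at base + offset: %s\n",
	       sym_in_alias == alias_section + 0x40 ? "yes" : "no");
	return 0;
}
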
@@ -1384,7 +1465,7 @@ void syscall_init(void)
         * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
         */
        wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_SYSENTER_stack(cpu) + 1));
        wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
 #else
        wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
@@ -1528,7 +1609,7 @@ void cpu_init(void)
        if (cpu)
                load_ucode_ap();
 
-       t = &per_cpu(cpu_tss, cpu);
+       t = &per_cpu(cpu_tss_rw, cpu);
        oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
@@ -1567,7 +1648,7 @@ void cpu_init(void)
         * set up and load the per-CPU TSS
         */
        if (!oist->ist[0]) {
-               char *estacks = per_cpu(exception_stacks, cpu);
+               char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
 
                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                        estacks += exception_stack_sizes[v];
@@ -1578,7 +1659,7 @@ void cpu_init(void)
                }
        }
 
-       t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+       t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 
        /*
         * <= is required because the CPU will access up to
@@ -1594,11 +1675,12 @@ void cpu_init(void)
        enter_lazy_tlb(&init_mm, me);
 
        /*
-        * Initialize the TSS.  Don't bother initializing sp0, as the initial
-        * task never enters user mode.
+        * Initialize the TSS.  sp0 points to the entry trampoline stack
+        * regardless of what task is running.
         */
-       set_tss_desc(cpu, t);
+       set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
        load_TR_desc();
+       load_sp0((unsigned long)(cpu_SYSENTER_stack(cpu) + 1));
 
        load_mm_ldt(&init_mm);
 
@@ -1610,7 +1692,6 @@ void cpu_init(void)
        if (is_uv_system())
                uv_cpu_init();
 
-       setup_fixmap_gdt(cpu);
        load_fixmap_gdt(cpu);
 }
 
@@ -1620,7 +1701,7 @@ void cpu_init(void)
 {
        int cpu = smp_processor_id();
        struct task_struct *curr = current;
-       struct tss_struct *t = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
 
        wait_for_master_cpu(cpu);
 
@@ -1655,12 +1736,12 @@ void cpu_init(void)
         * Initialize the TSS.  Don't bother initializing sp0, as the initial
         * task never enters user mode.
         */
-       set_tss_desc(cpu, t);
+       set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
        load_TR_desc();
 
        load_mm_ldt(&init_mm);
 
-       t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+       t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 
 #ifdef CONFIG_DOUBLEFAULT
        /* Set up doublefault TSS pointer in the GDT */
@@ -1672,7 +1753,6 @@ void cpu_init(void)
 
        fpu__init_cpu();
 
-       setup_fixmap_gdt(cpu);
        load_fixmap_gdt(cpu);
 }
 #endif
index c6daec4bdba5b180e45c5f78019fcba7b2880428..330b8462d426faad0dccdc480f34eec34cd8b92f 100644 (file)
@@ -470,6 +470,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
 #define F14H_MPB_MAX_SIZE 1824
 #define F15H_MPB_MAX_SIZE 4096
 #define F16H_MPB_MAX_SIZE 3458
+#define F17H_MPB_MAX_SIZE 3200
 
        switch (family) {
        case 0x14:
@@ -481,6 +482,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
        case 0x16:
                max_size = F16H_MPB_MAX_SIZE;
                break;
+       case 0x17:
+               max_size = F17H_MPB_MAX_SIZE;
+               break;
        default:
                max_size = F1XH_MPB_MAX_SIZE;
                break;
index 0e662c55ae902fedd5c78c1ed87a972b35a79856..0b8cedb20d6d92f2875a49292680c8cfecd5b044 100644 (file)
@@ -50,25 +50,23 @@ static void doublefault_fn(void)
                cpu_relax();
 }
 
-struct tss_struct doublefault_tss __cacheline_aligned = {
-       .x86_tss = {
-               .sp0            = STACK_START,
-               .ss0            = __KERNEL_DS,
-               .ldt            = 0,
-               .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
-
-               .ip             = (unsigned long) doublefault_fn,
-               /* 0x2 bit is always set */
-               .flags          = X86_EFLAGS_SF | 0x2,
-               .sp             = STACK_START,
-               .es             = __USER_DS,
-               .cs             = __KERNEL_CS,
-               .ss             = __KERNEL_DS,
-               .ds             = __USER_DS,
-               .fs             = __KERNEL_PERCPU,
-
-               .__cr3          = __pa_nodebug(swapper_pg_dir),
-       }
+struct x86_hw_tss doublefault_tss __cacheline_aligned = {
+       .sp0            = STACK_START,
+       .ss0            = __KERNEL_DS,
+       .ldt            = 0,
+       .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
+
+       .ip             = (unsigned long) doublefault_fn,
+       /* 0x2 bit is always set */
+       .flags          = X86_EFLAGS_SF | 0x2,
+       .sp             = STACK_START,
+       .es             = __USER_DS,
+       .cs             = __KERNEL_CS,
+       .ss             = __KERNEL_DS,
+       .ds             = __USER_DS,
+       .fs             = __KERNEL_PERCPU,
+
+       .__cr3          = __pa_nodebug(swapper_pg_dir),
 };
 
 /* dummy for do_double_fault() call */
index f13b4c00a5de4b7a7b36c40d27311672bcc9d05c..bbd6d986e2d0fc22b5b3c23c794ade410b9f9973 100644 (file)
@@ -43,6 +43,24 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
        return true;
 }
 
+bool in_sysenter_stack(unsigned long *stack, struct stack_info *info)
+{
+       struct SYSENTER_stack *ss = cpu_SYSENTER_stack(smp_processor_id());
+
+       void *begin = ss;
+       void *end = ss + 1;
+
+       if ((void *)stack < begin || (void *)stack >= end)
+               return false;
+
+       info->type      = STACK_TYPE_SYSENTER;
+       info->begin     = begin;
+       info->end       = end;
+       info->next_sp   = NULL;
+
+       return true;
+}
+
 static void printk_stack_address(unsigned long address, int reliable,
                                 char *log_lvl)
 {
@@ -50,6 +68,28 @@ static void printk_stack_address(unsigned long address, int reliable,
        printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
 }
 
+void show_iret_regs(struct pt_regs *regs)
+{
+       printk(KERN_DEFAULT "RIP: %04x:%pS\n", (int)regs->cs, (void *)regs->ip);
+       printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss,
+               regs->sp, regs->flags);
+}
+
+static void show_regs_safe(struct stack_info *info, struct pt_regs *regs)
+{
+       if (on_stack(info, regs, sizeof(*regs)))
+               __show_regs(regs, 0);
+       else if (on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
+                         IRET_FRAME_SIZE)) {
+               /*
+                * When an interrupt or exception occurs in entry code, the
+                * full pt_regs might not have been saved yet.  In that case
+                * just print the iret frame.
+                */
+               show_iret_regs(regs);
+       }
+}
+
 void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                        unsigned long *stack, char *log_lvl)
 {
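
show_regs_safe() above only dereferences as much of pt_regs as is actually
present on the stack being dumped: the whole frame if it fits inside the
stack_info bounds, otherwise just the iret frame at the tail of pt_regs. The
underlying range check is easy to model; the struct below is a simplified
stand-in, not the kernel's stack_info/on_stack():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct stack_info { void *begin, *end; };

/* True if [addr, addr + len) lies entirely inside the described stack. */
static bool on_stack(const struct stack_info *info, const void *addr, size_t len)
{
	const char *p = addr;

	return p >= (const char *)info->begin &&
	       p + len <= (const char *)info->end;
}

int main(void)
{
	char stack[256];
	struct stack_info info = { stack, stack + sizeof(stack) };

	printf("40-byte frame at offset 200 fits: %d\n",
	       on_stack(&info, stack + 200, 40));
	printf("80-byte frame at offset 200 fits: %d\n",
	       on_stack(&info, stack + 200, 80));
	return 0;
}
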
@@ -71,31 +111,35 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
         * - task stack
         * - interrupt stack
         * - HW exception stacks (double fault, nmi, debug, mce)
+        * - SYSENTER stack
         *
-        * x86-32 can have up to three stacks:
+        * x86-32 can have up to four stacks:
         * - task stack
         * - softirq stack
         * - hardirq stack
+        * - SYSENTER stack
         */
        for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
                const char *stack_name;
 
-               /*
-                * If we overflowed the task stack into a guard page, jump back
-                * to the bottom of the usable stack.
-                */
-               if (task_stack_page(task) - (void *)stack < PAGE_SIZE)
-                       stack = task_stack_page(task);
-
-               if (get_stack_info(stack, task, &stack_info, &visit_mask))
-                       break;
+               if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
+                       /*
+                        * We weren't on a valid stack.  It's possible that
+                        * we overflowed a valid stack into a guard page.
+                        * See if the next page up is valid so that we can
+                        * generate some kind of backtrace if this happens.
+                        */
+                       stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
+                       if (get_stack_info(stack, task, &stack_info, &visit_mask))
+                               break;
+               }
 
                stack_name = stack_type_name(stack_info.type);
                if (stack_name)
                        printk("%s <%s>\n", log_lvl, stack_name);
 
-               if (regs && on_stack(&stack_info, regs, sizeof(*regs)))
-                       __show_regs(regs, 0);
+               if (regs)
+                       show_regs_safe(&stack_info, regs);
 
                /*
                 * Scan the stack, printing any text addresses we find.  At the
@@ -119,7 +163,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 
                        /*
                         * Don't print regs->ip again if it was already printed
-                        * by __show_regs() below.
+                        * by show_regs_safe() below.
                         */
                        if (regs && stack == &regs->ip)
                                goto next;
@@ -155,8 +199,8 @@ next:
 
                        /* if the frame has entry regs, print them */
                        regs = unwind_get_entry_regs(&state);
-                       if (regs && on_stack(&stack_info, regs, sizeof(*regs)))
-                               __show_regs(regs, 0);
+                       if (regs)
+                               show_regs_safe(&stack_info, regs);
                }
 
                if (stack_name)
index daefae83a3aa86c59602b75bd3e6734c6e3b1030..5ff13a6b368069f68505099ce94267b8bf0f45b9 100644 (file)
@@ -26,6 +26,9 @@ const char *stack_type_name(enum stack_type type)
        if (type == STACK_TYPE_SOFTIRQ)
                return "SOFTIRQ";
 
+       if (type == STACK_TYPE_SYSENTER)
+               return "SYSENTER";
+
        return NULL;
 }
 
@@ -93,6 +96,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
        if (task != current)
                goto unknown;
 
+       if (in_sysenter_stack(stack, info))
+               goto recursion_check;
+
        if (in_hardirq_stack(stack, info))
                goto recursion_check;
 
index 88ce2ffdb110303502ad33e64d357d8af5afd8c6..abc828f8c29785b4fae8398ec19775015447ee22 100644 (file)
@@ -37,6 +37,9 @@ const char *stack_type_name(enum stack_type type)
        if (type == STACK_TYPE_IRQ)
                return "IRQ";
 
+       if (type == STACK_TYPE_SYSENTER)
+               return "SYSENTER";
+
        if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
                return exception_stack_names[type - STACK_TYPE_EXCEPTION];
 
@@ -115,6 +118,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
        if (in_irq_stack(stack, info))
                goto recursion_check;
 
+       if (in_sysenter_stack(stack, info))
+               goto recursion_check;
+
        goto unknown;
 
 recursion_check:
index 3feb648781c470a7a49ee26749712ba7da891fe9..2f723301eb58fc5ad0d6796b342446ae2ee0c9e6 100644 (file)
@@ -67,7 +67,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
         * because the ->io_bitmap_max value must match the bitmap
         * contents:
         */
-       tss = &per_cpu(cpu_tss, get_cpu());
+       tss = &per_cpu(cpu_tss_rw, get_cpu());
 
        if (turn_on)
                bitmap_clear(t->io_bitmap_ptr, from, num);
index 49cfd9fe7589fa5ef2bef5d4a5d6431b7007836f..68e1867cca8045d0ed728ffc6b75a866c25484ed 100644 (file)
@@ -219,18 +219,6 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
        /* high bit used in ret_from_ code  */
        unsigned vector = ~regs->orig_ax;
 
-       /*
-        * NB: Unlike exception entries, IRQ entries do not reliably
-        * handle context tracking in the low-level entry code.  This is
-        * because syscall entries execute briefly with IRQs on before
-        * updating context tracking state, so we can take an IRQ from
-        * kernel mode with CONTEXT_USER.  The low-level entry code only
-        * updates the context if we came from user mode, so we won't
-        * switch to CONTEXT_KERNEL.  We'll fix that once the syscall
-        * code is cleaned up enough that we can cleanly defer enabling
-        * IRQs.
-        */
-
        entering_irq();
 
        /* entering_irq() tells RCU that we're not quiescent.  Check it. */
index 020efbf5786b35d343a8632cd14ac4f800465d9b..d86e344f5b3debfed504b72a7c0f83f36fe16387 100644 (file)
@@ -57,10 +57,10 @@ static inline void stack_overflow_check(struct pt_regs *regs)
        if (regs->sp >= estack_top && regs->sp <= estack_bottom)
                return;
 
-       WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n",
+       WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx,ip:%pF)\n",
                current->comm, curbase, regs->sp,
                irq_stack_top, irq_stack_bottom,
-               estack_top, estack_bottom);
+               estack_top, estack_bottom, (void *)regs->ip);
 
        if (sysctl_panic_on_stackoverflow)
                panic("low stack detected by irq handler - check messages\n");
index 410c5dadcee31930f8c30ddaeb1b4c940835a081..3a4b12809ab5f810f5a8657bfcc3f7521450e645 100644 (file)
@@ -431,6 +431,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
 }
 
 static unsigned long mpf_base;
+static bool mpf_found;
 
 static unsigned long __init get_mpc_size(unsigned long physptr)
 {
@@ -504,7 +505,7 @@ void __init default_get_smp_config(unsigned int early)
        if (!smp_found_config)
                return;
 
-       if (!mpf_base)
+       if (!mpf_found)
                return;
 
        if (acpi_lapic && early)
@@ -593,6 +594,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
                        smp_found_config = 1;
 #endif
                        mpf_base = base;
+                       mpf_found = true;
 
                        pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
                                base, base + sizeof(*mpf) - 1, mpf);
@@ -858,7 +860,7 @@ static int __init update_mp_table(void)
        if (!enable_update_mptable)
                return 0;
 
-       if (!mpf_base)
+       if (!mpf_found)
                return 0;
 
        mpf = early_memremap(mpf_base, sizeof(*mpf));
index ac0be8283325edfdc2752f862b4c0cef208a931c..9edadabf04f66c657f8a29bb56fe994b2559d5cf 100644 (file)
@@ -10,7 +10,6 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
-DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
 
 DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
@@ -60,7 +59,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_mmu_ops, read_cr2);
                PATCH_SITE(pv_mmu_ops, read_cr3);
                PATCH_SITE(pv_mmu_ops, write_cr3);
-               PATCH_SITE(pv_mmu_ops, flush_tlb_single);
                PATCH_SITE(pv_cpu_ops, wbinvd);
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
                case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
index 97fb3e5737f5d0b5d50f8d9232726923c2692e65..aed9d94bd46f41bb049b8e0153a44a43d97e80b4 100644 (file)
@@ -47,7 +47,7 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = {
        .x86_tss = {
                /*
                 * .sp0 is only used when entering ring 0 from a lower
@@ -56,6 +56,16 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
                 * Poison it.
                 */
                .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
+
+#ifdef CONFIG_X86_64
+               /*
+                * .sp1 is cpu_current_top_of_stack.  The init task never
+                * runs user code, but cpu_current_top_of_stack should still
+                * be well defined before the first context switch.
+                */
+               .sp1 = TOP_OF_INIT_STACK,
+#endif
+
 #ifdef CONFIG_X86_32
                .ss0 = __KERNEL_DS,
                .ss1 = __KERNEL_CS,
@@ -71,11 +81,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
          */
        .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
-#ifdef CONFIG_X86_32
-       .SYSENTER_stack_canary  = STACK_END_MAGIC,
-#endif
 };
-EXPORT_PER_CPU_SYMBOL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
 
 DEFINE_PER_CPU(bool, __tss_limit_invalid);
 EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
@@ -104,7 +111,7 @@ void exit_thread(struct task_struct *tsk)
        struct fpu *fpu = &t->fpu;
 
        if (bp) {
-               struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
+               struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
 
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
@@ -299,7 +306,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
        }
 
        if ((tifp ^ tifn) & _TIF_NOTSC)
-               cr4_toggle_bits(X86_CR4_TSD);
+               cr4_toggle_bits_irqsoff(X86_CR4_TSD);
 
        if ((tifp ^ tifn) & _TIF_NOCPUID)
                set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
index 45bf0c5f93e15103060d67d5245756ab72ce8fe5..5224c609918416337b97440eb2d515d8052463ae 100644 (file)
@@ -234,7 +234,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
index eeeb34f85c250e8c01188b6d32cf5a62bd1af8a0..c754662320163107ca3a254362ce0e404a8d3c11 100644 (file)
@@ -69,9 +69,8 @@ void __show_regs(struct pt_regs *regs, int all)
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;
 
-       printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs, (void *)regs->ip);
-       printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss,
-               regs->sp, regs->flags);
+       show_iret_regs(regs);
+
        if (regs->orig_ax != -1)
                pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
        else
@@ -88,6 +87,9 @@ void __show_regs(struct pt_regs *regs, int all)
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);
 
+       if (!all)
+               return;
+
        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
@@ -98,9 +100,6 @@ void __show_regs(struct pt_regs *regs, int all)
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
 
-       if (!all)
-               return;
-
        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = __read_cr3();
@@ -400,7 +399,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
        WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
                     this_cpu_read(irq_count) != -1);
@@ -462,6 +461,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * Switch the PDA and FPU contexts.
         */
        this_cpu_write(current_task, next_p);
+       this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
 
        /* Reload sp0. */
        update_sp0(next_p);
index 5f59e6bee123ffb324ec12a02d1921a7029ddc96..35cb20994e32d2bf05f0b1510ccc26cc7e7590a5 100644 (file)
@@ -101,15 +101,12 @@ DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 /* Logical package management. We might want to allocate that dynamically */
-static int *physical_to_logical_pkg __read_mostly;
-static unsigned long *physical_package_map __read_mostly;;
-static unsigned int max_physical_pkg_id __read_mostly;
 unsigned int __max_logical_packages __read_mostly;
 EXPORT_SYMBOL(__max_logical_packages);
 static unsigned int logical_packages __read_mostly;
 
 /* Maximum number of SMT threads on any online core */
-int __max_smt_threads __read_mostly;
+int __read_mostly __max_smt_threads = 1;
 
 /* Flag to indicate if a complete sched domain rebuild is required */
 bool x86_topology_update;
@@ -240,7 +237,7 @@ static void notrace start_secondary(void *unused)
        load_cr3(swapper_pg_dir);
        __flush_tlb_all();
 #endif
-
+       load_current_idt();
        cpu_init();
        x86_cpuinit.early_percpu_clock_init();
        preempt_disable();
@@ -280,6 +277,25 @@ static void notrace start_secondary(void *unused)
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
+/**
+ * topology_phys_to_logical_pkg - Map a physical package id to a logical package id
+ *
+ * Returns logical package id or -1 if not found
+ */
+int topology_phys_to_logical_pkg(unsigned int phys_pkg)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+               if (c->initialized && c->phys_proc_id == phys_pkg)
+                       return c->logical_proc_id;
+       }
+       return -1;
+}
+EXPORT_SYMBOL(topology_phys_to_logical_pkg);
+
 /**
  * topology_update_package_map - Update the physical to logical package map
  * @pkg:       The physical package id as retrieved via CPUID
@@ -287,102 +303,23 @@ static void notrace start_secondary(void *unused)
  */
 int topology_update_package_map(unsigned int pkg, unsigned int cpu)
 {
-       unsigned int new;
-
-       /* Called from early boot ? */
-       if (!physical_package_map)
-               return 0;
-
-       if (pkg >= max_physical_pkg_id)
-               return -EINVAL;
+       int new;
 
-       /* Set the logical package id */
-       if (test_and_set_bit(pkg, physical_package_map))
+       /* Already available somewhere? */
+       new = topology_phys_to_logical_pkg(pkg);
+       if (new >= 0)
                goto found;
 
-       if (logical_packages >= __max_logical_packages) {
-               pr_warn("Package %u of CPU %u exceeds BIOS package data %u.\n",
-                       logical_packages, cpu, __max_logical_packages);
-               return -ENOSPC;
-       }
-
        new = logical_packages++;
        if (new != pkg) {
                pr_info("CPU %u Converting physical %u to logical package %u\n",
                        cpu, pkg, new);
        }
-       physical_to_logical_pkg[pkg] = new;
-
 found:
-       cpu_data(cpu).logical_proc_id = physical_to_logical_pkg[pkg];
+       cpu_data(cpu).logical_proc_id = new;
        return 0;
 }
 
-/**
- * topology_phys_to_logical_pkg - Map a physical package id to a logical
- *
- * Returns logical package id or -1 if not found
- */
-int topology_phys_to_logical_pkg(unsigned int phys_pkg)
-{
-       if (phys_pkg >= max_physical_pkg_id)
-               return -1;
-       return physical_to_logical_pkg[phys_pkg];
-}
-EXPORT_SYMBOL(topology_phys_to_logical_pkg);
-
-static void __init smp_init_package_map(struct cpuinfo_x86 *c, unsigned int cpu)
-{
-       unsigned int ncpus;
-       size_t size;
-
-       /*
-        * Today neither Intel nor AMD support heterogenous systems. That
-        * might change in the future....
-        *
-        * While ideally we'd want '* smp_num_siblings' in the below @ncpus
-        * computation, this won't actually work since some Intel BIOSes
-        * report inconsistent HT data when they disable HT.
-        *
-        * In particular, they reduce the APIC-IDs to only include the cores,
-        * but leave the CPUID topology to say there are (2) siblings.
-        * This means we don't know how many threads there will be until
-        * after the APIC enumeration.
-        *
-        * By not including this we'll sometimes over-estimate the number of
-        * logical packages by the amount of !present siblings, but this is
-        * still better than MAX_LOCAL_APIC.
-        *
-        * We use total_cpus not nr_cpu_ids because nr_cpu_ids can be limited
-        * on the command line leading to a similar issue as the HT disable
-        * problem because the hyperthreads are usually enumerated after the
-        * primary cores.
-        */
-       ncpus = boot_cpu_data.x86_max_cores;
-       if (!ncpus) {
-               pr_warn("x86_max_cores == zero !?!?");
-               ncpus = 1;
-       }
-
-       __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
-       logical_packages = 0;
-
-       /*
-        * Possibly larger than what we need as the number of apic ids per
-        * package can be smaller than the actual used apic ids.
-        */
-       max_physical_pkg_id = DIV_ROUND_UP(MAX_LOCAL_APIC, ncpus);
-       size = max_physical_pkg_id * sizeof(unsigned int);
-       physical_to_logical_pkg = kmalloc(size, GFP_KERNEL);
-       memset(physical_to_logical_pkg, 0xff, size);
-       size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
-       physical_package_map = kzalloc(size, GFP_KERNEL);
-
-       pr_info("Max logical packages: %u\n", __max_logical_packages);
-
-       topology_update_package_map(c->phys_proc_id, cpu);
-}
-
 void __init smp_store_boot_cpu_info(void)
 {
        int id = 0; /* CPU 0 */
@@ -390,7 +327,8 @@ void __init smp_store_boot_cpu_info(void)
 
        *c = boot_cpu_data;
        c->cpu_index = id;
-       smp_init_package_map(c, id);
+       topology_update_package_map(c->phys_proc_id, id);
+       c->initialized = true;
 }
 
 /*
@@ -401,13 +339,16 @@ void smp_store_cpu_info(int id)
 {
        struct cpuinfo_x86 *c = &cpu_data(id);
 
-       *c = boot_cpu_data;
+       /* Copy boot_cpu_data only on the first bringup */
+       if (!c->initialized)
+               *c = boot_cpu_data;
        c->cpu_index = id;
        /*
         * During boot time, CPU0 has this setup already. Save the info when
         * bringing up AP or offlined CPU0.
         */
        identify_secondary_cpu(c);
+       c->initialized = true;
 }
 
 static bool
@@ -1356,7 +1297,16 @@ void __init native_smp_prepare_boot_cpu(void)
 
 void __init native_smp_cpus_done(unsigned int max_cpus)
 {
+       int ncpus;
+
        pr_debug("Boot done\n");
+       /*
+        * Today neither Intel nor AMD support heterogeneous systems so
+        * extrapolate the boot cpu's data to all packages.
+        */
+       ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
+       __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
+       pr_info("Max logical packages: %u\n", __max_logical_packages);
 
        if (x86_has_numa_in_package)
                set_sched_topology(x86_numa_in_package_topology);
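
The native_smp_cpus_done() change above derives __max_logical_packages from the
boot CPU's booted_cores and the SMT thread count instead of pre-sizing arrays at
boot. A stand-alone sketch of that arithmetic, with made-up topology numbers
(16 possible CPUs, 4 cores per package, 2 SMT threads are assumptions for
illustration, not values from the patch):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

    int main(void)
    {
    	unsigned int nr_cpu_ids = 16;	/* hypothetical possible CPUs */
    	unsigned int booted_cores = 4;	/* hypothetical cores in package 0 */
    	unsigned int smt_threads = 2;	/* hypothetical threads per core */
    	unsigned int ncpus = booted_cores * smt_threads;

    	/* mirrors the hunk above: DIV_ROUND_UP(16, 8) == 2 packages */
    	printf("Max logical packages: %u\n", DIV_ROUND_UP(nr_cpu_ids, ncpus));
    	return 0;
    }
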
index a63fe77b32179662353ba51f31d8097980d6fa74..676774b9bb8d1300837c9771782ae6f28a4d8bca 100644 (file)
@@ -188,6 +188,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        if (len > TASK_SIZE)
                return -ENOMEM;
 
+       /* No address checking. See comment at mmap_address_hint_valid() */
        if (flags & MAP_FIXED)
                return addr;
 
@@ -197,12 +198,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
        /* requesting a specific address */
        if (addr) {
-               addr = PAGE_ALIGN(addr);
+               addr &= PAGE_MASK;
+               if (!mmap_address_hint_valid(addr, len))
+                       goto get_unmapped_area;
+
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vm_start_gap(vma)))
+               if (!vma || addr + len <= vm_start_gap(vma))
                        return addr;
        }
+get_unmapped_area:
 
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
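
The hunk above stops PAGE_ALIGN()-ing the hint; it masks the hint and defers
validity checking to mmap_address_hint_valid(), whose body is not part of this
hunk. A rough, hedged sketch of what such a check has to do (reject hints that
would overflow the task size or whose start and end fall on different sides of
the legacy mapping window), using placeholder limits rather than the kernel's
real TASK_SIZE/DEFAULT_MAP_WINDOW:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder limits for illustration only. */
    #define TASK_SIZE_SKETCH	((1UL << 56) - 4096)
    #define MAP_WINDOW_SKETCH	((1UL << 47) - 4096)

    static bool hint_valid_sketch(unsigned long addr, unsigned long len)
    {
    	if (TASK_SIZE_SKETCH - len < addr)	/* hint + len would overflow */
    		return false;
    	/* start and end must sit on the same side of the map window */
    	return (addr > MAP_WINDOW_SKETCH) == (addr + len > MAP_WINDOW_SKETCH);
    }

    int main(void)
    {
    	printf("%d\n", hint_valid_sketch(0x7f0000000000UL, 0x1000));	      /* 1 */
    	printf("%d\n", hint_valid_sketch(MAP_WINDOW_SKETCH - 0x800, 0x1000)); /* 0 */
    	return 0;
    }
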
index 989514c94a55d8fa93a07192edd199be1a607bf8..e98f8b66a460b98b31d262cff23fa063be33ac5a 100644 (file)
@@ -348,9 +348,15 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 
        /*
         * If IRET takes a non-IST fault on the espfix64 stack, then we
-        * end up promoting it to a doublefault.  In that case, modify
-        * the stack to make it look like we just entered the #GP
-        * handler from user space, similar to bad_iret.
+        * end up promoting it to a doublefault.  In that case, take
+        * advantage of the fact that we're not using the normal (TSS.sp0)
+        * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
+        * and then modify our own IRET frame so that, when we return,
+        * we land directly at the #GP(0) vector with the stack already
+        * set up according to its expectations.
+        *
+        * The net result is that our #GP handler will think that we
+        * entered from usermode with the bad user context.
         *
         * No need for ist_enter here because we don't use RCU.
         */
@@ -358,13 +364,26 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
                regs->cs == __KERNEL_CS &&
                regs->ip == (unsigned long)native_irq_return_iret)
        {
-               struct pt_regs *normal_regs = task_pt_regs(current);
+               struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
-               /* Fake a #GP(0) from userspace. */
-               memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
-               normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
+               /*
+                * regs->sp points to the failing IRET frame on the
+                * ESPFIX64 stack.  Copy it to the entry stack.  This fills
+                * in gpregs->ss through gpregs->ip.
+                *
+                */
+               memmove(&gpregs->ip, (void *)regs->sp, 5*8);
+               gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */
+
+               /*
+                * Adjust our frame so that we return straight to the #GP
+                * vector with the expected RSP value.  This is safe because
+                * we won't enable interrupts or schedule before we invoke
+                * general_protection, so nothing will clobber the stack
+                * frame we just set up.
+                */
                regs->ip = (unsigned long)general_protection;
-               regs->sp = (unsigned long)&normal_regs->orig_ax;
+               regs->sp = (unsigned long)&gpregs->orig_ax;
 
                return;
        }
@@ -389,7 +408,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
         *
         *   Processors update CR2 whenever a page fault is detected. If a
         *   second page fault occurs while an earlier page fault is being
-        *   delivered, the faulting linear address of the second fault will
+        *   delivered, the faulting linear address of the second fault will
         *   overwrite the contents of CR2 (replacing the previous
         *   address). These updates to CR2 occur even if the page fault
         *   results in a double fault or occurs during the delivery of a
@@ -605,14 +624,15 @@ NOKPROBE_SYMBOL(do_int3);
 
 #ifdef CONFIG_X86_64
 /*
- * Help handler running on IST stack to switch off the IST stack if the
- * interrupted code was in user mode. The actual stack switch is done in
- * entry_64.S
+ * Help handler running on a per-cpu (IST or entry trampoline) stack
+ * to switch to the normal thread stack if the interrupted code was in
+ * user mode. The actual stack switch is done in entry_64.S
  */
 asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
-       struct pt_regs *regs = task_pt_regs(current);
-       *regs = *eregs;
+       struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
+       if (regs != eregs)
+               *regs = *eregs;
        return regs;
 }
 NOKPROBE_SYMBOL(sync_regs);
@@ -628,13 +648,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
        /*
         * This is called from entry_64.S early in handling a fault
         * caused by a bad iret to user mode.  To handle the fault
-        * correctly, we want move our stack frame to task_pt_regs
-        * and we want to pretend that the exception came from the
-        * iret target.
+        * correctly, we want to move our stack frame to where it would
+        * be had we entered directly on the entry stack (rather than
+        * just below the IRET frame) and we want to pretend that the
+        * exception came from the IRET target.
         */
        struct bad_iret_stack *new_stack =
-               container_of(task_pt_regs(current),
-                            struct bad_iret_stack, regs);
+               (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
        /* Copy the IRET target to the new stack. */
        memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
@@ -795,14 +815,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        debug_stack_usage_dec();
 
 exit:
-#if defined(CONFIG_X86_32)
-       /*
-        * This is the most likely code path that involves non-trivial use
-        * of the SYSENTER stack.  Check that we haven't overrun it.
-        */
-       WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
-            "Overran or corrupted SYSENTER stack\n");
-#endif
        ist_exit(regs);
 }
 NOKPROBE_SYMBOL(do_debug);
@@ -929,6 +941,9 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 
 void __init trap_init(void)
 {
+       /* Init cpu_entry_area before IST entries are set up */
+       setup_cpu_entry_areas();
+
        idt_setup_traps();
 
        /*
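
The do_double_fault() rework above builds a fake #GP(0) frame one struct pt_regs
below the top of the entry stack (TSS.sp0) and then retargets the IRET frame at
it. A tiny stand-alone sketch of that placement, using a trimmed pt_regs
stand-in and a hypothetical sp0 value:

    #include <stdio.h>

    /* Trimmed stand-in: only the size (21 slots on x86-64) matters here. */
    struct pt_regs_sketch { unsigned long slot[21]; };

    int main(void)
    {
    	unsigned long sp0 = 0xfffffe0000003000UL;	/* hypothetical TSS.sp0 */
    	struct pt_regs_sketch *gpregs = (struct pt_regs_sketch *)sp0 - 1;

    	/*
    	 * The real code copies the 5-word IRET frame into the tail of this
    	 * area and points regs->sp at the frame's orig_ax slot, so the #GP
    	 * entry path finds a frame exactly where it expects one.
    	 */
    	printf("fake #GP frame at %#lx (%zu bytes below sp0)\n",
    	       (unsigned long)gpregs, sizeof(*gpregs));
    	return 0;
    }
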
index 6ba82be68cffa25ed9beef59162de6d9318078bf..f44ce0fb35832aa0cfd7619fff7ad27b1742540e 100644 (file)
 
 #define        UMIP_INST_SGDT  0       /* 0F 01 /0 */
 #define        UMIP_INST_SIDT  1       /* 0F 01 /1 */
-#define        UMIP_INST_SMSW  3       /* 0F 01 /4 */
+#define        UMIP_INST_SMSW  2       /* 0F 01 /4 */
+#define        UMIP_INST_SLDT  3       /* 0F 00 /0 */
+#define        UMIP_INST_STR   4       /* 0F 00 /1 */
+
+const char * const umip_insns[5] = {
+       [UMIP_INST_SGDT] = "SGDT",
+       [UMIP_INST_SIDT] = "SIDT",
+       [UMIP_INST_SMSW] = "SMSW",
+       [UMIP_INST_SLDT] = "SLDT",
+       [UMIP_INST_STR] = "STR",
+};
+
+#define umip_pr_err(regs, fmt, ...) \
+       umip_printk(regs, KERN_ERR, fmt, ##__VA_ARGS__)
+#define umip_pr_warning(regs, fmt, ...) \
+       umip_printk(regs, KERN_WARNING, fmt,  ##__VA_ARGS__)
+
+/**
+ * umip_printk() - Print a rate-limited message
+ * @regs:      Register set with the context in which the warning is printed
+ * @log_level: Kernel log level to print the message
+ * @fmt:       The text string to print
+ *
+ * Print the text contained in @fmt. The print rate is limited to bursts of 5
+ * messages every two minutes. The purpose of this customized version of
+ * printk() is to print messages when user space processes use any of the
+ * UMIP-protected instructions. Thus, the printed text is prepended with the
+ * task name and process ID number of the current task as well as the
+ * instruction and stack pointers in @regs as seen when entering kernel mode.
+ *
+ * Returns:
+ *
+ * None.
+ */
+static __printf(3, 4)
+void umip_printk(const struct pt_regs *regs, const char *log_level,
+                const char *fmt, ...)
+{
+       /* Bursts of 5 messages every two minutes */
+       static DEFINE_RATELIMIT_STATE(ratelimit, 2 * 60 * HZ, 5);
+       struct task_struct *tsk = current;
+       struct va_format vaf;
+       va_list args;
+
+       if (!__ratelimit(&ratelimit))
+               return;
+
+       va_start(args, fmt);
+       vaf.fmt = fmt;
+       vaf.va = &args;
+       printk("%s" pr_fmt("%s[%d] ip:%lx sp:%lx: %pV"), log_level, tsk->comm,
+              task_pid_nr(tsk), regs->ip, regs->sp, &vaf);
+       va_end(args);
+}
 
 /**
  * identify_insn() - Identify a UMIP-protected instruction
@@ -118,10 +171,16 @@ static int identify_insn(struct insn *insn)
                default:
                        return -EINVAL;
                }
+       } else if (insn->opcode.bytes[1] == 0x0) {
+               if (X86_MODRM_REG(insn->modrm.value) == 0)
+                       return UMIP_INST_SLDT;
+               else if (X86_MODRM_REG(insn->modrm.value) == 1)
+                       return UMIP_INST_STR;
+               else
+                       return -EINVAL;
+       } else {
+               return -EINVAL;
        }
-
-       /* SLDT AND STR are not emulated */
-       return -EINVAL;
 }
 
 /**
@@ -228,10 +287,8 @@ static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs)
        if (!(show_unhandled_signals && unhandled_signal(tsk, SIGSEGV)))
                return;
 
-       pr_err_ratelimited("%s[%d] umip emulation segfault ip:%lx sp:%lx error:%x in %lx\n",
-                          tsk->comm, task_pid_nr(tsk), regs->ip,
-                          regs->sp, X86_PF_USER | X86_PF_WRITE,
-                          regs->ip);
+       umip_pr_err(regs, "segfault in emulation. error%x\n",
+                   X86_PF_USER | X86_PF_WRITE);
 }
 
 /**
@@ -262,15 +319,11 @@ bool fixup_umip_exception(struct pt_regs *regs)
        unsigned char buf[MAX_INSN_SIZE];
        void __user *uaddr;
        struct insn insn;
-       char seg_defs;
+       int seg_defs;
 
        if (!regs)
                return false;
 
-       /* Do not emulate 64-bit processes. */
-       if (user_64bit_mode(regs))
-               return false;
-
        /*
         * If not in user-space long mode, a custom code segment could be in
         * use. This is true in protected mode (if the process defined a local
@@ -322,6 +375,15 @@ bool fixup_umip_exception(struct pt_regs *regs)
        if (umip_inst < 0)
                return false;
 
+       umip_pr_warning(regs, "%s instruction cannot be used by applications.\n",
+                       umip_insns[umip_inst]);
+
+       /* Do not emulate SLDT, STR or user long mode processes. */
+       if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT || user_64bit_mode(regs))
+               return false;
+
+       umip_pr_warning(regs, "For now, expensive software emulation returns the result.\n");
+
        if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size))
                return false;
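
umip_printk() above pairs the kernel's DEFINE_RATELIMIT_STATE()/__ratelimit()
with %pV so user space gets at most a burst of five messages every two minutes.
A minimal userspace analogue of the same rate-limiting policy (not the kernel
API, just the idea):

    #include <stdio.h>
    #include <time.h>

    /* Allow a burst of 5 messages per 120-second window, then drop. */
    static int ratelimit_ok(void)
    {
    	static time_t window_start;
    	static int emitted;
    	time_t now = time(NULL);

    	if (now - window_start >= 120) {
    		window_start = now;
    		emitted = 0;
    	}
    	return emitted++ < 5;
    }

    int main(void)
    {
    	for (int i = 0; i < 8; i++)
    		if (ratelimit_ok())
    			printf("msg %d\n", i);	/* only 0..4 are printed */
    	return 0;
    }
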
 
index a3f973b2c97a03b121fe0173dbdc9298216721e6..be86a865087a6b9dc8e04031dbf2e2fbeeda1ed5 100644 (file)
@@ -253,22 +253,15 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
        return NULL;
 }
 
-static bool stack_access_ok(struct unwind_state *state, unsigned long addr,
+static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
                            size_t len)
 {
        struct stack_info *info = &state->stack_info;
+       void *addr = (void *)_addr;
 
-       /*
-        * If the address isn't on the current stack, switch to the next one.
-        *
-        * We may have to traverse multiple stacks to deal with the possibility
-        * that info->next_sp could point to an empty stack and the address
-        * could be on a subsequent stack.
-        */
-       while (!on_stack(info, (void *)addr, len))
-               if (get_stack_info(info->next_sp, state->task, info,
-                                  &state->stack_mask))
-                       return false;
+       if (!on_stack(info, addr, len) &&
+           (get_stack_info(addr, state->task, info, &state->stack_mask)))
+               return false;
 
        return true;
 }
@@ -283,42 +276,32 @@ static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
        return true;
 }
 
-#define REGS_SIZE (sizeof(struct pt_regs))
-#define SP_OFFSET (offsetof(struct pt_regs, sp))
-#define IRET_REGS_SIZE (REGS_SIZE - offsetof(struct pt_regs, ip))
-#define IRET_SP_OFFSET (SP_OFFSET - offsetof(struct pt_regs, ip))
-
 static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
-                            unsigned long *ip, unsigned long *sp, bool full)
+                            unsigned long *ip, unsigned long *sp)
 {
-       size_t regs_size = full ? REGS_SIZE : IRET_REGS_SIZE;
-       size_t sp_offset = full ? SP_OFFSET : IRET_SP_OFFSET;
-       struct pt_regs *regs = (struct pt_regs *)(addr + regs_size - REGS_SIZE);
-
-       if (IS_ENABLED(CONFIG_X86_64)) {
-               if (!stack_access_ok(state, addr, regs_size))
-                       return false;
+       struct pt_regs *regs = (struct pt_regs *)addr;
 
-               *ip = regs->ip;
-               *sp = regs->sp;
+       /* x86-32 support will be more complicated due to the &regs->sp hack */
+       BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));
 
-               return true;
-       }
-
-       if (!stack_access_ok(state, addr, sp_offset))
+       if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
                return false;
 
        *ip = regs->ip;
+       *sp = regs->sp;
+       return true;
+}
 
-       if (user_mode(regs)) {
-               if (!stack_access_ok(state, addr + sp_offset,
-                                    REGS_SIZE - SP_OFFSET))
-                       return false;
+static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
+                                 unsigned long *ip, unsigned long *sp)
+{
+       struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;
 
-               *sp = regs->sp;
-       } else
-               *sp = (unsigned long)&regs->sp;
+       if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
+               return false;
 
+       *ip = regs->ip;
+       *sp = regs->sp;
        return true;
 }
 
@@ -327,7 +310,6 @@ bool unwind_next_frame(struct unwind_state *state)
        unsigned long ip_p, sp, orig_ip, prev_sp = state->sp;
        enum stack_type prev_type = state->stack_info.type;
        struct orc_entry *orc;
-       struct pt_regs *ptregs;
        bool indirect = false;
 
        if (unwind_done(state))
@@ -435,7 +417,7 @@ bool unwind_next_frame(struct unwind_state *state)
                break;
 
        case ORC_TYPE_REGS:
-               if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) {
+               if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
                        orc_warn("can't dereference registers at %p for ip %pB\n",
                                 (void *)sp, (void *)orig_ip);
                        goto done;
@@ -447,20 +429,14 @@ bool unwind_next_frame(struct unwind_state *state)
                break;
 
        case ORC_TYPE_REGS_IRET:
-               if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) {
+               if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
                        orc_warn("can't dereference iret registers at %p for ip %pB\n",
                                 (void *)sp, (void *)orig_ip);
                        goto done;
                }
 
-               ptregs = container_of((void *)sp, struct pt_regs, ip);
-               if ((unsigned long)ptregs >= prev_sp &&
-                   on_stack(&state->stack_info, ptregs, REGS_SIZE)) {
-                       state->regs = ptregs;
-                       state->full_regs = false;
-               } else
-                       state->regs = NULL;
-
+               state->regs = (void *)sp - IRET_FRAME_OFFSET;
+               state->full_regs = false;
                state->signal = true;
                break;
 
@@ -553,8 +529,18 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
        }
 
        if (get_stack_info((unsigned long *)state->sp, state->task,
-                          &state->stack_info, &state->stack_mask))
-               return;
+                          &state->stack_info, &state->stack_mask)) {
+               /*
+                * We weren't on a valid stack.  It's possible that
+                * we overflowed a valid stack into a guard page.
+                * See if the next page up is valid so that we can
+                * generate some kind of backtrace if this happens.
+                */
+               void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
+               if (get_stack_info(next_page, state->task, &state->stack_info,
+                                  &state->stack_mask))
+                       return;
+       }
 
        /*
         * The caller can provide the address of the first frame directly
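
deref_stack_iret_regs() above relies on IRET_FRAME_OFFSET/IRET_FRAME_SIZE, which
are defined elsewhere in the series; presumably they describe the five-word tail
of struct pt_regs starting at the ip member, matching the IRET_REGS_SIZE and
IRET_SP_OFFSET math removed earlier in the hunk. A hedged sketch of that layout
with a trimmed copy of the structure:

    #include <stddef.h>
    #include <stdio.h>

    /* Assumed shape only; the real struct pt_regs lives in the kernel headers. */
    struct pt_regs_sketch {
    	unsigned long r15, r14, r13, r12, bp, bx;
    	unsigned long r11, r10, r9, r8, ax, cx, dx, si, di;
    	unsigned long orig_ax;
    	unsigned long ip, cs, flags, sp, ss;	/* the hardware IRET frame */
    };

    #define IRET_FRAME_OFFSET	offsetof(struct pt_regs_sketch, ip)
    #define IRET_FRAME_SIZE	(sizeof(struct pt_regs_sketch) - IRET_FRAME_OFFSET)

    int main(void)
    {
    	/* 5 words (ip, cs, flags, sp, ss): offset 128, size 40 on x86-64 */
    	printf("iret frame offset %zu, size %zu\n",
    	       (size_t)IRET_FRAME_OFFSET, (size_t)IRET_FRAME_SIZE);
    	return 0;
    }
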
index a4009fb9be8725ce7bda96cd5e8160e524903266..d2a8b5a24a44a554e2f81f3b30309ef39aba0d8a 100644 (file)
@@ -107,6 +107,15 @@ SECTIONS
                SOFTIRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
+
+#ifdef CONFIG_X86_64
+               . = ALIGN(PAGE_SIZE);
+               _entry_trampoline = .;
+               *(.entry_trampoline)
+               . = ALIGN(PAGE_SIZE);
+               ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
+#endif
+
                /* End of text section */
                _etext = .;
        } :text = 0x9090
index cdc70a3a65838b10d558c3d0b14bcdf4d9e996d2..c2cea6651279f706f488cf51a523301e6de4ae77 100644 (file)
@@ -44,7 +44,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
        [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
        [CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
        [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
-       [CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX},
+       [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
        [CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
        [CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
        [CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
index 8079d141792af91994421d15c19c26d3bd386c59..b514b2b2845a334d4b53f28ed0b73c96f12d0e6a 100644 (file)
@@ -1046,7 +1046,6 @@ static void fetch_register_operand(struct operand *op)
 
 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
 {
-       ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
        case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
@@ -1068,13 +1067,11 @@ static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
 #endif
        default: BUG();
        }
-       ctxt->ops->put_fpu(ctxt);
 }
 
 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
                          int reg)
 {
-       ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
        case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
@@ -1096,12 +1093,10 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
 #endif
        default: BUG();
        }
-       ctxt->ops->put_fpu(ctxt);
 }
 
 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
 {
-       ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
        case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
@@ -1113,12 +1108,10 @@ static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
        case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
        default: BUG();
        }
-       ctxt->ops->put_fpu(ctxt);
 }
 
 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
 {
-       ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
        case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
@@ -1130,7 +1123,6 @@ static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
        case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
        default: BUG();
        }
-       ctxt->ops->put_fpu(ctxt);
 }
 
 static int em_fninit(struct x86_emulate_ctxt *ctxt)
@@ -1138,9 +1130,7 @@ static int em_fninit(struct x86_emulate_ctxt *ctxt)
        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);
 
-       ctxt->ops->get_fpu(ctxt);
        asm volatile("fninit");
-       ctxt->ops->put_fpu(ctxt);
        return X86EMUL_CONTINUE;
 }
 
@@ -1151,9 +1141,7 @@ static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);
 
-       ctxt->ops->get_fpu(ctxt);
        asm volatile("fnstcw %0": "+m"(fcw));
-       ctxt->ops->put_fpu(ctxt);
 
        ctxt->dst.val = fcw;
 
@@ -1167,9 +1155,7 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);
 
-       ctxt->ops->get_fpu(ctxt);
        asm volatile("fnstsw %0": "+m"(fsw));
-       ctxt->ops->put_fpu(ctxt);
 
        ctxt->dst.val = fsw;
 
@@ -2404,9 +2390,21 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 }
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
-                                    u64 cr0, u64 cr4)
+                                   u64 cr0, u64 cr3, u64 cr4)
 {
        int bad;
+       u64 pcid;
+
+       /* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
+       pcid = 0;
+       if (cr4 & X86_CR4_PCIDE) {
+               pcid = cr3 & 0xfff;
+               cr3 &= ~0xfff;
+       }
+
+       bad = ctxt->ops->set_cr(ctxt, 3, cr3);
+       if (bad)
+               return X86EMUL_UNHANDLEABLE;
 
        /*
         * First enable PAE, long mode needs it before CR0.PG = 1 is set.
@@ -2425,6 +2423,12 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
                bad = ctxt->ops->set_cr(ctxt, 4, cr4);
                if (bad)
                        return X86EMUL_UNHANDLEABLE;
+               if (pcid) {
+                       bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
+                       if (bad)
+                               return X86EMUL_UNHANDLEABLE;
+               }
+
        }
 
        return X86EMUL_CONTINUE;
@@ -2435,11 +2439,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
        struct desc_struct desc;
        struct desc_ptr dt;
        u16 selector;
-       u32 val, cr0, cr4;
+       u32 val, cr0, cr3, cr4;
        int i;
 
        cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
-       ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
+       cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
        ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
        ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
 
@@ -2481,14 +2485,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 
        ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
 
-       return rsm_enter_protected_mode(ctxt, cr0, cr4);
+       return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
        struct desc_struct desc;
        struct desc_ptr dt;
-       u64 val, cr0, cr4;
+       u64 val, cr0, cr3, cr4;
        u32 base3;
        u16 selector;
        int i, r;
@@ -2505,7 +2509,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
        cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
-       ctxt->ops->set_cr(ctxt, 3,  GET_SMSTATE(u64, smbase, 0x7f50));
+       cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
        cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
        ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
        val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
@@ -2533,7 +2537,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
        ctxt->ops->set_gdt(ctxt, &dt);
 
-       r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+       r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
        if (r != X86EMUL_CONTINUE)
                return r;
 
@@ -4001,12 +4005,8 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       ctxt->ops->get_fpu(ctxt);
-
        rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
 
-       ctxt->ops->put_fpu(ctxt);
-
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
@@ -4014,6 +4014,26 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
                                   fxstate_size(ctxt));
 }
 
+/*
+ * FXRSTOR might restore XMM registers not provided by the guest. Fill
+ * in the host registers (via FXSAVE) instead, so they won't be modified.
+ * (preemption has to stay disabled until FXRSTOR).
+ *
+ * Use noinline to keep the on-stack fxregs_state buffer out of callers' frames.
+ */
+static noinline int fxregs_fixup(struct fxregs_state *fx_state,
+                                const size_t used_size)
+{
+       struct fxregs_state fx_tmp;
+       int rc;
+
+       rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
+       memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
+              __fxstate_size(16) - used_size);
+
+       return rc;
+}
+
 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 {
        struct fxregs_state fx_state;
@@ -4024,19 +4044,17 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       ctxt->ops->get_fpu(ctxt);
-
        size = fxstate_size(ctxt);
+       rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+
        if (size < __fxstate_size(16)) {
-               rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
+               rc = fxregs_fixup(&fx_state, size);
                if (rc != X86EMUL_CONTINUE)
                        goto out;
        }
 
-       rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
-       if (rc != X86EMUL_CONTINUE)
-               goto out;
-
        if (fx_state.mxcsr >> 16) {
                rc = emulate_gp(ctxt, 0);
                goto out;
@@ -4046,8 +4064,6 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
                rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
 
 out:
-       ctxt->ops->put_fpu(ctxt);
-
        return rc;
 }
 
@@ -5000,6 +5016,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
        bool op_prefix = false;
        bool has_seg_override = false;
        struct opcode opcode;
+       u16 dummy;
+       struct desc_struct desc;
 
        ctxt->memop.type = OP_NONE;
        ctxt->memopp = NULL;
@@ -5018,6 +5036,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
        switch (mode) {
        case X86EMUL_MODE_REAL:
        case X86EMUL_MODE_VM86:
+               def_op_bytes = def_ad_bytes = 2;
+               ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
+               if (desc.d)
+                       def_op_bytes = def_ad_bytes = 4;
+               break;
        case X86EMUL_MODE_PROT16:
                def_op_bytes = def_ad_bytes = 2;
                break;
@@ -5290,9 +5313,7 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
 {
        int rc;
 
-       ctxt->ops->get_fpu(ctxt);
        rc = asm_safe("fwait");
-       ctxt->ops->put_fpu(ctxt);
 
        if (unlikely(rc != X86EMUL_CONTINUE))
                return emulate_exception(ctxt, MF_VECTOR, 0, false);
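
In rsm_enter_protected_mode() above, CR3 is first loaded with its PCID bits
cleared because CR4.PCIDE may only be set while CR3[11:0] is zero; the PCID is
re-applied afterwards. A small worked sketch of that split with a made-up CR3
value:

    #include <stdio.h>

    int main(void)
    {
    	/* Hypothetical CR3 restored from SMRAM, carrying PCID 0x003. */
    	unsigned long long cr3 = 0x12345678003ULL;
    	unsigned long long pcid = 0;
    	int pcide = 1;	/* stands in for (cr4 & X86_CR4_PCIDE) */

    	if (pcide) {
    		pcid = cr3 & 0xfff;	/* 0x003 */
    		cr3 &= ~0xfffULL;	/* 0x12345678000, written first */
    	}
    	/* ... PAE/CR0/CR4 are restored in between in the real code ... */
    	printf("first write cr3=%#llx, after PCIDE: cr3=%#llx\n",
    	       cr3, cr3 | pcid);
    	return 0;
    }
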
index bdff437acbcb7ebc3307523edd848fb7db009c39..4e822ad363f37f613d14ab94f35609bcf3539bf7 100644 (file)
@@ -209,12 +209,12 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
 
        old_irr = ioapic->irr;
        ioapic->irr |= mask;
-       if (edge)
+       if (edge) {
                ioapic->irr_delivered &= ~mask;
-       if ((edge && old_irr == ioapic->irr) ||
-           (!edge && entry.fields.remote_irr)) {
-               ret = 0;
-               goto out;
+               if (old_irr == ioapic->irr) {
+                       ret = 0;
+                       goto out;
+               }
        }
 
        ret = ioapic_service(ioapic, irq, line_status);
@@ -257,8 +257,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
                    index == RTC_GSI) {
                        if (kvm_apic_match_dest(vcpu, NULL, 0,
                                     e->fields.dest_id, e->fields.dest_mode) ||
-                           (e->fields.trig_mode == IOAPIC_EDGE_TRIG &&
-                            kvm_apic_pending_eoi(vcpu, e->fields.vector)))
+                           kvm_apic_pending_eoi(vcpu, e->fields.vector))
                                __set_bit(e->fields.vector,
                                          ioapic_handled_vectors);
                }
@@ -277,6 +276,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 {
        unsigned index;
        bool mask_before, mask_after;
+       int old_remote_irr, old_delivery_status;
        union kvm_ioapic_redirect_entry *e;
 
        switch (ioapic->ioregsel) {
@@ -299,14 +299,28 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                        return;
                e = &ioapic->redirtbl[index];
                mask_before = e->fields.mask;
+               /* Preserve read-only fields */
+               old_remote_irr = e->fields.remote_irr;
+               old_delivery_status = e->fields.delivery_status;
                if (ioapic->ioregsel & 1) {
                        e->bits &= 0xffffffff;
                        e->bits |= (u64) val << 32;
                } else {
                        e->bits &= ~0xffffffffULL;
                        e->bits |= (u32) val;
-                       e->fields.remote_irr = 0;
                }
+               e->fields.remote_irr = old_remote_irr;
+               e->fields.delivery_status = old_delivery_status;
+
+               /*
+                * Some OSes (Linux, Xen) assume that Remote IRR bit will
+                * be cleared by IOAPIC hardware when the entry is configured
+                * as edge-triggered. This behavior is used to simulate an
+                * explicit EOI on IOAPICs that don't have the EOI register.
+                */
+               if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
+                       e->fields.remote_irr = 0;
+
                mask_after = e->fields.mask;
                if (mask_before != mask_after)
                        kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
@@ -324,7 +338,9 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
        struct kvm_lapic_irq irqe;
        int ret;
 
-       if (entry->fields.mask)
+       if (entry->fields.mask ||
+           (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
+           entry->fields.remote_irr))
                return -1;
 
        ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
index 943acbf00c69d8f423289116bc363159144f883a..e2c1fb8d35cea28af684d4ba76d70a5e2e12e9a5 100644 (file)
@@ -266,9 +266,14 @@ static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
        recalculate_apic_map(apic->vcpu->kvm);
 }
 
+static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
+{
+       return ((id >> 4) << 16) | (1 << (id & 0xf));
+}
+
 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 {
-       u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
+       u32 ldr = kvm_apic_calc_x2apic_ldr(id);
 
        WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
 
@@ -2245,6 +2250,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
 {
        if (apic_x2apic_mode(vcpu->arch.apic)) {
                u32 *id = (u32 *)(s->regs + APIC_ID);
+               u32 *ldr = (u32 *)(s->regs + APIC_LDR);
 
                if (vcpu->kvm->arch.x2apic_format) {
                        if (*id != vcpu->vcpu_id)
@@ -2255,6 +2261,10 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
                        else
                                *id <<= 24;
                }
+
+               /* In x2APIC mode, the LDR is fixed and based on the id */
+               if (set)
+                       *ldr = kvm_apic_calc_x2apic_ldr(*id);
        }
 
        return 0;
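
kvm_apic_calc_x2apic_ldr() above is a pure function of the x2APIC ID: bits 31:16
carry the cluster (id >> 4) and bits 15:0 carry a one-hot member position
(id & 0xf). A quick stand-alone check with an arbitrary ID:

    #include <stdio.h>

    static unsigned int calc_x2apic_ldr(unsigned int id)
    {
    	/* same formula as kvm_apic_calc_x2apic_ldr() in the hunk above */
    	return ((id >> 4) << 16) | (1u << (id & 0xf));
    }

    int main(void)
    {
    	/* APIC ID 0x23: cluster 2, member bit 3 -> LDR 0x00020008 */
    	printf("LDR(0x23) = %#x\n", calc_x2apic_ldr(0x23));
    	return 0;
    }
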
index e5e66e5c664057bb5cc5ad2660008ccbf19b69e5..c4deb1f34faa6ce7ffe6bcaaebddc3e87b2a9a69 100644 (file)
@@ -3395,7 +3395,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                spin_lock(&vcpu->kvm->mmu_lock);
                if(make_mmu_pages_available(vcpu) < 0) {
                        spin_unlock(&vcpu->kvm->mmu_lock);
-                       return 1;
+                       return -ENOSPC;
                }
                sp = kvm_mmu_get_page(vcpu, 0, 0,
                                vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);
@@ -3410,7 +3410,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                        spin_lock(&vcpu->kvm->mmu_lock);
                        if (make_mmu_pages_available(vcpu) < 0) {
                                spin_unlock(&vcpu->kvm->mmu_lock);
-                               return 1;
+                               return -ENOSPC;
                        }
                        sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
                                        i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
@@ -3450,7 +3450,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                spin_lock(&vcpu->kvm->mmu_lock);
                if (make_mmu_pages_available(vcpu) < 0) {
                        spin_unlock(&vcpu->kvm->mmu_lock);
-                       return 1;
+                       return -ENOSPC;
                }
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);
@@ -3487,7 +3487,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                spin_lock(&vcpu->kvm->mmu_lock);
                if (make_mmu_pages_available(vcpu) < 0) {
                        spin_unlock(&vcpu->kvm->mmu_lock);
-                       return 1;
+                       return -ENOSPC;
                }
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
                                      0, ACC_ALL);
index b71daed3cca29dc8b1c4ccc4ba34e927dccbd5ea..eb714f1cdf7eee4ca9036005c3ab72ef9228ae9b 100644 (file)
@@ -361,6 +361,7 @@ static void recalc_intercepts(struct vcpu_svm *svm)
 {
        struct vmcb_control_area *c, *h;
        struct nested_state *g;
+       u32 h_intercept_exceptions;
 
        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 
@@ -371,9 +372,14 @@ static void recalc_intercepts(struct vcpu_svm *svm)
        h = &svm->nested.hsave->control;
        g = &svm->nested;
 
+       /* No need to intercept #UD if L1 doesn't intercept it */
+       h_intercept_exceptions =
+               h->intercept_exceptions & ~(1U << UD_VECTOR);
+
        c->intercept_cr = h->intercept_cr | g->intercept_cr;
        c->intercept_dr = h->intercept_dr | g->intercept_dr;
-       c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
+       c->intercept_exceptions =
+               h_intercept_exceptions | g->intercept_exceptions;
        c->intercept = h->intercept | g->intercept;
 }
 
@@ -2196,7 +2202,10 @@ static int ud_interception(struct vcpu_svm *svm)
 {
        int er;
 
+       WARN_ON_ONCE(is_guest_mode(&svm->vcpu));
        er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
+       if (er == EMULATE_USER_EXIT)
+               return 0;
        if (er != EMULATE_DONE)
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        return 1;
@@ -3671,6 +3680,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        u32 ecx = msr->index;
        u64 data = msr->data;
        switch (ecx) {
+       case MSR_IA32_CR_PAT:
+               if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+                       return 1;
+               vcpu->arch.pat = data;
+               svm->vmcb->save.g_pat = data;
+               mark_dirty(svm->vmcb, VMCB_NPT);
+               break;
        case MSR_IA32_TSC:
                kvm_write_tsc(vcpu, msr);
                break;
index 7c3522a989d0b37713a802be82ee1f265fe64c9a..023afa0c8887002d6a79a8b121b46996feec1a61 100644 (file)
@@ -70,6 +70,9 @@ MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
 static bool __read_mostly enable_vpid = 1;
 module_param_named(vpid, enable_vpid, bool, 0444);
 
+static bool __read_mostly enable_vnmi = 1;
+module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
+
 static bool __read_mostly flexpriority_enabled = 1;
 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
 
@@ -202,6 +205,10 @@ struct loaded_vmcs {
        bool nmi_known_unmasked;
        unsigned long vmcs_host_cr3;    /* May not match real cr3 */
        unsigned long vmcs_host_cr4;    /* May not match real cr4 */
+       /* Support for vnmi-less CPUs */
+       int soft_vnmi_blocked;
+       ktime_t entry_time;
+       s64 vnmi_blocked_time;
        struct list_head loaded_vmcss_on_cpu_link;
 };
 
@@ -1291,6 +1298,11 @@ static inline bool cpu_has_vmx_invpcid(void)
                SECONDARY_EXEC_ENABLE_INVPCID;
 }
 
+static inline bool cpu_has_virtual_nmis(void)
+{
+       return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
+}
+
 static inline bool cpu_has_vmx_wbinvd_exit(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -1348,11 +1360,6 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
                (vmcs12->secondary_vm_exec_control & bit);
 }
 
-static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
-{
-       return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
-}
-
 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
 {
        return vmcs12->pin_based_vm_exec_control &
@@ -1880,7 +1887,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
        u32 eb;
 
-       eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
+       eb = (1u << PF_VECTOR) | (1u << MC_VECTOR) |
             (1u << DB_VECTOR) | (1u << AC_VECTOR);
        if ((vcpu->guest_debug &
             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
@@ -1898,6 +1905,8 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
         */
        if (is_guest_mode(vcpu))
                eb |= get_vmcs12(vcpu)->exception_bitmap;
+       else
+               eb |= 1u << UD_VECTOR;
 
        vmcs_write32(EXCEPTION_BITMAP, eb);
 }
@@ -2293,7 +2302,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 * processors.  See 22.2.4.
                 */
                vmcs_writel(HOST_TR_BASE,
-                           (unsigned long)this_cpu_ptr(&cpu_tss));
+                           (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
                vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
 
                /*
@@ -3712,9 +3721,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                                &_vmexit_control) < 0)
                return -EIO;
 
-       min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
-               PIN_BASED_VIRTUAL_NMIS;
-       opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER;
+       min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
+       opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
+                PIN_BASED_VMX_PREEMPTION_TIMER;
        if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
                                &_pin_based_exec_control) < 0)
                return -EIO;
@@ -5232,6 +5241,10 @@ static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
 
        if (!kvm_vcpu_apicv_active(&vmx->vcpu))
                pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
+
+       if (!enable_vnmi)
+               pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
+
        /* Enable the preemption timer dynamically */
        pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
        return pin_based_exec_ctrl;
@@ -5589,7 +5602,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
                vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
        }
 
-       vmcs_writel(GUEST_RFLAGS, 0x02);
+       kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
        kvm_rip_write(vcpu, 0xfff0);
 
        vmcs_writel(GUEST_GDTR_BASE, 0);
@@ -5666,7 +5679,8 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
 {
-       if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
+       if (!enable_vnmi ||
+           vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
                enable_irq_window(vcpu);
                return;
        }
@@ -5706,6 +5720,19 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+       if (!enable_vnmi) {
+               /*
+                * Tracking the NMI-blocked state in software is built upon
+                * finding the next open IRQ window. This, in turn, depends on
+                * well-behaving guests: They have to keep IRQs disabled at
+                * least as long as the NMI handler runs. Otherwise we may
+                * cause NMI nesting, maybe breaking the guest. But as this is
+                * highly unlikely, we can live with the residual risk.
+                */
+               vmx->loaded_vmcs->soft_vnmi_blocked = 1;
+               vmx->loaded_vmcs->vnmi_blocked_time = 0;
+       }
+
        ++vcpu->stat.nmi_injections;
        vmx->loaded_vmcs->nmi_known_unmasked = false;
 
@@ -5724,6 +5751,8 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        bool masked;
 
+       if (!enable_vnmi)
+               return vmx->loaded_vmcs->soft_vnmi_blocked;
        if (vmx->loaded_vmcs->nmi_known_unmasked)
                return false;
        masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
@@ -5735,13 +5764,20 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       vmx->loaded_vmcs->nmi_known_unmasked = !masked;
-       if (masked)
-               vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
-                             GUEST_INTR_STATE_NMI);
-       else
-               vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
-                               GUEST_INTR_STATE_NMI);
+       if (!enable_vnmi) {
+               if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
+                       vmx->loaded_vmcs->soft_vnmi_blocked = masked;
+                       vmx->loaded_vmcs->vnmi_blocked_time = 0;
+               }
+       } else {
+               vmx->loaded_vmcs->nmi_known_unmasked = !masked;
+               if (masked)
+                       vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                     GUEST_INTR_STATE_NMI);
+               else
+                       vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                       GUEST_INTR_STATE_NMI);
+       }
 }
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -5749,6 +5785,10 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
        if (to_vmx(vcpu)->nested.nested_run_pending)
                return 0;
 
+       if (!enable_vnmi &&
+           to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
+               return 0;
+
        return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
                  (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
                   | GUEST_INTR_STATE_NMI));
@@ -5877,11 +5917,10 @@ static int handle_exception(struct kvm_vcpu *vcpu)
                return 1;  /* already handled by vmx_vcpu_run() */
 
        if (is_invalid_opcode(intr_info)) {
-               if (is_guest_mode(vcpu)) {
-                       kvm_queue_exception(vcpu, UD_VECTOR);
-                       return 1;
-               }
+               WARN_ON_ONCE(is_guest_mode(vcpu));
                er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
+               if (er == EMULATE_USER_EXIT)
+                       return 0;
                if (er != EMULATE_DONE)
                        kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
@@ -6476,6 +6515,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
         * AAK134, BY25.
         */
        if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+                       enable_vnmi &&
                        (exit_qualification & INTR_INFO_UNBLOCK_NMI))
                vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
 
@@ -6535,6 +6575,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 
 static int handle_nmi_window(struct kvm_vcpu *vcpu)
 {
+       WARN_ON_ONCE(!enable_vnmi);
        vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
                        CPU_BASED_VIRTUAL_NMI_PENDING);
        ++vcpu->stat.nmi_window_exits;
@@ -6562,7 +6603,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                if (kvm_test_request(KVM_REQ_EVENT, vcpu))
                        return 1;
 
-               err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
+               err = emulate_instruction(vcpu, 0);
 
                if (err == EMULATE_USER_EXIT) {
                        ++vcpu->stat.mmio_exits;
@@ -6710,16 +6751,10 @@ static __init int hardware_setup(void)
                        goto out;
        }
 
-       vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
        memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
        memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
 
-       /*
-        * Allow direct access to the PC debug port (it is often used for I/O
-        * delays, but the vmexits simply slow things down).
-        */
        memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
-       clear_bit(0x80, vmx_io_bitmap_a);
 
        memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
 
@@ -6758,6 +6793,9 @@ static __init int hardware_setup(void)
        if (!cpu_has_vmx_flexpriority())
                flexpriority_enabled = 0;
 
+       if (!cpu_has_virtual_nmis())
+               enable_vnmi = 0;
+
        /*
         * set_apic_access_page_addr() is used to reload apic access
         * page upon invalidation.  No need to do anything if not
@@ -6962,7 +7000,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
        }
 
        /* Create a new VMCS */
-       item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
+       item = kzalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
        if (!item)
                return NULL;
        item->vmcs02.vmcs = alloc_vmcs();
@@ -7371,10 +7409,11 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
  */
 static void free_nested(struct vcpu_vmx *vmx)
 {
-       if (!vmx->nested.vmxon)
+       if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
                return;
 
        vmx->nested.vmxon = false;
+       vmx->nested.smm.vmxon = false;
        free_vpid(vmx->nested.vpid02);
        vmx->nested.posted_intr_nv = -1;
        vmx->nested.current_vmptr = -1ull;
@@ -7979,6 +8018,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
         * "blocked by NMI" bit has to be set before next VM entry.
         */
        if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+                       enable_vnmi &&
                        (exit_qualification & INTR_INFO_UNBLOCK_NMI))
                vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
                                GUEST_INTR_STATE_NMI);
@@ -8823,6 +8863,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
                return 0;
        }
 
+       if (unlikely(!enable_vnmi &&
+                    vmx->loaded_vmcs->soft_vnmi_blocked)) {
+               if (vmx_interrupt_allowed(vcpu)) {
+                       vmx->loaded_vmcs->soft_vnmi_blocked = 0;
+               } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
+                          vcpu->arch.nmi_pending) {
+                       /*
+                        * This CPU doesn't support us in finding the end of an
+                        * NMI-blocked window if the guest runs with IRQs
+                        * disabled. So we pull the trigger after 1 s of
+                        * futile waiting, but inform the user about this.
+                        */
+                       printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
+                              "state on VCPU %d after 1 s timeout\n",
+                              __func__, vcpu->vcpu_id);
+                       vmx->loaded_vmcs->soft_vnmi_blocked = 0;
+               }
+       }
+
        if (exit_reason < kvm_vmx_max_exit_handlers
            && kvm_vmx_exit_handlers[exit_reason])
                return kvm_vmx_exit_handlers[exit_reason](vcpu);
@@ -9105,33 +9164,38 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 
        idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
-       if (vmx->loaded_vmcs->nmi_known_unmasked)
-               return;
-       /*
-        * Can't use vmx->exit_intr_info since we're not sure what
-        * the exit reason is.
-        */
-       exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-       unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
-       vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
-       /*
-        * SDM 3: 27.7.1.2 (September 2008)
-        * Re-set bit "block by NMI" before VM entry if vmexit caused by
-        * a guest IRET fault.
-        * SDM 3: 23.2.2 (September 2008)
-        * Bit 12 is undefined in any of the following cases:
-        *  If the VM exit sets the valid bit in the IDT-vectoring
-        *   information field.
-        *  If the VM exit is due to a double fault.
-        */
-       if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
-           vector != DF_VECTOR && !idtv_info_valid)
-               vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
-                             GUEST_INTR_STATE_NMI);
-       else
-               vmx->loaded_vmcs->nmi_known_unmasked =
-                       !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
-                         & GUEST_INTR_STATE_NMI);
+       if (enable_vnmi) {
+               if (vmx->loaded_vmcs->nmi_known_unmasked)
+                       return;
+               /*
+                * Can't use vmx->exit_intr_info since we're not sure what
+                * the exit reason is.
+                */
+               exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+               unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
+               vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+               /*
+                * SDM 3: 27.7.1.2 (September 2008)
+                * Re-set bit "block by NMI" before VM entry if vmexit caused by
+                * a guest IRET fault.
+                * SDM 3: 23.2.2 (September 2008)
+                * Bit 12 is undefined in any of the following cases:
+                *  If the VM exit sets the valid bit in the IDT-vectoring
+                *   information field.
+                *  If the VM exit is due to a double fault.
+                */
+               if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
+                   vector != DF_VECTOR && !idtv_info_valid)
+                       vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                     GUEST_INTR_STATE_NMI);
+               else
+                       vmx->loaded_vmcs->nmi_known_unmasked =
+                               !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
+                                 & GUEST_INTR_STATE_NMI);
+       } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
+               vmx->loaded_vmcs->vnmi_blocked_time +=
+                       ktime_to_ns(ktime_sub(ktime_get(),
+                                             vmx->loaded_vmcs->entry_time));
 }
 
 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
@@ -9248,6 +9312,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long debugctlmsr, cr3, cr4;
 
+       /* Record the guest's net vcpu time for enforced NMI injections. */
+       if (unlikely(!enable_vnmi &&
+                    vmx->loaded_vmcs->soft_vnmi_blocked))
+               vmx->loaded_vmcs->entry_time = ktime_get();
+
        /* Don't enter VMX if guest state is invalid, let the exit handler
           start emulation until we arrive back to a valid state */
        if (vmx->emulation_required)
@@ -9727,8 +9796,7 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
        cr4_fixed1_update(X86_CR4_SMEP,       ebx, bit(X86_FEATURE_SMEP));
        cr4_fixed1_update(X86_CR4_SMAP,       ebx, bit(X86_FEATURE_SMAP));
        cr4_fixed1_update(X86_CR4_PKE,        ecx, bit(X86_FEATURE_PKU));
-       /* TODO: Use X86_CR4_UMIP and X86_FEATURE_UMIP macros */
-       cr4_fixed1_update(bit(11),            ecx, bit(2));
+       cr4_fixed1_update(X86_CR4_UMIP,       ecx, bit(X86_FEATURE_UMIP));
 
 #undef cr4_fixed1_update
 }
@@ -10802,6 +10870,11 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                        return 1;
        }
 
+       if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
+               (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
+               (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
+                       return 1;
+
        return 0;
 }
 
@@ -11026,13 +11099,12 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qual;
-
-       if (kvm_event_needs_reinjection(vcpu))
-               return -EBUSY;
+       bool block_nested_events =
+           vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
 
        if (vcpu->arch.exception.pending &&
                nested_vmx_check_exception(vcpu, &exit_qual)) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
                vcpu->arch.exception.pending = false;
@@ -11041,14 +11113,14 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 
        if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
            vmx->nested.preemption_timer_expired) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
                return 0;
        }
 
        if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
                                  NMI_VECTOR | INTR_TYPE_NMI_INTR |
@@ -11064,7 +11136,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 
        if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
            nested_exit_on_intr(vcpu)) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
                return 0;
@@ -11251,6 +11323,24 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        kvm_clear_interrupt_queue(vcpu);
 }
 
+static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu,
+                       struct vmcs12 *vmcs12)
+{
+       u32 entry_failure_code;
+
+       nested_ept_uninit_mmu_context(vcpu);
+
+       /*
+        * Only PDPTE load can fail as the value of cr3 was checked on entry and
+        * couldn't have changed.
+        */
+       if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+               nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
+
+       if (!enable_ept)
+               vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+}
+
 /*
  * A part of what we need to do when the nested L2 guest exits and we want to
  * run its L1 parent, is to reset L1's guest state to the host state specified
@@ -11264,7 +11354,6 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
                                   struct vmcs12 *vmcs12)
 {
        struct kvm_segment seg;
-       u32 entry_failure_code;
 
        if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
                vcpu->arch.efer = vmcs12->host_ia32_efer;
@@ -11291,17 +11380,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
        vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
        vmx_set_cr4(vcpu, vmcs12->host_cr4);
 
-       nested_ept_uninit_mmu_context(vcpu);
-
-       /*
-        * Only PDPTE load can fail as the value of cr3 was checked on entry and
-        * couldn't have changed.
-        */
-       if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
-               nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
-
-       if (!enable_ept)
-               vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+       load_vmcs12_mmu_host_state(vcpu, vmcs12);
 
        if (enable_vpid) {
                /*
@@ -11531,6 +11610,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
         * accordingly.
         */
        nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+
+       load_vmcs12_mmu_host_state(vcpu, vmcs12);
+
        /*
         * The emulated instruction was already skipped in
         * nested_vmx_run, but the updated RIP was never
index 34c85aa2e2d1d40ffc65f461d45b50d8666b5491..1cec2c62a0b08405d2bd7c8908d6b7f33de3b63c 100644 (file)
@@ -107,6 +107,9 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
 static bool __read_mostly ignore_msrs = 0;
 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
+static bool __read_mostly report_ignored_msrs = true;
+module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
+
 unsigned int min_timer_period_us = 500;
 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
 
@@ -1795,10 +1798,13 @@ u64 get_kvmclock_ns(struct kvm *kvm)
        /* both __this_cpu_read() and rdtsc() should be on the same cpu */
        get_cpu();
 
-       kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
-                          &hv_clock.tsc_shift,
-                          &hv_clock.tsc_to_system_mul);
-       ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+       if (__this_cpu_read(cpu_tsc_khz)) {
+               kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
+                                  &hv_clock.tsc_shift,
+                                  &hv_clock.tsc_to_system_mul);
+               ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+       } else
+               ret = ktime_get_boot_ns() + ka->kvmclock_offset;
 
        put_cpu();
 
@@ -1830,6 +1836,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
         */
        BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
+       if (guest_hv_clock.version & 1)
+               ++guest_hv_clock.version;  /* first time write, random junk */
+
        vcpu->hv_clock.version = guest_hv_clock.version + 1;
        kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
                                &vcpu->hv_clock,
@@ -2322,7 +2331,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                /* Drop writes to this legacy MSR -- see rdmsr
                 * counterpart for further detail.
                 */
-               vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
+               if (report_ignored_msrs)
+                       vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
+                               msr, data);
                break;
        case MSR_AMD64_OSVW_ID_LENGTH:
                if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
@@ -2359,8 +2370,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                    msr, data);
                        return 1;
                } else {
-                       vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
-                                   msr, data);
+                       if (report_ignored_msrs)
+                               vcpu_unimpl(vcpu,
+                                       "ignored wrmsr: 0x%x data 0x%llx\n",
+                                       msr, data);
                        break;
                }
        }
@@ -2578,7 +2591,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                               msr_info->index);
                        return 1;
                } else {
-                       vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
+                       if (report_ignored_msrs)
+                               vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n",
+                                       msr_info->index);
                        msr_info->data = 0;
                }
                break;
@@ -2922,7 +2937,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        pagefault_enable();
        kvm_x86_ops->vcpu_put(vcpu);
-       kvm_put_guest_fpu(vcpu);
        vcpu->arch.last_host_tsc = rdtsc();
 }
 
@@ -4370,7 +4384,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
                                         addr, n, v))
                    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
                        break;
-               trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
+               trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
                handled += n;
                addr += n;
                len -= n;
@@ -4629,7 +4643,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
 {
        if (vcpu->mmio_read_completed) {
                trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
-                              vcpu->mmio_fragments[0].gpa, *(u64 *)val);
+                              vcpu->mmio_fragments[0].gpa, val);
                vcpu->mmio_read_completed = 0;
                return 1;
        }
@@ -4651,14 +4665,14 @@ static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
 {
-       trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
+       trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
        return vcpu_mmio_write(vcpu, gpa, bytes, val);
 }
 
 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
                          void *val, int bytes)
 {
-       trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
+       trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
        return X86EMUL_IO_NEEDED;
 }
 
@@ -5237,17 +5251,6 @@ static void emulator_halt(struct x86_emulate_ctxt *ctxt)
        emul_to_vcpu(ctxt)->arch.halt_request = 1;
 }
 
-static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
-{
-       preempt_disable();
-       kvm_load_guest_fpu(emul_to_vcpu(ctxt));
-}
-
-static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
-{
-       preempt_enable();
-}
-
 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
                              struct x86_instruction_info *info,
                              enum x86_intercept_stage stage)
@@ -5325,8 +5328,6 @@ static const struct x86_emulate_ops emulate_ops = {
        .halt                = emulator_halt,
        .wbinvd              = emulator_wbinvd,
        .fix_hypercall       = emulator_fix_hypercall,
-       .get_fpu             = emulator_get_fpu,
-       .put_fpu             = emulator_put_fpu,
        .intercept           = emulator_intercept,
        .get_cpuid           = emulator_get_cpuid,
        .set_nmi_mask        = emulator_set_nmi_mask,
@@ -5430,7 +5431,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
                vcpu->run->internal.ndata = 0;
-               r = EMULATE_FAIL;
+               r = EMULATE_USER_EXIT;
        }
        kvm_queue_exception(vcpu, UD_VECTOR);
 
@@ -5722,6 +5723,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                        if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
                                                emulation_type))
                                return EMULATE_DONE;
+                       if (ctxt->have_exception && inject_emulated_exception(vcpu))
+                               return EMULATE_DONE;
                        if (emulation_type & EMULTYPE_SKIP)
                                return EMULATE_FAIL;
                        return handle_emulation_failure(vcpu);
@@ -6761,6 +6764,20 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
        kvm_x86_ops->tlb_flush(vcpu);
 }
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+               unsigned long start, unsigned long end)
+{
+       unsigned long apic_address;
+
+       /*
+        * The physical address of the APIC access page is stored in the VMCS.
+        * Update it when it becomes invalid.
+        */
+       apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+       if (start <= apic_address && apic_address < end)
+               kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+}
+
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
        struct page *page = NULL;
@@ -6935,7 +6952,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        preempt_disable();
 
        kvm_x86_ops->prepare_guest_switch(vcpu);
-       kvm_load_guest_fpu(vcpu);
 
        /*
         * Disable IRQs before setting IN_GUEST_MODE.  Posted interrupt
@@ -7248,14 +7264,11 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-       struct fpu *fpu = &current->thread.fpu;
        int r;
-       sigset_t sigsaved;
 
-       fpu__initialize(fpu);
+       kvm_sigset_activate(vcpu);
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_load_guest_fpu(vcpu);
 
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                if (kvm_run->immediate_exit) {
@@ -7297,9 +7310,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                r = vcpu_run(vcpu);
 
 out:
+       kvm_put_guest_fpu(vcpu);
        post_kvm_run_save(vcpu);
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        return r;
 }
@@ -7367,7 +7380,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 #endif
 
        kvm_rip_write(vcpu, regs->rip);
-       kvm_set_rflags(vcpu, regs->rflags);
+       kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
 
        vcpu->arch.exception.pending = false;
 
@@ -7481,6 +7494,29 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
 
+int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) {
+               /*
+                * When EFER.LME and CR0.PG are set, the processor is in
+                * 64-bit mode (though maybe in a 32-bit code segment).
+                * CR4.PAE and EFER.LMA must be set.
+                */
+               if (!(sregs->cr4 & X86_CR4_PAE_BIT)
+                   || !(sregs->efer & EFER_LMA))
+                       return -EINVAL;
+       } else {
+               /*
+                * Not in 64-bit mode: EFER.LMA is clear and the code
+                * segment cannot be 64-bit.
+                */
+               if (sregs->efer & EFER_LMA || sregs->cs.l)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
 {
@@ -7493,6 +7529,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                        (sregs->cr4 & X86_CR4_OSXSAVE))
                return -EINVAL;
 
+       if (kvm_valid_sregs(vcpu, sregs))
+               return -EINVAL;
+
        apic_base_msr.data = sregs->apic_base;
        apic_base_msr.host_initiated = true;
        if (kvm_set_apic_base(vcpu, &apic_base_msr))
@@ -7690,32 +7729,25 @@ static void fx_init(struct kvm_vcpu *vcpu)
        vcpu->arch.cr0 |= X86_CR0_ET;
 }
 
+/* Swap (qemu) user FPU context for the guest FPU context. */
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->guest_fpu_loaded)
-               return;
-
-       /*
-        * Restore all possible states in the guest,
-        * and assume host would use all available bits.
-        * Guest xcr0 would be loaded later.
-        */
-       vcpu->guest_fpu_loaded = 1;
-       __kernel_fpu_begin();
+       preempt_disable();
+       copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
        /* PKRU is separately restored in kvm_x86_ops->run.  */
        __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
                                ~XFEATURE_MASK_PKRU);
+       preempt_enable();
        trace_kvm_fpu(1);
 }
 
+/* When vcpu_run ends, restore user space FPU context. */
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->guest_fpu_loaded)
-               return;
-
-       vcpu->guest_fpu_loaded = 0;
+       preempt_disable();
        copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
-       __kernel_fpu_end();
+       copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
+       preempt_enable();
        ++vcpu->stat.fpu_reload;
        trace_kvm_fpu(0);
 }
@@ -7832,7 +7864,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
                 * Avoid having the INIT path from kvm_apic_has_events() be called
                 * with a loaded FPU, which would not let userspace fix the state.
                 */
-               kvm_put_guest_fpu(vcpu);
+               if (init_event)
+                       kvm_put_guest_fpu(vcpu);
                mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
                                        XFEATURE_MASK_BNDREGS);
                if (mpx_state_buffer)
@@ -7841,6 +7874,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
                                        XFEATURE_MASK_BNDCSR);
                if (mpx_state_buffer)
                        memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
+               if (init_event)
+                       kvm_load_guest_fpu(vcpu);
        }
 
        if (!init_event) {
index 553f8fd23cc4733d0edafa862b95446f7a04bab1..4846eff7e4c8b1505501d7f1dcb64127d0a4c67c 100644 (file)
@@ -107,10 +107,10 @@ static void delay_mwaitx(unsigned long __loops)
                delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
 
                /*
-                * Use cpu_tss as a cacheline-aligned, seldomly
+                * Use cpu_tss_rw as a cacheline-aligned, seldomly
                 * accessed per-cpu variable as the monitor target.
                 */
-               __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);
+               __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
 
                /*
                 * AMD, like Intel, supports the EAX hint and EAX=0xf
index 35625d279458f478b7d14ea47461e8a085924b13..9119d8e41f1ff59e2c8584a36f0f03d000bb1bbe 100644 (file)
@@ -733,11 +733,11 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
  *
  * Returns:
  *
- * A signed 8-bit value containing the default parameters on success.
+ * An int containing ORed-in default parameters on success.
  *
  * -EINVAL on error.
  */
-char insn_get_code_seg_params(struct pt_regs *regs)
+int insn_get_code_seg_params(struct pt_regs *regs)
 {
        struct desc_struct *desc;
        short sel;
index 12e377184ee4ad0c55d00c3784f08b393764a2bc..e0b85930dd773e87417e2b4957b8af61221b04c0 100644 (file)
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
 fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
 fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
 fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
-ff:
+ff: UD0
 EndTable
 
 Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
 7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
 80: INVEPT Gy,Mdq (66)
-81: INVPID Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
 82: INVPCID Gy,Mdq (66)
 83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
 88: vexpandps/d Vpd,Wpd (66),(ev)
@@ -896,7 +896,7 @@ EndTable
 
 GrpTable: Grp3_1
 0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
 2: NOT Eb
 3: NEG Eb
 4: MUL AL,Eb
@@ -970,6 +970,15 @@ GrpTable: Grp9
 EndTable
 
 GrpTable: Grp10
+# all are UD1
+0: UD1
+1: UD1
+2: UD1
+3: UD1
+4: UD1
+5: UD1
+6: UD1
+7: UD1
 EndTable
 
 # Grp11A and Grp11B are expressed as Grp11 in Intel SDM
index 3321b446b66cdb99f16fe145bf9bad50f9d1fd01..9fe656c42aa5b16560e139cebba247ca52756c80 100644 (file)
@@ -1,6 +1,7 @@
 #include <linux/extable.h>
 #include <linux/uaccess.h>
 #include <linux/sched/debug.h>
+#include <xen/xen.h>
 
 #include <asm/fpu/internal.h>
 #include <asm/traps.h>
@@ -82,7 +83,7 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup,
 
        return true;
 }
-EXPORT_SYMBOL_GPL(ex_handler_refcount);
+EXPORT_SYMBOL(ex_handler_refcount);
 
 /*
  * Handler for when we fail to restore a task's FPU state.  We should never get
@@ -212,8 +213,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
         * Old CPUs leave the high bits of CS on the stack
         * undefined.  I'm not sure which CPUs do this, but at least
         * the 486 DX works this way.
+        * Xen pv domains are not using the default __KERNEL_CS.
         */
-       if (regs->cs != __KERNEL_CS)
+       if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
                goto fail;
 
        /*
index 78ca9a8ee4548a270045e81841ef6380ed6d260a..06fe3d51d385b88111961c0b5addc673fcd597a2 100644 (file)
@@ -701,7 +701,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
        else
                printk(KERN_CONT "paging request");
 
-       printk(KERN_CONT " at %p\n", (void *) address);
+       printk(KERN_CONT " at %px\n", (void *) address);
        printk(KERN_ALERT "IP: %pS\n", (void *)regs->ip);
 
        dump_pagetable(address);
@@ -860,7 +860,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
+       printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
                task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                tsk->comm, task_pid_nr(tsk), address,
                (void *)regs->ip, (void *)regs->sp, error_code);
index 8ae0000cbdb34d8c6db0efacc566fb3a5b78d2d3..00b296617ca436c3cea79edcbb0a94d034ee52a3 100644 (file)
@@ -158,6 +158,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        if (len > TASK_SIZE)
                return -ENOMEM;
 
+       /* No address checking. See comment at mmap_address_hint_valid() */
        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
@@ -165,12 +166,16 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        }
 
        if (addr) {
-               addr = ALIGN(addr, huge_page_size(h));
+               addr &= huge_page_mask(h);
+               if (!mmap_address_hint_valid(addr, len))
+                       goto get_unmapped_area;
+
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+               if (!vma || addr + len <= vm_start_gap(vma))
                        return addr;
        }
+
+get_unmapped_area:
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
index 6e4573b1da341bd41095b11a692afcba51e4b850..c45b6ec5357bcd2e9f6626bd738c700cccd0a173 100644 (file)
@@ -404,11 +404,11 @@ void iounmap(volatile void __iomem *addr)
                return;
        }
 
+       mmiotrace_iounmap(addr);
+
        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);
 
-       mmiotrace_iounmap(addr);
-
        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
index 99dfed6dfef8b2f9028f82b89ab8dc2bde8173c4..9ec70d780f1f4172e3c69068f55722d13f003b06 100644 (file)
@@ -277,6 +277,7 @@ void __init kasan_early_init(void)
 void __init kasan_init(void)
 {
        int i;
+       void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;
 
 #ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
@@ -329,8 +330,23 @@ void __init kasan_init(void)
                              (unsigned long)kasan_mem_to_shadow(_end),
                              early_pfn_to_nid(__pa(_stext)));
 
+       shadow_cpu_entry_begin = (void *)__fix_to_virt(FIX_CPU_ENTRY_AREA_BOTTOM);
+       shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
+       shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
+                                               PAGE_SIZE);
+
+       shadow_cpu_entry_end = (void *)(__fix_to_virt(FIX_CPU_ENTRY_AREA_TOP) + PAGE_SIZE);
+       shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
+       shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
+                                       PAGE_SIZE);
+
        kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
-                       (void *)KASAN_SHADOW_END);
+                                  shadow_cpu_entry_begin);
+
+       kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
+                             (unsigned long)shadow_cpu_entry_end, 0);
+
+       kasan_populate_zero_shadow(shadow_cpu_entry_end, (void *)KASAN_SHADOW_END);
 
        load_cr3(init_top_pgt);
        __flush_tlb_all();
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
deleted file mode 100644 (file)
index cec5940..0000000
+++ /dev/null
@@ -1 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c
deleted file mode 100644 (file)
index cec5940..0000000
+++ /dev/null
@@ -1 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c
deleted file mode 100644 (file)
index cec5940..0000000
+++ /dev/null
@@ -1 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c
deleted file mode 100644 (file)
index cec5940..0000000
+++ /dev/null
@@ -1 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
diff --git a/arch/x86/mm/kmemcheck/selftest.h b/arch/x86/mm/kmemcheck/selftest.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
index c21c2ed046120c8e12d439e71848200f11c41bd5..58477ec3d66d08acf07c1bc21bb9a55a78fcaa28 100644 (file)
@@ -435,17 +435,18 @@ int register_kmmio_probe(struct kmmio_probe *p)
        unsigned long flags;
        int ret = 0;
        unsigned long size = 0;
+       unsigned long addr = p->addr & PAGE_MASK;
        const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
        unsigned int l;
        pte_t *pte;
 
        spin_lock_irqsave(&kmmio_lock, flags);
-       if (get_kmmio_probe(p->addr)) {
+       if (get_kmmio_probe(addr)) {
                ret = -EEXIST;
                goto out;
        }
 
-       pte = lookup_address(p->addr, &l);
+       pte = lookup_address(addr, &l);
        if (!pte) {
                ret = -EINVAL;
                goto out;
@@ -454,7 +455,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
        kmmio_count++;
        list_add_rcu(&p->list, &kmmio_probes);
        while (size < size_lim) {
-               if (add_kmmio_fault_page(p->addr + size))
+               if (add_kmmio_fault_page(addr + size))
                        pr_err("Unable to set page fault.\n");
                size += page_level_size(l);
        }
@@ -528,19 +529,20 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
 {
        unsigned long flags;
        unsigned long size = 0;
+       unsigned long addr = p->addr & PAGE_MASK;
        const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
        struct kmmio_fault_page *release_list = NULL;
        struct kmmio_delayed_release *drelease;
        unsigned int l;
        pte_t *pte;
 
-       pte = lookup_address(p->addr, &l);
+       pte = lookup_address(addr, &l);
        if (!pte)
                return;
 
        spin_lock_irqsave(&kmmio_lock, flags);
        while (size < size_lim) {
-               release_kmmio_fault_page(p->addr + size, &release_list);
+               release_kmmio_fault_page(addr + size, &release_list);
                size += page_level_size(l);
        }
        list_del_rcu(&p->list);
index a9967982684649155cfcdc921d5247c8fbfe70d6..155ecbac9e28f10c2f83cdbf48037a2f8f6a44fe 100644 (file)
@@ -33,6 +33,8 @@
 #include <linux/compat.h>
 #include <asm/elf.h>
 
+#include "physaddr.h"
+
 struct va_alignment __read_mostly va_align = {
        .flags = -1,
 };
@@ -174,3 +176,63 @@ const char *arch_vma_name(struct vm_area_struct *vma)
                return "[mpx]";
        return NULL;
 }
+
+/**
+ * mmap_address_hint_valid - Validate the address hint of mmap
+ * @addr:      Address hint
+ * @len:       Mapping length
+ *
+ * Check whether @addr and @addr + @len result in a valid mapping.
+ *
+ * On 32bit this only checks whether @addr + @len is <= TASK_SIZE.
+ *
+ * On 64bit with 5-level page tables another sanity check is required
+ * because mappings requested by mmap(@addr, 0) which cross the 47-bit
+ * virtual address boundary can cause the following theoretical issue:
+ *
+ *  An application calls mmap(addr, 0), i.e. without MAP_FIXED, where @addr
+ *  is below the border of the 47-bit address space and @addr + @len is
+ *  above the border.
+ *
+ *  With 4-level paging this request succeeds, but the resulting mapping
+ *  address will always be within the 47-bit virtual address space, because
+ *  the hint address does not result in a valid mapping and is
+ *  ignored. Hence applications which are not prepared to handle virtual
+ *  addresses above 47-bit work correctly.
+ *
+ *  With 5-level paging this request would be granted and result in a
+ *  mapping which crosses the border of the 47-bit virtual address
+ *  space. If the application cannot handle addresses above 47-bit this
+ *  will lead to misbehaviour and hard to diagnose failures.
+ *
+ * Therefore ignore address hints which would result in a mapping crossing
+ * the 47-bit virtual address boundary.
+ *
+ * Note that in the same scenario with MAP_FIXED the behaviour is
+ * different. The request with @addr < 47-bit and @addr + @len > 47-bit
+ * fails on a 4-level paging machine but succeeds on a 5-level paging
+ * machine. It is reasonable to expect that an application does not rely on
+ * the failure of such a fixed mapping request, so the restriction is not
+ * applied.
+ */
+bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
+{
+       if (TASK_SIZE - len < addr)
+               return false;
+
+       return (addr > DEFAULT_MAP_WINDOW) == (addr + len > DEFAULT_MAP_WINDOW);
+}
+
+/* Can we access it for direct reading/writing? Must be RAM: */
+int valid_phys_addr_range(phys_addr_t addr, size_t count)
+{
+       return addr + count <= __pa(high_memory);
+}
+
+/* Can we access it through mmap? Must be a valid physical address: */
+int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
+{
+       phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
+
+       return phys_addr_valid(addr + count - 1);
+}
index bb461cfd01abc78cdc45c6e69f013128e04ccdb4..526536c81ddc41d395fd971d909a3b687e46d989 100644 (file)
@@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void)
         * We should get host bridge information from ACPI unless the BIOS
         * doesn't support it.
         */
-       if (acpi_os_get_root_pointer())
+       if (!acpi_disabled && acpi_os_get_root_pointer())
                return 0;
 #endif
 
index 1e996df687a3bc47fb796c4fe8025bcb148a6890..e663d6bf1328ebe2327c990f9d19557a49a2124a 100644 (file)
@@ -665,6 +665,16 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
        unsigned i;
        u32 base, limit, high;
        struct resource *res, *conflict;
+       struct pci_dev *other;
+
+       /* Check that we are the only device of that type */
+       other = pci_get_device(dev->vendor, dev->device, NULL);
+       if (other != dev ||
+           (other = pci_get_device(dev->vendor, dev->device, other))) {
+               /* This is a multi-socket system, don't touch it for now */
+               pci_dev_put(other);
+               return;
+       }
 
        for (i = 0; i < 8; i++) {
                pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
@@ -696,8 +706,13 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
        res->end = 0xfd00000000ull - 1;
 
        /* Just grab the free area behind system memory for this */
-       while ((conflict = request_resource_conflict(&iomem_resource, res)))
+       while ((conflict = request_resource_conflict(&iomem_resource, res))) {
+               if (conflict->end >= res->end) {
+                       kfree(res);
+                       return;
+               }
                res->start = conflict->end + 1;
+       }
 
        dev_info(&dev->dev, "adding root bus resource %pR\n", res);
 
@@ -714,10 +729,10 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
 
        pci_bus_add_resource(dev->bus, res, 0);
 }
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
 
 #endif
index c34bd8233f7c81ddff649e970f79802b9ae7ef27..5f64f30873e257757091b88f8e263711d8db548f 100644 (file)
@@ -905,7 +905,7 @@ static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
 /*
  * UV NMI handler
  */
-int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
+static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 {
        struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
        int cpu = smp_processor_id();
@@ -1013,7 +1013,7 @@ void uv_nmi_init(void)
 }
 
 /* Setup HUB NMI info */
-void __init uv_nmi_setup_common(bool hubbed)
+static void __init uv_nmi_setup_common(bool hubbed)
 {
        int size = sizeof(void *) * (1 << NODES_SHIFT);
        int cpu;
index 84fcfde53f8f3f5bb4b85efc20ab106c419dcc11..a7d966964c6f20577c927cf5e618bc86b3331977 100644 (file)
@@ -82,12 +82,8 @@ static void __save_processor_state(struct saved_context *ctxt)
        /*
         * descriptor tables
         */
-#ifdef CONFIG_X86_32
        store_idt(&ctxt->idt);
-#else
-/* CONFIG_X86_64 */
-       store_idt((struct desc_ptr *)&ctxt->idt_limit);
-#endif
+
        /*
         * We save it here, but restore it only in the hibernate case.
         * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
@@ -103,22 +99,18 @@ static void __save_processor_state(struct saved_context *ctxt)
        /*
         * segment registers
         */
-#ifdef CONFIG_X86_32
-       savesegment(es, ctxt->es);
-       savesegment(fs, ctxt->fs);
+#ifdef CONFIG_X86_32_LAZY_GS
        savesegment(gs, ctxt->gs);
-       savesegment(ss, ctxt->ss);
-#else
-/* CONFIG_X86_64 */
-       asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
-       asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
-       asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
-       asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
-       asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
+#endif
+#ifdef CONFIG_X86_64
+       savesegment(gs, ctxt->gs);
+       savesegment(fs, ctxt->fs);
+       savesegment(ds, ctxt->ds);
+       savesegment(es, ctxt->es);
 
        rdmsrl(MSR_FS_BASE, ctxt->fs_base);
-       rdmsrl(MSR_GS_BASE, ctxt->gs_base);
-       rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+       rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+       rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
        mtrr_save_fixed_ranges(NULL);
 
        rdmsrl(MSR_EFER, ctxt->efer);
@@ -160,17 +152,19 @@ static void do_fpu_end(void)
 static void fix_processor_context(void)
 {
        int cpu = smp_processor_id();
-       struct tss_struct *t = &per_cpu(cpu_tss, cpu);
 #ifdef CONFIG_X86_64
        struct desc_struct *desc = get_cpu_gdt_rw(cpu);
        tss_desc tss;
 #endif
-       set_tss_desc(cpu, t);   /*
-                                * This just modifies memory; should not be
-                                * necessary. But... This is necessary, because
-                                * 386 hardware has concept of busy TSS or some
-                                * similar stupidity.
-                                */
+
+       /*
+        * We need to reload TR, which requires that we change the
+        * GDT entry to indicate "available" first.
+        *
+        * XXX: This could probably all be replaced by a call to
+        * force_reload_TR().
+        */
+       set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
 
 #ifdef CONFIG_X86_64
        memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
@@ -178,6 +172,9 @@ static void fix_processor_context(void)
        write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
 
        syscall_init();                         /* This sets MSR_*STAR and related */
+#else
+       if (boot_cpu_has(X86_FEATURE_SEP))
+               enable_sep_cpu();
 #endif
        load_TR_desc();                         /* This does ltr */
        load_mm_ldt(current->active_mm);        /* This does lldt */
@@ -190,9 +187,12 @@ static void fix_processor_context(void)
 }
 
 /**
- *     __restore_processor_state - restore the contents of CPU registers saved
- *             by __save_processor_state()
- *     @ctxt - structure to load the registers contents from
+ * __restore_processor_state - restore the contents of CPU registers saved
+ *                             by __save_processor_state()
+ * @ctxt - structure to load the registers contents from
+ *
+ * The asm code that gets us here will have restored a usable GDT, although
+ * it will be pointing to the wrong alias.
  */
 static void notrace __restore_processor_state(struct saved_context *ctxt)
 {
@@ -215,46 +215,52 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
        write_cr2(ctxt->cr2);
        write_cr0(ctxt->cr0);
 
+       /* Restore the IDT. */
+       load_idt(&ctxt->idt);
+
        /*
-        * now restore the descriptor tables to their proper values
-        * ltr is done i fix_processor_context().
+        * Just in case the asm code got us here with the SS, DS, or ES
+        * out of sync with the GDT, update them.
         */
-#ifdef CONFIG_X86_32
-       load_idt(&ctxt->idt);
+       loadsegment(ss, __KERNEL_DS);
+       loadsegment(ds, __USER_DS);
+       loadsegment(es, __USER_DS);
+
+       /*
+        * Restore percpu access.  Percpu access can happen in exception
+        * handlers or in complicated helpers like load_gs_index().
+        */
+#ifdef CONFIG_X86_64
+       wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
 #else
-/* CONFIG_X86_64 */
-       load_idt((const struct desc_ptr *)&ctxt->idt_limit);
+       loadsegment(fs, __KERNEL_PERCPU);
+       loadsegment(gs, __KERNEL_STACK_CANARY);
 #endif
 
+       /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
+       fix_processor_context();
+
        /*
-        * segment registers
+        * Now that we have descriptor tables fully restored and working
+        * exception handling, restore the usermode segments.
         */
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_64
+       loadsegment(ds, ctxt->es);
        loadsegment(es, ctxt->es);
        loadsegment(fs, ctxt->fs);
-       loadsegment(gs, ctxt->gs);
-       loadsegment(ss, ctxt->ss);
+       load_gs_index(ctxt->gs);
 
        /*
-        * sysenter MSRs
+        * Restore FSBASE and GSBASE after restoring the selectors, since
+        * restoring the selectors clobbers the bases.  Keep in mind
+        * that MSR_KERNEL_GS_BASE is horribly misnamed.
         */
-       if (boot_cpu_has(X86_FEATURE_SEP))
-               enable_sep_cpu();
-#else
-/* CONFIG_X86_64 */
-       asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
-       asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
-       asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
-       load_gs_index(ctxt->gs);
-       asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
-
        wrmsrl(MSR_FS_BASE, ctxt->fs_base);
-       wrmsrl(MSR_GS_BASE, ctxt->gs_base);
-       wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+       wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+#elif defined(CONFIG_X86_32_LAZY_GS)
+       loadsegment(gs, ctxt->gs);
 #endif
 
-       fix_processor_context();
-
        do_fpu_end();
        tsc_verify_tsc_adjust(true);
        x86_platform.restore_sched_clock_state();
index 6b830d4cb4c8e8e78c44dd87a12f642234533b4e..de58533d3664cdf067aedecfc81af2729c3b539b 100644 (file)
@@ -57,7 +57,7 @@ static u32 xen_apic_read(u32 reg)
                return 0;
 
        if (reg == APIC_LVR)
-               return 0x10;
+               return 0x14;
 #ifdef CONFIG_X86_32
        if (reg == APIC_LDR)
                return SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
index 5b2b3f3f653112fbe00484f5dcae0de02543df3c..7beeee1443b32a3fbcf3ba6ad57594d46c7a359f 100644 (file)
@@ -622,7 +622,7 @@ static struct trap_array_entry trap_array[] = {
        { simd_coprocessor_error,      xen_simd_coprocessor_error,      false },
 };
 
-static bool get_trap_addr(void **addr, unsigned int ist)
+static bool __ref get_trap_addr(void **addr, unsigned int ist)
 {
        unsigned int nr;
        bool ist_okay = false;
@@ -644,6 +644,14 @@ static bool get_trap_addr(void **addr, unsigned int ist)
                }
        }
 
+       if (nr == ARRAY_SIZE(trap_array) &&
+           *addr >= (void *)early_idt_handler_array[0] &&
+           *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) {
+               nr = (*addr - (void *)early_idt_handler_array[0]) /
+                    EARLY_IDT_HANDLER_SIZE;
+               *addr = (void *)xen_early_idt_handler_array[nr];
+       }
+
        if (WARN_ON(ist != 0 && !ist_okay))
                return false;
 
@@ -818,7 +826,7 @@ static void xen_load_sp0(unsigned long sp0)
        mcs = xen_mc_entry(0);
        MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
        xen_mc_issue(PARAVIRT_LAZY_CPU);
-       this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+       this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 void xen_set_iopl_mask(unsigned mask)
@@ -1262,6 +1270,21 @@ asmlinkage __visible void __init xen_start_kernel(void)
        xen_setup_gdt(0);
 
        xen_init_irq_ops();
+
+       /* Let's presume PV guests always boot on vCPU with id 0. */
+       per_cpu(xen_vcpu_id, 0) = 0;
+
+       /*
+        * Setup xen_vcpu early because idt_setup_early_handler needs it for
+        * local_irq_disable(), irqs_disabled().
+        *
+        * Don't do the full vcpu_info placement stuff until we have
+        * the cpu_possible_mask and a non-dummy shared_info.
+        */
+       xen_vcpu_info_reset(0);
+
+       idt_setup_early_handler();
+
        xen_init_capabilities();
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -1295,18 +1318,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
         */
        acpi_numa = -1;
 #endif
-       /* Let's presume PV guests always boot on vCPU with id 0. */
-       per_cpu(xen_vcpu_id, 0) = 0;
-
-       /*
-        * Setup xen_vcpu early because start_kernel needs it for
-        * local_irq_disable(), irqs_disabled().
-        *
-        * Don't do the full vcpu_info placement stuff until we have
-        * the cpu_possible_mask and a non-dummy shared_info.
-        */
-       xen_vcpu_info_reset(0);
-
        WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
 
        local_irq_disable();
index fc048ec686e7699b263254c79b482ccf935c21ef..6cf801ca11428fa5fd9c2d3c9931354f28575580 100644 (file)
@@ -2272,7 +2272,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
        case FIX_TEXT_POKE0:
        case FIX_TEXT_POKE1:
-       case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END:
+       case FIX_CPU_ENTRY_AREA_TOP ... FIX_CPU_ENTRY_AREA_BOTTOM:
                /* All local page mappings */
                pte = pfn_pte(phys, prot);
                break;
index 8a10c9a9e2b50651b2c8dd322956298402e79e7d..417b339e5c8e1aadedd20231c9be82ac93dbe728 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <xen/interface/xen.h>
 
+#include <linux/init.h>
 #include <linux/linkage.h>
 
 .macro xen_pv_trap name
@@ -54,6 +55,19 @@ xen_pv_trap entry_INT80_compat
 #endif
 xen_pv_trap hypervisor_callback
 
+       __INIT
+ENTRY(xen_early_idt_handler_array)
+       i = 0
+       .rept NUM_EXCEPTION_VECTORS
+       pop %rcx
+       pop %r11
+       jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
+       i = i + 1
+       .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+       .endr
+END(xen_early_idt_handler_array)
+       __FINIT
+
 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
 /*
  * Xen64 iret frame:
index a5bcdfb890f1b77fa8f5f2b349a05d7f759fb661..837d4dd7678545dec75f53c21bcf5e2dea500202 100644 (file)
@@ -2,6 +2,7 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
 generic-y += errno.h
 generic-y += fcntl.h
 generic-y += ioctl.h
index 228229f3bb76d2f9770b2191cee67328b6f9e983..9ef6cf3addb38cae822d0e5c5ef18ba9e98cd2d7 100644 (file)
@@ -599,6 +599,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
        bio->bi_disk = bio_src->bi_disk;
        bio->bi_partno = bio_src->bi_partno;
        bio_set_flag(bio, BIO_CLONED);
+       if (bio_flagged(bio_src, BIO_THROTTLED))
+               bio_set_flag(bio, BIO_THROTTLED);
        bio->bi_opf = bio_src->bi_opf;
        bio->bi_write_hint = bio_src->bi_write_hint;
        bio->bi_iter = bio_src->bi_iter;
@@ -1819,7 +1821,7 @@ EXPORT_SYMBOL(bio_endio);
 struct bio *bio_split(struct bio *bio, int sectors,
                      gfp_t gfp, struct bio_set *bs)
 {
-       struct bio *split = NULL;
+       struct bio *split;
 
        BUG_ON(sectors <= 0);
        BUG_ON(sectors >= bio_sectors(bio));
index 1038706edd87f3e142e6023da1c3a130bfaf1330..b8881750a3acd705789050c845c942673da999a8 100644 (file)
@@ -863,9 +863,9 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
        wake_up_all(&q->mq_freeze_wq);
 }
 
-static void blk_rq_timed_out_timer(unsigned long data)
+static void blk_rq_timed_out_timer(struct timer_list *t)
 {
-       struct request_queue *q = (struct request_queue *)data;
+       struct request_queue *q = from_timer(q, t, timeout);
 
        kblockd_schedule_work(&q->timeout_work);
 }
@@ -901,9 +901,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        q->backing_dev_info->name = "block";
        q->node = node_id;
 
-       setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
-                   laptop_mode_timer_fn, (unsigned long) q);
-       setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+       timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
+                   laptop_mode_timer_fn, 0);
+       timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
        INIT_WORK(&q->timeout_work, NULL);
        INIT_LIST_HEAD(&q->queue_head);
        INIT_LIST_HEAD(&q->timeout_list);
index b21f8e86f1207f9b76bf3e2083fcf72b5062f0b7..d3a94719f03fb2af81d6270d6fc9ed58f0dde373 100644 (file)
 #include "blk.h"
 
 /*
- * Append a bio to a passthrough request.  Only works can be merged into
- * the request based on the driver constraints.
+ * Append a bio to a passthrough request.  Only works if the bio can be merged
+ * into the request based on the driver constraints.
  */
-int blk_rq_append_bio(struct request *rq, struct bio *bio)
+int blk_rq_append_bio(struct request *rq, struct bio **bio)
 {
-       blk_queue_bounce(rq->q, &bio);
+       struct bio *orig_bio = *bio;
+
+       blk_queue_bounce(rq->q, bio);
 
        if (!rq->bio) {
-               blk_rq_bio_prep(rq->q, rq, bio);
+               blk_rq_bio_prep(rq->q, rq, *bio);
        } else {
-               if (!ll_back_merge_fn(rq->q, rq, bio))
+               if (!ll_back_merge_fn(rq->q, rq, *bio)) {
+                       if (orig_bio != *bio) {
+                               bio_put(*bio);
+                               *bio = orig_bio;
+                       }
                        return -EINVAL;
+               }
 
-               rq->biotail->bi_next = bio;
-               rq->biotail = bio;
-               rq->__data_len += bio->bi_iter.bi_size;
+               rq->biotail->bi_next = *bio;
+               rq->biotail = *bio;
+               rq->__data_len += (*bio)->bi_iter.bi_size;
        }
 
        return 0;
@@ -73,14 +80,12 @@ static int __blk_rq_map_user_iov(struct request *rq,
         * We link the bounce buffer in and could have to traverse it
         * later so we have to get a ref to prevent it from being freed
         */
-       ret = blk_rq_append_bio(rq, bio);
-       bio_get(bio);
+       ret = blk_rq_append_bio(rq, &bio);
        if (ret) {
-               bio_endio(bio);
                __blk_rq_unmap_user(orig_bio);
-               bio_put(bio);
                return ret;
        }
+       bio_get(bio);
 
        return 0;
 }
@@ -213,7 +218,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
-       struct bio *bio;
+       struct bio *bio, *orig_bio;
        int ret;
 
        if (len > (queue_max_hw_sectors(q) << 9))
@@ -236,10 +241,11 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        if (do_copy)
                rq->rq_flags |= RQF_COPY_USER;
 
-       ret = blk_rq_append_bio(rq, bio);
+       orig_bio = bio;
+       ret = blk_rq_append_bio(rq, &bio);
        if (unlikely(ret)) {
                /* request is too big */
-               bio_put(bio);
+               bio_put(orig_bio);
                return ret;
        }
 
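
With blk_rq_append_bio() now taking a struct bio **, a caller passes the address of its bio because the function may substitute a bounce bio, and drops its own reference on failure exactly as the blk_rq_map_kern() hunk above does. A hedged sketch of the calling convention (foo_append() and its arguments are illustrative only):

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Illustrative caller of the new interface. */
static int foo_append(struct request *rq, struct bio *bio)
{
	struct bio *orig_bio = bio;
	int ret;

	ret = blk_rq_append_bio(rq, &bio);	/* 'bio' may be replaced */
	if (ret) {
		/* On failure the original, unbounced bio is handed back. */
		bio_put(orig_bio);
		return ret;
	}
	return 0;
}
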
index 3a2f3c96f3672e102d0498848e6086d9ad7184f8..28003bf9941c701ce8d534bbd8c8faaf58066eb9 100644 (file)
@@ -79,9 +79,9 @@ void blk_stat_add(struct request *rq)
        rcu_read_unlock();
 }
 
-static void blk_stat_timer_fn(unsigned long data)
+static void blk_stat_timer_fn(struct timer_list *t)
 {
-       struct blk_stat_callback *cb = (void *)data;
+       struct blk_stat_callback *cb = from_timer(cb, t, timer);
        unsigned int bucket;
        int cpu;
 
@@ -130,7 +130,7 @@ blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
        cb->bucket_fn = bucket_fn;
        cb->data = data;
        cb->buckets = buckets;
-       setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);
+       timer_setup(&cb->timer, blk_stat_timer_fn, 0);
 
        return cb;
 }
index e54be402899daa18dc45ef35130d7e191d459bb0..870484eaed1f64b586ccb0fd4dd34c3f9462a29d 100644 (file)
@@ -450,12 +450,9 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                ret = wbt_init(q);
                if (ret)
                        return ret;
-
-               rwb = q->rq_wb;
-               if (!rwb)
-                       return -EINVAL;
        }
 
+       rwb = q->rq_wb;
        if (val == -1)
                rwb->min_lat_nsec = wbt_default_latency_nsec(q);
        else if (val >= 0)
index 96ad32623427d4794ad7563369bc9f89bb85fd26..d19f416d61012ac032c49608f0afe463c948e8bc 100644 (file)
@@ -225,7 +225,7 @@ struct throtl_data
        bool track_bio_latency;
 };
 
-static void throtl_pending_timer_fn(unsigned long arg);
+static void throtl_pending_timer_fn(struct timer_list *t);
 
 static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
 {
@@ -478,8 +478,7 @@ static void throtl_service_queue_init(struct throtl_service_queue *sq)
        INIT_LIST_HEAD(&sq->queued[0]);
        INIT_LIST_HEAD(&sq->queued[1]);
        sq->pending_tree = RB_ROOT;
-       setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
-                   (unsigned long)sq);
+       timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
 }
 
 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
@@ -1249,9 +1248,9 @@ static bool throtl_can_upgrade(struct throtl_data *td,
  * the top-level service_tree is reached, throtl_data->dispatch_work is
  * kicked so that the ready bio's are issued.
  */
-static void throtl_pending_timer_fn(unsigned long arg)
+static void throtl_pending_timer_fn(struct timer_list *t)
 {
-       struct throtl_service_queue *sq = (void *)arg;
+       struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
        struct throtl_grp *tg = sq_to_tg(sq);
        struct throtl_data *td = sq_to_td(sq);
        struct request_queue *q = td->queue;
@@ -2227,13 +2226,7 @@ again:
 out_unlock:
        spin_unlock_irq(q->queue_lock);
 out:
-       /*
-        * As multiple blk-throtls may stack in the same issue path, we
-        * don't want bios to leave with the flag set.  Clear the flag if
-        * being issued.
-        */
-       if (!throttled)
-               bio_clear_flag(bio, BIO_THROTTLED);
+       bio_set_flag(bio, BIO_THROTTLED);
 
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        if (throttled || !td->track_bio_latency)
index b252da0e4c11051f7c78be797122448e231a0bde..ae8de9780085ae7b8e99237ed16fc9cd02b233a5 100644 (file)
@@ -178,12 +178,11 @@ void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
 
                if (wbt_is_read(stat))
                        wb_timestamp(rwb, &rwb->last_comp);
-               wbt_clear_state(stat);
        } else {
                WARN_ON_ONCE(stat == rwb->sync_cookie);
                __wbt_done(rwb, wbt_stat_to_mask(stat));
-               wbt_clear_state(stat);
        }
+       wbt_clear_state(stat);
 }
 
 /*
@@ -482,7 +481,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 
        /*
         * At this point we know it's a buffered write. If this is
-        * kswapd trying to free memory, or REQ_SYNC is set, set, then
+        * kswapd trying to free memory, or REQ_SYNC is set, then
         * it's WB_SYNC_ALL writeback, and we'll use the max limit for
         * that. If the write is marked as a background write, then use
         * the idle limit, or go to normal if we haven't had competing
@@ -723,8 +722,6 @@ int wbt_init(struct request_queue *q)
                init_waitqueue_head(&rwb->rq_wait[i].wait);
        }
 
-       rwb->wc = 1;
-       rwb->queue_depth = RWB_DEF_DEPTH;
        rwb->last_comp = rwb->last_issue = jiffies;
        rwb->queue = q;
        rwb->win_nsec = RWB_WINDOW_NSEC;
index fceb1a96480bfb9600e4664fa2b4992c8bb64210..1d05c422c932ad56d705f94deed6cce0891ff9d3 100644 (file)
@@ -200,6 +200,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
        unsigned i = 0;
        bool bounce = false;
        int sectors = 0;
+       bool passthrough = bio_is_passthrough(*bio_orig);
 
        bio_for_each_segment(from, *bio_orig, iter) {
                if (i++ < BIO_MAX_PAGES)
@@ -210,13 +211,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
        if (!bounce)
                return;
 
-       if (sectors < bio_sectors(*bio_orig)) {
+       if (!passthrough && sectors < bio_sectors(*bio_orig)) {
                bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
                bio_chain(bio, *bio_orig);
                generic_make_request(*bio_orig);
                *bio_orig = bio;
        }
-       bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set);
+       bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
+                       bounce_bio_set);
 
        bio_for_each_segment_all(to, bio, i) {
                struct page *page = to->bv_page;
index c2223f12a8051411d4e89a0bc2850e03d7d0427e..96a66f67172045d571be9fe18248dbefc99766ef 100644 (file)
@@ -671,10 +671,13 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
                disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
                disk->flags |= GENHD_FL_NO_PART_SCAN;
        } else {
+               int ret;
+
                /* Register BDI before referencing it from bdev */
                disk_to_dev(disk)->devt = devt;
-               bdi_register_owner(disk->queue->backing_dev_info,
-                               disk_to_dev(disk));
+               ret = bdi_register_owner(disk->queue->backing_dev_info,
+                                               disk_to_dev(disk));
+               WARN_ON(ret);
                blk_register_region(disk_devt(disk), disk->minors, NULL,
                                    exact_match, exact_lock, disk);
        }
@@ -1389,7 +1392,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
 
        if (minors > DISK_MAX_PARTS) {
                printk(KERN_ERR
-                       "block: can't allocated more than %d partitions\n",
+                       "block: can't allocate more than %d partitions\n",
                        DISK_MAX_PARTS);
                minors = DISK_MAX_PARTS;
        }
index b4df317c291692f01138b91608dc6c80f71bb9aa..f95c60774ce8ca613417d3ccf54bee52010752ee 100644 (file)
@@ -100,9 +100,13 @@ struct kyber_hctx_data {
        unsigned int cur_domain;
        unsigned int batching;
        wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
+       struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
        atomic_t wait_index[KYBER_NUM_DOMAINS];
 };
 
+static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
+                            void *key);
+
 static int rq_sched_domain(const struct request *rq)
 {
        unsigned int op = rq->cmd_flags;
@@ -385,6 +389,9 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 
        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                INIT_LIST_HEAD(&khd->rqs[i]);
+               init_waitqueue_func_entry(&khd->domain_wait[i],
+                                         kyber_domain_wake);
+               khd->domain_wait[i].private = hctx;
                INIT_LIST_HEAD(&khd->domain_wait[i].entry);
                atomic_set(&khd->wait_index[i], 0);
        }
@@ -524,35 +531,39 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
        int nr;
 
        nr = __sbitmap_queue_get(domain_tokens);
-       if (nr >= 0)
-               return nr;
 
        /*
         * If we failed to get a domain token, make sure the hardware queue is
         * run when one becomes available. Note that this is serialized on
         * khd->lock, but we still need to be careful about the waker.
         */
-       if (list_empty_careful(&wait->entry)) {
-               init_waitqueue_func_entry(wait, kyber_domain_wake);
-               wait->private = hctx;
+       if (nr < 0 && list_empty_careful(&wait->entry)) {
                ws = sbq_wait_ptr(domain_tokens,
                                  &khd->wait_index[sched_domain]);
+               khd->domain_ws[sched_domain] = ws;
                add_wait_queue(&ws->wait, wait);
 
                /*
                 * Try again in case a token was freed before we got on the wait
-                * queue. The waker may have already removed the entry from the
-                * wait queue, but list_del_init() is okay with that.
+                * queue.
                 */
                nr = __sbitmap_queue_get(domain_tokens);
-               if (nr >= 0) {
-                       unsigned long flags;
+       }
 
-                       spin_lock_irqsave(&ws->wait.lock, flags);
-                       list_del_init(&wait->entry);
-                       spin_unlock_irqrestore(&ws->wait.lock, flags);
-               }
+       /*
+        * If we got a token while we were on the wait queue, remove ourselves
+        * from the wait queue to ensure that all wake ups make forward
+        * progress. It's possible that the waker already deleted the entry
+        * between the !list_empty_careful() check and us grabbing the lock, but
+        * list_del_init() is okay with that.
+        */
+       if (nr >= 0 && !list_empty_careful(&wait->entry)) {
+               ws = khd->domain_ws[sched_domain];
+               spin_lock_irq(&ws->wait.lock);
+               list_del_init(&wait->entry);
+               spin_unlock_irq(&ws->wait.lock);
        }
+
        return nr;
 }
 
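
The key points of the kyber fix are that the wait entry's wake function is set once at init time and that the queue head the entry was added to is remembered in khd->domain_ws, so a later removal can take the matching lock. The removal step in isolation (foo_finish_wait() is a hypothetical helper):

#include <linux/wait.h>
#include <linux/spinlock.h>

/* Remove a wait entry under the lock of the queue it was added to.
 * list_del_init() tolerates the waker having removed it already. */
static void foo_finish_wait(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (!list_empty_careful(&wait->entry)) {
		spin_lock_irq(&wq->lock);
		list_del_init(&wait->entry);
		spin_unlock_irq(&wq->lock);
	}
}
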
index 85cea9de324a4f577bcbfb273d8f3a8917ca60e6..415a54ced4d6a490ae1e09170c8b80ef3eef135e 100644 (file)
@@ -672,14 +672,15 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
        }
 
        tsgl = areq->tsgl;
-       for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
-               if (!sg_page(sg))
-                       continue;
-               put_page(sg_page(sg));
-       }
+       if (tsgl) {
+               for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
+                       if (!sg_page(sg))
+                               continue;
+                       put_page(sg_page(sg));
+               }
 
-       if (areq->tsgl && areq->tsgl_entries)
                sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
+       }
 }
 EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);
 
@@ -1020,6 +1021,18 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(af_alg_sendpage);
 
+/**
+ * af_alg_free_resources - release resources required for crypto request
+ */
+void af_alg_free_resources(struct af_alg_async_req *areq)
+{
+       struct sock *sk = areq->sk;
+
+       af_alg_free_areq_sgls(areq);
+       sock_kfree_s(sk, areq, areq->areqlen);
+}
+EXPORT_SYMBOL_GPL(af_alg_free_resources);
+
 /**
  * af_alg_async_cb - AIO callback handler
  *
@@ -1036,18 +1049,13 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
        struct kiocb *iocb = areq->iocb;
        unsigned int resultlen;
 
-       lock_sock(sk);
-
        /* Buffer size written by crypto operation. */
        resultlen = areq->outlen;
 
-       af_alg_free_areq_sgls(areq);
-       sock_kfree_s(sk, areq, areq->areqlen);
-       __sock_put(sk);
+       af_alg_free_resources(areq);
+       sock_put(sk);
 
        iocb->ki_complete(iocb, err ? err : resultlen, 0);
-
-       release_sock(sk);
 }
 EXPORT_SYMBOL_GPL(af_alg_async_cb);
 
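
The af_alg change replaces lock_sock()/__sock_put() in the AIO callback with a plain sock_put() of a reference the submitter takes with sock_hold() before queueing the request, and funnels the SGL and request cleanup through the new af_alg_free_resources(). A reduced sketch of the callback side, with a hypothetical foo_areq standing in for struct af_alg_async_req:

#include <net/sock.h>
#include <linux/fs.h>
#include <linux/crypto.h>

/* Hypothetical per-request state, mirroring the fields used here. */
struct foo_areq {
	struct sock *sk;
	struct kiocb *iocb;
	unsigned int outlen;
};

/* AIO completion: no socket lock, just drop the reference taken with
 * sock_hold() at submission time and complete the iocb. */
static void foo_async_cb(struct crypto_async_request *req, int err)
{
	struct foo_areq *areq = req->data;

	sock_put(areq->sk);			/* pairs with sock_hold() */
	areq->iocb->ki_complete(areq->iocb, err ? err : areq->outlen, 0);
}
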
index aacae0837aff73507165194a43d75d159d14fbdb..48b34e9c68342c55610ad83900557dc1c785af41 100644 (file)
@@ -101,10 +101,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
        struct aead_tfm *aeadc = pask->private;
        struct crypto_aead *tfm = aeadc->aead;
        struct crypto_skcipher *null_tfm = aeadc->null_tfm;
-       unsigned int as = crypto_aead_authsize(tfm);
+       unsigned int i, as = crypto_aead_authsize(tfm);
        struct af_alg_async_req *areq;
-       struct af_alg_tsgl *tsgl;
-       struct scatterlist *src;
+       struct af_alg_tsgl *tsgl, *tmp;
+       struct scatterlist *rsgl_src, *tsgl_src = NULL;
        int err = 0;
        size_t used = 0;                /* [in]  TX bufs to be en/decrypted */
        size_t outlen = 0;              /* [out] RX bufs produced by kernel */
@@ -178,7 +178,22 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
        }
 
        processed = used + ctx->aead_assoclen;
-       tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list);
+       list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
+               for (i = 0; i < tsgl->cur; i++) {
+                       struct scatterlist *process_sg = tsgl->sg + i;
+
+                       if (!(process_sg->length) || !sg_page(process_sg))
+                               continue;
+                       tsgl_src = process_sg;
+                       break;
+               }
+               if (tsgl_src)
+                       break;
+       }
+       if (processed && !tsgl_src) {
+               err = -EFAULT;
+               goto free;
+       }
 
        /*
         * Copy of AAD from source to destination
@@ -194,7 +209,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
         */
 
        /* Use the RX SGL as source (and destination) for crypto op. */
-       src = areq->first_rsgl.sgl.sg;
+       rsgl_src = areq->first_rsgl.sgl.sg;
 
        if (ctx->enc) {
                /*
@@ -207,7 +222,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                 *          v      v
                 * RX SGL: AAD || PT || Tag
                 */
-               err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+               err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
                                           areq->first_rsgl.sgl.sg, processed);
                if (err)
                        goto free;
@@ -225,7 +240,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                 */
 
                 /* Copy AAD || CT to RX SGL buffer for in-place operation. */
-               err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+               err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
                                           areq->first_rsgl.sgl.sg, outlen);
                if (err)
                        goto free;
@@ -257,23 +272,34 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                                 areq->tsgl);
                } else
                        /* no RX SGL present (e.g. authentication only) */
-                       src = areq->tsgl;
+                       rsgl_src = areq->tsgl;
        }
 
        /* Initialize the crypto operation */
-       aead_request_set_crypt(&areq->cra_u.aead_req, src,
+       aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
                               areq->first_rsgl.sgl.sg, used, ctx->iv);
        aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
        aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
 
        if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
                /* AIO operation */
+               sock_hold(sk);
                areq->iocb = msg->msg_iocb;
                aead_request_set_callback(&areq->cra_u.aead_req,
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          af_alg_async_cb, areq);
                err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
                                 crypto_aead_decrypt(&areq->cra_u.aead_req);
+
+               /* AIO operation in progress */
+               if (err == -EINPROGRESS || err == -EBUSY) {
+                       /* Remember output size that will be generated. */
+                       areq->outlen = outlen;
+
+                       return -EIOCBQUEUED;
+               }
+
+               sock_put(sk);
        } else {
                /* Synchronous operation */
                aead_request_set_callback(&areq->cra_u.aead_req,
@@ -285,19 +311,9 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                                &ctx->wait);
        }
 
-       /* AIO operation in progress */
-       if (err == -EINPROGRESS) {
-               sock_hold(sk);
-
-               /* Remember output size that will be generated. */
-               areq->outlen = outlen;
-
-               return -EIOCBQUEUED;
-       }
 
 free:
-       af_alg_free_areq_sgls(areq);
-       sock_kfree_s(sk, areq, areq->areqlen);
+       af_alg_free_resources(areq);
 
        return err ? err : outlen;
 }
@@ -487,6 +503,7 @@ static void aead_release(void *private)
        struct aead_tfm *tfm = private;
 
        crypto_free_aead(tfm->aead);
+       crypto_put_default_null_skcipher2();
        kfree(tfm);
 }
 
@@ -519,7 +536,6 @@ static void aead_sock_destruct(struct sock *sk)
        unsigned int ivlen = crypto_aead_ivsize(tfm);
 
        af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
-       crypto_put_default_null_skcipher2();
        sock_kzfree_s(sk, ctx->iv, ivlen);
        sock_kfree_s(sk, ctx, ctx->len);
        af_alg_release_parent(sk);
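
Both algif_aead and algif_skcipher now treat -EBUSY from the asynchronous submit the same way as -EINPROGRESS: the request is considered still in flight, the socket reference is kept, and -EIOCBQUEUED is returned to the AIO caller. A tiny helper expressing that check (foo_request_queued() is illustrative only):

#include <linux/errno.h>
#include <linux/types.h>

/* True if an async crypto submit left the request in flight, i.e. the
 * completion callback will run later and owns the cleanup. */
static inline bool foo_request_queued(int err)
{
	return err == -EINPROGRESS || err == -EBUSY;
}
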
index 9954b078f0b9cc7ccd853650f85920e29f159973..30cff827dd8fff048fa3e2ca7de770ab73022749 100644 (file)
@@ -117,6 +117,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 
        if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
                /* AIO operation */
+               sock_hold(sk);
                areq->iocb = msg->msg_iocb;
                skcipher_request_set_callback(&areq->cra_u.skcipher_req,
                                              CRYPTO_TFM_REQ_MAY_SLEEP,
@@ -124,6 +125,16 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
                err = ctx->enc ?
                        crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
                        crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
+
+               /* AIO operation in progress */
+               if (err == -EINPROGRESS || err == -EBUSY) {
+                       /* Remember output size that will be generated. */
+                       areq->outlen = len;
+
+                       return -EIOCBQUEUED;
+               }
+
+               sock_put(sk);
        } else {
                /* Synchronous operation */
                skcipher_request_set_callback(&areq->cra_u.skcipher_req,
@@ -136,19 +147,9 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
                                                 &ctx->wait);
        }
 
-       /* AIO operation in progress */
-       if (err == -EINPROGRESS) {
-               sock_hold(sk);
-
-               /* Remember output size that will be generated. */
-               areq->outlen = len;
-
-               return -EIOCBQUEUED;
-       }
 
 free:
-       af_alg_free_areq_sgls(areq);
-       sock_kfree_s(sk, areq, areq->areqlen);
+       af_alg_free_resources(areq);
 
        return err ? err : len;
 }
index c1ca1e86f5c4f86d1110343aa5194e2c3147f4d5..a6dcaa659aa8c162e2593df43c52259a0f2900c4 100644 (file)
@@ -148,8 +148,10 @@ struct pkcs7_message *pkcs7_parse_message(const void *data, size_t datalen)
        }
 
        ret = pkcs7_check_authattrs(ctx->msg);
-       if (ret < 0)
+       if (ret < 0) {
+               msg = ERR_PTR(ret);
                goto out;
+       }
 
        msg = ctx->msg;
        ctx->msg = NULL;
index f6a009d88a33fb550654b11d2cfc4460c733a049..1f4e25f10049c2645c2a421b07bb21fdaf1c0857 100644 (file)
@@ -69,7 +69,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
                 /* Self-signed certificates form roots of their own, and if we
                  * don't know them, then we can't accept them.
                  */
-               if (x509->next == x509) {
+               if (x509->signer == x509) {
                        kleave(" = -ENOKEY [unknown self-signed]");
                        return -ENOKEY;
                }
index 2d93d9eccb4d0c46d7b93306f50f590709d1b21f..39e6de0c2761fb03efab8f144fa1d1693e7cb035 100644 (file)
@@ -59,11 +59,8 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
        /* Digest the message [RFC2315 9.3] */
-       ret = crypto_shash_init(desc);
-       if (ret < 0)
-               goto error;
-       ret = crypto_shash_finup(desc, pkcs7->data, pkcs7->data_len,
-                                sig->digest);
+       ret = crypto_shash_digest(desc, pkcs7->data, pkcs7->data_len,
+                                 sig->digest);
        if (ret < 0)
                goto error;
        pr_devel("MsgDigest = [%*ph]\n", 8, sig->digest);
@@ -150,7 +147,7 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7,
                pr_devel("Sig %u: Found cert serial match X.509[%u]\n",
                         sinfo->index, certix);
 
-               if (x509->pub->pkey_algo != sinfo->sig->pkey_algo) {
+               if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) {
                        pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n",
                                sinfo->index);
                        continue;
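
The digest path above now uses the one-shot crypto_shash_digest() in place of a crypto_shash_init()/crypto_shash_finup() pair, which is equivalent for a single contiguous buffer, and the pkey_algo fields are compared with strcmp() now that they are C strings. A minimal sketch of the one-shot form (foo_digest() and its parameters are assumptions):

#include <crypto/hash.h>

/* Hash 'len' bytes at 'data' into 'out' in one call. */
static int foo_digest(struct crypto_shash *tfm, const void *data,
		      unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int ret;

	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = crypto_shash_digest(desc, data, len, out);
	shash_desc_zero(desc);
	return ret;
}
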
index bc3035ef27a22b3ca593532dedc8020bb2773a2a..de996586762a83c0b3214aaf5fa561bde92f65c0 100644 (file)
@@ -73,7 +73,7 @@ int public_key_verify_signature(const struct public_key *pkey,
        char alg_name_buf[CRYPTO_MAX_ALG_NAME];
        void *output;
        unsigned int outlen;
-       int ret = -ENOMEM;
+       int ret;
 
        pr_devel("==>%s()\n", __func__);
 
@@ -99,6 +99,7 @@ int public_key_verify_signature(const struct public_key *pkey,
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
+       ret = -ENOMEM;
        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                goto error_free_tfm;
@@ -127,7 +128,7 @@ int public_key_verify_signature(const struct public_key *pkey,
         * signature and returns that to us.
         */
        ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
-       if (ret < 0)
+       if (ret)
                goto out_free_output;
 
        /* Do the actual verification step. */
@@ -142,6 +143,8 @@ error_free_req:
 error_free_tfm:
        crypto_free_akcipher(tfm);
        pr_devel("<==%s() = %d\n", __func__, ret);
+       if (WARN_ON_ONCE(ret > 0))
+               ret = -EINVAL;
        return ret;
 }
 EXPORT_SYMBOL_GPL(public_key_verify_signature);
index dd03fead1ca358fc4f21cdb0c5c65b9896a39b38..ce2df8c9c583970be0177d2a50de85316fb1c2b2 100644 (file)
@@ -409,6 +409,8 @@ int x509_extract_key_data(void *context, size_t hdrlen,
        ctx->cert->pub->pkey_algo = "rsa";
 
        /* Discard the BIT STRING metadata */
+       if (vlen < 1 || *(const u8 *)value != 0)
+               return -EBADMSG;
        ctx->key = value + 1;
        ctx->key_size = vlen - 1;
        return 0;
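
The added check enforces the DER rule that the first octet of a BIT STRING encodes the number of unused trailing bits, which must be zero for a key blob, and verifies that the octet is actually present before it is skipped. The same validation in isolation (foo_extract_bit_string() is a hypothetical helper):

#include <linux/errno.h>
#include <linux/types.h>

/* Strip the unused-bits octet from a DER BIT STRING payload. */
static int foo_extract_bit_string(const u8 *value, size_t vlen,
				  const u8 **payload, size_t *payload_len)
{
	if (vlen < 1 || value[0] != 0)
		return -EBADMSG;
	*payload = value + 1;
	*payload_len = vlen - 1;
	return 0;
}
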
index c9013582c026748a10b09eeb09d5c71f0571ea7a..9338b4558cdc52b70a86c820ef2d39186bf1e3c3 100644 (file)
@@ -79,11 +79,7 @@ int x509_get_sig_params(struct x509_certificate *cert)
        desc->tfm = tfm;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
-       ret = crypto_shash_init(desc);
-       if (ret < 0)
-               goto error_2;
-       might_sleep();
-       ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, sig->digest);
+       ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest);
        if (ret < 0)
                goto error_2;
 
@@ -135,7 +131,7 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
        }
 
        ret = -EKEYREJECTED;
-       if (cert->pub->pkey_algo != cert->sig->pkey_algo)
+       if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0)
                goto out;
 
        ret = public_key_verify_signature(cert->pub, cert->sig);
index 92871dc2a63ec66ca628df3a44b025b7ef6f247e..e74730224f0a5f6346bb8ae7b80f3ed5e6cb6281 100644 (file)
@@ -195,11 +195,15 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
        salg = shash_attr_alg(tb[1], 0, 0);
        if (IS_ERR(salg))
                return PTR_ERR(salg);
+       alg = &salg->base;
 
+       /* The underlying hash algorithm must be unkeyed */
        err = -EINVAL;
+       if (crypto_shash_alg_has_setkey(salg))
+               goto out_put_alg;
+
        ds = salg->digestsize;
        ss = salg->statesize;
-       alg = &salg->base;
        if (ds > alg->cra_blocksize ||
            ss < alg->cra_blocksize)
                goto out_put_alg;
index 0b66dc8246068aa084dd0b44210b04dee5f2bccb..cad395d70d78e18527866bf1a3f6452c338e4c7c 100644 (file)
@@ -30,7 +30,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
                return -EINVAL;
 
        if (fips_enabled) {
-               while (!*ptr && n_sz) {
+               while (n_sz && !*ptr) {
                        ptr++;
                        n_sz--;
                }
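
The reordering matters because && short-circuits: testing n_sz first guarantees *ptr is never read once the buffer is exhausted, whereas the old order dereferenced ptr one byte past the end when every byte was zero. The same shape as a standalone helper (strip_leading_zeros() is illustrative):

#include <stddef.h>

/* Skip leading zero bytes; the length test must come first so the byte
 * is only read while n_sz is still non-zero. */
static const unsigned char *strip_leading_zeros(const unsigned char *ptr,
						size_t *n_sz)
{
	while (*n_sz && !*ptr) {
		ptr++;
		(*n_sz)--;
	}
	return ptr;
}
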
index f550b5d9463074b16670129341de59e069f8509c..d7da0eea5622af96f63300ad45c35450951551e1 100644 (file)
@@ -188,13 +188,6 @@ static int encrypt(struct blkcipher_desc *desc,
 
        salsa20_ivsetup(ctx, walk.iv);
 
-       if (likely(walk.nbytes == nbytes))
-       {
-               salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
-                                     walk.src.virt.addr, nbytes);
-               return blkcipher_walk_done(desc, &walk, 0);
-       }
-
        while (walk.nbytes >= 64) {
                salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
                                      walk.src.virt.addr,
index 325a14da58278f01b8c1ffd92bdd8990db2860c4..e849d3ee2e2728d346df1f21f6a8d4db57fc42c5 100644 (file)
 
 static const struct crypto_type crypto_shash_type;
 
-static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
-                          unsigned int keylen)
+int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+                   unsigned int keylen)
 {
        return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(shash_no_setkey);
 
 static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
                                  unsigned int keylen)
index d5692e35fab1f069376f7c54358ff5e5f0cb352e..778e0ff42bfa801eda5be848da9e6747ebbc2626 100644 (file)
@@ -522,6 +522,9 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
        scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
        scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
 
+       scatterwalk_done(&walk->in, 0, walk->total);
+       scatterwalk_done(&walk->out, 0, walk->total);
+
        walk->iv = req->iv;
        walk->oiv = req->iv;
 
index 1d034b6804310a1d740b4e8aa40c649922748d77..e06f7f633f73ff285c64e19b89041d17e2be23ce 100644 (file)
@@ -105,6 +105,7 @@ obj-$(CONFIG_TC)            += tc/
 obj-$(CONFIG_UWB)              += uwb/
 obj-$(CONFIG_USB_PHY)          += usb/
 obj-$(CONFIG_USB)              += usb/
+obj-$(CONFIG_USB_SUPPORT)      += usb/
 obj-$(CONFIG_PCI)              += usb/
 obj-$(CONFIG_USB_GADGET)       += usb/
 obj-$(CONFIG_OF)               += usb/
index 6742f6c68034c5e833505d294902dd97c274c1b0..9bff853e85f37831d8d053a2aa363f139537c9b5 100644 (file)
@@ -1007,7 +1007,7 @@ skip:
        /* The record may be cleared by others, try read next record */
        if (len == -ENOENT)
                goto skip;
-       else if (len < sizeof(*rcd)) {
+       else if (len < 0 || len < sizeof(*rcd)) {
                rc = -EIO;
                goto out;
        }
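
The extra len < 0 test is needed because sizeof(*rcd) is unsigned: in the old comparison a negative ssize_t was converted to a huge unsigned value, so read errors slipped past the length check. A reduced illustration of the corrected check (record_len_invalid() is hypothetical):

#include <linux/types.h>

/* Reject errors and short records; the signed test must be explicit,
 * otherwise a negative 'len' is converted to unsigned for the size
 * comparison and compares as a huge positive value. */
static bool record_len_invalid(ssize_t len, size_t record_size)
{
	return len < 0 || (size_t)len < record_size;
}
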
index 21c28433c590a4aec323bdcea19a0e1426c751e4..06ea4749ebd9826a3d7b8b0a9798a1cc797f4d61 100644 (file)
@@ -949,7 +949,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
        }
 
        *val = 0;
-       if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
+       if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                vaddr = reg_res->sys_mem_vaddr;
@@ -988,7 +988,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
        struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 
-       if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
+       if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                vaddr = reg_res->sys_mem_vaddr;
@@ -1035,14 +1035,15 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
                *lowest_non_linear_reg, *nominal_reg;
        u64 high, low, nom, min_nonlinear;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
-       struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+       struct cppc_pcc_data *pcc_ss_data;
        int ret = 0, regs_in_pcc = 0;
 
-       if (!cpc_desc) {
+       if (!cpc_desc || pcc_ss_id < 0) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
                return -ENODEV;
        }
 
+       pcc_ss_data = pcc_data[pcc_ss_id];
        highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
        lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
        lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
@@ -1095,15 +1096,16 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
        struct cpc_register_resource *delivered_reg, *reference_reg,
                *ref_perf_reg, *ctr_wrap_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
-       struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+       struct cppc_pcc_data *pcc_ss_data;
        u64 delivered, reference, ref_perf, ctr_wrap_time;
        int ret = 0, regs_in_pcc = 0;
 
-       if (!cpc_desc) {
+       if (!cpc_desc || pcc_ss_id < 0) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
                return -ENODEV;
        }
 
+       pcc_ss_data = pcc_data[pcc_ss_id];
        delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
        reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
        ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
@@ -1169,14 +1171,15 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
        struct cpc_register_resource *desired_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
-       struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+       struct cppc_pcc_data *pcc_ss_data;
        int ret = 0;
 
-       if (!cpc_desc) {
+       if (!cpc_desc || pcc_ss_id < 0) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpu);
                return -ENODEV;
        }
 
+       pcc_ss_data = pcc_data[pcc_ss_id];
        desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
 
        /*
@@ -1301,7 +1304,7 @@ unsigned int cppc_get_transition_latency(int cpu_num)
        struct cpc_desc *cpc_desc;
        struct cpc_register_resource *desired_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
-       struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+       struct cppc_pcc_data *pcc_ss_data;
 
        cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
        if (!cpc_desc)
@@ -1311,6 +1314,10 @@ unsigned int cppc_get_transition_latency(int cpu_num)
        if (!CPC_IN_PCC(desired_reg))
                return CPUFREQ_ETERNAL;
 
+       if (pcc_ss_id < 0)
+               return CPUFREQ_ETERNAL;
+
+       pcc_ss_data = pcc_data[pcc_ss_id];
        if (pcc_ss_data->pcc_mpar)
                latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
 
index e4ffaeec9ec204110f7e50c5cff49172a486f081..a4c8ad98560dc4a3a4e21bd1bbb825bd73d237b7 100644 (file)
@@ -1138,7 +1138,7 @@ int acpi_subsys_thaw_noirq(struct device *dev)
         * skip all of the subsequent "thaw" callbacks for the device.
         */
        if (dev_pm_smart_suspend_and_suspended(dev)) {
-               dev->power.direct_complete = true;
+               dev_pm_skip_next_resume_phases(dev);
                return 0;
        }
 
index 24418932612eeabc62d72185df50980ed55ee5bd..a041689e5701d7e1ed2a2677a30982827ee2276a 100644 (file)
@@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
        int count;
        struct acpi_hardware_id *id;
 
+       /* Avoid unnecessarily loading modules for non present devices. */
+       if (!acpi_device_is_present(acpi_dev))
+               return 0;
+
        /*
         * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
         * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
index da176c95aa2cb3a4fd89e7d0b100d0ff7d161405..0252c9b9af3d3a099ff955b9deea730bf270a098 100644 (file)
@@ -1597,32 +1597,41 @@ static int acpi_ec_add(struct acpi_device *device)
 {
        struct acpi_ec *ec = NULL;
        int ret;
+       bool is_ecdt = false;
+       acpi_status status;
 
        strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_EC_CLASS);
 
-       ec = acpi_ec_alloc();
-       if (!ec)
-               return -ENOMEM;
-       if (ec_parse_device(device->handle, 0, ec, NULL) !=
-               AE_CTRL_TERMINATE) {
+       if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
+               is_ecdt = true;
+               ec = boot_ec;
+       } else {
+               ec = acpi_ec_alloc();
+               if (!ec)
+                       return -ENOMEM;
+               status = ec_parse_device(device->handle, 0, ec, NULL);
+               if (status != AE_CTRL_TERMINATE) {
                        ret = -EINVAL;
                        goto err_alloc;
+               }
        }
 
        if (acpi_is_boot_ec(ec)) {
-               boot_ec_is_ecdt = false;
-               /*
-                * Trust PNP0C09 namespace location rather than ECDT ID.
-                *
-                * But trust ECDT GPE rather than _GPE because of ASUS quirks,
-                * so do not change boot_ec->gpe to ec->gpe.
-                */
-               boot_ec->handle = ec->handle;
-               acpi_handle_debug(ec->handle, "duplicated.\n");
-               acpi_ec_free(ec);
-               ec = boot_ec;
-               ret = acpi_config_boot_ec(ec, ec->handle, true, false);
+               boot_ec_is_ecdt = is_ecdt;
+               if (!is_ecdt) {
+                       /*
+                        * Trust PNP0C09 namespace location rather than
+                        * ECDT ID. But trust ECDT GPE rather than _GPE
+                        * because of ASUS quirks, so do not change
+                        * boot_ec->gpe to ec->gpe.
+                        */
+                       boot_ec->handle = ec->handle;
+                       acpi_handle_debug(ec->handle, "duplicated.\n");
+                       acpi_ec_free(ec);
+                       ec = boot_ec;
+               }
+               ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt);
        } else
                ret = acpi_ec_setup(ec, true);
        if (ret)
@@ -1635,8 +1644,10 @@ static int acpi_ec_add(struct acpi_device *device)
        ret = !!request_region(ec->command_addr, 1, "EC cmd");
        WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
 
-       /* Reprobe devices depending on the EC */
-       acpi_walk_dep_device_list(ec->handle);
+       if (!is_ecdt) {
+               /* Reprobe devices depending on the EC */
+               acpi_walk_dep_device_list(ec->handle);
+       }
        acpi_handle_debug(ec->handle, "enumerated.\n");
        return 0;
 
@@ -1692,6 +1703,7 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
 
 static const struct acpi_device_id ec_device_ids[] = {
        {"PNP0C09", 0},
+       {ACPI_ECDT_HID, 0},
        {"", 0},
 };
 
@@ -1764,11 +1776,14 @@ static int __init acpi_ec_ecdt_start(void)
         * Note: ec->handle can be valid if this function is called after
         * acpi_ec_add(), hence the fast path.
         */
-       if (boot_ec->handle != ACPI_ROOT_OBJECT)
-               handle = boot_ec->handle;
-       else if (!acpi_ec_ecdt_get_handle(&handle))
-               return -ENODEV;
-       return acpi_config_boot_ec(boot_ec, handle, true, true);
+       if (boot_ec->handle == ACPI_ROOT_OBJECT) {
+               if (!acpi_ec_ecdt_get_handle(&handle))
+                       return -ENODEV;
+               boot_ec->handle = handle;
+       }
+
+       /* Register to ACPI bus with PM ops attached */
+       return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
 }
 
 #if 0
@@ -2022,6 +2037,12 @@ int __init acpi_ec_init(void)
 
        /* Drivers must be started after acpi_ec_query_init() */
        dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+       /*
+        * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is
+        * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT
+        * settings but invalid DSDT settings.
+        * https://bugzilla.kernel.org/show_bug.cgi?id=196847
+        */
        ecdt_fail = acpi_ec_ecdt_start();
        return ecdt_fail && dsdt_fail ? -ENODEV : 0;
 }
index fc8c43e767074c177c56c1c97adc6a3a077d5336..7f43423de43cebdd0788f2a9d75eed24722bc1a7 100644 (file)
@@ -115,6 +115,7 @@ bool acpi_device_is_present(const struct acpi_device *adev);
 bool acpi_device_is_battery(struct acpi_device *adev);
 bool acpi_device_is_first_physical_node(struct acpi_device *adev,
                                        const struct device *dev);
+int acpi_bus_register_early_device(int type);
 
 /* --------------------------------------------------------------------------
                      Device Matching and Notification
index e14e964bfe6d79ad0a686f1769181f7cff527b27..b0fe5272c76aadfa59493bc954c6a545bbbc2008 100644 (file)
@@ -1024,6 +1024,9 @@ static void acpi_device_get_busid(struct acpi_device *device)
        case ACPI_BUS_TYPE_SLEEP_BUTTON:
                strcpy(device->pnp.bus_id, "SLPF");
                break;
+       case ACPI_BUS_TYPE_ECDT_EC:
+               strcpy(device->pnp.bus_id, "ECDT");
+               break;
        default:
                acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
                /* Clean up trailing underscores (if any) */
@@ -1304,6 +1307,9 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
        case ACPI_BUS_TYPE_SLEEP_BUTTON:
                acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
                break;
+       case ACPI_BUS_TYPE_ECDT_EC:
+               acpi_add_id(pnp, ACPI_ECDT_HID);
+               break;
        }
 }
 
@@ -2046,6 +2052,21 @@ void acpi_bus_trim(struct acpi_device *adev)
 }
 EXPORT_SYMBOL_GPL(acpi_bus_trim);
 
+int acpi_bus_register_early_device(int type)
+{
+       struct acpi_device *device = NULL;
+       int result;
+
+       result = acpi_add_single_object(&device, NULL,
+                                       type, ACPI_STA_DEFAULT);
+       if (result)
+               return result;
+
+       device->flags.match_driver = true;
+       return device_attach(&device->dev);
+}
+EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
+
 static int acpi_bus_scan_fixed(void)
 {
        int result = 0;
index a73596a4f804c817bfd277d3ff51eb42ca969bae..bccec9de05330b2fe6822369e5c7a409e8759e95 100644 (file)
@@ -1947,6 +1947,26 @@ static void binder_send_failed_reply(struct binder_transaction *t,
        }
 }
 
+/**
+ * binder_cleanup_transaction() - cleans up undelivered transaction
+ * @t:         transaction that needs to be cleaned up
+ * @reason:    reason the transaction wasn't delivered
+ * @error_code:        error to return to caller (if synchronous call)
+ */
+static void binder_cleanup_transaction(struct binder_transaction *t,
+                                      const char *reason,
+                                      uint32_t error_code)
+{
+       if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
+               binder_send_failed_reply(t, error_code);
+       } else {
+               binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+                       "undelivered transaction %d, %s\n",
+                       t->debug_id, reason);
+               binder_free_transaction(t);
+       }
+}
+
 /**
  * binder_validate_object() - checks for a valid metadata object in a buffer.
  * @buffer:    binder_buffer that we're parsing.
@@ -4015,12 +4035,20 @@ retry:
                if (put_user(cmd, (uint32_t __user *)ptr)) {
                        if (t_from)
                                binder_thread_dec_tmpref(t_from);
+
+                       binder_cleanup_transaction(t, "put_user failed",
+                                                  BR_FAILED_REPLY);
+
                        return -EFAULT;
                }
                ptr += sizeof(uint32_t);
                if (copy_to_user(ptr, &tr, sizeof(tr))) {
                        if (t_from)
                                binder_thread_dec_tmpref(t_from);
+
+                       binder_cleanup_transaction(t, "copy_to_user failed",
+                                                  BR_FAILED_REPLY);
+
                        return -EFAULT;
                }
                ptr += sizeof(tr);
@@ -4090,15 +4118,9 @@ static void binder_release_work(struct binder_proc *proc,
                        struct binder_transaction *t;
 
                        t = container_of(w, struct binder_transaction, work);
-                       if (t->buffer->target_node &&
-                           !(t->flags & TF_ONE_WAY)) {
-                               binder_send_failed_reply(t, BR_DEAD_REPLY);
-                       } else {
-                               binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
-                                       "undelivered transaction %d\n",
-                                       t->debug_id);
-                               binder_free_transaction(t);
-                       }
+
+                       binder_cleanup_transaction(t, "process died.",
+                                                  BR_DEAD_REPLY);
                } break;
                case BINDER_WORK_RETURN_ERROR: {
                        struct binder_error *e = container_of(
index 80854f71559a319fc8fb0ca4b0dd15eb7aec6460..0ae6971c2a4cfcd3bba93856335f4a031137385c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * MeidaTek AHCI SATA driver
+ * MediaTek AHCI SATA driver
  *
  * Copyright (c) 2017 MediaTek Inc.
  * Author: Ryder Lee <ryder.lee@mediatek.com>
@@ -25,7 +25,7 @@
 #include <linux/reset.h>
 #include "ahci.h"
 
-#define DRV_NAME               "ahci"
+#define DRV_NAME               "ahci-mtk"
 
 #define SYS_CFG                        0x14
 #define SYS_CFG_SATA_MSK       GENMASK(31, 30)
@@ -192,5 +192,5 @@ static struct platform_driver mtk_ahci_driver = {
 };
 module_platform_driver(mtk_ahci_driver);
 
-MODULE_DESCRIPTION("MeidaTek SATA AHCI Driver");
+MODULE_DESCRIPTION("MediaTek SATA AHCI Driver");
 MODULE_LICENSE("GPL v2");
index b6b0bf76dfc7bb7fe90f45418aab974bf73b6f87..2685f28160f70764ee4013930239566031c9b058 100644 (file)
@@ -35,6 +35,8 @@
 
 /* port register default value */
 #define AHCI_PORT_PHY_1_CFG    0xa003fffe
+#define AHCI_PORT_PHY2_CFG     0x28184d1f
+#define AHCI_PORT_PHY3_CFG     0x0e081509
 #define AHCI_PORT_TRANS_CFG    0x08000029
 #define AHCI_PORT_AXICC_CFG    0x3fffffff
 
@@ -183,6 +185,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
                writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
                                qpriv->ecc_addr);
                writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+               writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+               writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
                writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
                if (qpriv->is_dmacoherent)
                        writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -190,6 +194,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
 
        case AHCI_LS2080A:
                writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+               writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+               writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
                writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
                if (qpriv->is_dmacoherent)
                        writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -201,6 +207,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
                writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
                                qpriv->ecc_addr);
                writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+               writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+               writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
                writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
                if (qpriv->is_dmacoherent)
                        writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -212,6 +220,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
                writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A,
                       qpriv->ecc_addr);
                writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+               writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+               writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
                writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
                if (qpriv->is_dmacoherent)
                        writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -219,6 +229,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
 
        case AHCI_LS2088A:
                writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+               writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+               writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
                writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
                if (qpriv->is_dmacoherent)
                        writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
index 2a882929de4aa3cfcce6b46de459ee7f7360a075..8193b38a1cae7a8d738fb4c29828654dd02f0572 100644 (file)
@@ -3082,13 +3082,19 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
        bit = fls(mask) - 1;
        mask &= ~(1 << bit);
 
-       /* Mask off all speeds higher than or equal to the current
-        * one.  Force 1.5Gbps if current SPD is not available.
+       /*
+        * Mask off all speeds higher than or equal to the current one.  At
+        * this point, if current SPD is not available and we previously
+        * recorded the link speed from SStatus, the driver has already
+        * masked off the highest bit so mask should already be 1 or 0.
+        * Otherwise, we should not force 1.5Gbps on a link where we have
+        * not previously recorded speed from SStatus.  Just return in this
+        * case.
         */
        if (spd > 1)
                mask &= (1 << (spd - 1)) - 1;
        else
-               mask &= 1;
+               return -EINVAL;
 
        /* were we already at the bottom? */
        if (!mask)
index ffd8d33c6e0f044dece4e4bcc4560aebd68290ac..6db2e34bd52f2e5521e72ac2b17e1ee1fe43cc44 100644 (file)
@@ -82,7 +82,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
  * is issued to the device. However, if the controller clock is 133MHz,
  * the following tables must be used.
  */
-static struct pdc2027x_pio_timing {
+static const struct pdc2027x_pio_timing {
        u8 value0, value1, value2;
 } pdc2027x_pio_timing_tbl[] = {
        { 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
@@ -92,7 +92,7 @@ static struct pdc2027x_pio_timing {
        { 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
 };
 
-static struct pdc2027x_mdma_timing {
+static const struct pdc2027x_mdma_timing {
        u8 value0, value1;
 } pdc2027x_mdma_timing_tbl[] = {
        { 0xdf, 0x5f }, /* MDMA mode 0 */
@@ -100,7 +100,7 @@ static struct pdc2027x_mdma_timing {
        { 0x69, 0x25 }, /* MDMA mode 2 */
 };
 
-static struct pdc2027x_udma_timing {
+static const struct pdc2027x_udma_timing {
        u8 value0, value1, value2;
 } pdc2027x_udma_timing_tbl[] = {
        { 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
@@ -649,7 +649,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
  * @host: target ATA host
  * @board_idx: board identifier
  */
-static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
+static void pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
 {
        long pll_clock;
 
@@ -665,8 +665,6 @@ static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
 
        /* Adjust PLL control register */
        pdc_adjust_pll(host, pll_clock, board_idx);
-
-       return 0;
 }
 
 /**
@@ -753,8 +751,7 @@ static int pdc2027x_init_one(struct pci_dev *pdev,
        //pci_enable_intx(pdev);
 
        /* initialize adapter */
-       if (pdc_hardware_init(host, board_idx) != 0)
-               return -EIO;
+       pdc_hardware_init(host, board_idx);
 
        pci_set_master(pdev);
        return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
@@ -778,8 +775,7 @@ static int pdc2027x_reinit_one(struct pci_dev *pdev)
        else
                board_idx = PDC_UDMA_133;
 
-       if (pdc_hardware_init(host, board_idx))
-               return -EIO;
+       pdc_hardware_init(host, board_idx);
 
        ata_host_resume(host);
        return 0;
index bfc514015b0bc675c5901111b29f037e4f3cfbff..9287ec958b7095f0385c33c095ed14ac9feba445 100644 (file)
@@ -293,7 +293,7 @@ static inline void __init show_version (void) {
   
 */
 
-static void do_housekeeping (unsigned long arg);
+static void do_housekeeping (struct timer_list *t);
 /********** globals **********/
 
 static unsigned short debug = 0;
@@ -1493,8 +1493,8 @@ static const struct atmdev_ops amb_ops = {
 };
 
 /********** housekeeping **********/
-static void do_housekeeping (unsigned long arg) {
-  amb_dev * dev = (amb_dev *) arg;
+static void do_housekeeping (struct timer_list *t) {
+  amb_dev * dev = from_timer(dev, t, housekeeping);
   
   // could collect device-specific (not driver/atm-linux) stats here
       
@@ -2267,8 +2267,7 @@ static int amb_probe(struct pci_dev *pci_dev,
        dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS;
        dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS;
 
-       setup_timer(&dev->housekeeping, do_housekeeping,
-                   (unsigned long)dev);
+       timer_setup(&dev->housekeeping, do_housekeeping, 0);
        mod_timer(&dev->housekeeping, jiffies);
 
        // enable host interrupts
index 6b6368a565261b5037d5d5f83899a9d664671222..d97c05690faa99363ac709fb237335a0678af4c0 100644 (file)
@@ -1656,9 +1656,9 @@ static irqreturn_t fs_irq (int irq, void *dev_id)
 
 
 #ifdef FS_POLL_FREQ
-static void fs_poll (unsigned long data)
+static void fs_poll (struct timer_list *t)
 {
-       struct fs_dev *dev = (struct fs_dev *) data;
+       struct fs_dev *dev = from_timer(dev, t, timer);
   
        fs_irq (0, dev);
        dev->timer.expires = jiffies + FS_POLL_FREQ;
@@ -1885,9 +1885,7 @@ static int fs_init(struct fs_dev *dev)
        }
 
 #ifdef FS_POLL_FREQ
-       init_timer (&dev->timer);
-       dev->timer.data = (unsigned long) dev;
-       dev->timer.function = fs_poll;
+       timer_setup(&dev->timer, fs_poll, 0);
        dev->timer.expires = jiffies + FS_POLL_FREQ;
        add_timer (&dev->timer);
 #endif
index e121b84857310836742690c001dec6502dc68db2..5ddc203206b8fb739b2f2552e67d17d2545dfe02 100644 (file)
@@ -357,7 +357,7 @@ static inline void __init show_version (void) {
 
 /********** globals **********/
 
-static void do_housekeeping (unsigned long arg);
+static void do_housekeeping (struct timer_list *t);
 
 static unsigned short debug = 0;
 static unsigned short vpi_bits = 0;
@@ -1418,9 +1418,9 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
 
 /********** housekeeping **********/
 
-static void do_housekeeping (unsigned long arg) {
+static void do_housekeeping (struct timer_list *t) {
   // just stats at the moment
-  hrz_dev * dev = (hrz_dev *) arg;
+  hrz_dev * dev = from_timer(dev, t, housekeeping);
 
   // collect device-specific (not driver/atm-linux) stats here
   dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF);
@@ -2796,7 +2796,7 @@ static int hrz_probe(struct pci_dev *pci_dev,
        dev->atm_dev->ci_range.vpi_bits = vpi_bits;
        dev->atm_dev->ci_range.vci_bits = 10-vpi_bits;
 
-       setup_timer(&dev->housekeeping, do_housekeeping, (unsigned long) dev);
+       timer_setup(&dev->housekeeping, do_housekeeping, 0);
        mod_timer(&dev->housekeeping, jiffies);
 
 out:
index 909744eb7bab419eec2dc71e2c79c87231812ce3..0a67487c0b1d3f0fbc85215eeb9d733966482b53 100644 (file)
@@ -45,8 +45,8 @@ static DEFINE_SPINLOCK(idt77105_priv_lock);
 #define PUT(val,reg) dev->ops->phy_put(dev,val,IDT77105_##reg)
 #define GET(reg) dev->ops->phy_get(dev,IDT77105_##reg)
 
-static void idt77105_stats_timer_func(unsigned long);
-static void idt77105_restart_timer_func(unsigned long);
+static void idt77105_stats_timer_func(struct timer_list *);
+static void idt77105_restart_timer_func(struct timer_list *);
 
 
 static DEFINE_TIMER(stats_timer, idt77105_stats_timer_func);
@@ -80,7 +80,7 @@ static u16 get_counter(struct atm_dev *dev, int counter)
  * a separate copy of the stats allows implementation of
  * an ioctl which gathers the stats *without* zero'ing them.
  */
-static void idt77105_stats_timer_func(unsigned long dummy)
+static void idt77105_stats_timer_func(struct timer_list *unused)
 {
        struct idt77105_priv *walk;
        struct atm_dev *dev;
@@ -109,7 +109,7 @@ static void idt77105_stats_timer_func(unsigned long dummy)
  * interrupts need to be disabled when the cable is pulled out
  * to avoid lots of spurious cell error interrupts.
  */
-static void idt77105_restart_timer_func(unsigned long dummy)
+static void idt77105_restart_timer_func(struct timer_list *unused)
 {
        struct idt77105_priv *walk;
        struct atm_dev *dev;
index 0e3b9c44c8089c3d82ad2d518ce0c280834bb86a..0277f36be85b94479c5aa303e2fc25b258451586 100644 (file)
@@ -1528,9 +1528,9 @@ idt77252_tx(struct idt77252_dev *card)
 
 
 static void
-tst_timer(unsigned long data)
+tst_timer(struct timer_list *t)
 {
-       struct idt77252_dev *card = (struct idt77252_dev *)data;
+       struct idt77252_dev *card = from_timer(card, t, tst_timer);
        unsigned long base, idle, jump;
        unsigned long flags;
        u32 pc;
@@ -3634,7 +3634,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
        spin_lock_init(&card->cmd_lock);
        spin_lock_init(&card->tst_lock);
 
-       setup_timer(&card->tst_timer, tst_timer, (unsigned long)card);
+       timer_setup(&card->tst_timer, tst_timer, 0);
 
        /* Do the I/O remapping... */
        card->membase = ioremap(membase, 1024);
index 12f646760b6827e3ddf8a63290cfb22a08e51d0b..98a3a43484c8b410f3a217cd8cb11bd8d6750370 100644 (file)
@@ -75,7 +75,7 @@ static void desc_dbg(IADEV *iadev);
 static IADEV *ia_dev[8];
 static struct atm_dev *_ia_dev[8];
 static int iadev_count;
-static void ia_led_timer(unsigned long arg);
+static void ia_led_timer(struct timer_list *unused);
 static DEFINE_TIMER(ia_timer, ia_led_timer);
 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
@@ -2432,7 +2432,7 @@ static void ia_update_stats(IADEV *iadev) {
     return;
 }
   
-static void ia_led_timer(unsigned long arg) {
+static void ia_led_timer(struct timer_list *unused) {
        unsigned long flags;
        static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
         u_char i;
index 7e4c2ea3e3f268bcc47a6c75f4f1eeec2355d530..5f8e009b2da1cb02d71fa608a2f13cdcb39efd51 100644 (file)
@@ -1761,9 +1761,9 @@ static void iter_dequeue(struct lanai_dev *lanai, vci_t vci)
 }
 #endif /* !DEBUG_RW */
 
-static void lanai_timed_poll(unsigned long arg)
+static void lanai_timed_poll(struct timer_list *t)
 {
-       struct lanai_dev *lanai = (struct lanai_dev *) arg;
+       struct lanai_dev *lanai = from_timer(lanai, t, timer);
 #ifndef DEBUG_RW
        unsigned long flags;
 #ifdef USE_POWERDOWN
@@ -1790,10 +1790,8 @@ static void lanai_timed_poll(unsigned long arg)
 
 static inline void lanai_timed_poll_start(struct lanai_dev *lanai)
 {
-       init_timer(&lanai->timer);
+       timer_setup(&lanai->timer, lanai_timed_poll, 0);
        lanai->timer.expires = jiffies + LANAI_POLL_PERIOD;
-       lanai->timer.data = (unsigned long) lanai;
-       lanai->timer.function = lanai_timed_poll;
        add_timer(&lanai->timer);
 }
 
index a9702836cbaeb10b233c6e1b7b818e527f2d1542..cbec9adc01c768e95cf8a3ad000697019f38f65c 100644 (file)
@@ -145,7 +145,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
 #ifdef EXTRA_DEBUG
 static void which_list(ns_dev * card, struct sk_buff *skb);
 #endif
-static void ns_poll(unsigned long arg);
+static void ns_poll(struct timer_list *unused);
 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
                       unsigned long addr);
 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
@@ -284,10 +284,8 @@ static int __init nicstar_init(void)
        XPRINTK("nicstar: nicstar_init() returned.\n");
 
        if (!error) {
-               init_timer(&ns_timer);
+               timer_setup(&ns_timer, ns_poll, 0);
                ns_timer.expires = jiffies + NS_POLL_PERIOD;
-               ns_timer.data = 0UL;
-               ns_timer.function = ns_poll;
                add_timer(&ns_timer);
        }
 
@@ -2681,7 +2679,7 @@ static void which_list(ns_dev * card, struct sk_buff *skb)
 }
 #endif /* EXTRA_DEBUG */
 
-static void ns_poll(unsigned long arg)
+static void ns_poll(struct timer_list *unused)
 {
        int i;
        ns_dev *card;
index d7d21118d3e0f5fc23bad06ccbaaf5c2b409e9e1..2c2ed9cf879626692706bdf86f447cb4d8c05c26 100644 (file)
@@ -136,6 +136,7 @@ config CFAG12864B_RATE
 
 config IMG_ASCII_LCD
        tristate "Imagination Technologies ASCII LCD Display"
+       depends on HAS_IOMEM
        default y if MIPS_MALTA || MIPS_SEAD3
        select SYSCON
        help
index 2f6614c9a229ab213f8a140a3e0bcc272dcc1bfd..bdc87907d6a1b297276e9c4acbb89175f69d0d08 100644 (file)
@@ -91,22 +91,23 @@ config FIRMWARE_IN_KERNEL
        depends on FW_LOADER
        default y
        help
-         The kernel source tree includes a number of firmware 'blobs'
-         that are used by various drivers. The recommended way to
-         use these is to run "make firmware_install", which, after
-         converting ihex files to binary, copies all of the needed
-         binary files in firmware/ to /lib/firmware/ on your system so
-         that they can be loaded by userspace helpers on request.
+         Various drivers in the kernel source tree may require firmware,
+         which is generally available in your distribution's linux-firmware
+         package.
+
+         The linux-firmware package should install firmware into
+         /lib/firmware/ on your system, so they can be loaded by userspace
+         helpers on request.
 
          Enabling this option will build each required firmware blob
-         into the kernel directly, where request_firmware() will find
-         them without having to call out to userspace. This may be
-         useful if your root file system requires a device that uses
-         such firmware and do not wish to use an initrd.
+         specified by EXTRA_FIRMWARE into the kernel directly, where
+         request_firmware() will find them without having to call out to
+         userspace. This may be useful if your root file system requires a
+         device that uses such firmware and you do not wish to use an
+         initrd.
 
          This single option controls the inclusion of firmware for
-         every driver that uses request_firmware() and ships its
-         firmware in the kernel source tree, which avoids a
+         every driver that uses request_firmware(), which avoids a
          proliferation of 'Include firmware for xxx device' options.
 
          Say 'N' and let firmware be loaded from userspace.
index cd6ccdcf9df0c5ef2a37fabb2b4b7bbfa3c88755..372d10af26009dfae317affd625c756a2700fef1 100644 (file)
@@ -39,7 +39,7 @@ static int isa_bus_probe(struct device *dev)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->probe)
+       if (isa_driver && isa_driver->probe)
                return isa_driver->probe(dev, to_isa_dev(dev)->id);
 
        return 0;
@@ -49,7 +49,7 @@ static int isa_bus_remove(struct device *dev)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->remove)
+       if (isa_driver && isa_driver->remove)
                return isa_driver->remove(dev, to_isa_dev(dev)->id);
 
        return 0;
@@ -59,7 +59,7 @@ static void isa_bus_shutdown(struct device *dev)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->shutdown)
+       if (isa_driver && isa_driver->shutdown)
                isa_driver->shutdown(dev, to_isa_dev(dev)->id);
 }
 
@@ -67,7 +67,7 @@ static int isa_bus_suspend(struct device *dev, pm_message_t state)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->suspend)
+       if (isa_driver && isa_driver->suspend)
                return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
 
        return 0;
@@ -77,7 +77,7 @@ static int isa_bus_resume(struct device *dev)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
-       if (isa_driver->resume)
+       if (isa_driver && isa_driver->resume)
                return isa_driver->resume(dev, to_isa_dev(dev)->id);
 
        return 0;
index db2f044159274a35457f4f1e5b3a55d226f08cd2..08744b572af6a25184d274ba91304e19ec2be732 100644 (file)
@@ -525,6 +525,21 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 
 /*------------------------- Resume routines -------------------------*/
 
+/**
+ * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
+ * @dev: Target device.
+ *
+ * Make the core skip the "early resume" and "resume" phases for @dev.
+ *
+ * This function can be called by middle-layer code during the "noirq" phase of
+ * system resume if necessary, but not by device drivers.
+ */
+void dev_pm_skip_next_resume_phases(struct device *dev)
+{
+       dev->power.is_late_suspended = false;
+       dev->power.is_suspended = false;
+}
+
 /**
  * device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
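
dev_pm_skip_next_resume_phases() is meant for middle-layer (bus or PM-domain) code, not for drivers. A hypothetical caller might look like the sketch below; my_device_was_restored_by_firmware() is an assumed helper, not something introduced by this patch:

#include <linux/pm.h>

static int my_bus_resume_noirq(struct device *dev)
{
	if (my_device_was_restored_by_firmware(dev)) {	/* assumed helper */
		/* Device is already functional; skip "early resume" and "resume". */
		dev_pm_skip_next_resume_phases(dev);
		return 0;
	}

	return pm_generic_resume_noirq(dev);
}
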
index 027d159ac3810e9b5b190345c1bee60dbbc13790..6e89b51ea3d92b40eba2059bfdb47703916454d9 100644 (file)
@@ -276,7 +276,8 @@ static int rpm_get_suppliers(struct device *dev)
                        continue;
 
                retval = pm_runtime_get_sync(link->supplier);
-               if (retval < 0) {
+               /* Ignore suppliers with disabled runtime PM. */
+               if (retval < 0 && retval != -EACCES) {
                        pm_runtime_put_noidle(link->supplier);
                        return retval;
                }
index 680ee1d36ac9a7f31b182b977f062aed1a590c8b..38559f04db2cfdf2ad9d71b562ce6825317aa8e1 100644 (file)
@@ -481,7 +481,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
         * Use timer struct to check if the given source is initialized
         * by wakeup_source_add.
         */
-       return ws->timer.function != (TIMER_FUNC_TYPE)pm_wakeup_timer_fn;
+       return ws->timer.function != pm_wakeup_timer_fn;
 }
 
 /*
index 255591ab37168dd8d6f5c9780ce61e339be81810..442e777bdfb2ba84449c3c273997be71d36ffba2 100644 (file)
@@ -3079,11 +3079,10 @@ DAC960_InitializeController(DAC960_Controller_T *Controller)
       /*
        Initialize the Monitoring Timer.
       */
-      init_timer(&Controller->MonitoringTimer);
+      timer_setup(&Controller->MonitoringTimer,
+                  DAC960_MonitoringTimerFunction, 0);
       Controller->MonitoringTimer.expires =
        jiffies + DAC960_MonitoringTimerInterval;
-      Controller->MonitoringTimer.data = (unsigned long) Controller;
-      Controller->MonitoringTimer.function = DAC960_MonitoringTimerFunction;
       add_timer(&Controller->MonitoringTimer);
       Controller->ControllerInitialized = true;
       return true;
@@ -5620,9 +5619,9 @@ static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *Command)
   the status of DAC960 Controllers.
 */
 
-static void DAC960_MonitoringTimerFunction(unsigned long TimerData)
+static void DAC960_MonitoringTimerFunction(struct timer_list *t)
 {
-  DAC960_Controller_T *Controller = (DAC960_Controller_T *) TimerData;
+  DAC960_Controller_T *Controller = from_timer(Controller, t, MonitoringTimer);
   DAC960_Command_T *Command;
   unsigned long flags;
 
index 85fa9bb6375964f04bedcc644d7a8185518c69b7..6a6226a2b9320eb275839962a048b530bcc3fd29 100644 (file)
@@ -4406,7 +4406,7 @@ static irqreturn_t DAC960_PD_InterruptHandler(int, void *);
 static irqreturn_t DAC960_P_InterruptHandler(int, void *);
 static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *);
 static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *);
-static void DAC960_MonitoringTimerFunction(unsigned long);
+static void DAC960_MonitoringTimerFunction(struct timer_list *);
 static void DAC960_Message(DAC960_MessageLevel_T, unsigned char *,
                           DAC960_Controller_T *, ...);
 static void DAC960_CreateProcEntries(DAC960_Controller_T *);
index 55ab25f79a08991536359206dad387f2bfc7e52d..812fed069708f181c4b16ce972e0919c4541a803 100644 (file)
@@ -1429,7 +1429,7 @@ aoecmd_ata_id(struct aoedev *d)
 
        d->rttavg = RTTAVG_INIT;
        d->rttdev = RTTDEV_INIT;
-       d->timer.function = (TIMER_FUNC_TYPE)rexmit_timer;
+       d->timer.function = rexmit_timer;
 
        skb = skb_clone(skb, GFP_ATOMIC);
        if (skb) {
index ae596e55bcb67c18594477b7f8b27ec02d82fcc6..8bc3b9fd8dd2be0df64d166dbc4c2b65eef03305 100644 (file)
@@ -342,8 +342,8 @@ static int NeedSeek = 0;
 static void fd_select_side( int side );
 static void fd_select_drive( int drive );
 static void fd_deselect( void );
-static void fd_motor_off_timer( unsigned long dummy );
-static void check_change( unsigned long dummy );
+static void fd_motor_off_timer(struct timer_list *unused);
+static void check_change(struct timer_list *unused);
 static irqreturn_t floppy_irq (int irq, void *dummy);
 static void fd_error( void );
 static int do_format(int drive, int type, struct atari_format_descr *desc);
@@ -353,12 +353,12 @@ static void fd_calibrate_done( int status );
 static void fd_seek( void );
 static void fd_seek_done( int status );
 static void fd_rwsec( void );
-static void fd_readtrack_check( unsigned long dummy );
+static void fd_readtrack_check(struct timer_list *unused);
 static void fd_rwsec_done( int status );
 static void fd_rwsec_done1(int status);
 static void fd_writetrack( void );
 static void fd_writetrack_done( int status );
-static void fd_times_out( unsigned long dummy );
+static void fd_times_out(struct timer_list *unused);
 static void finish_fdc( void );
 static void finish_fdc_done( int dummy );
 static void setup_req_params( int drive );
@@ -479,7 +479,7 @@ static void fd_deselect( void )
  * counts the index signals, which arrive only if one drive is selected.
  */
 
-static void fd_motor_off_timer( unsigned long dummy )
+static void fd_motor_off_timer(struct timer_list *unused)
 {
        unsigned char status;
 
@@ -515,7 +515,7 @@ static void fd_motor_off_timer( unsigned long dummy )
  * as possible) and keep track of the current state of the write protection.
  */
 
-static void check_change( unsigned long dummy )
+static void check_change(struct timer_list *unused)
 {
        static int    drive = 0;
 
@@ -966,7 +966,7 @@ static void fd_rwsec( void )
 }
 
     
-static void fd_readtrack_check( unsigned long dummy )
+static void fd_readtrack_check(struct timer_list *unused)
 {
        unsigned long flags, addr, addr2;
 
@@ -1237,7 +1237,7 @@ static void fd_writetrack_done( int status )
        fd_error();
 }
 
-static void fd_times_out( unsigned long dummy )
+static void fd_times_out(struct timer_list *unused)
 {
        atari_disable_irq( IRQ_MFP_FDC );
        if (!FloppyIRQHandler) goto end; /* int occurred after timer was fired, but
index c61960deb74aac4277d7994f8020fc8f7a65d3bd..ad0477ae820f040affe54f4368d3a02d9da63350 100644 (file)
@@ -35,13 +35,13 @@ static inline u64 mb_per_tick(int mbps)
 struct nullb_cmd {
        struct list_head list;
        struct llist_node ll_list;
-       call_single_data_t csd;
+       struct __call_single_data csd;
        struct request *rq;
        struct bio *bio;
        unsigned int tag;
+       blk_status_t error;
        struct nullb_queue *nq;
        struct hrtimer timer;
-       blk_status_t error;
 };
 
 struct nullb_queue {
@@ -471,7 +471,6 @@ static void nullb_device_release(struct config_item *item)
 {
        struct nullb_device *dev = to_nullb_device(item);
 
-       badblocks_exit(&dev->badblocks);
        null_free_device_storage(dev, false);
        null_free_dev(dev);
 }
@@ -582,6 +581,10 @@ static struct nullb_device *null_alloc_dev(void)
 
 static void null_free_dev(struct nullb_device *dev)
 {
+       if (!dev)
+               return;
+
+       badblocks_exit(&dev->badblocks);
        kfree(dev);
 }
 
index 926dce9c452faf927d79fb0b99fd1a4e473aea6e..c148e83e4ed72b0b430853c779ce835f62bc310f 100644 (file)
@@ -203,9 +203,9 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card,
        return 0;
 }
 
-static void creg_cmd_timed_out(unsigned long data)
+static void creg_cmd_timed_out(struct timer_list *t)
 {
-       struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
+       struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
        struct creg_cmd *cmd;
 
        spin_lock(&card->creg_ctrl.lock);
@@ -745,8 +745,7 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card)
        mutex_init(&card->creg_ctrl.reset_lock);
        INIT_LIST_HEAD(&card->creg_ctrl.queue);
        spin_lock_init(&card->creg_ctrl.lock);
-       setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
-                   (unsigned long) card);
+       timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);
 
        return 0;
 }
index 6a1b2177951c1521f50b9364739bd433a16a577e..beaccf197a5a85f41eaf1798862f32ce3eb0cd06 100644 (file)
@@ -354,9 +354,9 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
                rsxx_complete_dma(ctrl, dma, status);
 }
 
-static void dma_engine_stalled(unsigned long data)
+static void dma_engine_stalled(struct timer_list *t)
 {
-       struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+       struct rsxx_dma_ctrl *ctrl = from_timer(ctrl, t, activity_timer);
        int cnt;
 
        if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
@@ -838,8 +838,7 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
        mutex_init(&ctrl->work_lock);
        INIT_LIST_HEAD(&ctrl->queue);
 
-       setup_timer(&ctrl->activity_timer, dma_engine_stalled,
-                                       (unsigned long)ctrl);
+       timer_setup(&ctrl->activity_timer, dma_engine_stalled, 0);
 
        ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
        if (!ctrl->issue_wq)
index 2819f23e8bf2fe8f18cac9ccae63a8e298adc73a..de0d08133c7ee071e0ce44167fda6e8a0e1d9c51 100644 (file)
@@ -707,9 +707,9 @@ static void skd_start_queue(struct work_struct *work)
        blk_mq_start_hw_queues(skdev->queue);
 }
 
-static void skd_timer_tick(ulong arg)
+static void skd_timer_tick(struct timer_list *t)
 {
-       struct skd_device *skdev = (struct skd_device *)arg;
+       struct skd_device *skdev = from_timer(skdev, t, timer);
        unsigned long reqflags;
        u32 state;
 
@@ -857,7 +857,7 @@ static int skd_start_timer(struct skd_device *skdev)
 {
        int rc;
 
-       setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
+       timer_setup(&skdev->timer, skd_timer_tick, 0);
 
        rc = mod_timer(&skdev->timer, (jiffies + HZ));
        if (rc)
index ad9749463d4fa9a382afa7f24587bbbe3a2efcc9..5ca56bfae63cf69872cd18270fb8c980db4eddae 100644 (file)
@@ -81,7 +81,7 @@ struct vdc_port {
 
 static void vdc_ldc_reset(struct vdc_port *port);
 static void vdc_ldc_reset_work(struct work_struct *work);
-static void vdc_ldc_reset_timer(unsigned long _arg);
+static void vdc_ldc_reset_timer(struct timer_list *t);
 
 static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
 {
@@ -974,8 +974,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
         */
        ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
        port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
-       setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer,
-                   (unsigned long)port);
+       timer_setup(&port->ldc_reset_timer, vdc_ldc_reset_timer, 0);
        INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
 
        err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
@@ -1087,9 +1086,9 @@ static void vdc_queue_drain(struct vdc_port *port)
                __blk_end_request_all(req, BLK_STS_IOERR);
 }
 
-static void vdc_ldc_reset_timer(unsigned long _arg)
+static void vdc_ldc_reset_timer(struct timer_list *t)
 {
-       struct vdc_port *port = (struct vdc_port *) _arg;
+       struct vdc_port *port = from_timer(port, t, ldc_reset_timer);
        struct vio_driver_state *vio = &port->vio;
        unsigned long flags;
 
index e620e423102b89f05fef575716b1e1657363d6f0..af51015d056eff1a6b1a2dfac264e30259c669d9 100644 (file)
@@ -397,7 +397,7 @@ static void set_timeout(struct floppy_state *fs, int nticks,
        if (fs->timeout_pending)
                del_timer(&fs->timeout);
        fs->timeout.expires = jiffies + nticks;
-       fs->timeout.function = (TIMER_FUNC_TYPE)proc;
+       fs->timeout.function = proc;
        add_timer(&fs->timeout);
        fs->timeout_pending = 1;
 }
index 0677d2514665c75c3c45c27b827b01d6f045fc07..8077123678ad8b27fb7807f230ca76c3b0a37e01 100644 (file)
@@ -718,7 +718,7 @@ static void check_batteries(struct cardinfo *card)
                set_fault_to_battery_status(card);
 }
 
-static void check_all_batteries(unsigned long ptr)
+static void check_all_batteries(struct timer_list *unused)
 {
        int i;
 
@@ -738,8 +738,7 @@ static void check_all_batteries(unsigned long ptr)
 
 static void init_battery_timer(void)
 {
-       init_timer(&battery_timer);
-       battery_timer.function = check_all_batteries;
+       timer_setup(&battery_timer, check_all_batteries, 0);
        battery_timer.expires = jiffies + (HZ * 60);
        add_timer(&battery_timer);
 }
index 14459d66ef0cd8ac223992f2f69ad2b725f8481c..c24589414c75926b934b9bb117b237bcb686e736 100644 (file)
@@ -770,9 +770,9 @@ static void ace_fsm_tasklet(unsigned long data)
        spin_unlock_irqrestore(&ace->lock, flags);
 }
 
-static void ace_stall_timer(unsigned long data)
+static void ace_stall_timer(struct timer_list *t)
 {
-       struct ace_device *ace = (void *)data;
+       struct ace_device *ace = from_timer(ace, t, stall_timer);
        unsigned long flags;
 
        dev_warn(ace->dev,
@@ -984,7 +984,7 @@ static int ace_setup(struct ace_device *ace)
         * Initialize the state machine tasklet and stall timer
         */
        tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
-       setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);
+       timer_setup(&ace->stall_timer, ace_stall_timer, 0);
 
        /*
         * Initialize the request queue
index 3c29d36702a8eceb4eabd5ed36cf3188adeb1694..5426c04fe24bc88faa88a2cff0624ccbce277ebe 100644 (file)
@@ -1755,14 +1755,17 @@ static int cci_pmu_probe(struct platform_device *pdev)
        raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
        mutex_init(&cci_pmu->reserve_mutex);
        atomic_set(&cci_pmu->active_events, 0);
-       cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
+       cpumask_set_cpu(get_cpu(), &cci_pmu->cpus);
 
        ret = cci_pmu_init(cci_pmu, pdev);
-       if (ret)
+       if (ret) {
+               put_cpu();
                return ret;
+       }
 
        cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
                                         &cci_pmu->node);
+       put_cpu();
        pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
        return 0;
 }
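
The smp_processor_id() to get_cpu()/put_cpu() change matters because the probe path can run preemptible; get_cpu() disables preemption so the returned CPU id stays valid until put_cpu(). A minimal illustration with a hypothetical my_pmu type:

#include <linux/cpumask.h>
#include <linux/smp.h>

struct my_pmu {
	struct cpumask cpus;
};

static void my_pmu_bind_cpu(struct my_pmu *pmu)
{
	int cpu = get_cpu();		/* current CPU, preemption disabled */

	cpumask_set_cpu(cpu, &pmu->cpus);
	/* ...register the hotplug instance, set IRQ affinity, etc... */
	put_cpu();			/* preemption re-enabled */
}
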
index 3063f53123979d740c1f9c97099bcdb86103d43d..b52332e52ca5e9c9a9d4b4ede3667c3ec8066474 100644 (file)
@@ -262,7 +262,7 @@ static struct attribute *arm_ccn_pmu_format_attrs[] = {
        NULL
 };
 
-static struct attribute_group arm_ccn_pmu_format_attr_group = {
+static const struct attribute_group arm_ccn_pmu_format_attr_group = {
        .name = "format",
        .attrs = arm_ccn_pmu_format_attrs,
 };
@@ -451,7 +451,7 @@ static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
 static struct attribute
                *arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];
 
-static struct attribute_group arm_ccn_pmu_events_attr_group = {
+static const struct attribute_group arm_ccn_pmu_events_attr_group = {
        .name = "events",
        .is_visible = arm_ccn_pmu_events_is_visible,
        .attrs = arm_ccn_pmu_events_attrs,
@@ -548,7 +548,7 @@ static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
        NULL
 };
 
-static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
+static const struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
        .name = "cmp_mask",
        .attrs = arm_ccn_pmu_cmp_mask_attrs,
 };
@@ -569,7 +569,7 @@ static struct attribute *arm_ccn_pmu_cpumask_attrs[] = {
        NULL,
 };
 
-static struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
+static const struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
        .attrs = arm_ccn_pmu_cpumask_attrs,
 };
 
@@ -1268,10 +1268,12 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
        if (ccn->dt.id == 0) {
                name = "ccn";
        } else {
-               int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);
-
-               name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
-               snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
+               name = devm_kasprintf(ccn->dev, GFP_KERNEL, "ccn_%d",
+                                     ccn->dt.id);
+               if (!name) {
+                       err = -ENOMEM;
+                       goto error_choose_name;
+               }
        }
 
        /* Perf driver registration */
@@ -1298,7 +1300,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
        }
 
        /* Pick one CPU which we will use to collect data from CCN... */
-       cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
+       cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
 
        /* Also make sure that the overflow interrupt is handled by this CPU */
        if (ccn->irq) {
@@ -1315,10 +1317,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
 
        cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
                                         &ccn->dt.node);
+       put_cpu();
        return 0;
 
 error_pmu_register:
 error_set_affinity:
+       put_cpu();
+error_choose_name:
        ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
        for (i = 0; i < ccn->num_xps; i++)
                writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
@@ -1581,8 +1586,8 @@ static int __init arm_ccn_init(void)
 
 static void __exit arm_ccn_exit(void)
 {
-       cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
        platform_driver_unregister(&arm_ccn_driver);
+       cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
 }
 
 module_init(arm_ccn_init);
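
Two small patterns in the arm-ccn hunks are worth spelling out: devm_kasprintf() replaces the measure/allocate/format dance (and its result still needs a NULL check), and the exit path now unregisters the platform driver before removing the hotplug state so no instance can race with the teardown. A hedged sketch of the allocation side only, with an invented helper name:

#include <linux/device.h>
#include <linux/gfp.h>

static const char *make_pmu_name(struct device *dev, int id)
{
	/* devm_kasprintf() sizes, allocates and formats in one call; the
	 * buffer is released automatically when the device goes away. */
	return id ? devm_kasprintf(dev, GFP_KERNEL, "ccn_%d", id) : "ccn";
}
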
index 1a0385ed64171c2e62cc16c36b9a546a9bc24ed1..839ee61d352a218321ffedf3de339c51d02fbf74 100644 (file)
@@ -74,7 +74,7 @@
 #endif                         /* TRACING */
 
 static DEFINE_MUTEX(dtlk_mutex);
-static void dtlk_timer_tick(unsigned long data);
+static void dtlk_timer_tick(struct timer_list *unused);
 
 static int dtlk_major;
 static int dtlk_port_lpc;
@@ -259,7 +259,7 @@ static unsigned int dtlk_poll(struct file *file, poll_table * wait)
        return mask;
 }
 
-static void dtlk_timer_tick(unsigned long data)
+static void dtlk_timer_tick(struct timer_list *unused)
 {
        TRACE_TEXT(" dtlk_timer_tick");
        wake_up_interruptible(&dtlk_process_list);
index 5b8db2ed844d337ad51cc1868d2d34213f03dfe8..7700280717f28803a37a402b36fc48d0c3e08e56 100644 (file)
@@ -122,11 +122,11 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
 /* Last time scheduled */
 static unsigned long long hangcheck_tsc, hangcheck_tsc_margin;
 
-static void hangcheck_fire(unsigned long);
+static void hangcheck_fire(struct timer_list *);
 
 static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire);
 
-static void hangcheck_fire(unsigned long data)
+static void hangcheck_fire(struct timer_list *unused)
 {
        unsigned long long cur_tsc, tsc_diff;
 
index c4ef73c6f45538bc014a98492e2d077cd0c356d5..6edfaa72b98bb76d18ca25e193db2eb5f1d7a427 100644 (file)
@@ -367,9 +367,9 @@ static const struct file_operations bt_bmc_fops = {
        .unlocked_ioctl = bt_bmc_ioctl,
 };
 
-static void poll_timer(unsigned long data)
+static void poll_timer(struct timer_list *t)
 {
-       struct bt_bmc *bt_bmc = (void *)data;
+       struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer);
 
        bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
        wake_up(&bt_bmc->queue);
@@ -487,8 +487,7 @@ static int bt_bmc_probe(struct platform_device *pdev)
                dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
        } else {
                dev_info(dev, "No IRQ; using timer\n");
-               setup_timer(&bt_bmc->poll_timer, poll_timer,
-                           (unsigned long)bt_bmc);
+               timer_setup(&bt_bmc->poll_timer, poll_timer, 0);
                bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
                add_timer(&bt_bmc->poll_timer);
        }
index 9de189db2cc3c6486884decb5073d4c1e3830707..f45732a2cb3e06d4830fd27b1f926a0b8ef2d0ca 100644 (file)
@@ -4766,7 +4766,7 @@ static struct timer_list ipmi_timer;
 
 static atomic_t stop_operation;
 
-static void ipmi_timeout(unsigned long data)
+static void ipmi_timeout(struct timer_list *unused)
 {
        ipmi_smi_t intf;
        int nt = 0;
@@ -5172,7 +5172,7 @@ static int ipmi_init_msghandler(void)
 
 #endif /* CONFIG_IPMI_PROC_INTERFACE */
 
-       setup_timer(&ipmi_timer, ipmi_timeout, 0);
+       timer_setup(&ipmi_timer, ipmi_timeout, 0);
        mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 
        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
index 71d33a1807e46fabd27984e4bcafe993e9ef0f13..71fad747c0c7c1052cc19ee3bad0568b4a80c55b 100644 (file)
@@ -199,6 +199,9 @@ struct smi_info {
        /* The timer for this si. */
        struct timer_list   si_timer;
 
+       /* This flag is set, if the timer can be set */
+       bool                timer_can_start;
+
        /* This flag is set, if the timer is running (timer_pending() isn't enough) */
        bool                timer_running;
 
@@ -355,6 +358,8 @@ out:
 
 static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
 {
+       if (!smi_info->timer_can_start)
+               return;
        smi_info->last_timeout_jiffies = jiffies;
        mod_timer(&smi_info->si_timer, new_val);
        smi_info->timer_running = true;
@@ -374,21 +379,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
        smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
 }
 
-static void start_check_enables(struct smi_info *smi_info, bool start_timer)
+static void start_check_enables(struct smi_info *smi_info)
 {
        unsigned char msg[2];
 
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
-       if (start_timer)
-               start_new_msg(smi_info, msg, 2);
-       else
-               smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+       start_new_msg(smi_info, msg, 2);
        smi_info->si_state = SI_CHECKING_ENABLES;
 }
 
-static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
+static void start_clear_flags(struct smi_info *smi_info)
 {
        unsigned char msg[3];
 
@@ -397,10 +399,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
        msg[2] = WDT_PRE_TIMEOUT_INT;
 
-       if (start_timer)
-               start_new_msg(smi_info, msg, 3);
-       else
-               smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+       start_new_msg(smi_info, msg, 3);
        smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
@@ -435,11 +434,11 @@ static void start_getting_events(struct smi_info *smi_info)
  * Note that we cannot just use disable_irq(), since the interrupt may
  * be shared.
  */
-static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
+static inline bool disable_si_irq(struct smi_info *smi_info)
 {
        if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
                smi_info->interrupt_disabled = true;
-               start_check_enables(smi_info, start_timer);
+               start_check_enables(smi_info);
                return true;
        }
        return false;
@@ -449,7 +448,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
 {
        if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
                smi_info->interrupt_disabled = false;
-               start_check_enables(smi_info, true);
+               start_check_enables(smi_info);
                return true;
        }
        return false;
@@ -467,7 +466,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
 
        msg = ipmi_alloc_smi_msg();
        if (!msg) {
-               if (!disable_si_irq(smi_info, true))
+               if (!disable_si_irq(smi_info))
                        smi_info->si_state = SI_NORMAL;
        } else if (enable_si_irq(smi_info)) {
                ipmi_free_smi_msg(msg);
@@ -483,7 +482,7 @@ retry:
                /* Watchdog pre-timeout */
                smi_inc_stat(smi_info, watchdog_pretimeouts);
 
-               start_clear_flags(smi_info, true);
+               start_clear_flags(smi_info);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
                if (smi_info->intf)
                        ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -866,7 +865,7 @@ restart:
                 * disable and messages disabled.
                 */
                if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
-                       start_check_enables(smi_info, true);
+                       start_check_enables(smi_info);
                } else {
                        smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
                        if (!smi_info->curr_msg)
@@ -1091,9 +1090,9 @@ static void set_need_watch(void *send_info, bool enable)
        spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
 
-static void smi_timeout(unsigned long data)
+static void smi_timeout(struct timer_list *t)
 {
-       struct smi_info   *smi_info = (struct smi_info *) data;
+       struct smi_info   *smi_info = from_timer(smi_info, t, si_timer);
        enum si_sm_result smi_result;
        unsigned long     flags;
        unsigned long     jiffies_now;
@@ -1166,7 +1165,8 @@ static int smi_start_processing(void       *send_info,
        new_smi->intf = intf;
 
        /* Set up the timer that drives the interface. */
-       setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+       timer_setup(&new_smi->si_timer, smi_timeout, 0);
+       new_smi->timer_can_start = true;
        smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
 
        /* Try to claim any interrupts. */
@@ -1936,10 +1936,12 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
        check_set_rcv_irq(smi_info);
 }
 
-static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
+static inline void stop_timer_and_thread(struct smi_info *smi_info)
 {
        if (smi_info->thread != NULL)
                kthread_stop(smi_info->thread);
+
+       smi_info->timer_can_start = false;
        if (smi_info->timer_running)
                del_timer_sync(&smi_info->si_timer);
 }
@@ -2152,7 +2154,7 @@ static int try_smi_init(struct smi_info *new_smi)
         * Start clearing the flags before we enable interrupts or the
         * timer to avoid racing with the timer.
         */
-       start_clear_flags(new_smi, false);
+       start_clear_flags(new_smi);
 
        /*
         * IRQ is defined to be set when non-zero.  req_events will
@@ -2238,7 +2240,7 @@ out_err_remove_attrs:
        dev_set_drvdata(new_smi->io.dev, NULL);
 
 out_err_stop_timer:
-       wait_for_timer_and_thread(new_smi);
+       stop_timer_and_thread(new_smi);
 
 out_err:
        new_smi->interrupt_disabled = true;
@@ -2388,7 +2390,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
         */
        if (to_clean->io.irq_cleanup)
                to_clean->io.irq_cleanup(&to_clean->io);
-       wait_for_timer_and_thread(to_clean);
+       stop_timer_and_thread(to_clean);
 
        /*
         * Timeouts are stopped, now make sure the interrupts are off
@@ -2400,7 +2402,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
                schedule_timeout_uninterruptible(1);
        }
        if (to_clean->handlers)
-               disable_si_irq(to_clean, false);
+               disable_si_irq(to_clean);
        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                poll(to_clean);
                schedule_timeout_uninterruptible(1);
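
The new timer_can_start flag closes a shutdown race: smi_mod_timer() refuses to arm the timer once teardown has begun, and the teardown path clears the flag before del_timer_sync() so nothing can re-add the timer in between. The ordering, reduced to a sketch with a hypothetical my_info type:

#include <linux/timer.h>

struct my_info {
	struct timer_list timer;
	bool timer_can_start;
};

static void my_stop_timer(struct my_info *info)
{
	info->timer_can_start = false;	/* arm helper now refuses to mod_timer() */
	del_timer_sync(&info->timer);	/* wait out any handler already running */
}
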
index 090b073ab441961f5c530dd9f453f1f1290e5a08..6b10f0e18a95d7f846edd78d22e392ec0e10cc69 100644 (file)
@@ -10,6 +10,8 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev)
 {
        struct si_sm_io io;
 
+       memset(&io, 0, sizeof(io));
+
        io.si_type      = SI_KCS;
        io.addr_source  = SI_DEVICETREE;
        io.addr_type    = IPMI_MEM_ADDR_SPACE;
index 99771f5cad07a7b25285f33cf0073739ca27e4ae..27dd11c49d2197aa098426ca2992565f75e45a32 100644 (file)
@@ -103,10 +103,13 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
        io.addr_source_cleanup = ipmi_pci_cleanup;
        io.addr_source_data = pdev;
 
-       if (pci_resource_flags(pdev, 0) & IORESOURCE_IO)
+       if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
                io.addr_type = IPMI_IO_ADDR_SPACE;
-       else
+               io.io_setup = ipmi_si_port_setup;
+       } else {
                io.addr_type = IPMI_MEM_ADDR_SPACE;
+               io.io_setup = ipmi_si_mem_setup;
+       }
        io.addr_data = pci_resource_start(pdev, 0);
 
        io.regspacing = ipmi_pci_probe_regspacing(&io);
index 466b3a1c0adfd3fe419ee57338d279d18436eb7e..3cfaec728604d1956c8e8c9e21922e0bee9349c6 100644 (file)
@@ -551,9 +551,9 @@ static void start_get(struct ssif_info *ssif_info)
        }
 }
 
-static void retry_timeout(unsigned long data)
+static void retry_timeout(struct timer_list *t)
 {
-       struct ssif_info *ssif_info = (void *) data;
+       struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
        unsigned long oflags, *flags;
        bool waiting;
 
@@ -1691,8 +1691,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
        spin_lock_init(&ssif_info->lock);
        ssif_info->ssif_state = SSIF_NORMAL;
-       setup_timer(&ssif_info->retry_timer, retry_timeout,
-                   (unsigned long)ssif_info);
+       timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
 
        for (i = 0; i < SSIF_NUM_STATS; i++)
                atomic_set(&ssif_info->stats[i], 0);
index 970e1242a282a097405cf30d0c87e7518bca67f4..6aefe5370e5b15c45bda8e156ccdfd1b99e9e3f5 100644 (file)
@@ -343,6 +343,10 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 
+       /* Does it even fit in phys_addr_t? */
+       if (offset >> PAGE_SHIFT != vma->vm_pgoff)
+               return -EINVAL;
+
        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;
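
The added check catches page offsets whose byte address does not fit in phys_addr_t: shifting back after the cast must reproduce vm_pgoff, otherwise the cast truncated. A worked example, assuming a 32-bit phys_addr_t and PAGE_SHIFT == 12 purely for illustration:

#include <linux/types.h>

static bool pgoff_fits_phys(unsigned long pgoff)
{
	phys_addr_t offset = (phys_addr_t)pgoff << 12;

	/* pgoff = 0x100000 -> offset 0x1_0000_0000, which truncates to 0 in
	 * 32 bits; shifting back gives 0 != pgoff, so the mmap is rejected. */
	return (offset >> 12) == pgoff;
}
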
index 44006ed9558f20690bb71b9f96a0ca382a996d93..a7113b78251a52754aa25d20fc2be7ed1c3abb96 100644 (file)
@@ -23,7 +23,7 @@
 #define __NWBUTTON_C           /* Tell the header file who we are */
 #include "nwbutton.h"
 
-static void button_sequence_finished (unsigned long parameters);
+static void button_sequence_finished(struct timer_list *unused);
 
 static int button_press_count;         /* The count of button presses */
 /* Times for the end of a sequence */
@@ -127,7 +127,7 @@ static void button_consume_callbacks (int bpcount)
  * any matching registered function callbacks, initiate reboot, etc.).
  */
 
-static void button_sequence_finished (unsigned long parameters)
+static void button_sequence_finished(struct timer_list *unused)
 {
        if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) &&
            button_press_count == reboot_count)
index abee3ca748019bb8c5c71a9f547868b49584f17d..9dedfd7adc0e7f8b14d410454ae1b10abfbe80cc 100644 (file)
@@ -25,7 +25,7 @@ struct button_callback {
 
 /* Function prototypes: */
 
-static void button_sequence_finished (unsigned long parameters);
+static void button_sequence_finished(struct timer_list *unused);
 static irqreturn_t button_handler (int irq, void *dev_id);
 int button_init (void);
 int button_add_callback (void (*callback) (void), int count);
index 616871e68e0901e147686ca6b3028151636fa8f8..5542a438bbd0ba50202917f874abe95c39e09762 100644 (file)
@@ -135,7 +135,7 @@ static struct fasync_struct *rtc_async_queue;
 static DECLARE_WAIT_QUEUE_HEAD(rtc_wait);
 
 #ifdef RTC_IRQ
-static void rtc_dropped_irq(unsigned long data);
+static void rtc_dropped_irq(struct timer_list *unused);
 
 static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq);
 #endif
@@ -1171,7 +1171,7 @@ module_exit(rtc_exit);
  *     for something that requires a steady > 1KHz signal anyways.)
  */
 
-static void rtc_dropped_irq(unsigned long data)
+static void rtc_dropped_irq(struct timer_list *unused)
 {
        unsigned long freq;
 
index 461bf0b8a09473dbadc89b9259473dc36be2cd77..230b99288024994b800c3e9adb42dfc8c0ea4c05 100644 (file)
@@ -22,9 +22,9 @@
 #include "tpm.h"
 #include "tpm-dev.h"
 
-static void user_reader_timeout(unsigned long ptr)
+static void user_reader_timeout(struct timer_list *t)
 {
-       struct file_priv *priv = (struct file_priv *)ptr;
+       struct file_priv *priv = from_timer(priv, t, user_read_timer);
 
        pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
                task_tgid_nr(current));
@@ -48,8 +48,7 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip,
        priv->chip = chip;
        atomic_set(&priv->data_pending, 0);
        mutex_init(&priv->buffer_mutex);
-       setup_timer(&priv->user_read_timer, user_reader_timeout,
-                       (unsigned long)priv);
+       timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
        INIT_WORK(&priv->work, timeout_work);
 
        file->private_data = priv;
index 7c64a5c1bfc13bb536bdefc1f3dab53273e315fb..a31990408153d13e33211363e2d4e3442f8b9d8e 100644 (file)
@@ -177,7 +177,14 @@ out_fail:
        return ret;
 }
 
-void timer_of_exit(struct timer_of *to)
+/**
+ * timer_of_cleanup - release timer_of resources
+ * @to: timer_of structure
+ *
+ * Release the resources that have been used in timer_of_init().
+ * This function should be called from init error paths.
+ */
+void __init timer_of_cleanup(struct timer_of *to)
 {
        if (to->flags & TIMER_OF_IRQ)
                timer_irq_exit(&to->of_irq);
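
Renaming timer_of_exit() to timer_of_cleanup() (and marking it __init) makes its purpose explicit: it only unwinds timer_of_init() on an init-time error, it is not a general exit hook. A hypothetical caller, where my_hw_setup() is an assumed helper:

#include <linux/of.h>
#include "timer-of.h"

static int __init my_timer_init(struct device_node *np)
{
	static struct timer_of to = {
		.flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
	};
	int ret;

	ret = timer_of_init(np, &to);
	if (ret)
		return ret;

	ret = my_hw_setup(&to);
	if (ret)
		timer_of_cleanup(&to);	/* release only what timer_of_init() took */

	return ret;
}
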
index 43f5ba3f8979d9005c09247f5cb87583bef773bf..3f708f1be43d8671359ab5b9d0bc7934b94aec5b 100644 (file)
@@ -68,6 +68,6 @@ static inline unsigned long timer_of_period(struct timer_of *to)
 extern int __init timer_of_init(struct device_node *np,
                                struct timer_of *to);
 
-extern void timer_of_exit(struct timer_of *to);
+extern void __init timer_of_cleanup(struct timer_of *to);
 
 #endif
index 4ebae43118effe98f4763618cbd0777060e8e134..d8addbce40bcc4f9c6a29e32c98cd0c15bac15b4 100644 (file)
@@ -275,6 +275,7 @@ config BMIPS_CPUFREQ
 
 config LOONGSON2_CPUFREQ
        tristate "Loongson2 CPUFreq Driver"
+       depends on LEMOTE_MACH2F
        help
          This option adds a CPUFreq driver for loongson processors which
          support software configurable cpu frequency.
@@ -287,6 +288,7 @@ config LOONGSON2_CPUFREQ
 
 config LOONGSON1_CPUFREQ
        tristate "Loongson1 CPUFreq Driver"
+       depends on LOONGSON1_LS1B
        help
          This option adds a CPUFreq driver for loongson1 processors which
          support software configurable cpu frequency.
index 58d4f4e1ad6a907991873a03027e6c7aa2f31fc4..ca38229b045ab288a2f250dddaf1b174e8c0572f 100644 (file)
@@ -22,6 +22,8 @@
 
 #include "cpufreq_governor.h"
 
+#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL      (2 * TICK_NSEC / NSEC_PER_USEC)
+
 static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
 
 static DEFINE_MUTEX(gov_dbs_data_mutex);
@@ -47,11 +49,15 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
 {
        struct dbs_data *dbs_data = to_dbs_data(attr_set);
        struct policy_dbs_info *policy_dbs;
+       unsigned int sampling_interval;
        int ret;
-       ret = sscanf(buf, "%u", &dbs_data->sampling_rate);
-       if (ret != 1)
+
+       ret = sscanf(buf, "%u", &sampling_interval);
+       if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
                return -EINVAL;
 
+       dbs_data->sampling_rate = sampling_interval;
+
        /*
         * We are operating under dbs_data->mutex and so the list and its
         * entries can't be freed concurrently.
@@ -430,7 +436,14 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
        if (ret)
                goto free_policy_dbs_info;
 
-       dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy);
+       /*
+        * The sampling interval should not be less than the transition latency
+        * of the CPU and it also cannot be too small for dbs_update() to work
+        * correctly.
+        */
+       dbs_data->sampling_rate = max_t(unsigned int,
+                                       CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
+                                       cpufreq_policy_transition_delay_us(policy));
 
        if (!have_governor_per_policy())
                gov->gdbs_data = dbs_data;
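
The new floor protects dbs_update() on both paths: sysfs writes below it are rejected and the initial rate derived from the policy's transition delay is clamped up to it. With HZ == 250 (an illustrative value), TICK_NSEC is roughly 4,000,000 ns, so the floor works out to two scheduler ticks:

#include <linux/jiffies.h>	/* TICK_NSEC */
#include <linux/time64.h>	/* NSEC_PER_USEC */

/* 2 * 4000000 / 1000 = 8000 us, i.e. 8 ms at HZ == 250 */
#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL	(2 * TICK_NSEC / NSEC_PER_USEC)
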
index 628fe899cb483da9dbf0f7661b537734bc82f784..d9b2c2de49c43f125c91b382f818ff81d0ffc6ac 100644 (file)
@@ -226,17 +226,18 @@ static void imx6q_opp_check_speed_grading(struct device *dev)
        val >>= OCOTP_CFG3_SPEED_SHIFT;
        val &= 0x3;
 
-       if ((val != OCOTP_CFG3_SPEED_1P2GHZ) &&
-            of_machine_is_compatible("fsl,imx6q"))
-               if (dev_pm_opp_disable(dev, 1200000000))
-                       dev_warn(dev, "failed to disable 1.2GHz OPP\n");
        if (val < OCOTP_CFG3_SPEED_996MHZ)
                if (dev_pm_opp_disable(dev, 996000000))
                        dev_warn(dev, "failed to disable 996MHz OPP\n");
-       if (of_machine_is_compatible("fsl,imx6q")) {
+
+       if (of_machine_is_compatible("fsl,imx6q") ||
+           of_machine_is_compatible("fsl,imx6qp")) {
                if (val != OCOTP_CFG3_SPEED_852MHZ)
                        if (dev_pm_opp_disable(dev, 852000000))
                                dev_warn(dev, "failed to disable 852MHz OPP\n");
+               if (val != OCOTP_CFG3_SPEED_1P2GHZ)
+                       if (dev_pm_opp_disable(dev, 1200000000))
+                               dev_warn(dev, "failed to disable 1.2GHz OPP\n");
        }
        iounmap(base);
 put_node:
index 18c4bd9a5c6564776c7ac5f35e259daae5662f48..e0d5090b303dd3840ddb2a53d2481d6ba6bacf50 100644 (file)
@@ -620,3 +620,7 @@ static int __init mtk_cpufreq_driver_init(void)
        return 0;
 }
 device_initcall(mtk_cpufreq_driver_init);
+
+MODULE_DESCRIPTION("MediaTek CPUFreq driver");
+MODULE_AUTHOR("Pi-Cheng Chen <pi-cheng.chen@linaro.org>");
+MODULE_LICENSE("GPL v2");
index 6833ada237ab7d94540671d811f1dbcde2bb59db..7b0bf825c4e73c588ff93183cf5315665d69e082 100644 (file)
@@ -428,9 +428,21 @@ static int dev_dax_fault(struct vm_fault *vmf)
        return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
 }
 
+static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
+{
+       struct file *filp = vma->vm_file;
+       struct dev_dax *dev_dax = filp->private_data;
+       struct dax_region *dax_region = dev_dax->region;
+
+       if (!IS_ALIGNED(addr, dax_region->align))
+               return -EINVAL;
+       return 0;
+}
+
 static const struct vm_operations_struct dax_vm_ops = {
        .fault = dev_dax_fault,
        .huge_fault = dev_dax_huge_fault,
+       .split = dev_dax_split,
 };
 
 static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
index fbab271b3bf9f9506c86579c75ebe32fc3235228..a861b5b4d4437d6b3be7dcf5e9d0b3475205455d 100644 (file)
@@ -708,7 +708,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
                         unsigned long flags)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
-       struct data_chunk       *first = xt->sgl;
+       struct data_chunk       *first;
        struct at_desc          *desc = NULL;
        size_t                  xfer_count;
        unsigned int            dwidth;
@@ -720,6 +720,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
        if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
                return NULL;
 
+       first = xt->sgl;
+
        dev_info(chan2dev(chan),
                 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
                __func__, &xt->src_start, &xt->dst_start, xt->numf,
index d50273fed715096ac625382f6c511f537da57bf4..afd5e10f8927cb0c5573bb946a48755aad58b0aa 100644 (file)
@@ -555,7 +555,7 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 
        ret = dma_async_device_register(dd);
        if (ret)
-               return ret;
+               goto err_clk;
 
        irq = platform_get_irq(pdev, 0);
        ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
@@ -568,6 +568,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 
 err_unregister:
        dma_async_device_unregister(dd);
+err_clk:
+       clk_disable_unprepare(dmadev->clk);
        return ret;
 }
 
index 47edc7fbf91f52e5259060824c38eaab69ebdb56..ec5f9d2bc8202f340c615cbe43731016d316d547 100644 (file)
@@ -155,6 +155,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
 #define PATTERN_COUNT_MASK     0x1f
 #define PATTERN_MEMSET_IDX     0x01
 
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+       bool                    done;
+       wait_queue_head_t       *wait;
+};
+
 struct dmatest_thread {
        struct list_head        node;
        struct dmatest_info     *info;
@@ -165,6 +171,8 @@ struct dmatest_thread {
        u8                      **dsts;
        u8                      **udsts;
        enum dma_transaction_type type;
+       wait_queue_head_t done_wait;
+       struct dmatest_done test_done;
        bool                    done;
 };
 
@@ -342,18 +350,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
        return error_count;
 }
 
-/* poor man's completion - we want to use wait_event_freezable() on it */
-struct dmatest_done {
-       bool                    done;
-       wait_queue_head_t       *wait;
-};
 
 static void dmatest_callback(void *arg)
 {
        struct dmatest_done *done = arg;
-
-       done->done = true;
-       wake_up_all(done->wait);
+       struct dmatest_thread *thread =
+               container_of(arg, struct dmatest_thread, done_wait);
+       if (!thread->done) {
+               done->done = true;
+               wake_up_all(done->wait);
+       } else {
+               /*
+                * If thread->done, it means that this callback occurred
+                * after the parent thread has cleaned up. This can
+                * happen in the case that the driver doesn't implement
+                * the terminate_all() functionality and a DMA operation
+                * did not occur within the timeout period.
+                */
+               WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
+       }
 }
 
 static unsigned int min_odd(unsigned int x, unsigned int y)
@@ -424,9 +439,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
  */
 static int dmatest_func(void *data)
 {
-       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
        struct dmatest_thread   *thread = data;
-       struct dmatest_done     done = { .wait = &done_wait };
+       struct dmatest_done     *done = &thread->test_done;
        struct dmatest_info     *info;
        struct dmatest_params   *params;
        struct dma_chan         *chan;
@@ -673,9 +687,9 @@ static int dmatest_func(void *data)
                        continue;
                }
 
-               done.done = false;
+               done->done = false;
                tx->callback = dmatest_callback;
-               tx->callback_param = &done;
+               tx->callback_param = done;
                cookie = tx->tx_submit(tx);
 
                if (dma_submit_error(cookie)) {
@@ -688,21 +702,12 @@ static int dmatest_func(void *data)
                }
                dma_async_issue_pending(chan);
 
-               wait_event_freezable_timeout(done_wait, done.done,
+               wait_event_freezable_timeout(thread->done_wait, done->done,
                                             msecs_to_jiffies(params->timeout));
 
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-               if (!done.done) {
-                       /*
-                        * We're leaving the timed out dma operation with
-                        * dangling pointer to done_wait.  To make this
-                        * correct, we'll need to allocate wait_done for
-                        * each test iteration and perform "who's gonna
-                        * free it this time?" dancing.  For now, just
-                        * leave it dangling.
-                        */
-                       WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
+               if (!done->done) {
                        dmaengine_unmap_put(um);
                        result("test timed out", total_tests, src_off, dst_off,
                               len, 0);
@@ -789,7 +794,7 @@ err_thread_type:
                dmatest_KBs(runtime, total_len), ret);
 
        /* terminate all transfers on specified channels */
-       if (ret)
+       if (ret || failed_tests)
                dmaengine_terminate_all(chan);
 
        thread->done = true;
@@ -849,6 +854,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
                thread->info = info;
                thread->chan = dtc->chan;
                thread->type = type;
+               thread->test_done.wait = &thread->done_wait;
+               init_waitqueue_head(&thread->done_wait);
                smp_wmb();
                thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
                                dma_chan_name(chan), op, i);
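
The dmatest rework moves the completion flag and its wait queue off dmatest_func()'s stack and into the long-lived thread object, so a DMA callback that fires only after the timeout still touches valid memory instead of a dead stack frame. Roughly, the ownership now looks like this (sketch with an invented struct name; dmatest_done is the structure from the hunk above):

#include <linux/sched.h>
#include <linux/wait.h>

struct my_test_thread {
	struct task_struct	*task;
	wait_queue_head_t	done_wait;	/* lives as long as the thread */
	struct dmatest_done	test_done;	/* .wait points at done_wait */
	bool			done;
};
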
index 6775f2c74e25b7269417bbe001adfb03698dea97..c7568869284e17d4b63379b236a0f30391640820 100644 (file)
@@ -863,11 +863,11 @@ static void fsl_edma_irq_exit(
        }
 }
 
-static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma)
+static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
 {
        int i;
 
-       for (i = 0; i < DMAMUX_NR; i++)
+       for (i = 0; i < nr_clocks; i++)
                clk_disable_unprepare(fsl_edma->muxclk[i]);
 }
 
@@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev)
 
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
                fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
-               if (IS_ERR(fsl_edma->muxbase[i]))
+               if (IS_ERR(fsl_edma->muxbase[i])) {
+                       /* on error: disable all previously enabled clks */
+                       fsl_disable_clocks(fsl_edma, i);
                        return PTR_ERR(fsl_edma->muxbase[i]);
+               }
 
                sprintf(clkname, "dmamux%d", i);
                fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
                if (IS_ERR(fsl_edma->muxclk[i])) {
                        dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
+                       /* on error: disable all previously enabled clks */
+                       fsl_disable_clocks(fsl_edma, i);
                        return PTR_ERR(fsl_edma->muxclk[i]);
                }
 
                ret = clk_prepare_enable(fsl_edma->muxclk[i]);
-               if (ret) {
-                       /* disable only clks which were enabled on error */
-                       for (; i >= 0; i--)
-                               clk_disable_unprepare(fsl_edma->muxclk[i]);
-
-                       dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
-                       return ret;
-               }
+               if (ret)
+                       /* on error: disable all previously enabled clks */
+                       fsl_disable_clocks(fsl_edma, i);
 
        }
 
@@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev,
                        "Can't register Freescale eDMA engine. (%d)\n", ret);
-               fsl_disable_clocks(fsl_edma);
+               fsl_disable_clocks(fsl_edma, DMAMUX_NR);
                return ret;
        }
 
@@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
                dev_err(&pdev->dev,
                        "Can't register Freescale eDMA of_dma. (%d)\n", ret);
                dma_async_device_unregister(&fsl_edma->dma_dev);
-               fsl_disable_clocks(fsl_edma);
+               fsl_disable_clocks(fsl_edma, DMAMUX_NR);
                return ret;
        }
 
@@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev)
        fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
        of_dma_controller_free(np);
        dma_async_device_unregister(&fsl_edma->dma_dev);
-       fsl_disable_clocks(fsl_edma);
+       fsl_disable_clocks(fsl_edma, DMAMUX_NR);
 
        return 0;
 }
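
Passing the loop index into fsl_disable_clocks() lets every error path undo exactly the clocks enabled so far, rather than only cleaning up after the final registration failures. The general unwind rule, as a standalone sketch with hypothetical names:

#include <linux/clk.h>

static int enable_all_clks(struct clk **clks, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = clk_prepare_enable(clks[i]);
		if (ret) {
			while (i--)			/* undo clks[0..i-1] */
				clk_disable_unprepare(clks[i]);
			return ret;
		}
	}

	return 0;
}
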
index 2f31d3d0caa61821aa08aea360e06709bdb25d48..7792a9186f9cf35bae71792e5e0783cf53364b05 100644 (file)
@@ -390,7 +390,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
        if (memcmp(src, dest, IOAT_TEST_SIZE)) {
                dev_err(dev, "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
-               goto free_resources;
+               goto unmap_dma;
        }
 
 unmap_dma:
index dfb373c8ba2a49629628dd33f1a36fd41a270b7b..7da9f1b83ebecf3641da1e87f35e5e9ec6621df8 100644 (file)
@@ -28,7 +28,6 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/bitmap.h>
-#include <linux/bitfield.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/export.h>
 
 #define MAX_DVFS_DOMAINS       8
 #define MAX_DVFS_OPPS          16
-
-#define PROTO_REV_MAJOR_MASK   GENMASK(31, 16)
-#define PROTO_REV_MINOR_MASK   GENMASK(15, 0)
-
-#define FW_REV_MAJOR_MASK      GENMASK(31, 24)
-#define FW_REV_MINOR_MASK      GENMASK(23, 16)
-#define FW_REV_PATCH_MASK      GENMASK(15, 0)
+#define DVFS_LATENCY(hdr)      (le32_to_cpu(hdr) >> 16)
+#define DVFS_OPP_COUNT(hdr)    ((le32_to_cpu(hdr) >> 8) & 0xff)
+
+#define PROTOCOL_REV_MINOR_BITS        16
+#define PROTOCOL_REV_MINOR_MASK        ((1U << PROTOCOL_REV_MINOR_BITS) - 1)
+#define PROTOCOL_REV_MAJOR(x)  ((x) >> PROTOCOL_REV_MINOR_BITS)
+#define PROTOCOL_REV_MINOR(x)  ((x) & PROTOCOL_REV_MINOR_MASK)
+
+#define FW_REV_MAJOR_BITS      24
+#define FW_REV_MINOR_BITS      16
+#define FW_REV_PATCH_MASK      ((1U << FW_REV_MINOR_BITS) - 1)
+#define FW_REV_MINOR_MASK      ((1U << FW_REV_MAJOR_BITS) - 1)
+#define FW_REV_MAJOR(x)                ((x) >> FW_REV_MAJOR_BITS)
+#define FW_REV_MINOR(x)                (((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS)
+#define FW_REV_PATCH(x)                ((x) & FW_REV_PATCH_MASK)
 
 #define MAX_RX_TIMEOUT         (msecs_to_jiffies(30))
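
The FIELD_GET()/GENMASK() pair is replaced by open-coded shift-and-mask macros, but the bit layout is unchanged: protocol major in bits 31:16 and minor in 15:0; firmware major in 31:24, minor in 23:16 and patch in 15:0 (FW_REV_PATCH_MASK being built from FW_REV_MINOR_BITS, and FW_REV_MINOR_MASK from FW_REV_MAJOR_BITS, reads oddly but yields exactly those fields). A standalone check of the arithmetic with made-up version words:

    #include <stdio.h>
    #include <stdint.h>

    #define PROTOCOL_REV_MINOR_BITS 16
    #define PROTOCOL_REV_MINOR_MASK ((1U << PROTOCOL_REV_MINOR_BITS) - 1)
    #define PROTOCOL_REV_MAJOR(x)   ((x) >> PROTOCOL_REV_MINOR_BITS)
    #define PROTOCOL_REV_MINOR(x)   ((x) & PROTOCOL_REV_MINOR_MASK)

    #define FW_REV_MAJOR_BITS 24
    #define FW_REV_MINOR_BITS 16
    #define FW_REV_PATCH_MASK ((1U << FW_REV_MINOR_BITS) - 1)
    #define FW_REV_MINOR_MASK ((1U << FW_REV_MAJOR_BITS) - 1)
    #define FW_REV_MAJOR(x)   ((x) >> FW_REV_MAJOR_BITS)
    #define FW_REV_MINOR(x)   (((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS)
    #define FW_REV_PATCH(x)   ((x) & FW_REV_PATCH_MASK)

    int main(void)
    {
        uint32_t proto = 0x00010002;  /* made-up: protocol 1.2 */
        uint32_t fw    = 0x01020003;  /* made-up: firmware 1.2.3 */

        printf("protocol %u.%u\n", (unsigned)PROTOCOL_REV_MAJOR(proto),
               (unsigned)PROTOCOL_REV_MINOR(proto));
        printf("firmware %u.%u.%u\n", (unsigned)FW_REV_MAJOR(fw),
               (unsigned)FW_REV_MINOR(fw), (unsigned)FW_REV_PATCH(fw));
        return 0;  /* prints "protocol 1.2" and "firmware 1.2.3" */
    }
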
 
@@ -304,6 +311,10 @@ struct clk_get_info {
        u8 name[20];
 } __packed;
 
+struct clk_get_value {
+       __le32 rate;
+} __packed;
+
 struct clk_set_value {
        __le16 id;
        __le16 reserved;
@@ -317,9 +328,7 @@ struct legacy_clk_set_value {
 } __packed;
 
 struct dvfs_info {
-       u8 domain;
-       u8 opp_count;
-       __le16 latency;
+       __le32 header;
        struct {
                __le32 freq;
                __le32 m_volt;
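
struct dvfs_info now carries one packed __le32 header in place of the separate domain/opp_count/latency fields; DVFS_OPP_COUNT() pulls bits 15:8 and DVFS_LATENCY() bits 31:16, which matches the little-endian byte layout of the old struct (the domain id presumably still sits in the low byte, the driver just no longer reads it). A small standalone check with made-up values, dropping the le32_to_cpu() step:

    #include <stdio.h>
    #include <stdint.h>

    #define DVFS_LATENCY(hdr)   ((hdr) >> 16)          /* bits 31:16 */
    #define DVFS_OPP_COUNT(hdr) (((hdr) >> 8) & 0xff)  /* bits 15:8  */
    #define DVFS_DOMAIN(hdr)    ((hdr) & 0xff)         /* bits 7:0, for illustration only */

    int main(void)
    {
        /* made-up header: domain 3, 12 OPPs, 200 us latency */
        uint32_t hdr = 3u | (12u << 8) | (200u << 16);

        printf("domain=%u opps=%u latency=%u us\n",
               (unsigned)DVFS_DOMAIN(hdr),
               (unsigned)DVFS_OPP_COUNT(hdr),
               (unsigned)DVFS_LATENCY(hdr));
        return 0;  /* prints: domain=3 opps=12 latency=200 us */
    }
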
@@ -342,6 +351,11 @@ struct _scpi_sensor_info {
        char name[20];
 };
 
+struct sensor_value {
+       __le32 lo_val;
+       __le32 hi_val;
+} __packed;
+
 struct dev_pstate_set {
        __le16 dev_id;
        u8 pstate;
@@ -405,20 +419,19 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
                unsigned int len;
 
                if (scpi_info->is_legacy) {
-                       struct legacy_scpi_shared_mem __iomem *mem =
-                                                       ch->rx_payload;
+                       struct legacy_scpi_shared_mem *mem = ch->rx_payload;
 
                        /* RX Length is not replied by the legacy Firmware */
                        len = match->rx_len;
 
-                       match->status = ioread32(&mem->status);
+                       match->status = le32_to_cpu(mem->status);
                        memcpy_fromio(match->rx_buf, mem->payload, len);
                } else {
-                       struct scpi_shared_mem __iomem *mem = ch->rx_payload;
+                       struct scpi_shared_mem *mem = ch->rx_payload;
 
                        len = min(match->rx_len, CMD_SIZE(cmd));
 
-                       match->status = ioread32(&mem->status);
+                       match->status = le32_to_cpu(mem->status);
                        memcpy_fromio(match->rx_buf, mem->payload, len);
                }
 
@@ -432,11 +445,11 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
 static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
 {
        struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
-       struct scpi_shared_mem __iomem *mem = ch->rx_payload;
+       struct scpi_shared_mem *mem = ch->rx_payload;
        u32 cmd = 0;
 
        if (!scpi_info->is_legacy)
-               cmd = ioread32(&mem->command);
+               cmd = le32_to_cpu(mem->command);
 
        scpi_process_cmd(ch, cmd);
 }
@@ -446,7 +459,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
        unsigned long flags;
        struct scpi_xfer *t = msg;
        struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
-       struct scpi_shared_mem __iomem *mem = ch->tx_payload;
+       struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;
 
        if (t->tx_buf) {
                if (scpi_info->is_legacy)
@@ -465,7 +478,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
        }
 
        if (!scpi_info->is_legacy)
-               iowrite32(t->cmd, &mem->command);
+               mem->command = cpu_to_le32(t->cmd);
 }
 
 static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
@@ -570,13 +583,13 @@ scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
 static unsigned long scpi_clk_get_val(u16 clk_id)
 {
        int ret;
-       __le32 rate;
+       struct clk_get_value clk;
        __le16 le_clk_id = cpu_to_le16(clk_id);
 
        ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
-                               sizeof(le_clk_id), &rate, sizeof(rate));
+                               sizeof(le_clk_id), &clk, sizeof(clk));
 
-       return ret ? ret : le32_to_cpu(rate);
+       return ret ? ret : le32_to_cpu(clk.rate);
 }
 
 static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
@@ -631,35 +644,35 @@ static int opp_cmp_func(const void *opp1, const void *opp2)
 }
 
 static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
-{
-       if (domain >= MAX_DVFS_DOMAINS)
-               return ERR_PTR(-EINVAL);
-
-       return scpi_info->dvfs[domain] ?: ERR_PTR(-EINVAL);
-}
-
-static int scpi_dvfs_populate_info(struct device *dev, u8 domain)
 {
        struct scpi_dvfs_info *info;
        struct scpi_opp *opp;
        struct dvfs_info buf;
        int ret, i;
 
+       if (domain >= MAX_DVFS_DOMAINS)
+               return ERR_PTR(-EINVAL);
+
+       if (scpi_info->dvfs[domain])    /* data already populated */
+               return scpi_info->dvfs[domain];
+
        ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain),
                                &buf, sizeof(buf));
        if (ret)
-               return ret;
+               return ERR_PTR(ret);
 
-       info = devm_kmalloc(dev, sizeof(*info), GFP_KERNEL);
+       info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
-       info->count = buf.opp_count;
-       info->latency = le16_to_cpu(buf.latency) * 1000; /* uS to nS */
+       info->count = DVFS_OPP_COUNT(buf.header);
+       info->latency = DVFS_LATENCY(buf.header) * 1000; /* uS to nS */
 
-       info->opps = devm_kcalloc(dev, info->count, sizeof(*opp), GFP_KERNEL);
-       if (!info->opps)
-               return -ENOMEM;
+       info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL);
+       if (!info->opps) {
+               kfree(info);
+               return ERR_PTR(-ENOMEM);
+       }
 
        for (i = 0, opp = info->opps; i < info->count; i++, opp++) {
                opp->freq = le32_to_cpu(buf.opps[i].freq);
@@ -669,15 +682,7 @@ static int scpi_dvfs_populate_info(struct device *dev, u8 domain)
        sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL);
 
        scpi_info->dvfs[domain] = info;
-       return 0;
-}
-
-static void scpi_dvfs_populate(struct device *dev)
-{
-       int domain;
-
-       for (domain = 0; domain < MAX_DVFS_DOMAINS; domain++)
-               scpi_dvfs_populate_info(dev, domain);
+       return info;
 }
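
With scpi_dvfs_populate() gone, the per-domain OPP table is built lazily on first request, cached in scpi_info->dvfs[], and allocated with plain kmalloc()/kcalloc(), so the kcalloc() failure path has to free the outer allocation by hand. The same shape in isolation, with illustrative names rather than the driver's:

    #include <stdlib.h>

    #define MAX_DOMAINS 8

    struct opp_table {
        int count;
        int *opps;
    };

    static struct opp_table *cache[MAX_DOMAINS];

    static struct opp_table *get_table(unsigned int domain, int count)
    {
        struct opp_table *t;

        if (domain >= MAX_DOMAINS)
            return NULL;
        if (cache[domain])              /* already populated: reuse it */
            return cache[domain];

        t = malloc(sizeof(*t));
        if (!t)
            return NULL;
        t->count = count;
        t->opps = calloc(count, sizeof(*t->opps));
        if (!t->opps) {                 /* unwind the partial allocation */
            free(t);
            return NULL;
        }
        cache[domain] = t;
        return t;
    }
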
 
 static int scpi_dev_domain_id(struct device *dev)
@@ -708,6 +713,9 @@ static int scpi_dvfs_get_transition_latency(struct device *dev)
        if (IS_ERR(info))
                return PTR_ERR(info);
 
+       if (!info->latency)
+               return 0;
+
        return info->latency;
 }
 
@@ -768,19 +776,20 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
 static int scpi_sensor_get_value(u16 sensor, u64 *val)
 {
        __le16 id = cpu_to_le16(sensor);
-       __le64 value;
+       struct sensor_value buf;
        int ret;
 
        ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
-                               &value, sizeof(value));
+                               &buf, sizeof(buf));
        if (ret)
                return ret;
 
        if (scpi_info->is_legacy)
-               /* only 32-bits supported, upper 32 bits can be junk */
-               *val = le32_to_cpup((__le32 *)&value);
+               /* only 32-bits supported, hi_val can be junk */
+               *val = le32_to_cpu(buf.lo_val);
        else
-               *val = le64_to_cpu(value);
+               *val = (u64)le32_to_cpu(buf.hi_val) << 32 |
+                       le32_to_cpu(buf.lo_val);
 
        return 0;
 }
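
Non-legacy firmware now reports a sensor reading as two little-endian 32-bit halves; the high half is widened to u64 before the shift, since shifting a 32-bit value left by 32 would be undefined. The combine step in isolation:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t combine(uint32_t lo_val, uint32_t hi_val)
    {
        /* cast before shifting so all 64 bits survive */
        return ((uint64_t)hi_val << 32) | lo_val;
    }

    int main(void)
    {
        /* made-up reading: hi = 0x1, lo = 0x2 -> 0x100000002 */
        printf("0x%llx\n", (unsigned long long)combine(0x2, 0x1));
        return 0;
    }
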
@@ -853,19 +862,23 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
 static ssize_t protocol_version_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
-       return sprintf(buf, "%lu.%lu\n",
-               FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version),
-               FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version));
+       struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%d.%d\n",
+                      PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
+                      PROTOCOL_REV_MINOR(scpi_info->protocol_version));
 }
 static DEVICE_ATTR_RO(protocol_version);
 
 static ssize_t firmware_version_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
-       return sprintf(buf, "%lu.%lu.%lu\n",
-                    FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version),
-                    FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version),
-                    FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version));
+       struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%d.%d.%d\n",
+                      FW_REV_MAJOR(scpi_info->firmware_version),
+                      FW_REV_MINOR(scpi_info->firmware_version),
+                      FW_REV_PATCH(scpi_info->firmware_version));
 }
 static DEVICE_ATTR_RO(firmware_version);
 
@@ -876,13 +889,39 @@ static struct attribute *versions_attrs[] = {
 };
 ATTRIBUTE_GROUPS(versions);
 
-static void scpi_free_channels(void *data)
+static void
+scpi_free_channels(struct device *dev, struct scpi_chan *pchan, int count)
 {
-       struct scpi_drvinfo *info = data;
        int i;
 
-       for (i = 0; i < info->num_chans; i++)
-               mbox_free_channel(info->channels[i].chan);
+       for (i = 0; i < count && pchan->chan; i++, pchan++) {
+               mbox_free_channel(pchan->chan);
+               devm_kfree(dev, pchan->xfers);
+               devm_iounmap(dev, pchan->rx_payload);
+       }
+}
+
+static int scpi_remove(struct platform_device *pdev)
+{
+       int i;
+       struct device *dev = &pdev->dev;
+       struct scpi_drvinfo *info = platform_get_drvdata(pdev);
+
+       scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */
+
+       of_platform_depopulate(dev);
+       sysfs_remove_groups(&dev->kobj, versions_groups);
+       scpi_free_channels(dev, info->channels, info->num_chans);
+       platform_set_drvdata(pdev, NULL);
+
+       for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) {
+               kfree(info->dvfs[i]->opps);
+               kfree(info->dvfs[i]);
+       }
+       devm_kfree(dev, info->channels);
+       devm_kfree(dev, info);
+
+       return 0;
 }
 
 #define MAX_SCPI_XFERS         10
@@ -913,6 +952,7 @@ static int scpi_probe(struct platform_device *pdev)
 {
        int count, idx, ret;
        struct resource res;
+       struct scpi_chan *scpi_chan;
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
 
@@ -929,19 +969,13 @@ static int scpi_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan),
-                                          GFP_KERNEL);
-       if (!scpi_info->channels)
+       scpi_chan = devm_kcalloc(dev, count, sizeof(*scpi_chan), GFP_KERNEL);
+       if (!scpi_chan)
                return -ENOMEM;
 
-       ret = devm_add_action(dev, scpi_free_channels, scpi_info);
-       if (ret)
-               return ret;
-
-       for (; scpi_info->num_chans < count; scpi_info->num_chans++) {
+       for (idx = 0; idx < count; idx++) {
                resource_size_t size;
-               int idx = scpi_info->num_chans;
-               struct scpi_chan *pchan = scpi_info->channels + idx;
+               struct scpi_chan *pchan = scpi_chan + idx;
                struct mbox_client *cl = &pchan->cl;
                struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
 
@@ -949,14 +983,15 @@ static int scpi_probe(struct platform_device *pdev)
                of_node_put(shmem);
                if (ret) {
                        dev_err(dev, "failed to get SCPI payload mem resource\n");
-                       return ret;
+                       goto err;
                }
 
                size = resource_size(&res);
                pchan->rx_payload = devm_ioremap(dev, res.start, size);
                if (!pchan->rx_payload) {
                        dev_err(dev, "failed to ioremap SCPI payload\n");
-                       return -EADDRNOTAVAIL;
+                       ret = -EADDRNOTAVAIL;
+                       goto err;
                }
                pchan->tx_payload = pchan->rx_payload + (size >> 1);
 
@@ -982,11 +1017,17 @@ static int scpi_probe(struct platform_device *pdev)
                                dev_err(dev, "failed to get channel%d err %d\n",
                                        idx, ret);
                }
+err:
+               scpi_free_channels(dev, scpi_chan, idx);
+               scpi_info = NULL;
                return ret;
        }
 
+       scpi_info->channels = scpi_chan;
+       scpi_info->num_chans = count;
        scpi_info->commands = scpi_std_commands;
-       scpi_info->scpi_ops = &scpi_ops;
+
+       platform_set_drvdata(pdev, scpi_info);
 
        if (scpi_info->is_legacy) {
                /* Replace with legacy variants */
@@ -1002,23 +1043,23 @@ static int scpi_probe(struct platform_device *pdev)
        ret = scpi_init_versions(scpi_info);
        if (ret) {
                dev_err(dev, "incorrect or no SCP firmware found\n");
+               scpi_remove(pdev);
                return ret;
        }
 
-       scpi_dvfs_populate(dev);
-
-       _dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
-                 FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version),
-                 FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version),
-                 FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version),
-                 FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version),
-                 FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version));
+       _dev_info(dev, "SCP Protocol %d.%d Firmware %d.%d.%d version\n",
+                 PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
+                 PROTOCOL_REV_MINOR(scpi_info->protocol_version),
+                 FW_REV_MAJOR(scpi_info->firmware_version),
+                 FW_REV_MINOR(scpi_info->firmware_version),
+                 FW_REV_PATCH(scpi_info->firmware_version));
+       scpi_info->scpi_ops = &scpi_ops;
 
-       ret = devm_device_add_groups(dev, versions_groups);
+       ret = sysfs_create_groups(&dev->kobj, versions_groups);
        if (ret)
                dev_err(dev, "unable to create sysfs version group\n");
 
-       return devm_of_platform_populate(dev);
+       return of_platform_populate(dev->of_node, NULL, NULL, dev);
 }
 
 static const struct of_device_id scpi_of_match[] = {
@@ -1035,6 +1076,7 @@ static struct platform_driver scpi_driver = {
                .of_match_table = scpi_of_match,
        },
        .probe = scpi_probe,
+       .remove = scpi_remove,
 };
 module_platform_driver(scpi_driver);
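
scpi_probe() now publishes its state with platform_set_drvdata(), and scpi_remove() and the sysfs show callbacks fetch it back with platform_get_drvdata()/dev_get_drvdata() instead of relying only on the file-scope pointer. A minimal sketch of that pairing, using hypothetical foo_* names:

    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct foo_drvinfo {
        int version;
    };

    static int foo_probe(struct platform_device *pdev)
    {
        struct foo_drvinfo *info;

        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
            return -ENOMEM;

        platform_set_drvdata(pdev, info);       /* remember it per device */
        return 0;
    }

    static int foo_remove(struct platform_device *pdev)
    {
        struct foo_drvinfo *info = platform_get_drvdata(pdev);

        /* tear down whatever probe set up, using info */
        (void)info;
        return 0;
    }

    static ssize_t version_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
        struct foo_drvinfo *info = dev_get_drvdata(dev);  /* same pointer */

        return sprintf(buf, "%d\n", info->version);
    }
    static DEVICE_ATTR_RO(version);
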
 
index f70febf680c392b37217ce5e6f8c8c4301234869..557a47829d03f2b14b0c3b664e1044e7e2cb86bc 100644 (file)
@@ -109,6 +109,8 @@ struct kobject *efi_kobj;
 /*
  * Let's not leave out systab information that snuck into
  * the efivars driver
+ * Note, do not add more fields in systab sysfs file as it breaks sysfs
+ * one value per file rule!
  */
 static ssize_t systab_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
@@ -143,8 +145,7 @@ static ssize_t systab_show(struct kobject *kobj,
        return str - buf;
 }
 
-static struct kobj_attribute efi_attr_systab =
-                       __ATTR(systab, 0400, systab_show, NULL);
+static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
 
 #define EFI_FIELD(var) efi.var
 
index bd7ed3c1148a7ccd5032e367e3af0ba66af18d20..c47e0c6ec00f858c0b9960f605974b4f2e4b1294 100644 (file)
@@ -106,7 +106,7 @@ static const struct sysfs_ops esre_attr_ops = {
 };
 
 /* Generic ESRT Entry ("ESRE") support. */
-static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
+static ssize_t fw_class_show(struct esre_entry *entry, char *buf)
 {
        char *str = buf;
 
@@ -117,18 +117,16 @@ static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
        return str - buf;
 }
 
-static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400,
-       esre_fw_class_show, NULL);
+static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400);
 
 #define esre_attr_decl(name, size, fmt) \
-static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \
+static ssize_t name##_show(struct esre_entry *entry, char *buf) \
 { \
        return sprintf(buf, fmt "\n", \
                       le##size##_to_cpu(entry->esre.esre1->name)); \
 } \
 \
-static struct esre_attribute esre_##name = __ATTR(name, 0400, \
-       esre_##name##_show, NULL)
+static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400)
 
 esre_attr_decl(fw_type, 32, "%u");
 esre_attr_decl(fw_version, 32, "%u");
@@ -193,14 +191,13 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
 
 /* support for displaying ESRT fields at the top level */
 #define esrt_attr_decl(name, size, fmt) \
-static ssize_t esrt_##name##_show(struct kobject *kobj, \
+static ssize_t name##_show(struct kobject *kobj, \
                                  struct kobj_attribute *attr, char *buf)\
 { \
        return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
 } \
 \
-static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \
-       esrt_##name##_show, NULL)
+static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400)
 
 esrt_attr_decl(fw_resource_count, 32, "%u");
 esrt_attr_decl(fw_resource_count_max, 32, "%u");
@@ -431,7 +428,7 @@ err_remove_group:
 err_remove_esrt:
        kobject_put(esrt_kobj);
 err:
-       kfree(esrt);
+       memunmap(esrt);
        esrt = NULL;
        return error;
 }
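
The error path stops kfree()ing esrt: switching to memunmap() implies the table was obtained with memremap() rather than from the slab, and the two must always be paired. A sketch of the pairing, with placeholder names for the address and size:

    #include <linux/io.h>

    static int map_firmware_table(phys_addr_t phys_addr, size_t size)
    {
        void *table;

        table = memremap(phys_addr, size, MEMREMAP_WB);  /* not kmalloc()ed */
        if (!table)
            return -ENOMEM;

        /* ... parse or copy what is needed ... */

        memunmap(table);    /* pair with memremap(); kfree() here would corrupt slab state */
        return 0;
    }
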
index 8e64b77aeac95e43c0e0571694f42bbe6c8ba73f..f377609ff141bca733bf498babc25f9d215aefad 100644 (file)
@@ -63,11 +63,11 @@ static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
        return map_attr->show(entry, buf);
 }
 
-static struct map_attribute map_type_attr = __ATTR_RO(type);
-static struct map_attribute map_phys_addr_attr   = __ATTR_RO(phys_addr);
-static struct map_attribute map_virt_addr_attr  = __ATTR_RO(virt_addr);
-static struct map_attribute map_num_pages_attr  = __ATTR_RO(num_pages);
-static struct map_attribute map_attribute_attr  = __ATTR_RO(attribute);
+static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400);
+static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400);
+static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400);
+static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
+static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400);
 
 /*
  * These are default attributes that are added for every memmap entry.
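
All of these definitions move to __ATTR_RO_MODE(name, mode), which sets the given mode and wires .show to a function literally named name##_show — hence the esre_/esrt_ prefixes being dropped from the show helpers above. A minimal kernel-side usage sketch with a made-up attribute:

    #include <linux/kernel.h>
    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    static ssize_t demo_show(struct kobject *kobj,
                             struct kobj_attribute *attr, char *buf)
    {
        return sprintf(buf, "42\n");
    }

    /* .show is filled in as demo_show by the macro; 0400 becomes the file mode */
    static struct kobj_attribute demo_attr = __ATTR_RO_MODE(demo, 0400);
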
index 35e553b3b19051b45985991b9b66dc19366fc41e..e4b40f2b46274a0871d1cb881732e8358a324112 100644 (file)
@@ -295,38 +295,60 @@ static int vpd_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       return vpd_sections_init(entry.cbmem_addr);
+       vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
+       if (!vpd_kobj)
+               return -ENOMEM;
+
+       ret = vpd_sections_init(entry.cbmem_addr);
+       if (ret) {
+               kobject_put(vpd_kobj);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int vpd_remove(struct platform_device *pdev)
+{
+       vpd_section_destroy(&ro_vpd);
+       vpd_section_destroy(&rw_vpd);
+
+       kobject_put(vpd_kobj);
+
+       return 0;
 }
 
 static struct platform_driver vpd_driver = {
        .probe = vpd_probe,
+       .remove = vpd_remove,
        .driver = {
                .name = "vpd",
        },
 };
 
+static struct platform_device *vpd_pdev;
+
 static int __init vpd_platform_init(void)
 {
-       struct platform_device *pdev;
-
-       pdev = platform_device_register_simple("vpd", -1, NULL, 0);
-       if (IS_ERR(pdev))
-               return PTR_ERR(pdev);
+       int ret;
 
-       vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
-       if (!vpd_kobj)
-               return -ENOMEM;
+       ret = platform_driver_register(&vpd_driver);
+       if (ret)
+               return ret;
 
-       platform_driver_register(&vpd_driver);
+       vpd_pdev = platform_device_register_simple("vpd", -1, NULL, 0);
+       if (IS_ERR(vpd_pdev)) {
+               platform_driver_unregister(&vpd_driver);
+               return PTR_ERR(vpd_pdev);
+       }
 
        return 0;
 }
 
 static void __exit vpd_platform_exit(void)
 {
-       vpd_section_destroy(&ro_vpd);
-       vpd_section_destroy(&rw_vpd);
-       kobject_put(vpd_kobj);
+       platform_device_unregister(vpd_pdev);
+       platform_driver_unregister(&vpd_driver);
 }
 
 module_init(vpd_platform_init);
index 56cf825ed7799c26d6d5086703cc0c67f43a2442..f3f4f810e5df39b82a68e71305541b391393dbad 100644 (file)
@@ -220,7 +220,7 @@ out_free_cpus:
        return err;
 }
 
-static void dummy_callback(unsigned long ignored) {}
+static void dummy_callback(struct timer_list *unused) {}
 
 static int suspend_cpu(int index, bool broadcast)
 {
@@ -287,7 +287,7 @@ static int suspend_test_thread(void *arg)
        pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
                cpu, drv->state_count - 1);
 
-       setup_timer_on_stack(&wakeup_timer, dummy_callback, 0);
+       timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
        for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
                int index;
                /*
index 5cfe39f7a45f080f56f36eea6259ec4c1b1df8b6..deb483064f53c3e680d34b655360faac04853c3f 100644 (file)
@@ -582,9 +582,10 @@ static int fw_cfg_sysfs_remove(struct platform_device *pdev)
 {
        pr_debug("fw_cfg: unloading.\n");
        fw_cfg_sysfs_cache_cleanup();
+       sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+       fw_cfg_io_cleanup();
        fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
        fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
-       fw_cfg_io_cleanup();
        return 0;
 }
 
index 6b535ec858cc35330baa773782dd51bcccd4da24..15a1f4b348c41b2915755dba173d71704a862768 100644 (file)
@@ -23,6 +23,7 @@
 struct gen_74x164_chip {
        struct gpio_chip        gpio_chip;
        struct mutex            lock;
+       struct gpio_desc        *gpiod_oe;
        u32                     registers;
        /*
         * Since the registers are chained, every byte sent will make
@@ -31,8 +32,7 @@ struct gen_74x164_chip {
         * register at the end of the transfer. So, to have a logical
         * numbering, store the bytes in reverse order.
         */
-       u8                      buffer[0];
-       struct gpio_desc        *gpiod_oe;
+       u8                      buffer[];
 };
 
 static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
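
The trailing buffer becomes a C99 flexible array member and moves behind gpiod_oe; with the old u8 buffer[0] placed before the pointer, writes into buffer[] landed on top of gpiod_oe. A flexible array member must be the last field, and the allocation covers the header plus however many trailing bytes are needed — a standalone illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct shift_chip {
        unsigned int registers;
        /* a flexible array member must be the last field */
        unsigned char buffer[];
    };

    int main(void)
    {
        unsigned int nregs = 4;
        struct shift_chip *chip;

        /* allocate the header plus nregs trailing bytes in one block */
        chip = malloc(sizeof(*chip) + nregs);
        if (!chip)
            return 1;
        chip->registers = nregs;
        memset(chip->buffer, 0, nregs);
        printf("%zu header bytes + %u buffer bytes\n", sizeof(*chip), nregs);
        free(chip);
        return 0;
    }
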
index f75d8443ecaff631d07e8b474e2bdf769357adb0..e4b3d7db68c95a2d87b9766e54f688fe2dd13f36 100644 (file)
@@ -383,7 +383,7 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
        u32 mask;
 
        d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data);
-       g = (struct davinci_gpio_regs __iomem *)d->regs;
+       g = (struct davinci_gpio_regs __iomem *)d->regs[0];
        mask = __gpio_mask(data->irq - d->base_irq);
 
        if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
index babb7bd2ba59b60aa723fa10606c186862a12af3..a0a5f9730aa77b92ea5bc520c22f64b386edb583 100644 (file)
@@ -947,7 +947,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
        { .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
        { .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
 
-       { .compatible = "onsemi,pca9654", .data = OF_953X( 8, PCA_INT), },
+       { .compatible = "onnn,pca9654", .data = OF_953X( 8, PCA_INT), },
 
        { .compatible = "exar,xra1202", .data = OF_953X( 8, 0), },
        { }
index 8a08e81ee90d579774ca96bc70853093ba623f09..d4176a3fb7062537e81010fbb5ea42be15b55a29 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the ACP, which is a sub-component
 # of AMDSOC/AMDGPU drm driver.
 # It provides the HW control for ACP related functionalities.
index 78d609123420455a1d8811eaeb6e91ebaa26e626..90202cf4cd1e0e6569b5adf83c83400d2839a510 100644 (file)
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
 #
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
index 5afaf6016b4a654f552c5a7439bfac8e3425ddc6..0b14b537378345870f7aec058b791add80f8fa95 100644 (file)
@@ -717,7 +717,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
                          struct amdgpu_queue_mgr *mgr);
 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
                         struct amdgpu_queue_mgr *mgr,
-                        int hw_ip, int instance, int ring,
+                        u32 hw_ip, u32 instance, u32 ring,
                         struct amdgpu_ring **out_ring);
 
 /*
@@ -1572,18 +1572,14 @@ struct amdgpu_device {
        /* sdma */
        struct amdgpu_sdma              sdma;
 
-       union {
-               struct {
-                       /* uvd */
-                       struct amdgpu_uvd               uvd;
+       /* uvd */
+       struct amdgpu_uvd               uvd;
 
-                       /* vce */
-                       struct amdgpu_vce               vce;
-               };
+       /* vce */
+       struct amdgpu_vce               vce;
 
-               /* vcn */
-               struct amdgpu_vcn               vcn;
-       };
+       /* vcn */
+       struct amdgpu_vcn               vcn;
 
        /* firmwares */
        struct amdgpu_firmware          firmware;
index 47d1c132ac40b24c719c5801c84f2a55b4c598e2..1e3e9be7d77ecf29883cf0ec5d5874f0cb67bd64 100644 (file)
@@ -379,29 +379,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
+       unsigned long end_jiffies;
        uint32_t sdma_base_addr;
+       uint32_t data;
 
        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);
 
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
-                       m->sdma_rlc_virtual_addr);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+               m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
 
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
-                       m->sdma_rlc_rb_base);
+       end_jiffies = msecs_to_jiffies(2000) + jiffies;
+       while (true) {
+               data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+               if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+                       break;
+               if (time_after(jiffies, end_jiffies))
+                       return -ETIME;
+               usleep_range(500, 1000);
+       }
+       if (m->sdma_engine_id) {
+               data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+               data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+                               RESUME_CTX, 0);
+               WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+       } else {
+               data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+               data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+                               RESUME_CTX, 0);
+               WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+       }
 
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+                               m->sdma_rlc_doorbell);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+                               m->sdma_rlc_virtual_addr);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
                        m->sdma_rlc_rb_base_hi);
-
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
                        m->sdma_rlc_rb_rptr_addr_lo);
-
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
                        m->sdma_rlc_rb_rptr_addr_hi);
-
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
-                       m->sdma_rlc_doorbell);
-
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
                        m->sdma_rlc_rb_cntl);
 
@@ -574,9 +595,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
        }
 
        WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
-       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+       WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+               RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+               SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
 
        return 0;
 }
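
The load path now parks the ring (RB_ENABLE cleared) and polls the context-status register until the engine reports idle, bounded by a two-second jiffies deadline. The generic shape of that wait, with read_status()/IDLE_MASK standing in for the RREG32() read and mask:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static int wait_for_idle(void)
    {
        unsigned long timeout = jiffies + msecs_to_jiffies(2000);

        for (;;) {
            if (read_status() & IDLE_MASK)      /* placeholder register read */
                return 0;
            if (time_after(jiffies, timeout))   /* 2 s elapsed: give up */
                return -ETIME;
            usleep_range(500, 1000);            /* back off between polls */
        }
    }
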
index a57cec737c18ab1b8db405607042a4363493205b..57abf7abd7a9cda177e9e82c6c5e42d3dc759f6d 100644 (file)
@@ -409,6 +409,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
                if (candidate->robj == validated)
                        break;
 
+               /* We can't move pinned BOs here */
+               if (bo->pin_count)
+                       continue;
+
                other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 
                /* Check if this BO is in one of the domains we need space for */
index 2c574374d9b6884e6c4473f2dd3ede86b7a612df..3573ecdb06eeff06d2f1507cdadd4f0b05ba9435 100644 (file)
@@ -1837,9 +1837,6 @@ static int amdgpu_fini(struct amdgpu_device *adev)
                adev->ip_blocks[i].status.hw = false;
        }
 
-       if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
-               amdgpu_ucode_fini_bo(adev);
-
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.sw)
                        continue;
index ec96bb1f9eafbc374cdad09c85f96da8e8d1bbad..c2f414ffb2cc205c40873afaabeee821ffa816e6 100644 (file)
@@ -536,7 +536,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        /* Raven */
-       {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
 
        {0, 0, 0}
 };
index bd5b8065c32e86fcf4e71c25779cffee2939d203..2fa95aef74d5200449c94a0215f04be60e8cb184 100644 (file)
@@ -268,9 +268,10 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
  *
  * Checks for fence activity.
  */
-static void amdgpu_fence_fallback(unsigned long arg)
+static void amdgpu_fence_fallback(struct timer_list *t)
 {
-       struct amdgpu_ring *ring = (void *)arg;
+       struct amdgpu_ring *ring = from_timer(ring, t,
+                                             fence_drv.fallback_timer);
 
        amdgpu_fence_process(ring);
 }
@@ -422,8 +423,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
        atomic_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;
 
-       setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
-                   (unsigned long)ring);
+       timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
 
        ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
        spin_lock_init(&ring->fence_drv.lock);
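
This is the new timer API: the callback receives the struct timer_list pointer and recovers its container with from_timer() (a container_of() wrapper), so no context cookie is cast through an unsigned long any more. A small sketch of the pattern with illustrative demo_* names:

    #include <linux/printk.h>
    #include <linux/timer.h>

    struct demo_ring {
        struct timer_list fallback_timer;
        /* ... */
    };

    static void demo_fallback(struct timer_list *t)
    {
        struct demo_ring *ring = from_timer(ring, t, fallback_timer);

        pr_debug("fallback fired for ring %p\n", ring);
    }

    static void demo_ring_init(struct demo_ring *ring)
    {
        /* replaces setup_timer(&timer, fn, (unsigned long)ring) */
        timer_setup(&ring->fallback_timer, demo_fallback, 0);
    }
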
index 6c570d4e4516488b81cd385353b8d02c7e428912..f8edf5483f11a65f2cade841a3ab9ea9ed8d4f4d 100644 (file)
@@ -1,4 +1,6 @@
 /*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
index 033fba2def6f775b35f4f840f7032477c4403ad3..5f5aa5fddc169355077a4e61665563c087d860f5 100644 (file)
@@ -164,6 +164,9 @@ static int amdgpu_pp_hw_fini(void *handle)
                ret = adev->powerplay.ip_funcs->hw_fini(
                                        adev->powerplay.pp_handle);
 
+       if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+               amdgpu_ucode_fini_bo(adev);
+
        return ret;
 }
 
index 7714f4a6c8b000072c2b7c3d8691884b3d902a35..447d446b50150d475cb9a01945706b17bbfc2e78 100644 (file)
@@ -442,6 +442,8 @@ static int psp_hw_fini(void *handle)
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;
 
+       amdgpu_ucode_fini_bo(adev);
+
        psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
        amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
index 190e28cb827e535d247377e731507a82bc20dc3a..93d86619e802c998636d61f36f2daf2e2d5d1c34 100644 (file)
@@ -63,7 +63,7 @@ static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
 
 static int amdgpu_identity_map(struct amdgpu_device *adev,
                               struct amdgpu_queue_mapper *mapper,
-                              int ring,
+                              u32 ring,
                               struct amdgpu_ring **out_ring)
 {
        switch (mapper->hw_ip) {
@@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
 
 static int amdgpu_lru_map(struct amdgpu_device *adev,
                          struct amdgpu_queue_mapper *mapper,
-                         int user_ring, bool lru_pipe_order,
+                         u32 user_ring, bool lru_pipe_order,
                          struct amdgpu_ring **out_ring)
 {
        int r, i, j;
@@ -208,7 +208,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
  */
 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
                         struct amdgpu_queue_mgr *mgr,
-                        int hw_ip, int instance, int ring,
+                        u32 hw_ip, u32 instance, u32 ring,
                         struct amdgpu_ring **out_ring)
 {
        int r, ip_num_rings;
index f337c316ec2c656a823230b355250929715db327..06525f2c36c3cd2cfc670f1713f0778f62cbbf5e 100644 (file)
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _AMDGPU_TRACE_H_
 
index 793b1470284d67b13c68954481520f7ab3ef635e..a296f7bbe57cbb0c768499a6c254e433d931773a 100644 (file)
@@ -1023,22 +1023,101 @@ static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] =
        {mmPA_SC_RASTER_CONFIG_1, true},
 };
 
-static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
-                                         u32 se_num, u32 sh_num,
-                                         u32 reg_offset)
+
+static uint32_t cik_get_register_value(struct amdgpu_device *adev,
+                                      bool indexed, u32 se_num,
+                                      u32 sh_num, u32 reg_offset)
 {
-       uint32_t val;
+       if (indexed) {
+               uint32_t val;
+               unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+               unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+               switch (reg_offset) {
+               case mmCC_RB_BACKEND_DISABLE:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+               case mmGC_USER_RB_BACKEND_DISABLE:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+               case mmPA_SC_RASTER_CONFIG:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+               case mmPA_SC_RASTER_CONFIG_1:
+                       return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+               }
 
-       mutex_lock(&adev->grbm_idx_mutex);
-       if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+               mutex_lock(&adev->grbm_idx_mutex);
+               if (se_num != 0xffffffff || sh_num != 0xffffffff)
+                       amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
-       val = RREG32(reg_offset);
+               val = RREG32(reg_offset);
 
-       if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-       mutex_unlock(&adev->grbm_idx_mutex);
-       return val;
+               if (se_num != 0xffffffff || sh_num != 0xffffffff)
+                       amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+               mutex_unlock(&adev->grbm_idx_mutex);
+               return val;
+       } else {
+               unsigned idx;
+
+               switch (reg_offset) {
+               case mmGB_ADDR_CONFIG:
+                       return adev->gfx.config.gb_addr_config;
+               case mmMC_ARB_RAMCFG:
+                       return adev->gfx.config.mc_arb_ramcfg;
+               case mmGB_TILE_MODE0:
+               case mmGB_TILE_MODE1:
+               case mmGB_TILE_MODE2:
+               case mmGB_TILE_MODE3:
+               case mmGB_TILE_MODE4:
+               case mmGB_TILE_MODE5:
+               case mmGB_TILE_MODE6:
+               case mmGB_TILE_MODE7:
+               case mmGB_TILE_MODE8:
+               case mmGB_TILE_MODE9:
+               case mmGB_TILE_MODE10:
+               case mmGB_TILE_MODE11:
+               case mmGB_TILE_MODE12:
+               case mmGB_TILE_MODE13:
+               case mmGB_TILE_MODE14:
+               case mmGB_TILE_MODE15:
+               case mmGB_TILE_MODE16:
+               case mmGB_TILE_MODE17:
+               case mmGB_TILE_MODE18:
+               case mmGB_TILE_MODE19:
+               case mmGB_TILE_MODE20:
+               case mmGB_TILE_MODE21:
+               case mmGB_TILE_MODE22:
+               case mmGB_TILE_MODE23:
+               case mmGB_TILE_MODE24:
+               case mmGB_TILE_MODE25:
+               case mmGB_TILE_MODE26:
+               case mmGB_TILE_MODE27:
+               case mmGB_TILE_MODE28:
+               case mmGB_TILE_MODE29:
+               case mmGB_TILE_MODE30:
+               case mmGB_TILE_MODE31:
+                       idx = (reg_offset - mmGB_TILE_MODE0);
+                       return adev->gfx.config.tile_mode_array[idx];
+               case mmGB_MACROTILE_MODE0:
+               case mmGB_MACROTILE_MODE1:
+               case mmGB_MACROTILE_MODE2:
+               case mmGB_MACROTILE_MODE3:
+               case mmGB_MACROTILE_MODE4:
+               case mmGB_MACROTILE_MODE5:
+               case mmGB_MACROTILE_MODE6:
+               case mmGB_MACROTILE_MODE7:
+               case mmGB_MACROTILE_MODE8:
+               case mmGB_MACROTILE_MODE9:
+               case mmGB_MACROTILE_MODE10:
+               case mmGB_MACROTILE_MODE11:
+               case mmGB_MACROTILE_MODE12:
+               case mmGB_MACROTILE_MODE13:
+               case mmGB_MACROTILE_MODE14:
+               case mmGB_MACROTILE_MODE15:
+                       idx = (reg_offset - mmGB_MACROTILE_MODE0);
+                       return adev->gfx.config.macrotile_mode_array[idx];
+               default:
+                       return RREG32(reg_offset);
+               }
+       }
 }
 
 static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -1048,13 +1127,13 @@ static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
 
        *value = 0;
        for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
+               bool indexed = cik_allowed_read_registers[i].grbm_indexed;
+
                if (reg_offset != cik_allowed_read_registers[i].reg_offset)
                        continue;
 
-               *value = cik_allowed_read_registers[i].grbm_indexed ?
-                        cik_read_indexed_register(adev, se_num,
-                                                  sh_num, reg_offset) :
-                        RREG32(reg_offset);
+               *value = cik_get_register_value(adev, indexed, se_num, sh_num,
+                                               reg_offset);
                return 0;
        }
        return -EINVAL;
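
For the non-indexed registers the new helper returns cached copies instead of touching MMIO; the contiguous GB_TILE_MODE0..31 and GB_MACROTILE_MODE0..15 blocks map straight onto arrays via the offset difference. The same offset-to-index trick in isolation, with made-up register numbers:

    #include <stdio.h>

    /* made-up contiguous offsets, standing in for mmGB_TILE_MODE0..31 */
    enum { REG_TILE_MODE0 = 0x2644, REG_TILE_MODE31 = REG_TILE_MODE0 + 31 };

    static unsigned int tile_mode_cache[32];

    static unsigned int read_cached(unsigned int reg)
    {
        if (reg >= REG_TILE_MODE0 && reg <= REG_TILE_MODE31)
            return tile_mode_cache[reg - REG_TILE_MODE0]; /* offset -> index */
        return 0; /* the driver would fall back to a real MMIO read here */
    }

    int main(void)
    {
        tile_mode_cache[5] = 0xdeadbeef;
        printf("0x%x\n", read_cached(REG_TILE_MODE0 + 5)); /* prints 0xdeadbeef */
        return 0;
    }
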
index 5c8a7a48a4adb16834ab5893e2341c03a5899d7d..419ba0ce7ee5b0cc343e14665ab61d91f3801d93 100644 (file)
@@ -1819,6 +1819,22 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
                                                        adev->gfx.config.backend_enable_mask,
                                                        num_rb_pipes);
        }
+
+       /* cache the values for userspace */
+       for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+               for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+                       gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+                       adev->gfx.config.rb_config[i][j].rb_backend_disable =
+                               RREG32(mmCC_RB_BACKEND_DISABLE);
+                       adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+                               RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+                       adev->gfx.config.rb_config[i][j].raster_config =
+                               RREG32(mmPA_SC_RASTER_CONFIG);
+                       adev->gfx.config.rb_config[i][j].raster_config_1 =
+                               RREG32(mmPA_SC_RASTER_CONFIG_1);
+               }
+       }
+       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 }
 
index 1eb4d79d6e306f7137daa4e57e58078993f608d3..0450ac5ba6b6d1db4e3d19b8b1b027008ed112e9 100644 (file)
@@ -1175,7 +1175,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
 
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1;
+       adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
        adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
 }
 
index 7bb0bc0ca3d6aaa9ff330d41c896089ce2121011..342c2d937b17bdd30fb751753a5af2733763e1e6 100644 (file)
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
 #
 # Makefile for Heterogenous System Architecture support for AMD GPU devices
 #
index 6c5a9cab55ded2448f99ae9c2ec1c59bbd68a3e7..f744caeaee049587520c75524f3ee6e580b8a63e 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
 #include <linux/device.h>
+#include <linux/printk.h>
 #include "kfd_priv.h"
 
 #define KFD_DRIVER_AUTHOR      "AMD Inc. and others"
@@ -132,7 +133,7 @@ static void __exit kfd_module_exit(void)
        kfd_process_destroy_wq();
        kfd_topology_shutdown();
        kfd_chardev_exit();
-       dev_info(kfd_device, "Removed module\n");
+       pr_info("amdkfd: Removed module\n");
 }
 
 module_init(kfd_module_init);
index 4859d263fa2a3ce51a816b2f3b36b98f298cf9f7..4728fad3fd7425ca2e0ef2fbb805dc145d078df7 100644 (file)
@@ -202,8 +202,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
        struct cik_sdma_rlc_registers *m;
 
        m = get_sdma_mqd(mqd);
-       m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
-                       SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+       m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+                       << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
                        q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
                        1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
                        6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
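
The fix subtracts one from ffs(): the RB_SIZE field presumably encodes log2 of the ring length in dwords, and for a power of two ffs(x) returns log2(x) + 1, so the old value overstated the size by a factor of two. A quick standalone check:

    #include <stdio.h>
    #include <strings.h>   /* ffs() */

    int main(void)
    {
        unsigned int queue_size = 4096;                           /* bytes, made-up */
        unsigned int dwords = queue_size / sizeof(unsigned int);  /* 1024 */

        /* ffs() is 1-based: ffs(1024) == 11, but log2(1024) == 10 */
        printf("ffs=%d log2=%d\n", ffs(dwords), ffs(dwords) - 1);
        return 0;
    }
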
index 2bec902fc93906c7bbbe562e4d906048f0efa336..a3f1e62c60ba9d80b7d0244be3b3e3301a64557c 100644 (file)
@@ -191,6 +191,24 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 
        switch (type) {
        case KFD_QUEUE_TYPE_SDMA:
+               if (dev->dqm->queue_count >=
+                       CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+                       pr_err("Over-subscription is not allowed for SDMA.\n");
+                       retval = -EPERM;
+                       goto err_create_queue;
+               }
+
+               retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+               if (retval != 0)
+                       goto err_create_queue;
+               pqn->q = q;
+               pqn->kq = NULL;
+               retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
+                                               &q->properties.vmid);
+               pr_debug("DQM returned %d for create_queue\n", retval);
+               print_queue(q);
+               break;
+
        case KFD_QUEUE_TYPE_COMPUTE:
                /* check if there is over subscription */
                if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
index 8ba37dd9cf7fce68104ffdd8a637642d4507290a..c27c81cdeed3b815eb4f0630b12a412cb9eabb27 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the DAL (Display Abstract Layer), which is a  sub-component
 # of the AMDGPU drm driver.
 # It provides the HW control for display related functionalities.
index 4699e47aa76b00969bd0a7a7cc1f074df709563c..2b72009844f8316f92ed9dd2bf9bfac187219520 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'dm' sub-component of DAL.
 # It provides the control and status of dm blocks.
 
index 889ed24084e866bb846d9b6cc1f52a38e51cb5eb..f71fe6d2ddda795fd2fb914740b75845893c1298 100644 (file)
@@ -520,7 +520,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
 
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                aconnector = to_amdgpu_dm_connector(connector);
-               if (aconnector->dc_link->type == dc_connection_mst_branch) {
+               if (aconnector->dc_link->type == dc_connection_mst_branch &&
+                   aconnector->mst_mgr.aux) {
                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                                        aconnector, aconnector->base.base.id);
 
@@ -677,6 +678,10 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 
                mutex_lock(&aconnector->hpd_lock);
                dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+
+               if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+                       aconnector->fake_enable = false;
+
                aconnector->dc_sink = NULL;
                amdgpu_dm_update_connector_after_detect(aconnector);
                mutex_unlock(&aconnector->hpd_lock);
@@ -711,7 +716,6 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 
        ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
 
-       drm_atomic_state_put(adev->dm.cached_state);
        adev->dm.cached_state = NULL;
 
        amdgpu_dm_irq_resume_late(adev);
@@ -2704,7 +2708,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_VIRTUAL
        };
-       struct edid *edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+       struct edid *edid;
 
        if (!aconnector->base.edid_blob_ptr ||
                !aconnector->base.edid_blob_ptr->data) {
@@ -2716,6 +2720,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
                return;
        }
 
+       edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+
        aconnector->edid = edid;
 
        aconnector->dc_em_sink = dc_link_add_remote_sink(
@@ -4193,13 +4199,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
                                dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
 
+               if (!dm_new_crtc_state->stream)
+                       continue;
+
                status = dc_stream_get_status(dm_new_crtc_state->stream);
                WARN_ON(!status);
                WARN_ON(!status->plane_count);
 
-               if (!dm_new_crtc_state->stream)
-                       continue;
-
                /*TODO How it works with MPO ?*/
                if (!dc_commit_planes_to_stream(
                                dm->dc,
@@ -4253,7 +4259,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        drm_atomic_helper_commit_hw_done(state);
 
        if (wait_for_vblank)
-               drm_atomic_helper_wait_for_vblanks(dev, state);
+               drm_atomic_helper_wait_for_flip_done(dev, state);
 
        drm_atomic_helper_cleanup_planes(dev, state);
 }
@@ -4332,9 +4338,11 @@ void dm_restore_drm_connector_state(struct drm_device *dev,
                return;
 
        disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
-       acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+       if (!disconnected_acrtc)
+               return;
 
-       if (!disconnected_acrtc || !acrtc_state->stream)
+       acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+       if (!acrtc_state->stream)
                return;
 
        /*
@@ -4455,7 +4463,7 @@ static int dm_update_crtcs_state(struct dc *dc,
                        }
                }
 
-               if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+               if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                                dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
 
                        new_crtc_state->mode_changed = false;
@@ -4709,7 +4717,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                }
        } else {
                for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-                       if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+                       if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+                                       !new_crtc_state->color_mgmt_changed)
                                continue;
 
                        if (!new_crtc_state->enable)
index 4f83e3011743f14469401d867f9892a2dbb311bd..aed538a4d1bace016fb37526d099656227b81348 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for Display Core (dc) component.
 #
 
index 43c5ccdeeb724e65729b5d93b6aea5cc5cf2b81f..6af8c8a9ad8096a1521ac908a143767d21a3c61b 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'utils' sub-component of DAL.
 # It provides the general basic services required by other DAL
 # subcomponents.
index 785b943b60ed11b12ccbeeca27c9cc30e3219c81..6e43168fbdd65b1d12be9d647eb2763200c89d27 100644 (file)
@@ -75,6 +75,9 @@ void dc_conn_log(struct dc_context *ctx,
                if (signal == signal_type_info_tbl[i].type)
                        break;
 
+       if (i == NUM_ELEMENTS(signal_type_info_tbl))
+               goto fail;
+
        dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
                        signal_type_info_tbl[i].name,
                        link->link_index);
@@ -96,6 +99,8 @@ void dc_conn_log(struct dc_context *ctx,
 
        dm_logger_append(&entry, "^\n");
        dm_helpers_dc_conn_log(ctx, &entry, event);
+
+fail:
        dm_logger_close(&entry);
 
        va_end(args);
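
The dc_conn_log() change above guards a table scan: if the loop falls off the end of signal_type_info_tbl without a match, i equals the table size and indexing signal_type_info_tbl[i] would read past the array, so the new check jumps to a fail label that still closes the logger entry. A small self-contained sketch of the bounded-lookup-with-cleanup pattern (the table and logger calls are illustrative stand-ins, not the DC API):

#include <stddef.h>
#include <stdio.h>

#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))

struct sig_info { int type; const char *name; };

static const struct sig_info tbl[] = {
        { 1, "HDMI" },
        { 2, "DP"   },
};

static void log_signal(int signal)
{
        size_t i;

        printf("[entry open]\n");       /* stand-in for opening the log entry */

        for (i = 0; i < NUM_ELEMENTS(tbl); i++)
                if (tbl[i].type == signal)
                        break;

        if (i == NUM_ELEMENTS(tbl))
                goto fail;              /* unknown signal: skip the body */

        printf("signal=%s\n", tbl[i].name);

fail:
        printf("[entry close]\n");      /* stand-in for dm_logger_close(): always runs */
}

int main(void)
{
        log_signal(2);          /* known signal */
        log_signal(99);         /* unknown: no out-of-bounds read, entry still closed */
        return 0;
}
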
index 6ec815dce9ccc116c330cfebdfbefcc29ae91b20..239e86bbec5a17e318f7d82bad3d607bcbcd43d3 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'bios' sub-component of DAL.
 # It provides the parsing and executing controls for atom bios image.
 
index aaaebd06d7ee33bb1f5f3bdb91eff71e752d88e9..86e6438c5cf35a6c0f981524454d58af417a183d 100644 (file)
@@ -249,7 +249,7 @@ static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
        struct graphics_object_id *dest_object_id)
 {
        uint32_t number;
-       uint16_t *id;
+       uint16_t *id = NULL;
        ATOM_OBJECT *object;
        struct bios_parser *bp = BP_FROM_DCB(dcb);
 
@@ -260,7 +260,7 @@ static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
 
        number = get_dest_obj_list(bp, object, &id);
 
-       if (number <= index)
+       if (number <= index || !id)
                return BP_RESULT_BADINPUT;
 
        *dest_object_id = object_id_from_bios_object_id(id[index]);
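
In the bios_parser_get_dst_obj() hunk above, id is an out-parameter that get_dest_obj_list() may leave untouched when the object has no destination list, so it is now initialised to NULL and tested before id[index] is dereferenced. A short sketch of defending an out-pointer like this (the helper here is a hypothetical stand-in for get_dest_obj_list()):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical list getter: may return 0 without writing to *list. */
static unsigned int get_list(const unsigned short **list)
{
        (void)list;
        return 0;
}

static int lookup(unsigned int index, unsigned short *out)
{
        const unsigned short *id = NULL;        /* defensive init */
        unsigned int number = get_list(&id);

        if (number <= index || !id)             /* reject short or unset lists */
                return -1;

        *out = id[index];
        return 0;
}

int main(void)
{
        unsigned short v;

        printf("%d\n", lookup(0, &v));          /* -1, no NULL/garbage dereference */
        return 0;
}
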
index 41ef35995b029e5586e404a4b8632ef689d315c7..7959e382ed28f0b4aa833edace0ec3938dbefe34 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'calcs' sub-component of DAL.
 # It calculates Bandwidth and Watermarks values for HW programming
 #
index fe63f5894d43bf93daf320e5486914bde724a80c..7240db2e6f095ebde8d2b685c0dd17b281c8e6d3 100644 (file)
@@ -121,6 +121,10 @@ static bool create_links(
                        goto failed_alloc;
                }
 
+               link->link_index = dc->link_count;
+               dc->links[dc->link_count] = link;
+               dc->link_count++;
+
                link->ctx = dc->ctx;
                link->dc = dc;
                link->connector_signal = SIGNAL_TYPE_VIRTUAL;
@@ -129,6 +133,13 @@ static bool create_links(
                link->link_id.enum_id = ENUM_ID_1;
                link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
 
+               if (!link->link_enc) {
+                       BREAK_TO_DEBUGGER();
+                       goto failed_alloc;
+               }
+
+               link->link_status.dpcd_caps = &link->dpcd_caps;
+
                enc_init.ctx = dc->ctx;
                enc_init.channel = CHANNEL_ID_UNKNOWN;
                enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
@@ -138,10 +149,6 @@ static bool create_links(
                enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
                enc_init.encoder.enum_id = ENUM_ID_1;
                virtual_link_encoder_construct(link->link_enc, &enc_init);
-
-               link->link_index = dc->link_count;
-               dc->links[dc->link_count] = link;
-               dc->link_count++;
        }
 
        return true;
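
The create_links() hunk above moves the bookkeeping (link_index, dc->links[], dc->link_count) ahead of the link_enc allocation and adds a NULL check on kzalloc(), so a failed allocation bails out with the link already tracked rather than half-initialised and invisible to cleanup. How the real failed_alloc label unwinds is outside this hunk, so the sketch below only illustrates the register-first, allocate-second ordering with an assumed cleanup loop:

#include <stdio.h>
#include <stdlib.h>

struct enc  { int dummy; };
struct link { struct enc *enc; };

static struct link *links[4];
static int link_count;

/* Build n links; on any allocation failure free everything that was tracked. */
static int create_links(int n)
{
        int i;

        for (i = 0; i < n; i++) {
                struct link *l = calloc(1, sizeof(*l));

                if (!l)
                        goto fail;

                links[link_count++] = l;        /* register before further allocs */

                l->enc = calloc(1, sizeof(*l->enc));
                if (!l->enc)
                        goto fail;              /* link is tracked, so it gets freed */
        }
        return 0;

fail:
        while (link_count > 0) {
                link_count--;
                free(links[link_count]->enc);   /* free(NULL) is harmless */
                free(links[link_count]);
                links[link_count] = NULL;
        }
        return -1;
}

int main(void)
{
        printf("%d\n", create_links(2));
        return 0;
}
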
index 6acee5426e4bd64737f583d718d9f1a9fdeaa827..43c7a7fddb8344b656efbf7831b57e3c0123ce3e 100644 (file)
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
 /*
  * dc_debug.c
  *
index 0602610489d759d55263112505ce177ccd728f41..e27ed4a45265290690604b10e6d4df4fbee77514 100644 (file)
@@ -480,22 +480,6 @@ static void detect_dp(
                sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
                detect_dp_sink_caps(link);
 
-               /* DP active dongles */
-               if (is_dp_active_dongle(link)) {
-                       link->type = dc_connection_active_dongle;
-                       if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
-                               /*
-                                * active dongle unplug processing for short irq
-                                */
-                               link_disconnect_sink(link);
-                               return;
-                       }
-
-                       if (link->dpcd_caps.dongle_type !=
-                       DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
-                               *converter_disable_audio = true;
-                       }
-               }
                if (is_mst_supported(link)) {
                        sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
                        link->type = dc_connection_mst_branch;
@@ -535,6 +519,22 @@ static void detect_dp(
                                sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
                        }
                }
+
+               if (link->type != dc_connection_mst_branch &&
+                       is_dp_active_dongle(link)) {
+                       /* DP active dongles */
+                       link->type = dc_connection_active_dongle;
+                       if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
+                               /*
+                                * active dongle unplug processing for short irq
+                                */
+                               link_disconnect_sink(link);
+                               return;
+                       }
+
+                       if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+                               *converter_disable_audio = true;
+               }
        } else {
                /* DP passive dongles */
                sink_caps->signal = dp_passive_dongle_detection(link->ddc,
@@ -1801,12 +1801,75 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
                link->link_enc->funcs->disable_output(link->link_enc, signal, link);
 }
 
+bool dp_active_dongle_validate_timing(
+               const struct dc_crtc_timing *timing,
+               const struct dc_dongle_caps *dongle_caps)
+{
+       unsigned int required_pix_clk = timing->pix_clk_khz;
+
+       if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+               dongle_caps->extendedCapValid == false)
+               return true;
+
+       /* Check Pixel Encoding */
+       switch (timing->pixel_encoding) {
+       case PIXEL_ENCODING_RGB:
+       case PIXEL_ENCODING_YCBCR444:
+               break;
+       case PIXEL_ENCODING_YCBCR422:
+               if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
+                       return false;
+               break;
+       case PIXEL_ENCODING_YCBCR420:
+               if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
+                       return false;
+               break;
+       default:
+               /* Invalid Pixel Encoding*/
+               return false;
+       }
+
+
+       /* Check Color Depth and Pixel Clock */
+       if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+               required_pix_clk /= 2;
+
+       switch (timing->display_color_depth) {
+       case COLOR_DEPTH_666:
+       case COLOR_DEPTH_888:
+               /*888 and 666 should always be supported*/
+               break;
+       case COLOR_DEPTH_101010:
+               if (dongle_caps->dp_hdmi_max_bpc < 10)
+                       return false;
+               required_pix_clk = required_pix_clk * 10 / 8;
+               break;
+       case COLOR_DEPTH_121212:
+               if (dongle_caps->dp_hdmi_max_bpc < 12)
+                       return false;
+               required_pix_clk = required_pix_clk * 12 / 8;
+               break;
+
+       case COLOR_DEPTH_141414:
+       case COLOR_DEPTH_161616:
+       default:
+               /* These color depths are currently not supported */
+               return false;
+       }
+
+       if (required_pix_clk > dongle_caps->dp_hdmi_max_pixel_clk)
+               return false;
+
+       return true;
+}
+
 enum dc_status dc_link_validate_mode_timing(
                const struct dc_stream_state *stream,
                struct dc_link *link,
                const struct dc_crtc_timing *timing)
 {
        uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
+       struct dc_dongle_caps *dongle_caps = &link->link_status.dpcd_caps->dongle_caps;
 
        /* A hack to avoid failing any modes for EDID override feature on
         * topology change such as lower quality cable for DP or different dongle
@@ -1814,8 +1877,13 @@ enum dc_status dc_link_validate_mode_timing(
        if (link->remote_sinks[0])
                return DC_OK;
 
+       /* Passive Dongle */
        if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk)
-               return DC_EXCEED_DONGLE_MAX_CLK;
+               return DC_EXCEED_DONGLE_CAP;
+
+       /* Active Dongle*/
+       if (!dp_active_dongle_validate_timing(timing, dongle_caps))
+               return DC_EXCEED_DONGLE_CAP;
 
        switch (stream->signal) {
        case SIGNAL_TYPE_EDP:
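
The two hunks above add active-dongle mode validation: dp_active_dongle_validate_timing() scales the required pixel clock by chroma subsampling (4:2:0 halves it) and by color depth (x10/8, x12/8) before comparing against the dongle's dp_hdmi_max_pixel_clk, and dc_link_validate_mode_timing() now returns DC_EXCEED_DONGLE_CAP for either the passive or the active dongle limit. A standalone sketch of the same arithmetic; the 594000 kHz 4K60 base clock and the 600000 kHz dongle cap are example numbers of mine, not values from the patch:

#include <stdio.h>

/* Same scaling as dp_active_dongle_validate_timing(), clocks in kHz. */
static unsigned int required_clk_khz(unsigned int pix_clk_khz,
                                     int is_ycbcr420, unsigned int bpc)
{
        unsigned int clk = pix_clk_khz;

        if (is_ycbcr420)
                clk /= 2;               /* 4:2:0 halves the effective rate */
        return clk * bpc / 8;           /* deep color scales it back up */
}

int main(void)
{
        const unsigned int cap = 600000;        /* hypothetical dongle limit */
        unsigned int a = required_clk_khz(594000, 1, 10);   /* 4:2:0, 10 bpc */
        unsigned int b = required_clk_khz(594000, 0, 12);   /* RGB, 12 bpc   */

        printf("4:2:0 10bpc needs %u kHz -> %s\n", a, a <= cap ? "ok" : "exceeds cap");
        printf("RGB   12bpc needs %u kHz -> %s\n", b, b <= cap ? "ok" : "exceeds cap");
        return 0;
}

With these example numbers the 371250 kHz requirement passes while 891000 kHz is rejected, which is exactly the DC_EXCEED_DONGLE_CAP case.
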
index ced42484dcfc7a45e51aec5ff1c69e823fe78ae9..e6bf05d76a942d0f764c224e9480acc5f07b7950 100644 (file)
@@ -1512,7 +1512,7 @@ static bool hpd_rx_irq_check_link_loss_status(
        struct dc_link *link,
        union hpd_irq_data *hpd_irq_dpcd_data)
 {
-       uint8_t irq_reg_rx_power_state;
+       uint8_t irq_reg_rx_power_state = 0;
        enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
        union lane_status lane_status;
        uint32_t lane;
@@ -1524,60 +1524,55 @@ static bool hpd_rx_irq_check_link_loss_status(
 
        if (link->cur_link_settings.lane_count == 0)
                return return_code;
-       /*1. Check that we can handle interrupt: Not in FS DOS,
-        *  Not in "Display Timeout" state, Link is trained.
-        */
 
-       dpcd_result = core_link_read_dpcd(link,
-               DP_SET_POWER,
-               &irq_reg_rx_power_state,
-               sizeof(irq_reg_rx_power_state));
+       /*1. Check that Link Status changed, before re-training.*/
 
-       if (dpcd_result != DC_OK) {
-               irq_reg_rx_power_state = DP_SET_POWER_D0;
-               dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
-                       "%s: DPCD read failed to obtain power state.\n",
-                       __func__);
+       /*parse lane status*/
+       for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+               /* check status of lanes 0,1
+                * changed DpcdAddress_Lane01Status (0x202)
+                */
+               lane_status.raw = get_nibble_at_index(
+                       &hpd_irq_dpcd_data->bytes.lane01_status.raw,
+                       lane);
+
+               if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+                       !lane_status.bits.CR_DONE_0 ||
+                       !lane_status.bits.SYMBOL_LOCKED_0) {
+                       /* if channel equalization, clock recovery or symbol
+                        * lock is lost on any lane, treat the link as dropped:
+                        * the DP sink status has changed
+                        */
+                       sink_status_changed = true;
+                       break;
+               }
        }
 
-       if (irq_reg_rx_power_state == DP_SET_POWER_D0) {
-
-               /*2. Check that Link Status changed, before re-training.*/
-
-               /*parse lane status*/
-               for (lane = 0;
-                       lane < link->cur_link_settings.lane_count;
-                       lane++) {
+       /* Check interlane align.*/
+       if (sink_status_changed ||
+               !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
 
-                       /* check status of lanes 0,1
-                        * changed DpcdAddress_Lane01Status (0x202)*/
-                       lane_status.raw = get_nibble_at_index(
-                               &hpd_irq_dpcd_data->bytes.lane01_status.raw,
-                               lane);
-
-                       if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
-                               !lane_status.bits.CR_DONE_0 ||
-                               !lane_status.bits.SYMBOL_LOCKED_0) {
-                               /* if one of the channel equalization, clock
-                                * recovery or symbol lock is dropped
-                                * consider it as (link has been
-                                * dropped) dp sink status has changed*/
-                               sink_status_changed = true;
-                               break;
-                       }
+               dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+                       "%s: Link Status changed.\n", __func__);
 
-               }
+               return_code = true;
 
-               /* Check interlane align.*/
-               if (sink_status_changed ||
-                       !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.
-                       INTERLANE_ALIGN_DONE) {
+               /*2. Check that we can handle interrupt: Not in FS DOS,
+                *  Not in "Display Timeout" state, Link is trained.
+                */
+               dpcd_result = core_link_read_dpcd(link,
+                       DP_SET_POWER,
+                       &irq_reg_rx_power_state,
+                       sizeof(irq_reg_rx_power_state));
 
+               if (dpcd_result != DC_OK) {
                        dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
-                               "%s: Link Status changed.\n",
+                               "%s: DPCD read failed to obtain power state.\n",
                                __func__);
-
-                       return_code = true;
+               } else {
+                       if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+                               return_code = false;
                }
        }
 
@@ -2062,6 +2057,24 @@ bool is_dp_active_dongle(const struct dc_link *link)
                        (dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
 }
 
+static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
+{
+       switch (bpc) {
+       case DOWN_STREAM_MAX_8BPC:
+               return 8;
+       case DOWN_STREAM_MAX_10BPC:
+               return 10;
+       case DOWN_STREAM_MAX_12BPC:
+               return 12;
+       case DOWN_STREAM_MAX_16BPC:
+               return 16;
+       default:
+               break;
+       }
+
+       return -1;
+}
+
 static void get_active_converter_info(
        uint8_t data, struct dc_link *link)
 {
@@ -2131,7 +2144,8 @@ static void get_active_converter_info(
                                        hdmi_caps.bits.YCrCr420_CONVERSION;
 
                                link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
-                                       hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT;
+                                       translate_dpcd_max_bpc(
+                                               hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
 
                                link->dpcd_caps.dongle_caps.extendedCapValid = true;
                        }
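
translate_dpcd_max_bpc(), added in the first hunk above, matters because the DPCD MAX_BITS_PER_COLOR_COMPONENT field is a 2-bit code, not a bit depth; storing the raw code in dp_hdmi_max_bpc would make the "< 10" / "< 12" comparisons in dp_active_dongle_validate_timing() reject nearly everything. The mapping 0/1/2/3 -> 8/10/12/16 bpc is my reading of the downstream-port capability layout, stated as an assumption; a tiny illustration:

#include <stdio.h>

/* DPCD reports a 2-bit code, not a bpc count. */
static int dpcd_code_to_bpc(unsigned int code)
{
        static const int bpc[] = { 8, 10, 12, 16 };

        return code < 4 ? bpc[code] : -1;
}

int main(void)
{
        unsigned int raw = 2;                   /* dongle capable of 12 bpc */

        printf("raw code:   %u < 10 -> %s\n", raw,
               raw < 10 ? "reject 10 bpc timing" : "accept");
        printf("translated: %d < 10 -> %s\n", dpcd_code_to_bpc(raw),
               dpcd_code_to_bpc(raw) < 10 ? "reject 10 bpc timing" : "accept");
        return 0;
}
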
index d1cdf9f8853d7754df856740439740e1493ae111..9288958098676611dc91b632e53e00802ef6b7bc 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -516,13 +516,11 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
                        right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
 
                if (right_view) {
-                       data->viewport.width /= 2;
-                       data->viewport_c.width /= 2;
-                       data->viewport.x +=  data->viewport.width;
-                       data->viewport_c.x +=  data->viewport_c.width;
+                       data->viewport.x +=  data->viewport.width / 2;
+                       data->viewport_c.x +=  data->viewport_c.width / 2;
                        /* Ceil offset pipe */
-                       data->viewport.width += data->viewport.width % 2;
-                       data->viewport_c.width += data->viewport_c.width % 2;
+                       data->viewport.width = (data->viewport.width + 1) / 2;
+                       data->viewport_c.width = (data->viewport_c.width + 1) / 2;
                } else {
                        data->viewport.width /= 2;
                        data->viewport_c.width /= 2;
@@ -580,14 +578,12 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
        if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state ==
                pipe_ctx->plane_state) {
                if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
-                       pipe_ctx->plane_res.scl_data.recout.height /= 2;
-                       pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height;
+                       pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height / 2;
                        /* Floor primary pipe, ceil 2ndary pipe */
-                       pipe_ctx->plane_res.scl_data.recout.height += pipe_ctx->plane_res.scl_data.recout.height % 2;
+                       pipe_ctx->plane_res.scl_data.recout.height = (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
                } else {
-                       pipe_ctx->plane_res.scl_data.recout.width /= 2;
-                       pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
-                       pipe_ctx->plane_res.scl_data.recout.width += pipe_ctx->plane_res.scl_data.recout.width % 2;
+                       pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width / 2;
+                       pipe_ctx->plane_res.scl_data.recout.width = (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
                }
        } else if (pipe_ctx->bottom_pipe &&
                        pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) {
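
Both hunks above fix the same arithmetic: when a surface is split across two pipes, an odd width (or height) must be divided as floor + ceil, with the offset pipe starting right after the floor half. The old code added `width % 2` after the width had already been halved, so the remainder was always zero and one row or column of an odd-sized viewport was lost. A minimal sketch with a worked odd case:

#include <stdio.h>

/* Split `width` pixels starting at `x` across two pipes so the halves tile
 * exactly; the offset (second) pipe takes the ceiling half.
 */
static void split(int x, int width, int offset_pipe, int *out_x, int *out_w)
{
        if (offset_pipe) {
                *out_x = x + width / 2;         /* start after the floor half */
                *out_w = (width + 1) / 2;       /* ceil(width / 2) */
        } else {
                *out_x = x;
                *out_w = width / 2;             /* floor(width / 2) */
        }
}

int main(void)
{
        int lx, lw, rx, rw;

        split(0, 1281, 0, &lx, &lw);
        split(0, 1281, 1, &rx, &rw);

        /* 640 + 641 = 1281: columns 0..639 and 640..1280, nothing lost */
        printf("first  x=%d w=%d\nsecond x=%d w=%d\n", lx, lw, rx, rw);
        return 0;
}
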
@@ -856,6 +852,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
        pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
        pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
 
+
        /* Taps calculations */
        if (pipe_ctx->plane_res.xfm != NULL)
                res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
@@ -864,16 +861,21 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
        if (pipe_ctx->plane_res.dpp != NULL)
                res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
                                pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
-
        if (!res) {
                /* Try 24 bpp linebuffer */
                pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
 
-               res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
-                       pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+               if (pipe_ctx->plane_res.xfm != NULL)
+                       res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+                                       pipe_ctx->plane_res.xfm,
+                                       &pipe_ctx->plane_res.scl_data,
+                                       &plane_state->scaling_quality);
 
-               res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
-                       pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+               if (pipe_ctx->plane_res.dpp != NULL)
+                       res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+                                       pipe_ctx->plane_res.dpp,
+                                       &pipe_ctx->plane_res.scl_data,
+                                       &plane_state->scaling_quality);
        }
 
        if (res)
@@ -991,8 +993,10 @@ static struct pipe_ctx *acquire_free_pipe_for_stream(
 
        head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
 
-       if (!head_pipe)
+       if (!head_pipe) {
                ASSERT(0);
+               return NULL;
+       }
 
        if (!head_pipe->plane_state)
                return head_pipe;
@@ -1447,11 +1451,16 @@ static struct stream_encoder *find_first_free_match_stream_enc_for_link(
 
 static struct audio *find_first_free_audio(
                struct resource_context *res_ctx,
-               const struct resource_pool *pool)
+               const struct resource_pool *pool,
+               enum engine_id id)
 {
        int i;
        for (i = 0; i < pool->audio_count; i++) {
                if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
+                       /* we have enough audio endpoints, find the matching instance */
+                       if (id != i)
+                               continue;
+
                        return pool->audios[i];
                }
        }
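
find_first_free_audio() above gains an engine id parameter so the audio endpoint instance stays aligned with the stream encoder instance that was just acquired (the call site in the next hunk passes stream_enc->id), instead of handing out the first free endpoint. A simplified sketch of index-matched allocation; the fixed-size arrays stand in for the real resource_context/resource_pool bookkeeping:

#include <stdbool.h>
#include <stdio.h>

#define NUM_AUDIO 6

/* Return the free audio endpoint whose instance matches the stream
 * encoder's engine id, or -1 if that endpoint is not available.
 */
static int find_matching_audio(const bool audio_acquired[NUM_AUDIO],
                               int engine_id)
{
        int i;

        for (i = 0; i < NUM_AUDIO; i++) {
                if (audio_acquired[i])
                        continue;
                if (i != engine_id)
                        continue;       /* keep audio inst == encoder inst */
                return i;
        }
        return -1;
}

int main(void)
{
        bool acquired[NUM_AUDIO] = { true, false, false, false, false, false };

        /* encoder engine 2 gets audio 2, not the first free endpoint (1) */
        printf("%d\n", find_matching_audio(acquired, 2));
        return 0;
}
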
@@ -1700,7 +1709,7 @@ enum dc_status resource_map_pool_resources(
            dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
            stream->audio_info.mode_count) {
                pipe_ctx->stream_res.audio = find_first_free_audio(
-               &context->res_ctx, pool);
+               &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
 
                /*
                 * Audio assigned in order first come first get.
@@ -1765,13 +1774,16 @@ enum dc_status dc_validate_global_state(
        enum dc_status result = DC_ERROR_UNEXPECTED;
        int i, j;
 
+       if (!new_ctx)
+               return DC_ERROR_UNEXPECTED;
+
        if (dc->res_pool->funcs->validate_global) {
                        result = dc->res_pool->funcs->validate_global(dc, new_ctx);
                        if (result != DC_OK)
                                return result;
        }
 
-       for (i = 0; new_ctx && i < new_ctx->stream_count; i++) {
+       for (i = 0; i < new_ctx->stream_count; i++) {
                struct dc_stream_state *stream = new_ctx->streams[i];
 
                for (j = 0; j < dc->res_pool->pipe_count; j++) {
index b00a6040a69746e24b1291ae9250e225b0290e68..e230cc44a0a7d31f5889d5cb0f4f0688612ace4b 100644 (file)
@@ -263,7 +263,6 @@ bool dc_stream_set_cursor_position(
                struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
                struct mem_input *mi = pipe_ctx->plane_res.mi;
                struct hubp *hubp = pipe_ctx->plane_res.hubp;
-               struct transform *xfm = pipe_ctx->plane_res.xfm;
                struct dpp *dpp = pipe_ctx->plane_res.dpp;
                struct dc_cursor_position pos_cpy = *position;
                struct dc_cursor_mi_param param = {
@@ -294,11 +293,11 @@ bool dc_stream_set_cursor_position(
                if (mi != NULL && mi->funcs->set_cursor_position != NULL)
                        mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
 
-               if (hubp != NULL && hubp->funcs->set_cursor_position != NULL)
-                       hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+               if (!hubp)
+                       continue;
 
-               if (xfm != NULL && xfm->funcs->set_cursor_position != NULL)
-                       xfm->funcs->set_cursor_position(xfm, &pos_cpy, &param, hubp->curs_attr.width);
+               if (hubp->funcs->set_cursor_position != NULL)
+                       hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
 
                if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
                        dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
index 0d84b2a1ccfdf276785b89be795aee0478899d2c..90e81f7ba9199384a0733de7f87cb5069f073352 100644 (file)
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
 /*
  * dc_helper.c
  *
index 8abec0bed379c15c4c3fb52f620af53ee4af6fb7..11401fd8e5356c8e4866f6660df34ff2da1e155b 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for common 'dce' logic
 # HW object file under this folder follow similar pattern for HW programming
 #   - register offset and/or shift + mask stored in the dec_hw struct
index 81c40f8864db2086324da7d21abccacb53289744..0df9ecb2710c2ead2e23833a26b4aa03b53706ef 100644 (file)
@@ -352,11 +352,11 @@ void dce_aud_az_enable(struct audio *audio)
        uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
 
        set_reg_field_value(value, 1,
-                       AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
-                       CLOCK_GATING_DISABLE);
-               set_reg_field_value(value, 1,
-                       AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
-                       AUDIO_ENABLED);
+                           AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+                           CLOCK_GATING_DISABLE);
+       set_reg_field_value(value, 1,
+                           AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+                           AUDIO_ENABLED);
 
        AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
        value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
index 4fd49a16c3b6ef8d225d16eec01a89bb3a1f9954..e42b6eb1c1f0e133d4ae4c33d6fe804033db275e 100644 (file)
@@ -87,6 +87,9 @@ static void dce110_update_generic_info_packet(
         */
        uint32_t max_retries = 50;
 
+       /* we need to turn on the clock before programming the AFMT block */
+       REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
        if (REG(AFMT_VBI_PACKET_CONTROL1)) {
                if (packet_index >= 8)
                        ASSERT(0);
index ea40870624b3884b41b2369987ecc850919f4f29..a822d4e2a1693881f090d776f6fbc11ab68d5f24 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
index 90911258bdb3ac92a490846199dfd85ba701ca1a..3ea43e2a9450ce562cd01b8a5c18a9ee060d2a62 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
index de8fdf438f9b34125d7179aa6af941f68f18453a..2f366d66635d8f11ed566e025a69fafa6df96711 100644 (file)
@@ -1,3 +1,26 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
 /*
  * dce100_resource.h
  *
index 98d956e2f218f7d2e4142f586513276b48f5a108..d564c0eb8b045393ae235c6169b6c85b6af94693 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
index 1229a3315018e4c4fb6af64134b4deb211716360..07ff8d2faf3f4630276d9241092f605274375cda 100644 (file)
@@ -991,6 +991,16 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
        struct dc_link *link = stream->sink->link;
        struct dc *dc = pipe_ctx->stream->ctx->dc;
 
+       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+               pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+                       pipe_ctx->stream_res.stream_enc);
+
+       if (dc_is_dp_signal(pipe_ctx->stream->signal))
+               pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+                       pipe_ctx->stream_res.stream_enc);
+
+       pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+                       pipe_ctx->stream_res.stream_enc, true);
        if (pipe_ctx->stream_res.audio) {
                pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
 
@@ -1015,18 +1025,6 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
                 */
        }
 
-       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
-               pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
-                       pipe_ctx->stream_res.stream_enc);
-
-       if (dc_is_dp_signal(pipe_ctx->stream->signal))
-               pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
-                       pipe_ctx->stream_res.stream_enc);
-
-       pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
-                       pipe_ctx->stream_res.stream_enc, true);
-
-
        /* blank at encoder level */
        if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
                if (pipe_ctx->stream->sink->link->connector_signal == SIGNAL_TYPE_EDP)
@@ -1774,6 +1772,10 @@ static enum dc_status validate_fbc(struct dc *dc,
        if (pipe_ctx->stream->sink->link->psr_enabled)
                return DC_ERROR_UNEXPECTED;
 
+       /* Nothing to compress */
+       if (!pipe_ctx->plane_state)
+               return DC_ERROR_UNEXPECTED;
+
        /* Only for non-linear tiling */
        if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
                return DC_ERROR_UNEXPECTED;
@@ -1868,8 +1870,10 @@ static void dce110_reset_hw_ctx_wrap(
                                pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
                        struct clock_source *old_clk = pipe_ctx_old->clock_source;
 
-                       /* disable already, no need to disable again */
-                       if (pipe_ctx->stream && !pipe_ctx->stream->dpms_off)
+                       /* Disable if the new stream is null. Otherwise, if the
+                        * stream is already disabled, no need to disable again.
+                        */
+                       if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off)
                                core_link_disable_stream(pipe_ctx_old, FREE_ACQUIRED_RESOURCE);
 
                        pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true);
index db96d2b47ff1627ab2240c213ff01a69d83ea4fb..42df17f9aa8de440fe17132f03fd36d779f1f56b 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -1037,11 +1037,13 @@ static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
        struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv),
                                                 GFP_KERNEL);
 
-       if ((dce110_tgv == NULL) ||
-               (dce110_xfmv == NULL) ||
-               (dce110_miv == NULL) ||
-               (dce110_oppv == NULL))
-                       return false;
+       if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) {
+               kfree(dce110_tgv);
+               kfree(dce110_xfmv);
+               kfree(dce110_miv);
+               kfree(dce110_oppv);
+               return false;
+       }
 
        dce110_opp_v_construct(dce110_oppv, ctx);
 
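
The underlay_create() hunk above plugs a leak: if only some of the four kzalloc() calls succeed, the old code returned false and dropped the successful allocations. Because kfree(NULL) is a no-op, freeing all four pointers unconditionally on any failure is safe. The same pattern in plain C, where free(NULL) is likewise a no-op:

#include <stdio.h>
#include <stdlib.h>

struct part { int dummy; };

/* Allocate four parts; on any failure free them all and report an error. */
static int create_parts(struct part *out[4])
{
        int i;

        for (i = 0; i < 4; i++)
                out[i] = calloc(1, sizeof(struct part));

        if (!out[0] || !out[1] || !out[2] || !out[3]) {
                for (i = 0; i < 4; i++) {
                        free(out[i]);           /* free(NULL) is harmless */
                        out[i] = NULL;
                }
                return -1;
        }
        return 0;
}

int main(void)
{
        struct part *parts[4];

        printf("%d\n", create_parts(parts));    /* 0 on success */
        return 0;
}
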
index 67ac737eaa7e9de169d5aca845c3384dacfc16fb..4befce6cd87a28ea5a330697b8e7c657dbc37cf5 100644 (file)
@@ -1112,10 +1112,7 @@ bool dce110_timing_generator_validate_timing(
        enum signal_type signal)
 {
        uint32_t h_blank;
-       uint32_t h_back_porch;
-       uint32_t hsync_offset = timing->h_border_right +
-                       timing->h_front_porch;
-       uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+       uint32_t h_back_porch, hsync_offset, h_sync_start;
 
        struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
 
@@ -1124,6 +1121,9 @@ bool dce110_timing_generator_validate_timing(
        if (!timing)
                return false;
 
+       hsync_offset = timing->h_border_right + timing->h_front_porch;
+       h_sync_start = timing->h_addressable + hsync_offset;
+
        /* Currently we don't support 3D, so block all 3D timings */
        if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE)
                return false;
index 07d9303d54772581a42fc1888b3267c0f44e728d..59b4cd3297155129096e7a16c4edd4ece401fd5a 100644 (file)
@@ -1,3 +1,26 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #include "dm_services.h"
 
 /* include DCE11 register header files */
index 265ac4310d855cd8ad8493ec7cb6d7de7bdd750f..8e090446d511937361db77ed56471a44d6ed897b 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
index 1779b963525cda70d14c855a0e0acd0eb336cd63..37db1f8d45ea547862b07b2503c4a55d6485c8ff 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
@@ -8,4 +29,4 @@ dce120_hw_sequencer.o
 
 AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120))
 
-AMD_DISPLAY_FILES += $(AMD_DAL_DCE120)
\ No newline at end of file
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE120)
index c1105895e5facd2cf3619e5432ba620123b926d5..bc388aa4b2f50cdd5c376b1f1833493dda24f550 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
index ebeb88283a143faf62d06ed0f76374aa80ddf3cd..f565a60429704cae0041846a08cac651f541a603 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for DCN.
 
 DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
index 4c4bd72d4e405ad24e61760828fc3d7c7e439dd3..9fc8f827f2a187f0f72dbc888a1ca25d85986205 100644 (file)
@@ -912,11 +912,13 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
        struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
        struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
 
-       if (!head_pipe)
+       if (!head_pipe) {
                ASSERT(0);
+               return NULL;
+       }
 
        if (!idle_pipe)
-               return false;
+               return NULL;
 
        idle_pipe->stream = head_pipe->stream;
        idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
index c7333cdf18021750b9cd3805d2f1bcb1008eea69..fced178c8c794493bb3edacce369c1cd81ee00d4 100644 (file)
@@ -496,9 +496,6 @@ static bool tgn10_validate_timing(
                timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA)
                return false;
 
-       if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE &&
-               tg->ctx->dc->debug.disable_stereo_support)
-               return false;
        /* Temporarily blocking interlacing mode until it's supported */
        if (timing->flags.INTERLACE == 1)
                return false;
index 87bab8e8139fbb05e960a88d989455ef48371ae3..3488af2b5786cd0a299df91da12b8e4b2ed3f843 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'utils' sub-component of DAL.
 # It provides the general basic services required by other DAL
 # subcomponents.
index 70d01a9e96760ac2828dd5537cefe63e53ac3427..562ee189d780c4de20d150a16497c27ef44300a8 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'gpio' sub-component of DAL.
 # It provides the control and status of HW GPIO pins.
 
index 55603400acd99128c0957ebe7ee19d610283003f..352885cb4d0763dd3bb4912e59722abd9ee4589c 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'i2c' sub-component of DAL.
 # It provides the control and status of HW i2c engine of the adapter.
 
index 01df85641684fe2d92ee149fc0b4642a01708ddd..94fc31080fdad1faa14fd4de8c13db8e6ccc2bcc 100644 (file)
@@ -38,7 +38,7 @@ enum dc_status {
        DC_FAIL_DETACH_SURFACES = 8,
        DC_FAIL_SURFACE_VALIDATE = 9,
        DC_NO_DP_LINK_BANDWIDTH = 10,
-       DC_EXCEED_DONGLE_MAX_CLK = 11,
+       DC_EXCEED_DONGLE_CAP = 11,
        DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED = 12,
        DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
        DC_FAIL_SCALING = 14,
index 3d33bcda7059432d4b0343311bf7d3b998accda9..498b7f05c5ca50b3b27b0ab32a1c0af8f25cfb11 100644 (file)
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
 /*
  * link_encoder.h
  *
index 3050afe8e8a96c62c8a7eb26ac70e8ed21c7fcbc..b5db1692393c9e5769f43848cc60cc4317691bde 100644 (file)
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
 /*
  * stream_encoder.h
  *
index 7c08bc62c1f53545d362edb3fb102efb12d25fd0..ea88997e1bbd88a65823ce0091994bbc92638bdd 100644 (file)
@@ -259,13 +259,6 @@ struct transform_funcs {
                        struct transform *xfm_base,
                        const struct dc_cursor_attributes *attr);
 
-       void (*set_cursor_position)(
-                       struct transform *xfm_base,
-                       const struct dc_cursor_position *pos,
-                       const struct dc_cursor_mi_param *param,
-                       uint32_t width
-                       );
-
 };
 
 const uint16_t *get_filter_2tap_16p(void);
index c7e93f7223bdabe032436004d6d2dd0304ebed10..498515aad4a50bf35133755d1bded3a387a0817e 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'audio' sub-component of DAL.
 # It provides the control and status of HW adapter resources,
 # that are global for the ASIC and sharable between pipes.
index fc0b7318d9ccb239bc7127a1b735ff1f7324e3aa..07326d244d50a7333b43e0d3e02bc0dedca35f97 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the virtual sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
index db8e0ff6d7a9c07da7fef4ec5808f73d5bf3a1b9..fb9a499780e8c82c6a833ee424dfe1b36a302fec 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for the 'freesync' sub-module of DAL.
 #
 
index 87cd7009e80f1b59744d4a928c9b66d74c16f5e1..690243001e1aa935be3c29ccf874e0ec82291dad 100644 (file)
@@ -1,4 +1,25 @@
 #
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
 # Makefile for AMD library routines, which are used by AMD driver
 # components.
 #
index 8c55c6e254d99eb008231587bb21beb29c09a450..231785a9e24c67bb31695367f7e1294d15f47bb3 100644 (file)
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
 
 subdir-ccflags-y += \
                -I$(FULL_AMD_PATH)/powerplay/inc/  \
index 824fb6fe54ae97dc2a1768f49141ee6ff0ce3a49..a212c27f2e17c99915e4bede575f6ed40cb263f0 100644 (file)
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
 #
 # Makefile for the 'hw manager' sub-component of powerplay.
 # It provides the hardware management services for the driver.
index 67fae834bc6788680eddd50b8f55a737f4b013ab..8de384bf9a8fb62a12bb90086a17bd4fd04c9de5 100644 (file)
@@ -1,4 +1,26 @@
-// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #include "pp_overdriver.h"
 #include <linux/errno.h>
 
index 08cd70c75d8b8e55c8d19b51b680c703441685bf..9ad1cefff79fc17786cbacfd22af8b00365cc8b5 100644 (file)
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #ifndef SMU72_H
 #define SMU72_H
 
index b2edbc0c3c4dfea4b66d3c8d5b7b5a38b24c165a..2aefbb85f62003de2e9020f5d8496642dcdebac3 100644 (file)
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #ifndef SMU72_DISCRETE_H
 #define SMU72_DISCRETE_H
 
index 30d3089d7dbafdc45ce9b666edb4a65916ab029d..98e701e4f55383eeee93d1a7ebd60c8d15e7b80c 100644 (file)
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
 #
 # Makefile for the 'smu manager' sub-component of powerplay.
 # It provides the smu management services for the driver.
index 283a0dc25e84eac15d6383e1a5608ad885f3453b..07129e6c31a9779845aa26c77edf596138def01a 100644 (file)
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _GPU_SCHED_TRACE_H_
 
index 72b22b805412b2cb19d8d90e85904be737ecf335..5a5427bbd70e47e8e8e1e0b744bf356a6175a6bc 100644 (file)
@@ -317,9 +317,8 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
                                       formats, ARRAY_SIZE(formats),
                                       NULL,
                                       DRM_PLANE_TYPE_PRIMARY, NULL);
-       if (ret) {
+       if (ret)
                return ERR_PTR(ret);
-       }
 
        drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
        hdlcd->plane = plane;
index 764d0c83710ca563554672d06332fc10883b5de7..0afb53b1f4e92b516088b03e287b09936818dd43 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/console.h>
 #include <linux/list.h>
 #include <linux/of_graph.h>
 #include <linux/of_reserved_mem.h>
@@ -354,7 +355,7 @@ err_unload:
 err_free:
        drm_mode_config_cleanup(drm);
        dev_set_drvdata(dev, NULL);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 
        return ret;
 }
@@ -379,7 +380,7 @@ static void hdlcd_drm_unbind(struct device *dev)
        pm_runtime_disable(drm->dev);
        of_reserved_mem_device_release(drm->dev);
        drm_mode_config_cleanup(drm);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
        drm->dev_private = NULL;
        dev_set_drvdata(dev, NULL);
 }
@@ -432,9 +433,11 @@ static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
                return 0;
 
        drm_kms_helper_poll_disable(drm);
+       drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1);
 
        hdlcd->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(hdlcd->state)) {
+               drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
                drm_kms_helper_poll_enable(drm);
                return PTR_ERR(hdlcd->state);
        }
@@ -451,8 +454,8 @@ static int __maybe_unused hdlcd_pm_resume(struct device *dev)
                return 0;
 
        drm_atomic_helper_resume(drm, hdlcd->state);
+       drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
        drm_kms_helper_poll_enable(drm);
-       pm_runtime_set_active(dev);
 
        return 0;
 }
index 3615d18a7ddf3a5cc49f7d09d68eec35fccc6e3c..904fff80917baa09bfa5cdafa2f56ee2aa4b4474 100644 (file)
@@ -65,8 +65,8 @@ static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
        /* We rely on firmware to set mclk to a sensible level. */
        clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
 
-       hwdev->modeset(hwdev, &vm);
-       hwdev->leave_config_mode(hwdev);
+       hwdev->hw->modeset(hwdev, &vm);
+       hwdev->hw->leave_config_mode(hwdev);
        drm_crtc_vblank_on(crtc);
 }
 
@@ -77,8 +77,12 @@ static void malidp_crtc_atomic_disable(struct drm_crtc *crtc,
        struct malidp_hw_device *hwdev = malidp->dev;
        int err;
 
+       /* always disable planes on the CRTC that is being turned off */
+       drm_atomic_helper_disable_planes_on_crtc(old_state, false);
+
        drm_crtc_vblank_off(crtc);
-       hwdev->enter_config_mode(hwdev);
+       hwdev->hw->enter_config_mode(hwdev);
+
        clk_disable_unprepare(hwdev->pxlclk);
 
        err = pm_runtime_put(crtc->dev->dev);
@@ -319,7 +323,7 @@ static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc,
 
 mclk_calc:
        drm_display_mode_to_videomode(&state->adjusted_mode, &vm);
-       ret = hwdev->se_calc_mclk(hwdev, s, &vm);
+       ret = hwdev->hw->se_calc_mclk(hwdev, s, &vm);
        if (ret < 0)
                return -EINVAL;
        return 0;
@@ -475,7 +479,7 @@ static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
-                            hwdev->map.de_irq_map.vsync_irq);
+                            hwdev->hw->map.de_irq_map.vsync_irq);
        return 0;
 }
 
@@ -485,7 +489,7 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
-                             hwdev->map.de_irq_map.vsync_irq);
+                             hwdev->hw->map.de_irq_map.vsync_irq);
 }
 
 static const struct drm_crtc_funcs malidp_crtc_funcs = {
index b8944666a18f0e72ea4715ff8742c5675b90d8a4..91f2b0191368c942ddb936ad6e736e560342fe99 100644 (file)
@@ -47,10 +47,10 @@ static void malidp_write_gamma_table(struct malidp_hw_device *hwdev,
         * directly.
         */
        malidp_hw_write(hwdev, gamma_write_mask,
-                       hwdev->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
+                       hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
        for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
                malidp_hw_write(hwdev, data[i],
-                               hwdev->map.coeffs_base +
+                               hwdev->hw->map.coeffs_base +
                                MALIDP_COEF_TABLE_DATA);
 }
 
@@ -103,7 +103,7 @@ void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc,
                        for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
                                malidp_hw_write(hwdev,
                                                mc->coloradj_coeffs[i],
-                                               hwdev->map.coeffs_base +
+                                               hwdev->hw->map.coeffs_base +
                                                MALIDP_COLOR_ADJ_COEF + 4 * i);
 
                malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
@@ -120,8 +120,8 @@ static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
        struct malidp_hw_device *hwdev = malidp->dev;
        struct malidp_se_config *s = &cs->scaler_config;
        struct malidp_se_config *old_s = &old_cs->scaler_config;
-       u32 se_control = hwdev->map.se_base +
-                        ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+       u32 se_control = hwdev->hw->map.se_base +
+                        ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
                         0x10 : 0xC);
        u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
        u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
@@ -135,7 +135,7 @@ static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
                return;
        }
 
-       hwdev->se_set_scaling_coeffs(hwdev, s, old_s);
+       hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s);
        val = malidp_hw_read(hwdev, se_control);
        val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;
 
@@ -170,9 +170,9 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
        int ret;
 
        atomic_set(&malidp->config_valid, 0);
-       hwdev->set_config_valid(hwdev);
+       hwdev->hw->set_config_valid(hwdev);
        /* don't wait for config_valid flag if we are in config mode */
-       if (hwdev->in_config_mode(hwdev))
+       if (hwdev->hw->in_config_mode(hwdev))
                return 0;
 
        ret = wait_event_interruptible_timeout(malidp->wq,
@@ -455,7 +455,7 @@ static int malidp_runtime_pm_suspend(struct device *dev)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        /* we can only suspend if the hardware is in config mode */
-       WARN_ON(!hwdev->in_config_mode(hwdev));
+       WARN_ON(!hwdev->hw->in_config_mode(hwdev));
 
        hwdev->pm_suspended = true;
        clk_disable_unprepare(hwdev->mclk);
@@ -500,11 +500,7 @@ static int malidp_bind(struct device *dev)
        if (!hwdev)
                return -ENOMEM;
 
-       /*
-        * copy the associated data from malidp_drm_of_match to avoid
-        * having to keep a reference to the OF node after binding
-        */
-       memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
+       hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev);
        malidp->dev = hwdev;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -568,13 +564,13 @@ static int malidp_bind(struct device *dev)
                goto query_hw_fail;
        }
 
-       ret = hwdev->query_hw(hwdev);
+       ret = hwdev->hw->query_hw(hwdev);
        if (ret) {
                DRM_ERROR("Invalid HW configuration\n");
                goto query_hw_fail;
        }
 
-       version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID);
+       version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID);
        DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
                 (version >> 12) & 0xf, (version >> 8) & 0xf);
 
@@ -589,7 +585,7 @@ static int malidp_bind(struct device *dev)
 
        for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
                out_depth = (out_depth << 8) | (output_width[i] & 0xf);
-       malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
+       malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
 
        atomic_set(&malidp->config_valid, 0);
        init_waitqueue_head(&malidp->wq);
@@ -671,7 +667,7 @@ query_hw_fail:
                malidp_runtime_pm_suspend(dev);
        drm->dev_private = NULL;
        dev_set_drvdata(dev, NULL);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 alloc_fail:
        of_reserved_mem_device_release(dev);
 
@@ -704,7 +700,7 @@ static void malidp_unbind(struct device *dev)
                malidp_runtime_pm_suspend(dev);
        drm->dev_private = NULL;
        dev_set_drvdata(dev, NULL);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
        of_reserved_mem_device_release(dev);
 }
 
index 17bca99e8ac825334c982122fec82f22ebac636c..2bfb542135ac5ab3201a9934fe32413d6984e354 100644 (file)
@@ -183,7 +183,7 @@ static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev)
 
        malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
        while (count) {
-               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
                if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
                        break;
                /*
@@ -203,7 +203,7 @@ static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
        malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
        malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
        while (count) {
-               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
                if ((status & MALIDP500_DC_CONFIG_REQ) == 0)
                        break;
                usleep_range(100, 1000);
@@ -216,7 +216,7 @@ static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
 {
        u32 status;
 
-       status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+       status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
        if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
                return true;
 
@@ -407,7 +407,7 @@ static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev)
 
        malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
        while (count) {
-               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
                if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
                        break;
                /*
@@ -427,7 +427,7 @@ static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
        malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
        malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
        while (count) {
-               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
                if ((status & MALIDP550_DC_CONFIG_REQ) == 0)
                        break;
                usleep_range(100, 1000);
@@ -440,7 +440,7 @@ static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
 {
        u32 status;
 
-       status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+       status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
        if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
                return true;
 
@@ -616,7 +616,7 @@ static int malidp650_query_hw(struct malidp_hw_device *hwdev)
        return 0;
 }
 
-const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
+const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
        [MALIDP_500] = {
                .map = {
                        .coeffs_base = MALIDP500_COEFFS_BASE,
@@ -751,7 +751,7 @@ static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 ir
 {
        u32 base = malidp_get_block_base(hwdev, block);
 
-       if (hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
+       if (hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
                malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ);
        else
                malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS);
@@ -762,12 +762,14 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
        struct drm_device *drm = arg;
        struct malidp_drm *malidp = drm->dev_private;
        struct malidp_hw_device *hwdev;
+       struct malidp_hw *hw;
        const struct malidp_irq_map *de;
        u32 status, mask, dc_status;
        irqreturn_t ret = IRQ_NONE;
 
        hwdev = malidp->dev;
-       de = &hwdev->map.de_irq_map;
+       hw = hwdev->hw;
+       de = &hw->map.de_irq_map;
 
        /*
         * if we are suspended it is likely that we were invoked because
@@ -778,8 +780,8 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
                return IRQ_NONE;
 
        /* first handle the config valid IRQ */
-       dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
-       if (dc_status & hwdev->map.dc_irq_map.vsync_irq) {
+       dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
+       if (dc_status & hw->map.dc_irq_map.vsync_irq) {
                /* we have a page flip event */
                atomic_set(&malidp->config_valid, 1);
                malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
@@ -832,11 +834,11 @@ int malidp_de_irq_init(struct drm_device *drm, int irq)
 
        /* first enable the DC block IRQs */
        malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
-                            hwdev->map.dc_irq_map.irq_mask);
+                            hwdev->hw->map.dc_irq_map.irq_mask);
 
        /* now enable the DE block IRQs */
        malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
-                            hwdev->map.de_irq_map.irq_mask);
+                            hwdev->hw->map.de_irq_map.irq_mask);
 
        return 0;
 }
@@ -847,9 +849,9 @@ void malidp_de_irq_fini(struct drm_device *drm)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
-                             hwdev->map.de_irq_map.irq_mask);
+                             hwdev->hw->map.de_irq_map.irq_mask);
        malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
-                             hwdev->map.dc_irq_map.irq_mask);
+                             hwdev->hw->map.dc_irq_map.irq_mask);
 }
 
 static irqreturn_t malidp_se_irq(int irq, void *arg)
@@ -857,6 +859,8 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
        struct drm_device *drm = arg;
        struct malidp_drm *malidp = drm->dev_private;
        struct malidp_hw_device *hwdev = malidp->dev;
+       struct malidp_hw *hw = hwdev->hw;
+       const struct malidp_irq_map *se = &hw->map.se_irq_map;
        u32 status, mask;
 
        /*
@@ -867,12 +871,12 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
        if (hwdev->pm_suspended)
                return IRQ_NONE;
 
-       status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
-       if (!(status & hwdev->map.se_irq_map.irq_mask))
+       status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
+       if (!(status & se->irq_mask))
                return IRQ_NONE;
 
-       mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ);
-       status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
+       mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ);
+       status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
        status &= mask;
        /* ToDo: status decoding and firing up of VSYNC and page flip events */
 
@@ -905,7 +909,7 @@ int malidp_se_irq_init(struct drm_device *drm, int irq)
        }
 
        malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
-                            hwdev->map.se_irq_map.irq_mask);
+                            hwdev->hw->map.se_irq_map.irq_mask);
 
        return 0;
 }
@@ -916,5 +920,5 @@ void malidp_se_irq_fini(struct drm_device *drm)
        struct malidp_hw_device *hwdev = malidp->dev;
 
        malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
-                             hwdev->map.se_irq_map.irq_mask);
+                             hwdev->hw->map.se_irq_map.irq_mask);
 }
index 849ad9a30c3af4b31e98526b954e2b0819275534..b0690ebb356523781bfaf5a6fbbb4a38d39cb5e2 100644 (file)
@@ -120,18 +120,14 @@ struct malidp_hw_regmap {
 /* Unlike DP550/650, DP500 has 3 stride registers in its video layer. */
 #define MALIDP_DEVICE_LV_HAS_3_STRIDES BIT(0)
 
-struct malidp_hw_device {
-       const struct malidp_hw_regmap map;
-       void __iomem *regs;
+struct malidp_hw_device;
 
-       /* APB clock */
-       struct clk *pclk;
-       /* AXI clock */
-       struct clk *aclk;
-       /* main clock for display core */
-       struct clk *mclk;
-       /* pixel clock for display core */
-       struct clk *pxlclk;
+/*
+ * Static structure containing hardware specific data and pointers to
+ * functions that behave differently between various versions of the IP.
+ */
+struct malidp_hw {
+       const struct malidp_hw_regmap map;
 
        /*
         * Validate the driver instance against the hardware bits
@@ -182,15 +178,6 @@ struct malidp_hw_device {
                             struct videomode *vm);
 
        u8 features;
-
-       u8 min_line_size;
-       u16 max_line_size;
-
-       /* track the device PM state */
-       bool pm_suspended;
-
-       /* size of memory used for rotating layers, up to two banks available */
-       u32 rotation_memory[2];
 };
 
 /* Supported variants of the hardware */
@@ -202,7 +189,33 @@ enum {
        MALIDP_MAX_DEVICES
 };
 
-extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES];
+extern const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES];
+
+/*
+ * Structure used by the driver during runtime operation.
+ */
+struct malidp_hw_device {
+       struct malidp_hw *hw;
+       void __iomem *regs;
+
+       /* APB clock */
+       struct clk *pclk;
+       /* AXI clock */
+       struct clk *aclk;
+       /* main clock for display core */
+       struct clk *mclk;
+       /* pixel clock for display core */
+       struct clk *pxlclk;
+
+       u8 min_line_size;
+       u16 max_line_size;
+
+       /* track the device PM state */
+       bool pm_suspended;
+
+       /* size of memory used for rotating layers, up to two banks available */
+       u32 rotation_memory[2];
+};
 
 static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
 {
@@ -240,9 +253,9 @@ static inline u32 malidp_get_block_base(struct malidp_hw_device *hwdev,
 {
        switch (block) {
        case MALIDP_SE_BLOCK:
-               return hwdev->map.se_base;
+               return hwdev->hw->map.se_base;
        case MALIDP_DC_BLOCK:
-               return hwdev->map.dc_base;
+               return hwdev->hw->map.dc_base;
        }
 
        return 0;
@@ -275,7 +288,7 @@ u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
 static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
                                         unsigned int pitch)
 {
-       return !(pitch & (hwdev->map.bus_align_bytes - 1));
+       return !(pitch & (hwdev->hw->map.bus_align_bytes - 1));
 }
 
 /* U16.16 */
@@ -308,8 +321,8 @@ static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev)
        };
        u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) |
                  MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL);
-       u32 image_enh = hwdev->map.se_base +
-                       ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+       u32 image_enh = hwdev->hw->map.se_base +
+                       ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
                         0x10 : 0xC) + MALIDP_SE_IMAGE_ENH;
        u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0;
        int i;
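
The restructuring above splits what used to be a single struct malidp_hw_device into an immutable, per-IP descriptor (struct malidp_hw, kept const in malidp_device[]) and the runtime state that the driver actually mutates (registers, clocks, PM flags). A minimal sketch of how callers are expected to use the new indirection, purely illustrative and reusing only symbols visible in this diff:

static u32 example_read_core_id(struct malidp_hw_device *hwdev)
{
	/* static, version-specific data now lives behind hwdev->hw ... */
	u32 base = hwdev->hw->map.dc_base;

	/* ... while mutable runtime state (regs, clocks, PM) stays in hwdev */
	return malidp_hw_read(hwdev, base + MALIDP_DE_CORE_ID);
}

Keeping malidp_device[] const also removes the need for the earlier memcpy() of the match data into a writable hwdev; binding now just stores the pointer, as seen in malidp_bind() above.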
index 94e7e3fa3408cf163fda7f81b40e76ca73ac4c96..e7419797bbd16c157cab134184c24fd552cd2c44 100644 (file)
@@ -57,7 +57,7 @@ static void malidp_de_plane_destroy(struct drm_plane *plane)
        struct malidp_plane *mp = to_malidp_plane(plane);
 
        if (mp->base.fb)
-               drm_framebuffer_unreference(mp->base.fb);
+               drm_framebuffer_put(mp->base.fb);
 
        drm_plane_helper_disable(plane);
        drm_plane_cleanup(plane);
@@ -185,8 +185,9 @@ static int malidp_de_plane_check(struct drm_plane *plane,
 
        fb = state->fb;
 
-       ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
-                                           fb->format->format);
+       ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
+                                            mp->layer->id,
+                                            fb->format->format);
        if (ms->format == MALIDP_INVALID_FORMAT_ID)
                return -EINVAL;
 
@@ -211,7 +212,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
         * third plane stride register.
         */
        if (ms->n_planes == 3 &&
-           !(mp->hwdev->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
+           !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
            (state->fb->pitches[1] != state->fb->pitches[2]))
                return -EINVAL;
 
@@ -229,9 +230,9 @@ static int malidp_de_plane_check(struct drm_plane *plane,
        if (state->rotation & MALIDP_ROTATED_MASK) {
                int val;
 
-               val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h,
-                                                state->crtc_w,
-                                                fb->format->format);
+               val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h,
+                                                    state->crtc_w,
+                                                    fb->format->format);
                if (val < 0)
                        return val;
 
@@ -251,7 +252,7 @@ static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
                return;
 
        if (num_planes == 3)
-               num_strides = (mp->hwdev->features &
+               num_strides = (mp->hwdev->hw->features &
                               MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;
 
        for (i = 0; i < num_strides; ++i)
@@ -264,13 +265,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
                                   struct drm_plane_state *old_state)
 {
        struct malidp_plane *mp;
-       const struct malidp_hw_regmap *map;
        struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
        u32 src_w, src_h, dest_w, dest_h, val;
        int i;
 
        mp = to_malidp_plane(plane);
-       map = &mp->hwdev->map;
 
        /* convert src values from Q16 fixed point to integer */
        src_w = plane->state->src_w >> 16;
@@ -363,7 +362,7 @@ static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
 int malidp_de_planes_init(struct drm_device *drm)
 {
        struct malidp_drm *malidp = drm->dev_private;
-       const struct malidp_hw_regmap *map = &malidp->dev->map;
+       const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
        struct malidp_plane *plane = NULL;
        enum drm_plane_type plane_type;
        unsigned long crtcs = 1 << drm->mode_config.num_crtc;
index b4efcbabf7f726f6e790400a15dc57127f6b341d..d034b2cb5eee30ee0b1b2fc44d1b010c01263787 100644 (file)
@@ -372,9 +372,18 @@ struct adv7511 {
 };
 
 #ifdef CONFIG_DRM_I2C_ADV7511_CEC
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
-                    unsigned int offset);
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
 void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
+#else
+static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+{
+       unsigned int offset = adv7511->type == ADV7533 ?
+                                               ADV7533_REG_CEC_OFFSET : 0;
+
+       regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+                    ADV7511_CEC_CTRL_POWER_DOWN);
+       return 0;
+}
 #endif
 
 #ifdef CONFIG_DRM_I2C_ADV7533
index b33d730e4d7366880574e345670bb8e9c3bb3e8b..a20a45c0b353f18eb9d2af13f2a5d707e7b1fc5c 100644 (file)
@@ -300,18 +300,21 @@ static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
        return 0;
 }
 
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
-                    unsigned int offset)
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
 {
+       unsigned int offset = adv7511->type == ADV7533 ?
+                                               ADV7533_REG_CEC_OFFSET : 0;
        int ret = adv7511_cec_parse_dt(dev, adv7511);
 
        if (ret)
-               return ret;
+               goto err_cec_parse_dt;
 
        adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
                adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
-       if (IS_ERR(adv7511->cec_adap))
-               return PTR_ERR(adv7511->cec_adap);
+       if (IS_ERR(adv7511->cec_adap)) {
+               ret = PTR_ERR(adv7511->cec_adap);
+               goto err_cec_alloc;
+       }
 
        regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
        /* cec soft reset */
@@ -329,9 +332,18 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
                     ((adv7511->cec_clk_freq / 750000) - 1) << 2);
 
        ret = cec_register_adapter(adv7511->cec_adap, dev);
-       if (ret) {
-               cec_delete_adapter(adv7511->cec_adap);
-               adv7511->cec_adap = NULL;
-       }
-       return ret;
+       if (ret)
+               goto err_cec_register;
+       return 0;
+
+err_cec_register:
+       cec_delete_adapter(adv7511->cec_adap);
+       adv7511->cec_adap = NULL;
+err_cec_alloc:
+       dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
+                ret);
+err_cec_parse_dt:
+       regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+                    ADV7511_CEC_CTRL_POWER_DOWN);
+       return ret == -EPROBE_DEFER ? ret : 0;
 }
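
With the rework above, adv7511_cec_init() computes the ADV7533 register offset itself and, when CEC cannot be brought up, powers the CEC engine down and lets the rest of the bridge probe continue; only -EPROBE_DEFER is propagated so the whole device can be retried. A small sketch of that return convention (the function name and message text are illustrative, not from this driver):

/* Optional sub-feature init: degrade gracefully unless probing must retry. */
static int example_optional_feature_init(struct device *dev, int err)
{
	if (!err)
		return 0;

	dev_info(dev, "optional feature disabled (%d), continuing\n", err);
	return err == -EPROBE_DEFER ? err : 0;
}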
index 0e14f1572d0593452d494a75dd712f7fb194d740..efa29db5fc2b7eeff375d2f6cdafce9340fd6301 100644 (file)
@@ -1084,7 +1084,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
        struct device *dev = &i2c->dev;
        unsigned int main_i2c_addr = i2c->addr << 1;
        unsigned int edid_i2c_addr = main_i2c_addr + 4;
-       unsigned int offset;
        unsigned int val;
        int ret;
 
@@ -1192,24 +1191,16 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
        if (adv7511->type == ADV7511)
                adv7511_set_link_config(adv7511, &link_config);
 
+       ret = adv7511_cec_init(dev, adv7511);
+       if (ret)
+               goto err_unregister_cec;
+
        adv7511->bridge.funcs = &adv7511_bridge_funcs;
        adv7511->bridge.of_node = dev->of_node;
 
        drm_bridge_add(&adv7511->bridge);
 
        adv7511_audio_init(dev, adv7511);
-
-       offset = adv7511->type == ADV7533 ? ADV7533_REG_CEC_OFFSET : 0;
-
-#ifdef CONFIG_DRM_I2C_ADV7511_CEC
-       ret = adv7511_cec_init(dev, adv7511, offset);
-       if (ret)
-               goto err_unregister_cec;
-#else
-       regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
-                    ADV7511_CEC_CTRL_POWER_DOWN);
-#endif
-
        return 0;
 
 err_unregister_cec:
index 5dd3f1cd074a1d36f17022af23c7a23638eaa41e..a8905049b9da022802a44cb836c22fbb86f90560 100644 (file)
@@ -946,7 +946,9 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
                        return 0;
                }
 
+               pm_runtime_get_sync(dp->dev);
                edid = drm_get_edid(connector, &dp->aux.ddc);
+               pm_runtime_put(dp->dev);
                if (edid) {
                        drm_mode_connector_update_edid_property(&dp->connector,
                                                                edid);
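
The hunk above takes a runtime-PM reference around the EDID read so the eDP controller (and its AUX/DDC channel) is guaranteed to be powered while drm_get_edid() runs. The general shape of that pattern, assuming only <linux/pm_runtime.h>:

/* Illustrative: keep the device resumed for the duration of a HW access. */
static int example_access_with_rpm(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* resume or bump the refcount */

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return ret;
	}

	/* ... touch the hardware here ... */

	pm_runtime_put(dev);			/* allow it to suspend again */
	return 0;
}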
index 0903ba574f61c4d1ab40a0e18d77d852bd282c0a..75b0d3f6e4de919301b63af95b28d2b7af4bf79e 100644 (file)
 
 #include <linux/of_graph.h>
 
+struct lvds_encoder {
+       struct drm_bridge bridge;
+       struct drm_bridge *panel_bridge;
+};
+
+static int lvds_encoder_attach(struct drm_bridge *bridge)
+{
+       struct lvds_encoder *lvds_encoder = container_of(bridge,
+                                                        struct lvds_encoder,
+                                                        bridge);
+
+       return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge,
+                                bridge);
+}
+
+static struct drm_bridge_funcs funcs = {
+       .attach = lvds_encoder_attach,
+};
+
 static int lvds_encoder_probe(struct platform_device *pdev)
 {
        struct device_node *port;
        struct device_node *endpoint;
        struct device_node *panel_node;
        struct drm_panel *panel;
-       struct drm_bridge *bridge;
+       struct lvds_encoder *lvds_encoder;
+
+       lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder),
+                                   GFP_KERNEL);
+       if (!lvds_encoder)
+               return -ENOMEM;
 
        /* Locate the panel DT node. */
        port = of_graph_get_port_by_id(pdev->dev.of_node, 1);
@@ -49,20 +73,30 @@ static int lvds_encoder_probe(struct platform_device *pdev)
                return -EPROBE_DEFER;
        }
 
-       bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
-       if (IS_ERR(bridge))
-               return PTR_ERR(bridge);
+       lvds_encoder->panel_bridge =
+               devm_drm_panel_bridge_add(&pdev->dev,
+                                         panel, DRM_MODE_CONNECTOR_LVDS);
+       if (IS_ERR(lvds_encoder->panel_bridge))
+               return PTR_ERR(lvds_encoder->panel_bridge);
+
+       /* The panel_bridge bridge is attached to the panel's of_node,
+        * but we need a bridge attached to our of_node for our user
+        * to look up.
+        */
+       lvds_encoder->bridge.of_node = pdev->dev.of_node;
+       lvds_encoder->bridge.funcs = &funcs;
+       drm_bridge_add(&lvds_encoder->bridge);
 
-       platform_set_drvdata(pdev, bridge);
+       platform_set_drvdata(pdev, lvds_encoder);
 
        return 0;
 }
 
 static int lvds_encoder_remove(struct platform_device *pdev)
 {
-       struct drm_bridge *bridge = platform_get_drvdata(pdev);
+       struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev);
 
-       drm_bridge_remove(bridge);
+       drm_bridge_remove(&lvds_encoder->bridge);
 
        return 0;
 }
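
The wrapper bridge registered above exists only so that a consumer looking up a bridge by the encoder's of_node finds one; attaching it then chains to the devm-managed panel bridge via lvds_encoder_attach(). A hedged sketch of such a consumer (the function and variable names here are illustrative, not from this patch):

/* Hypothetical consumer: look up the lvds-encoder bridge by OF node. */
static int example_attach_lvds_bridge(struct drm_encoder *encoder,
				      struct device_node *lvds_node)
{
	struct drm_bridge *bridge = of_drm_find_bridge(lvds_node);

	if (!bridge)
		return -EPROBE_DEFER;

	/* attaching the wrapper calls lvds_encoder_attach(), which in turn
	 * attaches the panel_bridge created in lvds_encoder_probe()
	 */
	return drm_bridge_attach(encoder, bridge, NULL);
}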
index bf14214fa4640279fa46b655333198ed5aa1446e..b72259bf6e2fb37ce155108a43f6e1ce49de91dc 100644 (file)
@@ -138,6 +138,7 @@ struct dw_hdmi {
        struct device *dev;
        struct clk *isfr_clk;
        struct clk *iahb_clk;
+       struct clk *cec_clk;
        struct dw_hdmi_i2c *i2c;
 
        struct hdmi_data_info hdmi_data;
@@ -2382,6 +2383,26 @@ __dw_hdmi_probe(struct platform_device *pdev,
                goto err_isfr;
        }
 
+       hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec");
+       if (PTR_ERR(hdmi->cec_clk) == -ENOENT) {
+               hdmi->cec_clk = NULL;
+       } else if (IS_ERR(hdmi->cec_clk)) {
+               ret = PTR_ERR(hdmi->cec_clk);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(hdmi->dev, "Cannot get HDMI cec clock: %d\n",
+                               ret);
+
+               hdmi->cec_clk = NULL;
+               goto err_iahb;
+       } else {
+               ret = clk_prepare_enable(hdmi->cec_clk);
+               if (ret) {
+                       dev_err(hdmi->dev, "Cannot enable HDMI cec clock: %d\n",
+                               ret);
+                       goto err_iahb;
+               }
+       }
+
        /* Product and revision IDs */
        hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8)
                      | (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0);
@@ -2518,6 +2539,8 @@ err_iahb:
                cec_notifier_put(hdmi->cec_notifier);
 
        clk_disable_unprepare(hdmi->iahb_clk);
+       if (hdmi->cec_clk)
+               clk_disable_unprepare(hdmi->cec_clk);
 err_isfr:
        clk_disable_unprepare(hdmi->isfr_clk);
 err_res:
@@ -2541,6 +2564,8 @@ static void __dw_hdmi_remove(struct dw_hdmi *hdmi)
 
        clk_disable_unprepare(hdmi->iahb_clk);
        clk_disable_unprepare(hdmi->isfr_clk);
+       if (hdmi->cec_clk)
+               clk_disable_unprepare(hdmi->cec_clk);
 
        if (hdmi->i2c)
                i2c_del_adapter(&hdmi->i2c->adap);
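
The new "cec" clock above is optional: -ENOENT from devm_clk_get() just means the DT did not wire one up, while any other error (including -EPROBE_DEFER) aborts the probe. A condensed sketch of that pattern, assuming only <linux/clk.h>; the clock name "foo" is made up for illustration:

/* Optional clock: absent is fine, any real error is propagated. */
static int example_get_optional_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "foo");

	if (PTR_ERR(clk) == -ENOENT) {
		*out = NULL;			/* not specified in DT */
		return 0;
	}
	if (IS_ERR(clk))
		return PTR_ERR(clk);		/* incl. -EPROBE_DEFER */

	*out = clk;
	return clk_prepare_enable(clk);
}

The matching disable paths in __dw_hdmi_probe()/__dw_hdmi_remove() above guard on hdmi->cec_clk being non-NULL for the same reason.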
index 8571cfd877c520b2e09530f1070a58dae8a4baf9..8636e7eeb7315c471eae9a01dad16d20133a441b 100644 (file)
@@ -97,7 +97,7 @@
 #define DP0_ACTIVEVAL          0x0650
 #define DP0_SYNCVAL            0x0654
 #define DP0_MISC               0x0658
-#define TU_SIZE_RECOMMENDED            (0x3f << 16) /* LSCLK cycles per TU */
+#define TU_SIZE_RECOMMENDED            (63) /* LSCLK cycles per TU */
 #define BPC_6                          (0 << 5)
 #define BPC_8                          (1 << 5)
 
@@ -318,7 +318,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
                                tmp = (tmp << 8) | buf[i];
                        i++;
                        if (((i % 4) == 0) || (i == size)) {
-                               tc_write(DP0_AUXWDATA(i >> 2), tmp);
+                               tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
                                tmp = 0;
                        }
                }
@@ -603,8 +603,15 @@ static int tc_get_display_props(struct tc_data *tc)
        ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
        if (ret < 0)
                goto err_dpcd_read;
-       if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
-               goto err_dpcd_inval;
+       if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
+               dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n");
+               tc->link.base.rate = 270000;
+       }
+
+       if (tc->link.base.num_lanes > 2) {
+               dev_dbg(tc->dev, "Falling to 2 lanes\n");
+               tc->link.base.num_lanes = 2;
+       }
 
        ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
        if (ret < 0)
@@ -637,9 +644,6 @@ static int tc_get_display_props(struct tc_data *tc)
 err_dpcd_read:
        dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
        return ret;
-err_dpcd_inval:
-       dev_err(tc->dev, "invalid DPCD\n");
-       return -EINVAL;
 }
 
 static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
@@ -655,6 +659,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
        int lower_margin = mode->vsync_start - mode->vdisplay;
        int vsync_len = mode->vsync_end - mode->vsync_start;
 
+       /*
+        * Recommended maximum number of symbols transferred in a transfer unit:
+        * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
+        *              (output active video bandwidth in bytes))
+        * Must be less than tu_size.
+        */
+       max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
        dev_dbg(tc->dev, "set mode %dx%d\n",
                mode->hdisplay, mode->vdisplay);
        dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
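
The comment above gives the recommended upper bound as DIV_ROUND_UP(input bandwidth * tu_size, output bandwidth), strictly below tu_size; the patch simply pins max_tu_symbol to the worst case TU_SIZE_RECOMMENDED - 1 (62). For intuition, a worked example with assumed numbers (148.5 MHz pixel clock at 24 bpp feeding two 2.7 Gbps lanes, none of which come from this patch):

/* Illustrative arithmetic only; bandwidths are in kB/s to keep the math in
 * 32 bits. 148.5 MHz * 3 bytes = 445500 kB/s in; two 2.7 Gbps lanes carry
 * 2 * 270000 kB/s of payload after 8b/10b coding.
 */
static unsigned int example_max_tu_symbol(void)
{
	unsigned int tu_size = 63;		/* TU_SIZE_RECOMMENDED */
	unsigned int in_bw  = 445500;		/* input active video, kB/s */
	unsigned int out_bw = 2 * 270000;	/* DP link payload, kB/s */

	return DIV_ROUND_UP(in_bw * tu_size, out_bw);	/* 52, below 63 */
}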
@@ -664,13 +676,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
        dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
 
 
-       /* LCD Ctl Frame Size */
-       tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
+       /*
+        * LCD Ctl Frame Size
+        * datasheet is not clear of vsdelay in case of DPI
+        * assume we do not need any delay when DPI is a source of
+        * sync signals
+        */
+       tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ |
                 OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
-       tc_write(HTIM01, (left_margin << 16) |          /* H back porch */
-                        (hsync_len << 0));             /* Hsync */
-       tc_write(HTIM02, (right_margin << 16) |         /* H front porch */
-                        (mode->hdisplay << 0));        /* width */
+       tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */
+                        (ALIGN(hsync_len, 2) << 0));    /* Hsync */
+       tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) |  /* H front porch */
+                        (ALIGN(mode->hdisplay, 2) << 0)); /* width */
        tc_write(VTIM01, (upper_margin << 16) |         /* V back porch */
                         (vsync_len << 0));             /* Vsync */
        tc_write(VTIM02, (lower_margin << 16) |         /* V front porch */
@@ -689,7 +706,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
        /* DP Main Stream Attributes */
        vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
        tc_write(DP0_VIDSYNCDELAY,
-                (0x003e << 16) |       /* thresh_dly */
+                (max_tu_symbol << 16) |        /* thresh_dly */
                 (vid_sync_dly << 0));
 
        tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
@@ -705,14 +722,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
        tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
                 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
 
-       /*
-        * Recommended maximum number of symbols transferred in a transfer unit:
-        * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
-        *              (output active video bandwidth in bytes))
-        * Must be less than tu_size.
-        */
-       max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
-       tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
+       tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
+                          BPC_8);
 
        return 0;
 err:
@@ -808,8 +819,6 @@ static int tc_main_link_setup(struct tc_data *tc)
        unsigned int rate;
        u32 dp_phy_ctrl;
        int timeout;
-       bool aligned;
-       bool ready;
        u32 value;
        int ret;
        u8 tmp[8];
@@ -954,16 +963,15 @@ static int tc_main_link_setup(struct tc_data *tc)
                ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
                if (ret < 0)
                        goto err_dpcd_read;
-               ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
-                                    DP_CHANNEL_EQ_BITS));      /* Lane0 */
-               aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
-       } while ((--timeout) && !(ready && aligned));
+       } while ((--timeout) &&
+                !(drm_dp_channel_eq_ok(tmp + 2,  tc->link.base.num_lanes)));
 
        if (timeout == 0) {
                /* Read DPCD 0x200-0x201 */
                ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
                if (ret < 0)
                        goto err_dpcd_read;
+               dev_err(dev, "channel(s) EQ not ok\n");
                dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
                dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
                         tmp[1]);
@@ -974,10 +982,6 @@ static int tc_main_link_setup(struct tc_data *tc)
                dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
                         tmp[6]);
 
-               if (!ready)
-                       dev_err(dev, "Lane0/1 not ready\n");
-               if (!aligned)
-                       dev_err(dev, "Lane0/1 not aligned\n");
                return -EAGAIN;
        }
 
@@ -1099,7 +1103,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
 static int tc_connector_mode_valid(struct drm_connector *connector,
                                   struct drm_display_mode *mode)
 {
-       /* Accept any mode */
+       /* DPI interface clock limitation: up to 154 MHz */
+       if (mode->clock > 154000)
+               return MODE_CLOCK_HIGH;
+
        return MODE_OK;
 }
 
index 71d712f1b56a285bac904b0d1e74a7363d766af3..b16f1d69a0bbf345e33e277de5ce73a38974bf73 100644 (file)
@@ -1225,7 +1225,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
                return;
 
        for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
-               if (!new_crtc_state->active || !new_crtc_state->planes_changed)
+               if (!new_crtc_state->active)
                        continue;
 
                ret = drm_crtc_vblank_get(crtc);
index 25f4b2e9a44fcdd5cef668df014001e1c1b1c90e..9ae236036e324c345177bcb8f9cd84b20110a582 100644 (file)
@@ -152,6 +152,25 @@ static void drm_connector_free(struct kref *kref)
        connector->funcs->destroy(connector);
 }
 
+void drm_connector_free_work_fn(struct work_struct *work)
+{
+       struct drm_connector *connector, *n;
+       struct drm_device *dev =
+               container_of(work, struct drm_device, mode_config.connector_free_work);
+       struct drm_mode_config *config = &dev->mode_config;
+       unsigned long flags;
+       struct llist_node *freed;
+
+       spin_lock_irqsave(&config->connector_list_lock, flags);
+       freed = llist_del_all(&config->connector_free_list);
+       spin_unlock_irqrestore(&config->connector_list_lock, flags);
+
+       llist_for_each_entry_safe(connector, n, freed, free_node) {
+               drm_mode_object_unregister(dev, &connector->base);
+               connector->funcs->destroy(connector);
+       }
+}
+
 /**
  * drm_connector_init - Init a preallocated connector
  * @dev: DRM device
@@ -529,6 +548,25 @@ void drm_connector_list_iter_begin(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_connector_list_iter_begin);
 
+/*
+ * Extra-safe connector put function that works in any context. Should only be
+ * used from the connector_iter functions, where we never really expect to
+ * actually release the connector when dropping our final reference.
+ */
+static void
+__drm_connector_put_safe(struct drm_connector *conn)
+{
+       struct drm_mode_config *config = &conn->dev->mode_config;
+
+       lockdep_assert_held(&config->connector_list_lock);
+
+       if (!refcount_dec_and_test(&conn->base.refcount.refcount))
+               return;
+
+       llist_add(&conn->free_node, &config->connector_free_list);
+       schedule_work(&config->connector_free_work);
+}
+
 /**
  * drm_connector_list_iter_next - return next connector
  * @iter: connector_list iterator
@@ -558,10 +596,10 @@ drm_connector_list_iter_next(struct drm_connector_list_iter *iter)
 
                /* loop until it's not a zombie connector */
        } while (!kref_get_unless_zero(&iter->conn->base.refcount));
-       spin_unlock_irqrestore(&config->connector_list_lock, flags);
 
        if (old_conn)
-               drm_connector_put(old_conn);
+               __drm_connector_put_safe(old_conn);
+       spin_unlock_irqrestore(&config->connector_list_lock, flags);
 
        return iter->conn;
 }
@@ -578,9 +616,15 @@ EXPORT_SYMBOL(drm_connector_list_iter_next);
  */
 void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
 {
+       struct drm_mode_config *config = &iter->dev->mode_config;
+       unsigned long flags;
+
        iter->dev = NULL;
-       if (iter->conn)
-               drm_connector_put(iter->conn);
+       if (iter->conn) {
+               spin_lock_irqsave(&config->connector_list_lock, flags);
+               __drm_connector_put_safe(iter->conn);
+               spin_unlock_irqrestore(&config->connector_list_lock, flags);
+       }
        lock_release(&connector_list_iter_dep_map, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(drm_connector_list_iter_end);
@@ -1207,6 +1251,19 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
        if (edid)
                size = EDID_LENGTH * (1 + edid->extensions);
 
+       /* Set the display info, using edid if available, otherwise
+        * resetting the values to defaults. This duplicates the work
+        * done in drm_add_edid_modes, but that function is not
+        * consistently called before this one in all drivers and the
+        * computation is cheap enough that it seems better to
+        * duplicate it rather than attempt to ensure some arbitrary
+        * ordering of calls.
+        */
+       if (edid)
+               drm_add_display_info(connector, edid);
+       else
+               drm_reset_display_info(connector);
+
        drm_object_property_set_value(&connector->base,
                                      dev->mode_config.non_desktop_property,
                                      connector->display_info.non_desktop);
index 9ebb8841778cc99095a2235ab8b1d89f654816f2..af00f42ba269b0da3111ac5be480688815875da8 100644 (file)
@@ -142,6 +142,7 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
                                    uint64_t value);
 int drm_connector_create_standard_properties(struct drm_device *dev);
 const char *drm_get_connector_force_name(enum drm_connector_force force);
+void drm_connector_free_work_fn(struct work_struct *work);
 
 /* IOCTL */
 int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
index 2e8fb51282ef36813873a18017a69f0c5abc3c47..cb487148359a8dca321fae3920c1dd89dd140056 100644 (file)
@@ -1731,7 +1731,7 @@ EXPORT_SYMBOL(drm_edid_duplicate);
  *
  * Returns true if @vendor is in @edid, false otherwise
  */
-static bool edid_vendor(struct edid *edid, const char *vendor)
+static bool edid_vendor(const struct edid *edid, const char *vendor)
 {
        char edid_vendor[3];
 
@@ -1749,7 +1749,7 @@ static bool edid_vendor(struct edid *edid, const char *vendor)
  *
  * This tells subsequent routines what fixes they need to apply.
  */
-static u32 edid_get_quirks(struct edid *edid)
+static u32 edid_get_quirks(const struct edid *edid)
 {
        const struct edid_quirk *quirk;
        int i;
@@ -2813,7 +2813,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 /*
  * Search EDID for CEA extension block.
  */
-static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
+static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
 {
        u8 *edid_ext = NULL;
        int i;
@@ -2835,12 +2835,12 @@ static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
        return edid_ext;
 }
 
-static u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_cea_extension(const struct edid *edid)
 {
        return drm_find_edid_extension(edid, CEA_EXT);
 }
 
-static u8 *drm_find_displayid_extension(struct edid *edid)
+static u8 *drm_find_displayid_extension(const struct edid *edid)
 {
        return drm_find_edid_extension(edid, DISPLAYID_EXT);
 }
@@ -4363,7 +4363,7 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
 }
 
 static void drm_parse_cea_ext(struct drm_connector *connector,
-                             struct edid *edid)
+                             const struct edid *edid)
 {
        struct drm_display_info *info = &connector->display_info;
        const u8 *edid_ext;
@@ -4397,11 +4397,33 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
        }
 }
 
-static void drm_add_display_info(struct drm_connector *connector,
-                                struct edid *edid, u32 quirks)
+/* The connector has no EDID information, so we have no EDID to compute
+ * quirks from. Reset all of the values which would have been set from EDID.
+ */
+void
+drm_reset_display_info(struct drm_connector *connector)
+{
+       struct drm_display_info *info = &connector->display_info;
+
+       info->width_mm = 0;
+       info->height_mm = 0;
+
+       info->bpc = 0;
+       info->color_formats = 0;
+       info->cea_rev = 0;
+       info->max_tmds_clock = 0;
+       info->dvi_dual = false;
+
+       info->non_desktop = 0;
+}
+EXPORT_SYMBOL_GPL(drm_reset_display_info);
+
+u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
 {
        struct drm_display_info *info = &connector->display_info;
 
+       u32 quirks = edid_get_quirks(edid);
+
        info->width_mm = edid->width_cm * 10;
        info->height_mm = edid->height_cm * 10;
 
@@ -4414,11 +4436,13 @@ static void drm_add_display_info(struct drm_connector *connector,
 
        info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
 
+       DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
+
        if (edid->revision < 3)
-               return;
+               return quirks;
 
        if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
-               return;
+               return quirks;
 
        drm_parse_cea_ext(connector, edid);
 
@@ -4438,7 +4462,7 @@ static void drm_add_display_info(struct drm_connector *connector,
 
        /* Only defined for 1.4 with digital displays */
        if (edid->revision < 4)
-               return;
+               return quirks;
 
        switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
        case DRM_EDID_DIGITAL_DEPTH_6:
@@ -4473,7 +4497,9 @@ static void drm_add_display_info(struct drm_connector *connector,
                info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
        if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
                info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+       return quirks;
 }
+EXPORT_SYMBOL_GPL(drm_add_display_info);
 
 static int validate_displayid(u8 *displayid, int length, int idx)
 {
@@ -4627,14 +4653,12 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
                return 0;
        }
 
-       quirks = edid_get_quirks(edid);
-
        /*
         * CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks.
         * To avoid multiple parsing of same block, lets parse that map
         * from sink info, before parsing CEA modes.
         */
-       drm_add_display_info(connector, edid, quirks);
+       quirks = drm_add_display_info(connector, edid);
 
        /*
         * EDID spec says modes should be preferred in this order:
@@ -4831,7 +4855,8 @@ void
 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
                                   const struct drm_display_mode *mode,
                                   enum hdmi_quantization_range rgb_quant_range,
-                                  bool rgb_quant_range_selectable)
+                                  bool rgb_quant_range_selectable,
+                                  bool is_hdmi2_sink)
 {
        /*
         * CEA-861:
@@ -4855,8 +4880,15 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
         *  YQ-field to match the RGB Quantization Range being transmitted
         *  (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB,
         *  set YQ=1) and the Sink shall ignore the YQ-field."
+        *
+        * Unfortunately certain sinks (e.g. VIZ Model 67/E261VA) get confused
+        * by non-zero YQ when receiving RGB. There doesn't seem to be any
+        * good way to tell which version of CEA-861 the sink supports, so
+        * we limit non-zero YQ to HDMI 2.0 sinks only, as HDMI 2.0 is based
+        * on CEA-861-F.
         */
-       if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
+       if (!is_hdmi2_sink ||
+           rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
                frame->ycc_quantization_range =
                        HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
        else
index 07374008f146fa946750b1cfd3bf13bfd1b9c47d..e561663344559b065643464cf669d8b84930007f 100644 (file)
@@ -1809,6 +1809,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 
        if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
                DRM_INFO("Cannot find any crtc or sizes\n");
+
+               /* First time: disable all crtc's.. */
+               /* First time: disable all crtcs. */
+                       restore_fbdev_mode(fb_helper);
                return -EAGAIN;
        }
 
index d1eb56a1eff4078d0d29b33ac2f17b06c4ae0d76..59849f02e2ad5bb74559ea85fbeb6fc1dd97bde6 100644 (file)
@@ -254,10 +254,10 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
        return lessee;
 
 out_lessee:
-       drm_master_put(&lessee);
-
        mutex_unlock(&dev->mode_config.idr_mutex);
 
+       drm_master_put(&lessee);
+
        return ERR_PTR(error);
 }
 
index 61a1c8ea74bc5838b905a67e55cb80203f8ef58f..c3c79ee6119e0cbaf2e7ba0ebadd6904adefbb8d 100644 (file)
@@ -575,21 +575,23 @@ EXPORT_SYMBOL(drm_mm_remove_node);
  */
 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
+       struct drm_mm *mm = old->mm;
+
        DRM_MM_BUG_ON(!old->allocated);
 
        *new = *old;
 
        list_replace(&old->node_list, &new->node_list);
-       rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root);
+       rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
 
        if (drm_mm_hole_follows(old)) {
                list_replace(&old->hole_stack, &new->hole_stack);
                rb_replace_node(&old->rb_hole_size,
                                &new->rb_hole_size,
-                               &old->mm->holes_size);
+                               &mm->holes_size);
                rb_replace_node(&old->rb_hole_addr,
                                &new->rb_hole_addr,
-                               &old->mm->holes_addr);
+                               &mm->holes_addr);
        }
 
        old->allocated = false;
index cda8bfab6d3b49e3e31516ae5895b878ed4e7581..256de731361219b976146f0dc398d9a4dfa4dd9e 100644 (file)
@@ -382,6 +382,9 @@ void drm_mode_config_init(struct drm_device *dev)
        ida_init(&dev->mode_config.connector_ida);
        spin_lock_init(&dev->mode_config.connector_list_lock);
 
+       init_llist_head(&dev->mode_config.connector_free_list);
+       INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
+
        drm_mode_create_standard_properties(dev);
 
        /* Just to be sure */
@@ -431,6 +434,8 @@ void drm_mode_config_cleanup(struct drm_device *dev)
                drm_connector_put(connector);
        }
        drm_connector_list_iter_end(&conn_iter);
+       /* connector_iter drops references in a work item. */
+       flush_work(&dev->mode_config.connector_free_work);
        if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
                drm_connector_list_iter_begin(dev, &conn_iter);
                drm_for_each_connector_iter(connector, &conn_iter)
index 19404e34cd592d4a19720efa8b64d3fc4854569f..37a93cdffb4ad0e7986a634df4d70ccc3fef286e 100644 (file)
@@ -1030,6 +1030,7 @@ retry:
                e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
                e->event.base.length = sizeof(e->event);
                e->event.vbl.user_data = page_flip->user_data;
+               e->event.vbl.crtc_id = crtc->base.id;
                ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
                if (ret) {
                        kfree(e);
index 09c1c4ff93ca4c00948b3104970a628b81a06893..3717b3df34a41fdc2170af50802490d4ee63ad20 100644 (file)
@@ -367,9 +367,9 @@ void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
        spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 }
 
-static void vblank_disable_fn(unsigned long arg)
+static void vblank_disable_fn(struct timer_list *t)
 {
-       struct drm_vblank_crtc *vblank = (void *)arg;
+       struct drm_vblank_crtc *vblank = from_timer(vblank, t, disable_timer);
        struct drm_device *dev = vblank->dev;
        unsigned int pipe = vblank->pipe;
        unsigned long irqflags;
@@ -436,8 +436,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
                vblank->dev = dev;
                vblank->pipe = i;
                init_waitqueue_head(&vblank->queue);
-               setup_timer(&vblank->disable_timer, vblank_disable_fn,
-                           (unsigned long)vblank);
+               timer_setup(&vblank->disable_timer, vblank_disable_fn, 0);
                seqlock_init(&vblank->seqlock);
        }
 
@@ -1019,7 +1018,7 @@ static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
                if (drm_vblank_offdelay == 0)
                        return;
                else if (drm_vblank_offdelay < 0)
-                       vblank_disable_fn((unsigned long)vblank);
+                       vblank_disable_fn(&vblank->disable_timer);
                else if (!dev->vblank_disable_immediate)
                        mod_timer(&vblank->disable_timer,
                                  jiffies + ((drm_vblank_offdelay * HZ)/1000));
@@ -1650,7 +1649,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
        spin_unlock_irqrestore(&dev->event_lock, irqflags);
 
        if (disable_irq)
-               vblank_disable_fn((unsigned long)vblank);
+               vblank_disable_fn(&vblank->disable_timer);
 
        return true;
 }
index 82b72425a42f7977c993134a2142434d8689227f..27e423b8726697c29acaccfdddb8e20774a32ac6 100644 (file)
@@ -37,8 +37,6 @@
 #define DRIVER_MAJOR   1
 #define DRIVER_MINOR   0
 
-static struct device *exynos_drm_get_dma_device(void);
-
 int exynos_atomic_check(struct drm_device *dev,
                        struct drm_atomic_state *state)
 {
@@ -148,7 +146,7 @@ static struct drm_driver exynos_drm_driver = {
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_export       = drm_gem_prime_export,
-       .gem_prime_import       = drm_gem_prime_import,
+       .gem_prime_import       = exynos_drm_gem_prime_import,
        .gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
        .gem_prime_import_sg_table      = exynos_drm_gem_prime_import_sg_table,
        .gem_prime_vmap         = exynos_drm_gem_prime_vmap,
@@ -301,6 +299,27 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
        return match ?: ERR_PTR(-ENODEV);
 }
 
+static struct device *exynos_drm_get_dma_device(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
+               struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
+               struct device *dev;
+
+               if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
+                       continue;
+
+               while ((dev = bus_find_device(&platform_bus_type, NULL,
+                                           &info->driver->driver,
+                                           (void *)platform_bus_type.match))) {
+                       put_device(dev);
+                       return dev;
+               }
+       }
+       return NULL;
+}
+
 static int exynos_drm_bind(struct device *dev)
 {
        struct exynos_drm_private *private;
@@ -469,27 +488,6 @@ static struct platform_driver exynos_drm_platform_driver = {
        },
 };
 
-static struct device *exynos_drm_get_dma_device(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
-               struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
-               struct device *dev;
-
-               if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
-                       continue;
-
-               while ((dev = bus_find_device(&platform_bus_type, NULL,
-                                           &info->driver->driver,
-                                           (void *)platform_bus_type.match))) {
-                       put_device(dev);
-                       return dev;
-               }
-       }
-       return NULL;
-}
-
 static void exynos_drm_unregister_devices(void)
 {
        int i;
index c6847fa708fa1d227fd09283a5d54b1f83aea19e..589d465a7f88937efaf4ab833fade659cfb46f66 100644 (file)
@@ -194,11 +194,6 @@ struct drm_exynos_file_private {
 /*
  * Exynos drm private structure.
  *
- * @da_start: start address to device address space.
- *     with iommu, device address space starts from this address
- *     otherwise default one.
- * @da_space_size: size of device address space.
- *     if 0 then default value is used for it.
  * @pending: the crtcs that have pending updates to finish
  * @lock: protect access to @pending
  * @wait: wait an atomic commit to finish
index 077de014d61017d77853c41a7e5a2d4084e092f1..11cc01b47bc0ad2990a7d419b320668d11b64696 100644 (file)
@@ -247,6 +247,15 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
        if (IS_ERR(exynos_gem))
                return exynos_gem;
 
+       if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
+               /*
+                * when no IOMMU is available, all allocated buffers are
+                * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
+                */
+               flags &= ~EXYNOS_BO_NONCONTIG;
+               DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
+       }
+
        /* set memory type and cache attribute from user side. */
        exynos_gem->flags = flags;
 
@@ -506,6 +515,12 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 }
 
 /* low-level interface prime helpers */
+struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
+                                           struct dma_buf *dma_buf)
+{
+       return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
+}
+
 struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
        struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
index e86d1a9518c31bc4de5cbbf196303d8a6b833ce7..5a4c7de80f657a5219aedcf9edc266a8655e9d54 100644 (file)
@@ -117,6 +117,8 @@ int exynos_drm_gem_fault(struct vm_fault *vmf);
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
 /* low-level interface prime helpers */
+struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
+                                           struct dma_buf *dma_buf);
 struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *
 exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
index 53e03f8af3d5ecd4dda4ec649a4e17017cc693c4..e6b0940b1ac273f95a12c9a747fd4097b1e3cd1c 100644 (file)
@@ -161,9 +161,9 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
        .atomic_flush = exynos_crtc_handle_event,
 };
 
-static void vidi_fake_vblank_timer(unsigned long arg)
+static void vidi_fake_vblank_timer(struct timer_list *t)
 {
-       struct vidi_context *ctx = (void *)arg;
+       struct vidi_context *ctx = from_timer(ctx, t, timer);
 
        if (drm_crtc_handle_vblank(&ctx->crtc->base))
                mod_timer(&ctx->timer,
@@ -449,7 +449,7 @@ static int vidi_probe(struct platform_device *pdev)
 
        ctx->pdev = pdev;
 
-       setup_timer(&ctx->timer, vidi_fake_vblank_timer, (unsigned long)ctx);
+       timer_setup(&ctx->timer, vidi_fake_vblank_timer, 0);
 
        mutex_init(&ctx->lock);
 
index 4d1f45acf2cdbb0be6b0a9a53d55f6af6fbb28c5..127815253a84522b2e7c59f1f1af175a56b46158 100644 (file)
@@ -601,9 +601,9 @@ tda998x_reset(struct tda998x_priv *priv)
  * we have seen a HPD inactive->active transition.  This code implements
  * that delay.
  */
-static void tda998x_edid_delay_done(unsigned long data)
+static void tda998x_edid_delay_done(struct timer_list *t)
 {
-       struct tda998x_priv *priv = (struct tda998x_priv *)data;
+       struct tda998x_priv *priv = from_timer(priv, t, edid_delay_timer);
 
        priv->edid_delay_active = false;
        wake_up(&priv->edid_delay_waitq);
@@ -1492,8 +1492,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
        mutex_init(&priv->mutex);       /* protect the page access */
        init_waitqueue_head(&priv->edid_delay_waitq);
-       setup_timer(&priv->edid_delay_timer, tda998x_edid_delay_done,
-                   (unsigned long)priv);
+       timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
        INIT_WORK(&priv->detect_work, tda998x_detect_work);
 
        /* wake up the device: */
index ab19545d59a1898b4d1de5a615a6cc71723886c6..4ce2e6bd06803138a114ffde419411cd27ca8158 100644 (file)
@@ -208,6 +208,20 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
+static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
+       unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
+       u32 new = *(u32 *)(p_data);
+
+       if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
+               /* We don't have rom, return size of 0. */
+               *pval = 0;
+       else
+               vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
+       return 0;
+}
+
 static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
        void *p_data, unsigned int bytes)
 {
@@ -300,6 +314,11 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
        }
 
        switch (rounddown(offset, 4)) {
+       case PCI_ROM_ADDRESS:
+               if (WARN_ON(!IS_ALIGNED(offset, 4)))
+                       return -EINVAL;
+               return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
+
        case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
                if (WARN_ON(!IS_ALIGNED(offset, 4)))
                        return -EINVAL;
@@ -375,6 +394,8 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
                                pci_resource_len(gvt->dev_priv->drm.pdev, 0);
        vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
                                pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+
+       memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
 }
 
 /**
index 701a3c6f16696f9615ff52d7d81b2589d31bb99f..85d4c57870fb7a2c577803d12e3c0bf219cf056f 100644 (file)
@@ -1628,7 +1628,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
        struct intel_shadow_bb_entry *entry_obj;
        struct intel_vgpu *vgpu = s->vgpu;
        unsigned long gma = 0;
-       uint32_t bb_size;
+       int bb_size;
        void *dst = NULL;
        int ret = 0;
 
index 3c318439a65967366e106b9542ffa757bb540c07..355120865efd14873726e8eae2e1ec6d6fb31b9f 100644 (file)
@@ -282,6 +282,7 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
                                    int type, unsigned int resolution)
 {
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
 
        if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -307,6 +308,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
        port->type = type;
 
        emulate_monitor_status_change(vgpu);
+       vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
        return 0;
 }
 
index 4427be18e4a93c72eaccae1fc263b044279c5fe8..940cdaaa3f2456009d5b90a5dd5595924324a993 100644 (file)
@@ -496,6 +496,12 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
                goto err_unpin_mm;
        }
 
+       ret = intel_gvt_generate_request(workload);
+       if (ret) {
+               gvt_vgpu_err("fail to generate request\n");
+               goto err_unpin_mm;
+       }
+
        ret = prepare_shadow_batch_buffer(workload);
        if (ret) {
                gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
index 2801d70579d8cd3839ae904386127d4007efd3de..8e331142badbcbad4ceebbd0c0b2e2fa2fb8584c 100644 (file)
@@ -311,9 +311,9 @@ static inline int gtt_set_entry64(void *pt,
 
 #define GTT_HAW 46
 
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
 
 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
 {
index a5bed2e71b9260afbe5ee3db7bb6649f29b826a2..1f840f6b81bba6017e7dd897c093030d882a0799 100644 (file)
@@ -137,17 +137,26 @@ static int new_mmio_info(struct intel_gvt *gvt,
        return 0;
 }
 
-static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
+/**
+ * intel_gvt_render_mmio_to_ring_id - convert an mmio offset into a ring id
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * Ring ID on success, negative error code on failure.
+ */
+int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
+               unsigned int offset)
 {
        enum intel_engine_id id;
        struct intel_engine_cs *engine;
 
-       reg &= ~GENMASK(11, 0);
+       offset &= ~GENMASK(11, 0);
        for_each_engine(engine, gvt->dev_priv, id) {
-               if (engine->mmio_base == reg)
+               if (engine->mmio_base == offset)
                        return id;
        }
-       return -1;
+       return -ENODEV;
 }
 
 #define offset_to_fence_num(offset) \
@@ -1381,40 +1390,6 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
        return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
 }
 
-static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
-               void *p_data, unsigned int bytes)
-{
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       u32 v = *(u32 *)p_data;
-
-       if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
-               return intel_vgpu_default_mmio_write(vgpu,
-                               offset, p_data, bytes);
-
-       switch (offset) {
-       case 0x4ddc:
-               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
-               break;
-       case 0x42080:
-               /* bypass WaCompressedResourceDisplayNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
-               break;
-       case 0xe194:
-               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
-               break;
-       case 0x7014:
-               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
@@ -1432,18 +1407,36 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct drm_i915_private *dev_priv = gvt->dev_priv;
+       int ring_id;
+       u32 ring_base;
+
+       ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
+       /**
+        * Read the HW reg in the following cases:
+        * a. the offset isn't a ring mmio,
+        * b. the offset's ring is running on hw,
+        * c. the offset is the ring timestamp mmio.
+        */
+       if (ring_id >= 0)
+               ring_base = dev_priv->engine[ring_id]->mmio_base;
+
+       if (ring_id < 0 || vgpu  == gvt->scheduler.engine_owner[ring_id] ||
+           offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
+           offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
+               mmio_hw_access_pre(dev_priv);
+               vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+               mmio_hw_access_post(dev_priv);
+       }
 
-       mmio_hw_access_pre(dev_priv);
-       vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
-       mmio_hw_access_post(dev_priv);
        return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 }
 
 static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
-       int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+       int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
        struct intel_vgpu_execlist *execlist;
        u32 data = *(u32 *)p_data;
        int ret = 0;
@@ -1470,7 +1463,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
        u32 data = *(u32 *)p_data;
-       int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+       int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
        bool enable_execlist;
 
        write_vreg(vgpu, offset, p_data, bytes);
@@ -1671,8 +1664,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
                NULL, NULL);
-       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
-                skl_misc_ctl_write);
+       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+                NULL, NULL);
        MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2564,8 +2557,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_D(0x6e570, D_BDW_PLUS);
        MMIO_D(0x65f10, D_BDW_PLUS);
 
-       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
-                skl_misc_ctl_write);
+       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2615,8 +2607,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
-       MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+       MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
+       MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
        MMIO_D(0x45504, D_SKL_PLUS);
        MMIO_D(0x45520, D_SKL_PLUS);
        MMIO_D(0x46000, D_SKL_PLUS);
index 32cd64ddad2668ed88d707c68f856f4b968bb3c4..dbc04ad2c7a1e0703f56fc8dbc515fc22b792abd 100644 (file)
@@ -65,6 +65,8 @@ struct intel_gvt_mmio_info {
        struct hlist_node node;
 };
 
+int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
+               unsigned int reg);
 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
 bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
 
index f6ded475bb2cc4dec19697b01e1e37c2c015f7dd..69f8f0d155b93db866facec8ed25b40709764f1c 100644 (file)
@@ -131,6 +131,20 @@ static inline bool is_gvt_request(struct drm_i915_gem_request *req)
        return i915_gem_context_force_single_submission(req->ctx);
 }
 
+static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+       i915_reg_t reg;
+
+       reg = RING_INSTDONE(ring_base);
+       vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+       reg = RING_ACTHD(ring_base);
+       vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+       reg = RING_ACTHD_UDW(ring_base);
+       vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+}
+
 static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
 {
@@ -140,9 +154,10 @@ static int shadow_context_status_change(struct notifier_block *nb,
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        enum intel_engine_id ring_id = req->engine->id;
        struct intel_vgpu_workload *workload;
+       unsigned long flags;
 
        if (!is_gvt_request(req)) {
-               spin_lock_bh(&scheduler->mmio_context_lock);
+               spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (action == INTEL_CONTEXT_SCHEDULE_IN &&
                    scheduler->engine_owner[ring_id]) {
                        /* Switch ring from vGPU to host. */
@@ -150,7 +165,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
                                              NULL, ring_id);
                        scheduler->engine_owner[ring_id] = NULL;
                }
-               spin_unlock_bh(&scheduler->mmio_context_lock);
+               spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
 
                return NOTIFY_OK;
        }
@@ -161,7 +176,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 
        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
-               spin_lock_bh(&scheduler->mmio_context_lock);
+               spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (workload->vgpu != scheduler->engine_owner[ring_id]) {
                        /* Switch ring from host to vGPU or vGPU to vGPU. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
@@ -170,13 +185,16 @@ static int shadow_context_status_change(struct notifier_block *nb,
                } else
                        gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
                                      ring_id, workload->vgpu->id);
-               spin_unlock_bh(&scheduler->mmio_context_lock);
+               spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
-       case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+               save_ring_hw_state(workload->vgpu, ring_id);
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
+       case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+               save_ring_hw_state(workload->vgpu, ring_id);
+               break;
        default:
                WARN_ON(1);
                return NOTIFY_OK;
@@ -253,7 +271,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-       struct drm_i915_gem_request *rq;
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_ring *ring;
        int ret;
@@ -299,6 +316,26 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        ret = populate_shadow_context(workload);
        if (ret)
                goto err_unpin;
+       workload->shadowed = true;
+       return 0;
+
+err_unpin:
+       engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+       release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+       return ret;
+}
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+       int ring_id = workload->ring_id;
+       struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+       struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+       struct drm_i915_gem_request *rq;
+       struct intel_vgpu *vgpu = workload->vgpu;
+       struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+       int ret;
 
        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
@@ -313,14 +350,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        ret = copy_workload_to_ring_buffer(workload);
        if (ret)
                goto err_unpin;
-       workload->shadowed = true;
        return 0;
 
 err_unpin:
        engine->context_unpin(engine, shadow_ctx);
-err_shadow:
        release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
        return ret;
 }
 
@@ -723,6 +757,9 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
        if (IS_ERR(vgpu->shadow_ctx))
                return PTR_ERR(vgpu->shadow_ctx);
 
+       if (INTEL_INFO(vgpu->gvt->dev_priv)->has_logical_ring_preemption)
+               vgpu->shadow_ctx->priority = INT_MAX;
+
        vgpu->shadow_ctx->engine[RCS].initialised = true;
 
        bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
index 2d694f6c09076a31efc3165dc074312527450345..b9f872204d7e73fb48cd865641d9feee861fbbcc 100644 (file)
@@ -142,4 +142,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
 
 void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+
 #endif
index 960d3d8b95b8e5d647b85b2baed7b2c5b68d83cf..2cf10d17acfbf3a3c6c6af30afb5f5a01d6103ad 100644 (file)
@@ -1714,6 +1714,7 @@ static int i915_drm_resume(struct drm_device *dev)
        intel_guc_resume(dev_priv);
 
        intel_modeset_init_hw(dev);
+       intel_init_clock_gating(dev_priv);
 
        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
@@ -2618,6 +2619,8 @@ static int intel_runtime_resume(struct device *kdev)
                ret = vlv_resume_prepare(dev_priv, true);
        }
 
+       intel_uncore_runtime_resume(dev_priv);
+
        /*
         * No point of rolling back things in case of an error, as the best
         * we can do is to hope that things will still work (and disable RPM).
index 3a140eedfc83079b734c39cea63b85932d002159..ad4050f7ab3b6965db1ce1b8d150036354464078 100644 (file)
@@ -4712,17 +4712,19 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
         * state. Fortunately, the kernel_context is disposable and we do
         * not rely on its state.
         */
-       ret = i915_gem_switch_to_kernel_context(dev_priv);
-       if (ret)
-               goto err_unlock;
+       if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+               ret = i915_gem_switch_to_kernel_context(dev_priv);
+               if (ret)
+                       goto err_unlock;
 
-       ret = i915_gem_wait_for_idle(dev_priv,
-                                    I915_WAIT_INTERRUPTIBLE |
-                                    I915_WAIT_LOCKED);
-       if (ret && ret != -EIO)
-               goto err_unlock;
+               ret = i915_gem_wait_for_idle(dev_priv,
+                                            I915_WAIT_INTERRUPTIBLE |
+                                            I915_WAIT_LOCKED);
+               if (ret && ret != -EIO)
+                       goto err_unlock;
 
-       assert_kernel_context_is_current(dev_priv);
+               assert_kernel_context_is_current(dev_priv);
+       }
        i915_gem_contexts_lost(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
@@ -4946,8 +4948,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 {
        int ret;
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
-
        /*
         * We need to fallback to 4K pages since gvt gtt handling doesn't
         * support huge page entries - we will need to check either hypervisor
@@ -4967,18 +4967,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
                dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
        }
 
+       ret = i915_gem_init_userptr(dev_priv);
+       if (ret)
+               return ret;
+
        /* This is just a security blanket to placate dragons.
         * On some systems, we very sporadically observe that the first TLBs
         * used by the CS may be stale, despite us poking the TLB reset. If
         * we hold the forcewake during initialisation these problems
         * just magically go away.
         */
+       mutex_lock(&dev_priv->drm.struct_mutex);
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-       ret = i915_gem_init_userptr(dev_priv);
-       if (ret)
-               goto out_unlock;
-
        ret = i915_gem_init_ggtt(dev_priv);
        if (ret)
                goto out_unlock;
index 135fc750a8375f172e130c6b45b85747535693d9..382a77a1097e735468558415d3fa860afbb8c818 100644 (file)
@@ -172,7 +172,9 @@ i915_mmu_notifier_create(struct mm_struct *mm)
        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT_CACHED;
-       mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
+       mn->wq = alloc_workqueue("i915-userptr-release",
+                                WQ_UNBOUND | WQ_MEM_RECLAIM,
+                                0);
        if (mn->wq == NULL) {
                kfree(mn);
                return ERR_PTR(-ENOMEM);
@@ -827,7 +829,7 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
 
        dev_priv->mm.userptr_wq =
                alloc_workqueue("i915-userptr-acquire",
-                               WQ_HIGHPRI | WQ_MEM_RECLAIM,
+                               WQ_HIGHPRI | WQ_UNBOUND,
                                0);
        if (!dev_priv->mm.userptr_wq)
                return -ENOMEM;
index e2993857df37bb365225f2dcf1797d389ae73fb4..888b7d3f04c303412ce7de74d86f216a9434582d 100644 (file)
@@ -52,7 +52,8 @@ int i915_gemfs_init(struct drm_i915_private *i915)
 
        if (has_transparent_hugepage()) {
                struct super_block *sb = gemfs->mnt_sb;
-               char options[] = "huge=within_size";
+               /* FIXME: Disabled until we get W/A for read BW issue. */
+               char options[] = "huge=never";
                int flags = 0;
                int err;
 
index 68a58cce6ab1c88fb169732e323bc1fcfa9e0949..3866c49bc390ffd361f6f4e0070dffbd9120bf59 100644 (file)
@@ -2951,9 +2951,6 @@ enum i915_power_well_id {
 #define ILK_DPFC_CHICKEN       _MMIO(0x43224)
 #define   ILK_DPFC_DISABLE_DUMMY0 (1<<8)
 #define   ILK_DPFC_NUKE_ON_ANY_MODIFICATION    (1<<23)
-#define   GLK_SKIP_SEG_EN              (1<<12)
-#define   GLK_SKIP_SEG_COUNT_MASK      (3<<10)
-#define   GLK_SKIP_SEG_COUNT(x)                ((x)<<10)
 #define ILK_FBC_RT_BASE                _MMIO(0x2128)
 #define   ILK_FBC_RT_VALID     (1<<0)
 #define   SNB_FBC_FRONT_BUFFER (1<<1)
index 48e1ba01ccf88dba097a9e62e06c77907f01a9ce..5f8b9f1f40f19e84968c18e5fbd229731b392dea 100644 (file)
@@ -517,6 +517,7 @@ static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
 
        GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
        rb_erase(&wait->node, &b->waiters);
+       RB_CLEAR_NODE(&wait->node);
 
 out:
        GEM_BUG_ON(b->irq_wait == wait);
index 933c18fd4258abcb5bd543da50562e539790b1c8..e0843bb991699d0c81ec8242d1809a3e26527af8 100644 (file)
@@ -2131,6 +2131,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
        if (IS_CANNONLAKE(dev_priv)) {
                /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
                val = I915_READ(DPCLKA_CFGCR0);
+               val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
                val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->id, port);
                I915_WRITE(DPCLKA_CFGCR0, val);
 
index 878acc432a4b0c7ad3ea4774696928309756b207..e8ccf89cb17b6843b878ec0ef0641055d68e1cf8 100644 (file)
@@ -1000,7 +1000,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
        return crtc->config->cpu_transcoder;
 }
 
-static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
+static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
+                                   enum pipe pipe)
 {
        i915_reg_t reg = PIPEDSL(pipe);
        u32 line1, line2;
@@ -1015,7 +1016,28 @@ static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
        msleep(5);
        line2 = I915_READ(reg) & line_mask;
 
-       return line1 == line2;
+       return line1 != line2;
+}
+
+static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       /* Wait for the display line to settle/start moving */
+       if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
+               DRM_ERROR("pipe %c scanline %s wait timed out\n",
+                         pipe_name(pipe), onoff(state));
+}
+
+static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
+{
+       wait_for_pipe_scanline_moving(crtc, false);
+}
+
+static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
+{
+       wait_for_pipe_scanline_moving(crtc, true);
 }
 
 /*
@@ -1038,7 +1060,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
-       enum pipe pipe = crtc->pipe;
 
        if (INTEL_GEN(dev_priv) >= 4) {
                i915_reg_t reg = PIPECONF(cpu_transcoder);
@@ -1049,9 +1070,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
                                            100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
-               /* Wait for the display line to settle */
-               if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
-                       WARN(1, "pipe_off wait timed out\n");
+               intel_wait_for_pipe_scanline_stopped(crtc);
        }
 }
 
@@ -1936,15 +1955,14 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
        POSTING_READ(reg);
 
        /*
-        * Until the pipe starts DSL will read as 0, which would cause
-        * an apparent vblank timestamp jump, which messes up also the
-        * frame count when it's derived from the timestamps. So let's
-        * wait for the pipe to start properly before we call
-        * drm_crtc_vblank_on()
+        * Until the pipe starts PIPEDSL reads will return a stale value,
+        * which causes an apparent vblank timestamp jump when PIPEDSL
+        * resets to its proper value. That also messes up the frame count
+        * when it's derived from the timestamps. So let's wait for the
+        * pipe to start properly before we call drm_crtc_vblank_on()
         */
-       if (dev->max_vblank_count == 0 &&
-           wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
-               DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
+       if (dev->max_vblank_count == 0)
+               intel_wait_for_pipe_scanline_moving(crtc);
 }
 
 /**
@@ -14643,6 +14661,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 
 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
+       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
        DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
                      pipe_name(pipe));
 
@@ -14652,8 +14672,7 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
        I915_WRITE(PIPECONF(pipe), 0);
        POSTING_READ(PIPECONF(pipe));
 
-       if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
-               DRM_ERROR("pipe %c off wait timed out\n", pipe_name(pipe));
+       intel_wait_for_pipe_scanline_stopped(crtc);
 
        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
index 7bc60c848940f95d1352ac79b7040867fa6268a3..6c7f8bca574eb4414f9a1d95894a5e10c25882b5 100644 (file)
@@ -1736,7 +1736,7 @@ extern struct drm_display_mode *intel_find_panel_downclock(
 int intel_backlight_device_register(struct intel_connector *connector);
 void intel_backlight_device_unregister(struct intel_connector *connector);
 #else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
+static inline int intel_backlight_device_register(struct intel_connector *connector)
 {
        return 0;
 }
index b8af35187d226df3273380448aded0f5bc0d7c7c..ea96682568e880077fec27e489b3d345c44953ae 100644 (file)
@@ -697,10 +697,8 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
 
        /* Due to peculiar init order wrt hpd handling this is separate. */
        if (drm_fb_helper_initial_config(&ifbdev->helper,
-                                        ifbdev->preferred_bpp)) {
+                                        ifbdev->preferred_bpp))
                intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
-               intel_fbdev_fini(to_i915(ifbdev->helper.dev));
-       }
 }
 
 void intel_fbdev_initial_config_async(struct drm_device *dev)
@@ -800,7 +798,11 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
        struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
-       if (ifbdev)
+       if (!ifbdev)
+               return;
+
+       intel_fbdev_sync(ifbdev);
+       if (ifbdev->vma)
                drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
index 5132dc8147884f9ace0af2615f7047f63b15a9a2..4dea833f9d1b78c17239eeade593a72c88fbc166 100644 (file)
@@ -487,7 +487,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
                                           crtc_state->limited_color_range ?
                                           HDMI_QUANTIZATION_RANGE_LIMITED :
                                           HDMI_QUANTIZATION_RANGE_FULL,
-                                          intel_hdmi->rgb_quant_range_selectable);
+                                          intel_hdmi->rgb_quant_range_selectable,
+                                          is_hdmi2_sink);
 
        /* TODO: handle pixel repetition for YCBCR420 outputs */
        intel_write_infoframe(encoder, crtc_state, &frame);
index eb5827110d8ffca08cf6b46e6671265a43f40307..49fdf09f9919c8f29d85f83ee0f3f731796f7713 100644 (file)
@@ -438,7 +438,9 @@ static bool
 gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
 {
        return (i + 1 < num &&
-               !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+               msgs[i].addr == msgs[i + 1].addr &&
+               !(msgs[i].flags & I2C_M_RD) &&
+               (msgs[i].len == 1 || msgs[i].len == 2) &&
                (msgs[i + 1].flags & I2C_M_RD));
 }
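
The tightened check above only treats two back-to-back messages as an indexed read when both target the same slave address and the first is a one- or two-byte write followed by a read. A rough sketch of the transfer shape it accepts; the address 0x50, offset 0x10 and buffer length are made up for illustration, not taken from the driver:

#include <linux/i2c.h>

/* Illustrative only: a short "index" write immediately followed by a
 * read from the same slave address -- the pair gmbus_is_index_read()
 * now recognizes.
 */
static int example_indexed_read(struct i2c_adapter *adap)
{
        u8 index = 0x10;
        u8 buf[4];
        struct i2c_msg msgs[2] = {
                { .addr = 0x50, .flags = 0,        .len = 1,           .buf = &index },
                { .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(buf), .buf = buf },
        };

        return i2c_transfer(adap, msgs, 2);
}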
 
index f4a4e9496893232a6dd26ae953e45d356879328e..f0d0dbab4150a34b53721d0212bfe12ca05bf4da 100644 (file)
@@ -124,7 +124,6 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-       u32 val;
        gen9_init_clock_gating(dev_priv);
 
        /*
@@ -144,11 +143,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
                I915_WRITE(CHICKEN_MISC_2, val);
        }
 
-       /* Display WA #1133: WaFbcSkipSegments:glk */
-       val = I915_READ(ILK_DPFC_CHICKEN);
-       val &= ~GLK_SKIP_SEG_COUNT_MASK;
-       val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
-       I915_WRITE(ILK_DPFC_CHICKEN, val);
 }
 
 static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
@@ -8517,7 +8511,6 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-       u32 val;
        cnp_init_clock_gating(dev_priv);
 
        /* This is not a WA. Enable for better image quality */
@@ -8537,12 +8530,6 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
                I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
                           I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
                           SARBUNIT_CLKGATE_DIS);
-
-       /* Display WA #1133: WaFbcSkipSegments:cnl */
-       val = I915_READ(ILK_DPFC_CHICKEN);
-       val &= ~GLK_SKIP_SEG_COUNT_MASK;
-       val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
-       I915_WRITE(ILK_DPFC_CHICKEN, val);
 }
 
 static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
index 20e3c65c0999f88ee68c09c3aa13533bbe0b1a5c..8c2ce81f01c2e922c168e98c3fdf5d2c504e0e40 100644 (file)
@@ -434,6 +434,12 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
        i915_check_and_clear_faults(dev_priv);
 }
 
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+{
+       iosf_mbi_register_pmic_bus_access_notifier(
+               &dev_priv->uncore.pmic_bus_access_nb);
+}
+
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
 {
        i915_modparams.enable_rc6 =
@@ -1240,8 +1246,15 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
                 * bus, which will be busy after this notification, leading to:
                 * "render: timed out waiting for forcewake ack request."
                 * errors.
+                *
+                * The notifier is unregistered during intel_runtime_suspend(),
+                * so it's ok to access the HW here without holding an RPM
+                * wake reference -> disable wakeref asserts for the duration
+                * of the access.
                 */
+               disable_rpm_wakeref_asserts(dev_priv);
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+               enable_rpm_wakeref_asserts(dev_priv);
                break;
        case MBI_PMIC_BUS_ACCESS_END:
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
index 582771251b57a28122f98a8092bfd1a3211c0590..9ce079b5dd0d85d55e0aeca2b45ce54c5070bcb9 100644 (file)
@@ -134,6 +134,7 @@ bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv
 void intel_uncore_fini(struct drm_i915_private *dev_priv);
 void intel_uncore_suspend(struct drm_i915_private *dev_priv);
 void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
 
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
index 3790fdf44a1ab7feb4429c76d8524ea50926864b..b26f07b55d861c04b45dbad8d56c4ea9cd414540 100644 (file)
@@ -49,9 +49,9 @@ void onstack_fence_fini(struct i915_sw_fence *fence)
        i915_sw_fence_fini(fence);
 }
 
-static void timed_fence_wake(unsigned long data)
+static void timed_fence_wake(struct timer_list *t)
 {
-       struct timed_fence *tf = (struct timed_fence *)data;
+       struct timed_fence *tf = from_timer(tf, t, timer);
 
        i915_sw_fence_commit(&tf->fence);
 }
@@ -60,7 +60,7 @@ void timed_fence_init(struct timed_fence *tf, unsigned long expires)
 {
        onstack_fence_init(&tf->fence);
 
-       setup_timer_on_stack(&tf->timer, timed_fence_wake, (unsigned long)tf);
+       timer_setup_on_stack(&tf->timer, timed_fence_wake, 0);
 
        if (time_after(expires, jiffies))
                mod_timer(&tf->timer, expires);
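
This is the first of several conversions in this merge to the timer_setup()/from_timer() API (the msm, omapdrm, rockchip, vc4, vgem, via, hid and iio hunks further down follow the same pattern): the callback now receives the struct timer_list pointer and recovers its containing structure instead of casting an unsigned long cookie. A minimal sketch of the pattern, with made-up structure and function names:

#include <linux/jiffies.h>
#include <linux/timer.h>

/* Hypothetical example of the timer API conversion used in these hunks. */
struct my_state {
        struct timer_list timer;
        int fired;
};

static void my_timeout(struct timer_list *t)
{
        /* from_timer() is container_of() keyed on the timer member. */
        struct my_state *st = from_timer(st, t, timer);

        st->fired = 1;
}

static void my_start(struct my_state *st)
{
        /* Replaces setup_timer(&st->timer, fn, (unsigned long)st). */
        timer_setup(&st->timer, my_timeout, 0);
        mod_timer(&st->timer, jiffies + HZ);
}
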
index 93c7e3f9b4a88d776959be4bf8db22c6c6ac4d97..17d2f3a1c562bcb34e132c855965d1b2ace89726 100644 (file)
@@ -133,9 +133,16 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
                        plane_disabling = true;
        }
 
-       if (plane_disabling) {
-               drm_atomic_helper_wait_for_vblanks(dev, state);
+       /*
+        * The flip done wait is only strictly required by imx-drm if a deferred
+        * plane disable is in-flight. As the core requires blocking commits
+        * to wait for the flip, it is done here unconditionally. This keeps the
+        * workitem around a bit longer than required for the majority of
+        * non-blocking commits, but we accept that for the sake of simplicity.
+        */
+       drm_atomic_helper_wait_for_flip_done(dev, state);
 
+       if (plane_disabling) {
                for_each_old_plane_in_state(state, plane, old_plane_state, i)
                        ipu_plane_disable_deferred(plane);
 
index 40f4840ef98e8273c327b0f350025fa9885f5890..970c7963ae29bfd781a01ceac976abf5fe24764f 100644 (file)
@@ -82,9 +82,9 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
        return NULL;
 }
 
-static void a5xx_preempt_timer(unsigned long data)
+static void a5xx_preempt_timer(struct timer_list *t)
 {
-       struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
+       struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
        struct msm_gpu *gpu = &a5xx_gpu->base.base;
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
@@ -300,6 +300,5 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
                }
        }
 
-       setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
-               (unsigned long) a5xx_gpu);
+       timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
 }
index 8d4477818ec216124952c142189ba4c5a9fa47d2..2322014034398110d879d6c97f7d2e5fe25c68fe 100644 (file)
@@ -353,9 +353,9 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
                        round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
 }
 
-static void hangcheck_handler(unsigned long data)
+static void hangcheck_handler(struct timer_list *t)
 {
-       struct msm_gpu *gpu = (struct msm_gpu *)data;
+       struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
@@ -703,8 +703,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        INIT_WORK(&gpu->recover_work, recover_worker);
 
 
-       setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
-                       (unsigned long)gpu);
+       timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
 
        spin_lock_init(&gpu->perf_lock);
 
index c226da145fb3cd62c3bd91ac3070850f0429f474..a349cb61961e03672c0bd23b489c7d9f1ccbbbed 100644 (file)
@@ -35,6 +35,7 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV
 
 config DRM_OMAP_PANEL_DPI
        tristate "Generic DPI panel"
+       depends on BACKLIGHT_CLASS_DEVICE
        help
          Driver for generic DPI panels.
 
index daf286fc8a4082a7fc7f8f8bf6bf38ab74362c09..ca1e3b489540fe8171b7eaa1d6839963a2533e08 100644 (file)
@@ -566,8 +566,8 @@ static int dpi_verify_pll(struct dss_pll *pll)
 }
 
 static const struct soc_device_attribute dpi_soc_devices[] = {
-       { .family = "OMAP3[456]*" },
-       { .family = "[AD]M37*" },
+       { .machine = "OMAP3[456]*" },
+       { .machine = "[AD]M37*" },
        { /* sentinel */ }
 };
 
index b56a05730314ffe0257d157ea27d44859829fb40..c2cf6d98e577bdf7f367042cde89ae6fd196f133 100644 (file)
@@ -4095,7 +4095,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
 }
 
 #ifdef DSI_CATCH_MISSING_TE
-static void dsi_te_timeout(unsigned long arg)
+static void dsi_te_timeout(struct timer_list *unused)
 {
        DSSERR("TE not received for 250ms!\n");
 }
@@ -5449,9 +5449,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
                             dsi_framedone_timeout_work_callback);
 
 #ifdef DSI_CATCH_MISSING_TE
-       init_timer(&dsi->te_timer);
-       dsi->te_timer.function = dsi_te_timeout;
-       dsi->te_timer.data = 0;
+       timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
 #endif
 
        dsi_mem = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
index d86873f2abe6a57897dd62f324eefd0322c7a669..e626eddf24d5e2231c2434a76d45ddd29067c6e4 100644 (file)
@@ -352,7 +352,7 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
 {
        const u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
                         CEC_CAP_PASSTHROUGH | CEC_CAP_RC;
-       unsigned int ret;
+       int ret;
 
        core->adap = cec_allocate_adapter(&hdmi_cec_adap_ops, core,
                "omap4", caps, CEC_MAX_LOG_ADDRS);
index 62e451162d96f6fe4064c05bf60eaee430d15788..b06f9956e73321352eb048a6b54d54957db42f37 100644 (file)
@@ -886,25 +886,36 @@ struct hdmi4_features {
        bool audio_use_mclk;
 };
 
-static const struct hdmi4_features hdmi4_es1_features = {
+static const struct hdmi4_features hdmi4430_es1_features = {
        .cts_swmode = false,
        .audio_use_mclk = false,
 };
 
-static const struct hdmi4_features hdmi4_es2_features = {
+static const struct hdmi4_features hdmi4430_es2_features = {
        .cts_swmode = true,
        .audio_use_mclk = false,
 };
 
-static const struct hdmi4_features hdmi4_es3_features = {
+static const struct hdmi4_features hdmi4_features = {
        .cts_swmode = true,
        .audio_use_mclk = true,
 };
 
 static const struct soc_device_attribute hdmi4_soc_devices[] = {
-       { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features },
-       { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features },
-       { .family = "OMAP4",                      .data = &hdmi4_es3_features },
+       {
+               .machine = "OMAP4430",
+               .revision = "ES1.?",
+               .data = &hdmi4430_es1_features,
+       },
+       {
+               .machine = "OMAP4430",
+               .revision = "ES2.?",
+               .data = &hdmi4430_es2_features,
+       },
+       {
+               .family = "OMAP4",
+               .data = &hdmi4_features,
+       },
        { /* sentinel */ }
 };
 
index 1dd3dafc59afd25b5048ac105eb7627bafb16c9a..c60a85e82c6d8a529aa40062fdde21354d73b505 100644 (file)
@@ -638,7 +638,8 @@ static int omap_dmm_probe(struct platform_device *dev)
                match = of_match_node(dmm_of_match, dev->dev.of_node);
                if (!match) {
                        dev_err(&dev->dev, "failed to find matching device node\n");
-                       return -ENODEV;
+                       ret = -ENODEV;
+                       goto fail;
                }
 
                omap_dmm->plat_data = match->data;
index 898f9a07883043bb7d7238016b4744a2c95bc1ac..a6511918f632586372a90e430c8ac5128a44b03d 100644 (file)
@@ -5451,28 +5451,6 @@ void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
        WREG32(VM_INVALIDATE_REQUEST, 0x1);
 }
 
-static void cik_pcie_init_compute_vmid(struct radeon_device *rdev)
-{
-       int i;
-       uint32_t sh_mem_bases, sh_mem_config;
-
-       sh_mem_bases = 0x6000 | 0x6000 << 16;
-       sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
-       sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
-
-       mutex_lock(&rdev->srbm_mutex);
-       for (i = 8; i < 16; i++) {
-               cik_srbm_select(rdev, 0, 0, 0, i);
-               /* CP and shaders */
-               WREG32(SH_MEM_CONFIG, sh_mem_config);
-               WREG32(SH_MEM_APE1_BASE, 1);
-               WREG32(SH_MEM_APE1_LIMIT, 0);
-               WREG32(SH_MEM_BASES, sh_mem_bases);
-       }
-       cik_srbm_select(rdev, 0, 0, 0, 0);
-       mutex_unlock(&rdev->srbm_mutex);
-}
-
 /**
  * cik_pcie_gart_enable - gart enable
  *
@@ -5586,8 +5564,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
        cik_srbm_select(rdev, 0, 0, 0, 0);
        mutex_unlock(&rdev->srbm_mutex);
 
-       cik_pcie_init_compute_vmid(rdev);
-
        cik_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(rdev->mc.gtt_size >> 20),
index b15755b6129c2b3aa37e016f5d11f97da6427ba8..b1fe0639227e4bc499fb581ca494217f084a7c6f 100644 (file)
@@ -1285,8 +1285,6 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
                goto err_pllref;
        }
 
-       pm_runtime_enable(dev);
-
        dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
        dsi->dsi_host.dev = dev;
        ret = mipi_dsi_host_register(&dsi->dsi_host);
@@ -1301,6 +1299,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
        }
 
        dev_set_drvdata(dev, dsi);
+       pm_runtime_enable(dev);
        return 0;
 
 err_mipi_dsi_host:
index a553e182ff538b69a212ce121185d10a573966ab..3acfd576b7df894ec9dcc278fc7e02165e606d99 100644 (file)
@@ -101,9 +101,9 @@ static void psr_set_state(struct psr_drv *psr, enum psr_state state)
        spin_unlock_irqrestore(&psr->lock, flags);
 }
 
-static void psr_flush_handler(unsigned long data)
+static void psr_flush_handler(struct timer_list *t)
 {
-       struct psr_drv *psr = (struct psr_drv *)data;
+       struct psr_drv *psr = from_timer(psr, t, flush_timer);
        unsigned long flags;
 
        /* If the state has changed since we initiated the flush, do nothing */
@@ -232,7 +232,7 @@ int rockchip_drm_psr_register(struct drm_encoder *encoder,
        if (!psr)
                return -ENOMEM;
 
-       setup_timer(&psr->flush_timer, psr_flush_handler, (unsigned long)psr);
+       timer_setup(&psr->flush_timer, psr_flush_handler, 0);
        spin_lock_init(&psr->lock);
 
        psr->active = true;
index 316f831ad5f044d99be8bcfc40db59da8e3fdc7a..44343a2bf55c65458a196b5968b0c494f1c569b0 100644 (file)
@@ -81,6 +81,7 @@ struct ttm_page_pool {
        char                    *name;
        unsigned long           nfrees;
        unsigned long           nrefills;
+       unsigned int            order;
 };
 
 /**
@@ -222,6 +223,17 @@ static struct kobj_type ttm_pool_kobj_type = {
 static struct ttm_pool_manager *_manager;
 
 #ifndef CONFIG_X86
+static int set_pages_wb(struct page *page, int numpages)
+{
+#if IS_ENABLED(CONFIG_AGP)
+       int i;
+
+       for (i = 0; i < numpages; i++)
+               unmap_page_from_agp(page++);
+#endif
+       return 0;
+}
+
 static int set_pages_array_wb(struct page **pages, int addrinarray)
 {
 #if IS_ENABLED(CONFIG_AGP)
@@ -284,13 +296,23 @@ static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
 }
 
 /* set memory back to wb and free the pages. */
-static void ttm_pages_put(struct page *pages[], unsigned npages)
+static void ttm_pages_put(struct page *pages[], unsigned npages,
+               unsigned int order)
 {
-       unsigned i;
-       if (set_pages_array_wb(pages, npages))
-               pr_err("Failed to set %d pages to wb!\n", npages);
-       for (i = 0; i < npages; ++i)
-               __free_page(pages[i]);
+       unsigned int i, pages_nr = (1 << order);
+
+       if (order == 0) {
+               if (set_pages_array_wb(pages, npages))
+                       pr_err("Failed to set %d pages to wb!\n", npages);
+       }
+
+       for (i = 0; i < npages; ++i) {
+               if (order > 0) {
+                       if (set_pages_wb(pages[i], pages_nr))
+                               pr_err("Failed to set %d pages to wb!\n", pages_nr);
+               }
+               __free_pages(pages[i], order);
+       }
 }
 
 static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
@@ -353,7 +375,7 @@ restart:
                         */
                        spin_unlock_irqrestore(&pool->lock, irq_flags);
 
-                       ttm_pages_put(pages_to_free, freed_pages);
+                       ttm_pages_put(pages_to_free, freed_pages, pool->order);
                        if (likely(nr_free != FREE_ALL_PAGES))
                                nr_free -= freed_pages;
 
@@ -388,7 +410,7 @@ restart:
        spin_unlock_irqrestore(&pool->lock, irq_flags);
 
        if (freed_pages)
-               ttm_pages_put(pages_to_free, freed_pages);
+               ttm_pages_put(pages_to_free, freed_pages, pool->order);
 out:
        if (pages_to_free != static_buf)
                kfree(pages_to_free);
@@ -412,6 +434,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        struct ttm_page_pool *pool;
        int shrink_pages = sc->nr_to_scan;
        unsigned long freed = 0;
+       unsigned int nr_free_pool;
 
        if (!mutex_trylock(&lock))
                return SHRINK_STOP;
@@ -419,12 +442,19 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
                unsigned nr_free = shrink_pages;
+               unsigned page_nr;
+
                if (shrink_pages == 0)
                        break;
+
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+               page_nr = (1 << pool->order);
                /* OK to use static buffer since global mutex is held. */
-               shrink_pages = ttm_page_pool_free(pool, nr_free, true);
-               freed += nr_free - shrink_pages;
+               nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
+               shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
+               freed += (nr_free_pool - shrink_pages) << pool->order;
+               if (freed >= sc->nr_to_scan)
+                       break;
        }
        mutex_unlock(&lock);
        return freed;
@@ -436,9 +466,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
        unsigned i;
        unsigned long count = 0;
+       struct ttm_page_pool *pool;
 
-       for (i = 0; i < NUM_POOLS; ++i)
-               count += _manager->pools[i].npages;
+       for (i = 0; i < NUM_POOLS; ++i) {
+               pool = &_manager->pools[i];
+               count += (pool->npages << pool->order);
+       }
 
        return count;
 }
@@ -510,8 +543,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
        int r = 0;
        unsigned i, j, cpages;
        unsigned npages = 1 << order;
-       unsigned max_cpages = min(count,
-                       (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+       unsigned max_cpages = min(count, (unsigned)NUM_PAGES_TO_ALLOC);
 
        /* allocate array for page caching change */
        caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
@@ -744,12 +776,14 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                        }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                       for (j = 0; j < HPAGE_PMD_NR; ++j)
-                               if (p++ != pages[i + j])
-                                   break;
+                       if (!(flags & TTM_PAGE_FLAG_DMA32)) {
+                               for (j = 0; j < HPAGE_PMD_NR; ++j)
+                                       if (p++ != pages[i + j])
+                                           break;
 
-                       if (j == HPAGE_PMD_NR)
-                               order = HPAGE_PMD_ORDER;
+                               if (j == HPAGE_PMD_NR)
+                                       order = HPAGE_PMD_ORDER;
+                       }
 #endif
 
                        if (page_count(pages[i]) != 1)
@@ -843,7 +877,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 #endif
        struct list_head plist;
        struct page *p = NULL;
-       unsigned count;
+       unsigned count, first;
        int r;
 
        /* No pool for cached pages */
@@ -865,23 +899,26 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 
                i = 0;
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-               while (npages >= HPAGE_PMD_NR) {
-                       gfp_t huge_flags = gfp_flags;
+               if (!(gfp_flags & GFP_DMA32)) {
+                       while (npages >= HPAGE_PMD_NR) {
+                               gfp_t huge_flags = gfp_flags;
 
-                       huge_flags |= GFP_TRANSHUGE;
-                       huge_flags &= ~__GFP_MOVABLE;
-                       huge_flags &= ~__GFP_COMP;
-                       p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
-                       if (!p)
-                               break;
+                               huge_flags |= GFP_TRANSHUGE;
+                               huge_flags &= ~__GFP_MOVABLE;
+                               huge_flags &= ~__GFP_COMP;
+                               p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+                               if (!p)
+                                       break;
 
-                       for (j = 0; j < HPAGE_PMD_NR; ++j)
-                               pages[i++] = p++;
+                               for (j = 0; j < HPAGE_PMD_NR; ++j)
+                                       pages[i++] = p++;
 
-                       npages -= HPAGE_PMD_NR;
+                               npages -= HPAGE_PMD_NR;
+                       }
                }
 #endif
 
+               first = i;
                while (npages) {
                        p = alloc_page(gfp_flags);
                        if (!p) {
@@ -889,6 +926,10 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
                                return -ENOMEM;
                        }
 
+                       /* Swap the pages if we detect consecutive order */
+                       if (i > first && pages[i - 1] == p - 1)
+                               swap(p, pages[i - 1]);
+
                        pages[i++] = p;
                        --npages;
                }
@@ -917,8 +958,15 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
        r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
                                    npages - count, 0);
 
-       list_for_each_entry(p, &plist, lru)
-               pages[count++] = p;
+       first = count;
+       list_for_each_entry(p, &plist, lru) {
+               struct page *tmp = p;
+
+               /* Swap the pages if we detect consecutive order */
+               if (count > first && pages[count - 1] == tmp - 1)
+                       swap(tmp, pages[count - 1]);
+               pages[count++] = tmp;
+       }
 
        if (r) {
                /* If there is any pages in the list put them back to
@@ -933,7 +981,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 }
 
 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
-               char *name)
+               char *name, unsigned int order)
 {
        spin_lock_init(&pool->lock);
        pool->fill_lock = false;
@@ -941,11 +989,17 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
        pool->npages = pool->nfrees = 0;
        pool->gfp_flags = flags;
        pool->name = name;
+       pool->order = order;
 }
 
 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 {
        int ret;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       unsigned order = HPAGE_PMD_ORDER;
+#else
+       unsigned order = 0;
+#endif
 
        WARN_ON(_manager);
 
@@ -953,23 +1007,23 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
        _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 
-       ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
+       ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
 
-       ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
+       ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
 
        ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
-                                 GFP_USER | GFP_DMA32, "wc dma");
+                                 GFP_USER | GFP_DMA32, "wc dma", 0);
 
        ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
-                                 GFP_USER | GFP_DMA32, "uc dma");
+                                 GFP_USER | GFP_DMA32, "uc dma", 0);
 
        ttm_page_pool_init_locked(&_manager->wc_pool_huge,
                                  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
-                                 "wc huge");
+                                 "wc huge", order);
 
        ttm_page_pool_init_locked(&_manager->uc_pool_huge,
                                  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
-                                 , "uc huge");
+                                 , "uc huge", order);
 
        _manager->options.max_size = max_pages;
        _manager->options.small = SMALL_ALLOCATION;
@@ -1058,7 +1112,6 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
 }
 EXPORT_SYMBOL(ttm_pool_unpopulate);
 
-#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
 {
        unsigned i, j;
@@ -1129,7 +1182,6 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
        ttm_pool_unpopulate(&tt->ttm);
 }
 EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
-#endif
 
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 {
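
With pools now carrying an allocation order, the shrinker above converts between single pages and pool-sized allocations. A small illustration of that accounting; the order-9 figure assumes 4 KiB base pages with 2 MiB transparent huge pages and is only an example, not a statement about every configuration:

#include <linux/kernel.h>

/* Illustrative shrinker accounting for a huge pool: the request arrives
 * in single pages, but the pool frees whole (1 << order)-page allocations.
 */
static unsigned long example_huge_pool_scan(unsigned int order,
                                            unsigned long nr_to_scan)
{
        unsigned long page_nr = 1UL << order;
        unsigned long nr_free_pool = roundup(nr_to_scan, page_nr) >> order;

        /* e.g. order = 9: scanning 1000 pages -> roundup(1000, 512) >> 9 = 2
         * pool entries; freeing both is reported as 2 << 9 = 1024 pages.
         */
        return nr_free_pool << order;
}
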
index 98a6cb9f44fc84fc8d66b8f2b6e35b72745e8363..2decc8e2c79f58aad6ea7626a32a0f26665d23eb 100644 (file)
@@ -637,7 +637,8 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo)
        mutex_lock(&bo->madv_lock);
        switch (bo->madv) {
        case VC4_MADV_WILLNEED:
-               refcount_inc(&bo->usecnt);
+               if (!refcount_inc_not_zero(&bo->usecnt))
+                       refcount_set(&bo->usecnt, 1);
                ret = 0;
                break;
        case VC4_MADV_DONTNEED:
@@ -674,10 +675,9 @@ void vc4_bo_dec_usecnt(struct vc4_bo *bo)
        mutex_unlock(&bo->madv_lock);
 }
 
-static void vc4_bo_cache_time_timer(unsigned long data)
+static void vc4_bo_cache_time_timer(struct timer_list *t)
 {
-       struct drm_device *dev = (struct drm_device *)data;
-       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);
 
        schedule_work(&vc4->bo_cache.time_work);
 }
@@ -1039,9 +1039,7 @@ int vc4_bo_cache_init(struct drm_device *dev)
        INIT_LIST_HEAD(&vc4->bo_cache.time_list);
 
        INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
-       setup_timer(&vc4->bo_cache.time_timer,
-                   vc4_bo_cache_time_timer,
-                   (unsigned long)dev);
+       timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
 
        return 0;
 }
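
The usecnt change above handles the case where a WILLNEED buffer legitimately sits at zero users: refcount_inc() would WARN on a 0 -> 1 transition, so the increment falls back to setting the count explicitly. A stripped-down sketch of that idiom, assuming (as the driver does with madv_lock) that callers are serialized:

#include <linux/refcount.h>

/* Minimal sketch of the 0 -> 1 handling used for bo->usecnt. */
static void example_get_use(refcount_t *usecnt)
{
        if (!refcount_inc_not_zero(usecnt))
                refcount_set(usecnt, 1);        /* first user after a drop to zero */
}
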
index e00ac2f3a264b362e6deddff1ffb65714e30ae97..638540943c61a5e095c87be8d2b2bf543ea933b1 100644 (file)
@@ -312,10 +312,10 @@ vc4_reset_work(struct work_struct *work)
 }
 
 static void
-vc4_hangcheck_elapsed(unsigned long data)
+vc4_hangcheck_elapsed(struct timer_list *t)
 {
-       struct drm_device *dev = (struct drm_device *)data;
-       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
+       struct drm_device *dev = vc4->dev;
        uint32_t ct0ca, ct1ca;
        unsigned long irqflags;
        struct vc4_exec_info *bin_exec, *render_exec;
@@ -888,8 +888,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
        /* If we got force-completed because of GPU reset rather than
         * through our IRQ handler, signal the fence now.
         */
-       if (exec->fence)
+       if (exec->fence) {
                dma_fence_signal(exec->fence);
+               dma_fence_put(exec->fence);
+       }
 
        if (exec->bo) {
                for (i = 0; i < exec->bo_count; i++) {
@@ -1154,9 +1156,7 @@ vc4_gem_init(struct drm_device *dev)
        spin_lock_init(&vc4->job_lock);
 
        INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
-       setup_timer(&vc4->hangcheck.timer,
-                   vc4_hangcheck_elapsed,
-                   (unsigned long)dev);
+       timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);
 
        INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
 
index fa37a1c07cf695900b0cb5b681f727169af51a62..0b2088264039131f05c2f09bc9ba84d2d3793e39 100644 (file)
@@ -424,7 +424,8 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
                                           vc4_encoder->limited_rgb_range ?
                                           HDMI_QUANTIZATION_RANGE_LIMITED :
                                           HDMI_QUANTIZATION_RANGE_FULL,
-                                          vc4_encoder->rgb_range_selectable);
+                                          vc4_encoder->rgb_range_selectable,
+                                          false);
 
        vc4_hdmi_write_infoframe(encoder, &frame);
 }
index 7d7af3a93d941bb9552afc1140c408c8da86be37..26eddbb628936b91f20a000c405bfbc536324e89 100644 (file)
@@ -139,6 +139,7 @@ vc4_irq_finish_render_job(struct drm_device *dev)
        list_move_tail(&exec->head, &vc4->job_done_list);
        if (exec->fence) {
                dma_fence_signal_locked(exec->fence);
+               dma_fence_put(exec->fence);
                exec->fence = NULL;
        }
        vc4_submit_next_render_job(dev);
@@ -208,6 +209,9 @@ vc4_irq_postinstall(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
 
+       /* Undo the effects of a previous vc4_irq_uninstall. */
+       enable_irq(dev->irq);
+
        /* Enable both the render done and out of memory interrupts. */
        V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
 
@@ -225,6 +229,9 @@ vc4_irq_uninstall(struct drm_device *dev)
        /* Clear any pending interrupts we might have left. */
        V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
 
+       /* Finish any interrupt handler still in flight. */
+       disable_irq(dev->irq);
+
        cancel_work_sync(&vc4->overflow_mem_work);
 }
 
index 8fd52f211e9d9623d0225eb0b549aeac7dd31bca..b28876c222b46c6f9b314354d29fb3fd406a979b 100644 (file)
@@ -85,9 +85,9 @@ static const struct dma_fence_ops vgem_fence_ops = {
        .timeline_value_str = vgem_fence_timeline_value_str,
 };
 
-static void vgem_fence_timeout(unsigned long data)
+static void vgem_fence_timeout(struct timer_list *t)
 {
-       struct vgem_fence *fence = (struct vgem_fence *)data;
+       struct vgem_fence *fence = from_timer(fence, t, timer);
 
        dma_fence_signal(&fence->base);
 }
@@ -105,7 +105,7 @@ static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
        dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
                       dma_fence_context_alloc(1), 1);
 
-       setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
+       timer_setup(&fence->timer, vgem_fence_timeout, 0);
 
        /* We force the fence to expire within 10s to prevent driver hangs */
        mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
index 32c9938e1e1eae51923e5953284d33632ab36581..d6e84a589ef1161241950cae10ffe8f0b8323499 100644 (file)
@@ -452,9 +452,9 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
 
 
 static void
-via_dmablit_timer(unsigned long data)
+via_dmablit_timer(struct timer_list *t)
 {
-       drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+       drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
        struct drm_device *dev = blitq->dev;
        int engine = (int)
                (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
@@ -559,8 +559,7 @@ via_init_dmablit(struct drm_device *dev)
                        init_waitqueue_head(blitq->blit_queue + j);
                init_waitqueue_head(&blitq->busy_queue);
                INIT_WORK(&blitq->wq, via_dmablit_workqueue);
-               setup_timer(&blitq->poll_timer, via_dmablit_timer,
-                               (unsigned long)blitq);
+               timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
        }
 }
 
index 07cbc70f00e7dfc8ddbfc7554f8e9c134721bc1f..eae7d52cf1a824acb32ad435f55324011aec28c1 100644 (file)
@@ -173,9 +173,9 @@ static void battery_flat(struct appleir *appleir)
        dev_err(&appleir->input_dev->dev, "possible flat battery?\n");
 }
 
-static void key_up_tick(unsigned long data)
+static void key_up_tick(struct timer_list *t)
 {
-       struct appleir *appleir = (struct appleir *)data;
+       struct appleir *appleir = from_timer(appleir, t, key_up_timer);
        struct hid_device *hid = appleir->hid;
        unsigned long flags;
 
@@ -303,8 +303,7 @@ static int appleir_probe(struct hid_device *hid, const struct hid_device_id *id)
        hid->quirks |= HID_QUIRK_HIDINPUT_FORCE;
 
        spin_lock_init(&appleir->lock);
-       setup_timer(&appleir->key_up_timer,
-                   key_up_tick, (unsigned long) appleir);
+       timer_setup(&appleir->key_up_timer, key_up_tick, 0);
 
        hid_set_drvdata(hid, appleir);
 
index 49c4bd34b3c508259fae68e5714d17d7b0a83b70..87eda34ea2f86aa2abb26ac40793ad2e9df40f56 100644 (file)
@@ -239,9 +239,9 @@ drop_note:
        return;
 }
 
-static void pcmidi_sustained_note_release(unsigned long data)
+static void pcmidi_sustained_note_release(struct timer_list *t)
 {
-       struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data;
+       struct pcmidi_sustain *pms = from_timer(pms, t, timer);
 
        pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity);
        pms->in_use = 0;
@@ -256,8 +256,7 @@ static void init_sustain_timers(struct pcmidi_snd *pm)
                pms = &pm->sustained_notes[i];
                pms->in_use = 0;
                pms->pm = pm;
-               setup_timer(&pms->timer, pcmidi_sustained_note_release,
-                       (unsigned long)pms);
+               timer_setup(&pms->timer, pcmidi_sustained_note_release, 0);
        }
 }
 
index d00391418d1ae307ad1ceead4ca39e289a04dfac..579884ebd94db1eb685a1e9477a4dca5a2dcf674 100644 (file)
@@ -1226,9 +1226,9 @@ static void wiimote_schedule(struct wiimote_data *wdata)
        spin_unlock_irqrestore(&wdata->state.lock, flags);
 }
 
-static void wiimote_init_timeout(unsigned long arg)
+static void wiimote_init_timeout(struct timer_list *t)
 {
-       struct wiimote_data *wdata = (void*)arg;
+       struct wiimote_data *wdata = from_timer(wdata, t, timer);
 
        wiimote_schedule(wdata);
 }
@@ -1740,7 +1740,7 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
        wdata->state.cmd_battery = 0xff;
 
        INIT_WORK(&wdata->init_worker, wiimote_init_worker);
-       setup_timer(&wdata->timer, wiimote_init_timeout, (long)wdata);
+       timer_setup(&wdata->timer, wiimote_init_timeout, 0);
 
        return wdata;
 }
index 19f0cf37e0ed0748be1c0c5cd8cf482cb3384eec..ba0a092ae085d64e309ec9c5b19a5d80d6372a93 100644 (file)
@@ -659,22 +659,28 @@ void vmbus_close(struct vmbus_channel *channel)
                 */
                return;
        }
-       mutex_lock(&vmbus_connection.channel_mutex);
        /*
         * Close all the sub-channels first and then close the
         * primary channel.
         */
        list_for_each_safe(cur, tmp, &channel->sc_list) {
                cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
-               vmbus_close_internal(cur_channel);
                if (cur_channel->rescind) {
+                       wait_for_completion(&cur_channel->rescind_event);
+                       mutex_lock(&vmbus_connection.channel_mutex);
+                       vmbus_close_internal(cur_channel);
                        hv_process_channel_removal(
                                           cur_channel->offermsg.child_relid);
+               } else {
+                       mutex_lock(&vmbus_connection.channel_mutex);
+                       vmbus_close_internal(cur_channel);
                }
+               mutex_unlock(&vmbus_connection.channel_mutex);
        }
        /*
         * Now close the primary.
         */
+       mutex_lock(&vmbus_connection.channel_mutex);
        vmbus_close_internal(channel);
        mutex_unlock(&vmbus_connection.channel_mutex);
 }
index ec5454f3f4a698219fe42e1899537c44f8397a45..c21020b69114b18648ff83562aa97ab95b3b65b3 100644 (file)
@@ -333,6 +333,7 @@ static struct vmbus_channel *alloc_channel(void)
                return NULL;
 
        spin_lock_init(&channel->lock);
+       init_completion(&channel->rescind_event);
 
        INIT_LIST_HEAD(&channel->sc_list);
        INIT_LIST_HEAD(&channel->percpu_list);
@@ -898,6 +899,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
        /*
         * Now wait for offer handling to complete.
         */
+       vmbus_rescind_cleanup(channel);
        while (READ_ONCE(channel->probe_done) == false) {
                /*
                 * We wait here until any channel offer is currently
@@ -913,7 +915,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
        if (channel->device_obj) {
                if (channel->chn_rescind_callback) {
                        channel->chn_rescind_callback(channel);
-                       vmbus_rescind_cleanup(channel);
                        return;
                }
                /*
@@ -922,7 +923,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                 */
                dev = get_device(&channel->device_obj->device);
                if (dev) {
-                       vmbus_rescind_cleanup(channel);
                        vmbus_device_unregister(channel->device_obj);
                        put_device(dev);
                }
@@ -936,13 +936,14 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                 * 2. Then close the primary channel.
                 */
                mutex_lock(&vmbus_connection.channel_mutex);
-               vmbus_rescind_cleanup(channel);
                if (channel->state == CHANNEL_OPEN_STATE) {
                        /*
                         * The channel is currently not open;
                         * it is safe for us to cleanup the channel.
                         */
                        hv_process_channel_removal(rescind->child_relid);
+               } else {
+                       complete(&channel->rescind_event);
                }
                mutex_unlock(&vmbus_connection.channel_mutex);
        }
index 5f11dc014ed619918afd11c9e7ac79fc66d77d96..e5234f953a6d16213920df252ac8ef23d857f924 100644 (file)
@@ -22,6 +22,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -45,6 +46,7 @@ static const unsigned short normal_i2c[] = {
 #define JC42_REG_TEMP          0x05
 #define JC42_REG_MANID         0x06
 #define JC42_REG_DEVICEID      0x07
+#define JC42_REG_SMBUS         0x22 /* NXP and Atmel, possibly others? */
 
 /* Status bits in temperature register */
 #define JC42_ALARM_CRIT_BIT    15
@@ -75,6 +77,9 @@ static const unsigned short normal_i2c[] = {
 #define GT_MANID               0x1c68  /* Giantec */
 #define GT_MANID2              0x132d  /* Giantec, 2nd mfg ID */
 
+/* SMBUS register */
+#define SMBUS_STMOUT           BIT(7)  /* SMBus time-out, active low */
+
 /* Supported chips */
 
 /* Analog Devices */
@@ -495,6 +500,22 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
        data->extended = !!(cap & JC42_CAP_RANGE);
 
+       if (device_property_read_bool(dev, "smbus-timeout-disable")) {
+               int smbus;
+
+               /*
+                * Not all chips support this register, but from a
+                * quick read of various datasheets no chip appears
+                * incompatible with the below attempt to disable
+                * the timeout. And the whole thing is opt-in...
+                */
+               smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS);
+               if (smbus < 0)
+                       return smbus;
+               i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS,
+                                            smbus | SMBUS_STMOUT);
+       }
+
        config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG);
        if (config < 0)
                return config;
index 52a58b8b6e1bd002f6b91b17e1c06cda94a1069b..a139940cd991a39544feb1cc8f529a7c92e4bba8 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
+#include <linux/math64.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/err.h>
@@ -499,8 +500,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
 static long pmbus_reg2data_direct(struct pmbus_data *data,
                                  struct pmbus_sensor *sensor)
 {
-       long val = (s16) sensor->data;
-       long m, b, R;
+       s64 b, val = (s16)sensor->data;
+       s32 m, R;
 
        m = data->info->m[sensor->class];
        b = data->info->b[sensor->class];
@@ -528,11 +529,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
                R--;
        }
        while (R < 0) {
-               val = DIV_ROUND_CLOSEST(val, 10);
+               val = div_s64(val + 5LL, 10L);  /* round closest */
                R++;
        }
 
-       return (val - b) / m;
+       val = div_s64(val - b, m);
+       return clamp_val(val, LONG_MIN, LONG_MAX);
 }
 
 /*
@@ -656,7 +658,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
 static u16 pmbus_data2reg_direct(struct pmbus_data *data,
                                 struct pmbus_sensor *sensor, long val)
 {
-       long m, b, R;
+       s64 b, val64 = val;
+       s32 m, R;
 
        m = data->info->m[sensor->class];
        b = data->info->b[sensor->class];
@@ -673,18 +676,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
                R -= 3;         /* Adjust R and b for data in milli-units */
                b *= 1000;
        }
-       val = val * m + b;
+       val64 = val64 * m + b;
 
        while (R > 0) {
-               val *= 10;
+               val64 *= 10;
                R--;
        }
        while (R < 0) {
-               val = DIV_ROUND_CLOSEST(val, 10);
+               val64 = div_s64(val64 + 5LL, 10L);  /* round closest */
                R++;
        }
 
-       return val;
+       return (u16)clamp_val(val64, S16_MIN, S16_MAX);
 }
 
 static u16 pmbus_data2reg_vid(struct pmbus_data *data,
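
For reference, the DIRECT format above encodes a real-world value X as Y = (m * X + b) * 10^R, so decoding computes X = (Y * 10^-R - b) / m; widening the intermediates to s64/s32 avoids overflow with large m and b coefficients. A simplified sketch of the decode with invented coefficients (the real code additionally rescales to milli-units for non-fan sensors and clamps the result):

#include <linux/math64.h>

/* Hypothetical coefficients -- not taken from any datasheet. */
static long example_direct_decode(s16 regval)
{
        s32 m = 16000, R = -2;
        s64 b = 0, val = regval;

        R = -R;                         /* apply 10^-R to the register value */
        while (R > 0) {
                val *= 10;
                R--;
        }
        while (R < 0) {
                val = div_s64(val + 5, 10);     /* round to closest */
                R++;
        }

        return (long)div_s64(val - b, m);
}
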
index bd126a7c6da2aa1c16aaa5456644198cacfb5222..7da75644c7507c1f6161ea784a318b1aef76aac4 100644 (file)
@@ -42,9 +42,11 @@ static struct stm_ftrace {
  * @len:       length of the data packet
  */
 static void notrace
-stm_ftrace_write(const void *buf, unsigned int len)
+stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len)
 {
-       stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len);
+       struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace);
+
+       stm_source_write(&stm->data, STM_FTRACE_CHAN, buf, len);
 }
 
 static int stm_ftrace_link(struct stm_source_data *data)
index 0d05dadb2dc58a1d8599869cab82baed63ad165c..44cffad43701f4839096bbde5c5937ee22cce135 100644 (file)
@@ -379,7 +379,7 @@ static int cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct platform_device_id cht_wc_i2c_adap_id_table[] = {
+static const struct platform_device_id cht_wc_i2c_adap_id_table[] = {
        { .name = "cht_wcove_ext_chgr" },
        {},
 };
index 9e12a53ef7b8cf2cdccf9de473af8e2cec9c5f36..8eac00efadc1ad8f8e477094e26790a1ec317117 100644 (file)
@@ -1617,6 +1617,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        /* Default timeout in interrupt mode: 200 ms */
        priv->adapter.timeout = HZ / 5;
 
+       if (dev->irq == IRQ_NOTCONNECTED)
+               priv->features &= ~FEATURE_IRQ;
+
        if (priv->features & FEATURE_IRQ) {
                u16 pcictl, pcists;
 
index 174579d32e5f39ecdc44d2c230b55fbfb5d073e2..462948e2c5354e64a09fa769eb1519f9d421d629 100644 (file)
@@ -983,7 +983,7 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
 
        if (adapdata->smba) {
                i2c_del_adapter(adap);
-               if (adapdata->port == (0 << 1)) {
+               if (adapdata->port == (0 << piix4_port_shift_sb800)) {
                        release_region(adapdata->smba, SMBIOSIZE);
                        if (adapdata->sb800_main)
                                release_region(SB800_PIIX4_SMB_IDX, 2);
index dab51761f8c52b0aab12e4b49334aa1607d0a7a0..d4f9cef251acf457f1dba743e141dadc7dc59d1b 100644 (file)
@@ -1,10 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * i2c-stm32.h
  *
  * Copyright (C) M'boumba Cedric Madianga 2017
+ * Copyright (C) STMicroelectronics 2017
  * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
  *
- * License terms:  GNU General Public License (GPL), version 2
  */
 
 #ifndef _I2C_STM32_H
index 4ec108496f15cdf5dcedd3bad1b368cac5482bd8..47c8d00de53f95377e857633035118ba9ed5be31 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for STMicroelectronics STM32 I2C controller
  *
@@ -6,11 +7,11 @@
  * http://www.st.com/resource/en/reference_manual/DM00031020.pdf
  *
  * Copyright (C) M'boumba Cedric Madianga 2016
+ * Copyright (C) STMicroelectronics 2017
  * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
  *
  * This driver is based on i2c-st.c
  *
- * License terms:  GNU General Public License (GPL), version 2
  */
 
 #include <linux/clk.h>
index d4a6e9c2e9aaeaa679bb159ade420bcdb37e6988..b445b3bb0bb11fe262363042a2eb56e458f6ffc2 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for STMicroelectronics STM32F7 I2C controller
  *
@@ -7,11 +8,11 @@
  * http://www.st.com/resource/en/reference_manual/dm00124865.pdf
  *
  * Copyright (C) M'boumba Cedric Madianga 2017
+ * Copyright (C) STMicroelectronics 2017
  * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
  *
  * This driver is based on i2c-stm32f4.c
  *
- * License terms:  GNU General Public License (GPL), version 2
  */
 #include <linux/clk.h>
 #include <linux/delay.h>
index 31186ead5a40717dc491753eb1ebd37f7a1b0e52..509a6007cdf659129917d4dd156983509bc3353b 100644 (file)
@@ -86,6 +86,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig
                                        property_entries_dup(info->properties);
                        if (IS_ERR(devinfo->board_info.properties)) {
                                status = PTR_ERR(devinfo->board_info.properties);
+                               kfree(devinfo);
                                break;
                        }
                }
@@ -98,6 +99,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig
                                        GFP_KERNEL);
                        if (!devinfo->board_info.resources) {
                                status = -ENOMEM;
+                               kfree(devinfo);
                                break;
                        }
                }
index 3576ec73ec232a9b33538377bf9a924e743e26bf..9ad60421d360539db31636b65baf42fe4bf67824 100644 (file)
@@ -1011,7 +1011,7 @@ static int cpcap_adc_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, indio_dev);
 
        ddata->irq = platform_get_irq_byname(pdev, "adcdone");
-       if (!ddata->irq)
+       if (ddata->irq < 0)
                return -ENODEV;
 
        error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL,
index 9c6932ffc0afdbc66b3dd1cb7032adf84fb8720e..36047147ce7c727003a0f08df34880302ebd2e08 100644 (file)
@@ -221,8 +221,10 @@ enum meson_sar_adc_chan7_mux_sel {
 
 struct meson_sar_adc_data {
        bool                                    has_bl30_integration;
+       u32                                     bandgap_reg;
        unsigned int                            resolution;
        const char                              *name;
+       const struct regmap_config              *regmap_config;
 };
 
 struct meson_sar_adc_priv {
@@ -242,13 +244,20 @@ struct meson_sar_adc_priv {
        int                                     calibscale;
 };
 
-static const struct regmap_config meson_sar_adc_regmap_config = {
+static const struct regmap_config meson_sar_adc_regmap_config_gxbb = {
        .reg_bits = 8,
        .val_bits = 32,
        .reg_stride = 4,
        .max_register = MESON_SAR_ADC_REG13,
 };
 
+static const struct regmap_config meson_sar_adc_regmap_config_meson8 = {
+       .reg_bits = 8,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .max_register = MESON_SAR_ADC_DELTA_10,
+};
+
 static unsigned int meson_sar_adc_get_fifo_count(struct iio_dev *indio_dev)
 {
        struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
@@ -600,7 +609,7 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
        init.num_parents = 1;
 
        priv->clk_gate.reg = base + MESON_SAR_ADC_REG3;
-       priv->clk_gate.bit_idx = fls(MESON_SAR_ADC_REG3_CLK_EN);
+       priv->clk_gate.bit_idx = __ffs(MESON_SAR_ADC_REG3_CLK_EN);
        priv->clk_gate.hw.init = &init;
 
        priv->adc_clk = devm_clk_register(&indio_dev->dev, &priv->clk_gate.hw);
@@ -685,6 +694,20 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
        return 0;
 }
 
+static void meson_sar_adc_set_bandgap(struct iio_dev *indio_dev, bool on_off)
+{
+       struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+       u32 enable_mask;
+
+       if (priv->data->bandgap_reg == MESON_SAR_ADC_REG11)
+               enable_mask = MESON_SAR_ADC_REG11_BANDGAP_EN;
+       else
+               enable_mask = MESON_SAR_ADC_DELTA_10_TS_VBG_EN;
+
+       regmap_update_bits(priv->regmap, priv->data->bandgap_reg, enable_mask,
+                          on_off ? enable_mask : 0);
+}
+
 static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
 {
        struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
@@ -717,9 +740,9 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
        regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1);
        regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
                           MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval);
-       regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
-                          MESON_SAR_ADC_REG11_BANDGAP_EN,
-                          MESON_SAR_ADC_REG11_BANDGAP_EN);
+
+       meson_sar_adc_set_bandgap(indio_dev, true);
+
        regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
                           MESON_SAR_ADC_REG3_ADC_EN,
                           MESON_SAR_ADC_REG3_ADC_EN);
@@ -739,8 +762,7 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
 err_adc_clk:
        regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
                           MESON_SAR_ADC_REG3_ADC_EN, 0);
-       regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
-                          MESON_SAR_ADC_REG11_BANDGAP_EN, 0);
+       meson_sar_adc_set_bandgap(indio_dev, false);
        clk_disable_unprepare(priv->sana_clk);
 err_sana_clk:
        clk_disable_unprepare(priv->core_clk);
@@ -765,8 +787,8 @@ static int meson_sar_adc_hw_disable(struct iio_dev *indio_dev)
 
        regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
                           MESON_SAR_ADC_REG3_ADC_EN, 0);
-       regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
-                          MESON_SAR_ADC_REG11_BANDGAP_EN, 0);
+
+       meson_sar_adc_set_bandgap(indio_dev, false);
 
        clk_disable_unprepare(priv->sana_clk);
        clk_disable_unprepare(priv->core_clk);
@@ -844,30 +866,40 @@ static const struct iio_info meson_sar_adc_iio_info = {
 
 static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
        .has_bl30_integration = false,
+       .bandgap_reg = MESON_SAR_ADC_DELTA_10,
+       .regmap_config = &meson_sar_adc_regmap_config_meson8,
        .resolution = 10,
        .name = "meson-meson8-saradc",
 };
 
 static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
        .has_bl30_integration = false,
+       .bandgap_reg = MESON_SAR_ADC_DELTA_10,
+       .regmap_config = &meson_sar_adc_regmap_config_meson8,
        .resolution = 10,
        .name = "meson-meson8b-saradc",
 };
 
 static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
        .has_bl30_integration = true,
+       .bandgap_reg = MESON_SAR_ADC_REG11,
+       .regmap_config = &meson_sar_adc_regmap_config_gxbb,
        .resolution = 10,
        .name = "meson-gxbb-saradc",
 };
 
 static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
        .has_bl30_integration = true,
+       .bandgap_reg = MESON_SAR_ADC_REG11,
+       .regmap_config = &meson_sar_adc_regmap_config_gxbb,
        .resolution = 12,
        .name = "meson-gxl-saradc",
 };
 
 static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
        .has_bl30_integration = true,
+       .bandgap_reg = MESON_SAR_ADC_REG11,
+       .regmap_config = &meson_sar_adc_regmap_config_gxbb,
        .resolution = 12,
        .name = "meson-gxm-saradc",
 };
@@ -945,7 +977,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
                return ret;
 
        priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
-                                            &meson_sar_adc_regmap_config);
+                                            priv->data->regmap_config);
        if (IS_ERR(priv->regmap))
                return PTR_ERR(priv->regmap);
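
One detail worth noting in the hunks above is the clk_gate.bit_idx fix: bit_idx is a zero-based bit position, which __ffs() returns, whereas fls() is one-based and so pointed the gate one bit too high. An illustration for a hypothetical single-bit enable mask (the real value of MESON_SAR_ADC_REG3_CLK_EN is not shown in this diff):

#include <linux/bitops.h>

#define EXAMPLE_CLK_EN  BIT(26)         /* hypothetical single-bit mask */

static void example_bit_idx(void)
{
        unsigned long idx_ok  = __ffs(EXAMPLE_CLK_EN);  /* 26: zero-based */
        int           idx_off = fls(EXAMPLE_CLK_EN);    /* 27: one-based  */

        (void)idx_ok;
        (void)idx_off;
}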
 
index ea7adb638d99a71c31366ee89361fd1618e76ed6..2ba2ff5e59c47c125af52e86c32badba3d633249 100644 (file)
@@ -175,9 +175,9 @@ static void ssp_wdt_work_func(struct work_struct *work)
        data->timeout_cnt = 0;
 }
 
-static void ssp_wdt_timer_func(unsigned long ptr)
+static void ssp_wdt_timer_func(struct timer_list *t)
 {
-       struct ssp_data *data = (struct ssp_data *)ptr;
+       struct ssp_data *data = from_timer(data, t, wdt_timer);
 
        switch (data->fw_dl_state) {
        case SSP_FW_DL_STATE_FAIL:
@@ -571,7 +571,7 @@ static int ssp_probe(struct spi_device *spi)
        INIT_WORK(&data->work_wdt, ssp_wdt_work_func);
        INIT_DELAYED_WORK(&data->work_refresh, ssp_refresh_task);
 
-       setup_timer(&data->wdt_timer, ssp_wdt_timer_func, (unsigned long)data);
+       timer_setup(&data->wdt_timer, ssp_wdt_timer_func, 0);
 
        ret = request_threaded_irq(data->spi->irq, NULL,
                                   ssp_irq_thread_fn,
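This hunk is one of many conversions in this pull to the new timer API: the callback takes the struct timer_list pointer and recovers its container with from_timer() (a container_of() keyed on the timer field), so the old unsigned-long data argument and its casts go away. A self-contained sketch of the pattern with a made-up structure:

#include <linux/timer.h>

struct my_dev {
        struct timer_list wdt_timer;
        /* ... driver state ... */
};

static void my_dev_wdt_fire(struct timer_list *t)
{
        struct my_dev *dev = from_timer(dev, t, wdt_timer);

        /* handle the watchdog expiry using dev ... */
}

static void my_dev_start(struct my_dev *dev)
{
        timer_setup(&dev->wdt_timer, my_dev_wdt_fire, 0);
        mod_timer(&dev->wdt_timer, jiffies + HZ);
}

The same shape recurs below in the mlx5, mthca, gameport, joystick and ISDN hunks.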
index 203ffb9cad6a2a3a98ac1eaf71951039303099ea..147a8c14235f3f8b39302c8f78beec811f6871dd 100644 (file)
@@ -371,7 +371,7 @@ static int max30102_read_raw(struct iio_dev *indio_dev,
                mutex_unlock(&indio_dev->mlock);
                break;
        case IIO_CHAN_INFO_SCALE:
-               *val = 1;  /* 0.0625 */
+               *val = 1000;  /* 62.5 */
                *val2 = 16;
                ret = IIO_VAL_FRACTIONAL;
                break;
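For IIO_VAL_FRACTIONAL the reported scale is val / val2, so the old pair (1, 16) exposed 1 / 16 = 0.0625 while the corrected pair (1000, 16) yields 1000 / 16 = 62.5, matching the updated comment.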
index 9c4cfd19b7398677a8b1ac3c0fee5985160880e0..2f0998ebeed214dc0f062664595e1d4362d27e3e 100644 (file)
@@ -631,7 +631,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
  * iio_format_value() - Formats a IIO value into its string representation
  * @buf:       The buffer to which the formatted value gets written
  *             which is assumed to be big enough (i.e. PAGE_SIZE).
- * @type:      One of the IIO_VAL_... constants. This decides how the val
+ * @type:      One of the IIO_VAL_* constants. This decides how the val
  *             and val2 parameters are formatted.
  * @size:      Number of IIO value entries contained in vals
  * @vals:      Pointer to the values, exact meaning depends on the
@@ -639,7 +639,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
  *
  * Return: 0 by default, a negative number on failure or the
  *        total number of characters written for a type that belongs
- *        to the IIO_VAL_... constant.
+ *        to the IIO_VAL_* constant.
  */
 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
 {
index 53c5d653e7809560ea7393f249e4d15a7e22bce0..df23dbcc030aea5b2d6e0ddde7237b4d906bf365 100644 (file)
@@ -869,6 +869,7 @@ static int sx9500_init_device(struct iio_dev *indio_dev)
 static void sx9500_gpio_probe(struct i2c_client *client,
                              struct sx9500_data *data)
 {
+       struct gpio_desc *gpiod_int;
        struct device *dev;
 
        if (!client)
@@ -876,6 +877,14 @@ static void sx9500_gpio_probe(struct i2c_client *client,
 
        dev = &client->dev;
 
+       if (client->irq <= 0) {
+               gpiod_int = devm_gpiod_get(dev, SX9500_GPIO_INT, GPIOD_IN);
+               if (IS_ERR(gpiod_int))
+                       dev_err(dev, "gpio get irq failed\n");
+               else
+                       client->irq = gpiod_to_irq(gpiod_int);
+       }
+
        data->gpiod_rst = devm_gpiod_get(dev, SX9500_GPIO_RESET, GPIOD_OUT_HIGH);
        if (IS_ERR(data->gpiod_rst)) {
                dev_warn(dev, "gpio get reset pin failed\n");
index 98ac46ed7214f574fbe13d5f617b9f2b0836bc40..cbf186522016f97f3024ac84bdac4330ddf1a26f 100644 (file)
@@ -1,6 +1,6 @@
 menuconfig INFINIBAND
        tristate "InfiniBand support"
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && HAS_DMA
        depends on NET
        depends on INET
        depends on m || IPV6 != m
index 1fdb473b5df7be38c09ecd49121d516b4893ef13..6294a7001d33bee54b4c516fcfa5faff7556b28b 100644 (file)
@@ -801,6 +801,7 @@ struct rdma_cm_id *rdma_create_id(struct net *net,
        INIT_LIST_HEAD(&id_priv->mc_list);
        get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
        id_priv->id.route.addr.dev_addr.net = get_net(net);
+       id_priv->seq_num &= 0x00ffffff;
 
        return &id_priv->id;
 }
@@ -4457,7 +4458,7 @@ out:
        return skb->len;
 }
 
-static const struct rdma_nl_cbs cma_cb_table[] = {
+static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
        [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
 };
 
index 84fc32a2c8b3e8fc4dfed5be20cbea0921ee737e..30914f3baa5f1ee5b43695c96f56c54b8ce2a2df 100644 (file)
@@ -1146,7 +1146,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
 }
 EXPORT_SYMBOL(ib_get_net_dev_by_params);
 
-static const struct rdma_nl_cbs ibnl_ls_cb_table[] = {
+static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
        [RDMA_NL_LS_OP_RESOLVE] = {
                .doit = ib_nl_handle_resolve_resp,
                .flags = RDMA_NL_ADMIN_PERM,
@@ -1253,5 +1253,5 @@ static void __exit ib_core_cleanup(void)
 
 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
 
-module_init(ib_core_init);
+subsys_initcall(ib_core_init);
 module_exit(ib_core_cleanup);
index e9e189ec7502ca90ccdb25a58ade3bb0167ee731..5d676cff41f496ce519f4dc000eda17f6fd43999 100644 (file)
@@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason)
 }
 EXPORT_SYMBOL(iwcm_reject_msg);
 
-static struct rdma_nl_cbs iwcm_nl_cb_table[] = {
+static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
        [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
        [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
        [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
index 2fae850a3eff6a92703aed33975af9b1e0fc3835..9a05245a1acf4a7fd010fb7f038e84d7bbf5003e 100644 (file)
@@ -303,7 +303,7 @@ out:        cb->args[0] = idx;
        return skb->len;
 }
 
-static const struct rdma_nl_cbs nldev_cb_table[] = {
+static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
        [RDMA_NLDEV_CMD_GET] = {
                .doit = nldev_get_doit,
                .dump = nldev_get_dumpit,
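Several rdma_nl_cbs tables in this series gain an explicit length (RDMA_NL_RDMA_CM_NUM_OPS, RDMA_NL_LS_NUM_OPS, RDMA_NL_IWPM_NUM_OPS, RDMA_NLDEV_NUM_OPS). With designated initializers an unsized array only extends to the highest initialized index, so a dispatcher that indexes the table by any opcode up to *_NUM_OPS can read past its end; declaring the full size pads the remaining slots with zeroed entries instead. A small userspace illustration of the difference:

#include <stdio.h>

#define NUM_OPS 8

struct cb {
        int (*doit)(void);
};

static int get_op(void) { return 0; }

/* unsized: length is highest initialized index + 1, i.e. 3 entries */
static const struct cb short_table[] = { [2] = { .doit = get_op } };
/* sized: always NUM_OPS entries, the rest zero-filled */
static const struct cb full_table[NUM_OPS] = { [2] = { .doit = get_op } };

int main(void)
{
        printf("%zu vs %zu entries\n",
               sizeof(short_table) / sizeof(short_table[0]),
               sizeof(full_table) / sizeof(full_table[0]));
        return 0;
}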
index 23278ed5be4517fd42d8bba6eb07ccd485e5f7f6..feafdb961c485c61e3842d6a946d83b1bf7176b8 100644 (file)
@@ -417,8 +417,17 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec)
 
 int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
 {
+       u8 i = rdma_start_port(dev);
+       bool is_ib = false;
        int ret;
 
+       while (i <= rdma_end_port(dev) && !is_ib)
+               is_ib = rdma_protocol_ib(dev, i++);
+
+       /* If this isn't an IB device don't create the security context */
+       if (!is_ib)
+               return 0;
+
        qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
        if (!qp->qp_sec)
                return -ENOMEM;
@@ -441,6 +450,10 @@ EXPORT_SYMBOL(ib_create_qp_security);
 
 void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
 {
+       /* Return if not IB */
+       if (!sec)
+               return;
+
        mutex_lock(&sec->mutex);
 
        /* Remove the QP from the lists so it won't get added to
@@ -470,6 +483,10 @@ void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
        int ret;
        int i;
 
+       /* Return if not IB */
+       if (!sec)
+               return;
+
        /* If a concurrent cache update is in progress this
         * QP security could be marked for an error state
         * transition.  Wait for this to complete.
@@ -505,6 +522,10 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec)
 {
        int i;
 
+       /* Return if not IB */
+       if (!sec)
+               return;
+
        /* If a concurrent cache update is occurring we must
         * wait until this QP security structure is processed
         * in the QP to error flow before destroying it because
@@ -557,7 +578,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
 {
        int ret = 0;
        struct ib_ports_pkeys *tmp_pps;
-       struct ib_ports_pkeys *new_pps;
+       struct ib_ports_pkeys *new_pps = NULL;
        struct ib_qp *real_qp = qp->real_qp;
        bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
                           real_qp->qp_type == IB_QPT_GSI ||
@@ -565,18 +586,27 @@ int ib_security_modify_qp(struct ib_qp *qp,
        bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
                           (qp_attr_mask & IB_QP_ALT_PATH));
 
+       WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
+                  rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
+                  !real_qp->qp_sec),
+                  "%s: QP security is not initialized for IB QP: %d\n",
+                  __func__, real_qp->qp_num);
+
        /* The port/pkey settings are maintained only for the real QP. Open
         * handles on the real QP will be in the shared_qp_list. When
         * enforcing security on the real QP all the shared QPs will be
         * checked as well.
         */
 
-       if (pps_change && !special_qp) {
+       if (pps_change && !special_qp && real_qp->qp_sec) {
                mutex_lock(&real_qp->qp_sec->mutex);
                new_pps = get_new_pps(real_qp,
                                      qp_attr,
                                      qp_attr_mask);
-
+               if (!new_pps) {
+                       mutex_unlock(&real_qp->qp_sec->mutex);
+                       return -ENOMEM;
+               }
                /* Add this QP to the lists for the new port
                 * and pkey settings before checking for permission
                 * in case there is a concurrent cache update
@@ -600,7 +630,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
                                                 qp_attr_mask,
                                                 udata);
 
-       if (pps_change && !special_qp) {
+       if (new_pps) {
                /* Clean up the lists and free the appropriate
                 * ports_pkeys structure.
                 */
@@ -631,6 +661,9 @@ int ib_security_pkey_access(struct ib_device *dev,
        u16 pkey;
        int ret;
 
+       if (!rdma_protocol_ib(dev, port_num))
+               return 0;
+
        ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
        if (ret)
                return ret;
@@ -665,6 +698,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
 {
        int ret;
 
+       if (!rdma_protocol_ib(agent->device, agent->port_num))
+               return 0;
+
        ret = security_ib_alloc_security(&agent->security);
        if (ret)
                return ret;
@@ -690,6 +726,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
 
 void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
 {
+       if (!rdma_protocol_ib(agent->device, agent->port_num))
+               return;
+
        security_ib_free_security(agent->security);
        if (agent->lsm_nb_reg)
                unregister_lsm_notifier(&agent->lsm_nb);
@@ -697,8 +736,14 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
 
 int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
 {
-       if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
-               return -EACCES;
+       if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
+               return 0;
+
+       if (map->agent.qp->qp_type == IB_QPT_SMI) {
+               if (!map->agent.smp_allowed)
+                       return -EACCES;
+               return 0;
+       }
 
        return ib_security_pkey_access(map->agent.device,
                                       map->agent.port_num,
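The security hooks above now return early for devices that have no InfiniBand port, and all teardown paths tolerate a NULL qp_sec. The port scan at the top of ib_create_qp_security() can equally be read as a small predicate; the helper name below is hypothetical and only restates the loop from the hunk:

#include <rdma/ib_verbs.h>

/* hypothetical helper: true if any port of the device speaks IB */
static bool rdma_dev_has_ib_port(struct ib_device *dev)
{
        u8 port;

        for (port = rdma_start_port(dev); port <= rdma_end_port(dev); port++)
                if (rdma_protocol_ib(dev, port))
                        return true;

        return false;
}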
index 21e60b1e2ff41b1c27e98ebad68e5f4b0ccb7f42..130606c3b07c15f03e5481b1cf22831a7c9a8e85 100644 (file)
@@ -191,7 +191,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        sg_list_start = umem->sg_head.sgl;
 
        while (npages) {
-               ret = get_user_pages(cur_base,
+               ret = get_user_pages_longterm(cur_base,
                                     min_t(unsigned long, npages,
                                           PAGE_SIZE / sizeof (struct page *)),
                                     gup_flags, page_list, vma_list);
index 16d55710b1162aa11ca2d8ed3dd1c1690f60acc8..d0202bb176a4a6a826b27f2b4327691e334ad4ea 100644 (file)
@@ -1971,6 +1971,12 @@ static int modify_qp(struct ib_uverbs_file *file,
                goto release_qp;
        }
 
+       if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
+           !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
+               ret = -EINVAL;
+               goto release_qp;
+       }
+
        attr->qp_state            = cmd->base.qp_state;
        attr->cur_qp_state        = cmd->base.cur_qp_state;
        attr->path_mtu            = cmd->base.path_mtu;
index ea55e95cd2c5df33bf9de567eb685fae3cbaea1c..b7bfc536e00fd8c7b241c0f56539d1394d235acf 100644 (file)
@@ -395,6 +395,11 @@ next_cqe:
 
 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
 {
+       if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) {
+               WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
+               return 0;
+       }
+
        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;
 
index 5ee7fe433136bc22dc7a55ce9bad4930ee49ae1d..38bddd02a9437470e0f3ed98a7e55afbc8cc7384 100644 (file)
@@ -868,7 +868,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
        qhp = to_c4iw_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
-       if (t4_wq_in_error(&qhp->wq)) {
+
+       /*
+        * If the qp has been flushed, then just insert a special
+        * drain cqe.
+        */
+       if (qhp->wq.flushed) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                complete_sq_drain_wr(qhp, wr);
                return err;
@@ -1011,7 +1016,12 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
        qhp = to_c4iw_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
-       if (t4_wq_in_error(&qhp->wq)) {
+
+       /*
+        * If the qp has been flushed, then just insert a special
+        * drain cqe.
+        */
+       if (qhp->wq.flushed) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                complete_rq_drain_wr(qhp, wr);
                return err;
@@ -1285,21 +1295,21 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
        spin_unlock_irqrestore(&rchp->lock, flag);
 
        if (schp == rchp) {
-               if (t4_clear_cq_armed(&rchp->cq) &&
-                   (rq_flushed || sq_flushed)) {
+               if ((rq_flushed || sq_flushed) &&
+                   t4_clear_cq_armed(&rchp->cq)) {
                        spin_lock_irqsave(&rchp->comp_handler_lock, flag);
                        (*rchp->ibcq.comp_handler)(&rchp->ibcq,
                                                   rchp->ibcq.cq_context);
                        spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
                }
        } else {
-               if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
+               if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
                        spin_lock_irqsave(&rchp->comp_handler_lock, flag);
                        (*rchp->ibcq.comp_handler)(&rchp->ibcq,
                                                   rchp->ibcq.cq_context);
                        spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
                }
-               if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
+               if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
                        spin_lock_irqsave(&schp->comp_handler_lock, flag);
                        (*schp->ibcq.comp_handler)(&schp->ibcq,
                                                   schp->ibcq.cq_context);
index fd01a760259fa1887d2f233a2fcbb3ee581e6f5f..af5f7936f7e5ed9eac26598a76fa31e5f5155046 100644 (file)
@@ -814,7 +814,7 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        struct hfi1_16b_header *hdr = &opa_hdr->opah;
        struct ib_other_headers *ohdr;
-       u32 bth0, bth1;
+       u32 bth0, bth1 = 0;
        u16 len, pkey;
        u8 becn = !!is_fecn;
        u8 l4 = OPA_16B_L4_IB_LOCAL;
index 3e4c5253ab5c23d5cd9d7b8789f38fef205b02f2..a40ec939ece58236cfcbbf285fb2a389b23ccadc 100644 (file)
@@ -162,14 +162,10 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
 {
        int i;
        struct device *dev = hr_dev->dev;
-       u32 bits_per_long = BITS_PER_LONG;
 
        if (buf->nbufs == 1) {
                dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
        } else {
-               if (bits_per_long == 64 && buf->page_shift == PAGE_SHIFT)
-                       vunmap(buf->direct.buf);
-
                for (i = 0; i < buf->nbufs; ++i)
                        if (buf->page_list[i].buf)
                                dma_free_coherent(dev, 1 << buf->page_shift,
@@ -185,9 +181,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 {
        int i = 0;
        dma_addr_t t;
-       struct page **pages;
        struct device *dev = hr_dev->dev;
-       u32 bits_per_long = BITS_PER_LONG;
        u32 page_size = 1 << page_shift;
        u32 order;
 
@@ -236,23 +230,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
                        buf->page_list[i].map = t;
                        memset(buf->page_list[i].buf, 0, page_size);
                }
-               if (bits_per_long == 64 && page_shift == PAGE_SHIFT) {
-                       pages = kmalloc_array(buf->nbufs, sizeof(*pages),
-                                             GFP_KERNEL);
-                       if (!pages)
-                               goto err_free;
-
-                       for (i = 0; i < buf->nbufs; ++i)
-                               pages[i] = virt_to_page(buf->page_list[i].buf);
-
-                       buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
-                                              PAGE_KERNEL);
-                       kfree(pages);
-                       if (!buf->direct.buf)
-                               goto err_free;
-               } else {
-                       buf->direct.buf = NULL;
-               }
        }
 
        return 0;
index 01d3d695cbba1f2926929f0b59b47691cb08dd18..b154ce40cded846676feba3cabfdbd5240ad0b5a 100644 (file)
@@ -726,11 +726,9 @@ static inline struct hns_roce_qp
 
 static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
 {
-       u32 bits_per_long_val = BITS_PER_LONG;
        u32 page_size = 1 << buf->page_shift;
 
-       if ((bits_per_long_val == 64 && buf->page_shift == PAGE_SHIFT) ||
-           buf->nbufs == 1)
+       if (buf->nbufs == 1)
                return (char *)(buf->direct.buf) + offset;
        else
                return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
index 8b733a66fae5f27da8c3b183ba8e4b0ad60d018d..0eeabfbee192efed31c46d948ee6db264d1085fd 100644 (file)
@@ -224,6 +224,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
                        sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
                        chunk->npages = 0;
                        chunk->nsg = 0;
+                       memset(chunk->buf, 0, sizeof(chunk->buf));
                        list_add_tail(&chunk->list, &hem->chunk_list);
                }
 
@@ -240,8 +241,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
                if (!buf)
                        goto fail;
 
-               sg_set_buf(mem, buf, PAGE_SIZE << order);
-               WARN_ON(mem->offset);
+               chunk->buf[chunk->npages] = buf;
                sg_dma_len(mem) = PAGE_SIZE << order;
 
                ++chunk->npages;
@@ -267,8 +267,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
        list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i)
                        dma_free_coherent(hr_dev->dev,
-                                  chunk->mem[i].length,
-                                  lowmem_page_address(sg_page(&chunk->mem[i])),
+                                  sg_dma_len(&chunk->mem[i]),
+                                  chunk->buf[i],
                                   sg_dma_address(&chunk->mem[i]));
                kfree(chunk);
        }
@@ -722,11 +722,12 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
        struct hns_roce_hem_chunk *chunk;
        struct hns_roce_hem_mhop mhop;
        struct hns_roce_hem *hem;
-       struct page *page = NULL;
+       void *addr = NULL;
        unsigned long mhop_obj = obj;
        unsigned long obj_per_chunk;
        unsigned long idx_offset;
        int offset, dma_offset;
+       int length;
        int i, j;
        u32 hem_idx = 0;
 
@@ -763,25 +764,25 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 
        list_for_each_entry(chunk, &hem->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
+                       length = sg_dma_len(&chunk->mem[i]);
                        if (dma_handle && dma_offset >= 0) {
-                               if (sg_dma_len(&chunk->mem[i]) >
-                                   (u32)dma_offset)
+                               if (length > (u32)dma_offset)
                                        *dma_handle = sg_dma_address(
                                                &chunk->mem[i]) + dma_offset;
-                               dma_offset -= sg_dma_len(&chunk->mem[i]);
+                               dma_offset -= length;
                        }
 
-                       if (chunk->mem[i].length > (u32)offset) {
-                               page = sg_page(&chunk->mem[i]);
+                       if (length > (u32)offset) {
+                               addr = chunk->buf[i] + offset;
                                goto out;
                        }
-                       offset -= chunk->mem[i].length;
+                       offset -= length;
                }
        }
 
 out:
        mutex_unlock(&table->mutex);
-       return page ? lowmem_page_address(page) + offset : NULL;
+       return addr;
 }
 EXPORT_SYMBOL_GPL(hns_roce_table_find);
 
index db66db12075e2b42151fbc24d148615e0841b8f9..e8850d59e7804caa45dd5e2cd77b140c7bfd7047 100644 (file)
@@ -78,6 +78,7 @@ struct hns_roce_hem_chunk {
        int                      npages;
        int                      nsg;
        struct scatterlist       mem[HNS_ROCE_HEM_CHUNK_LEN];
+       void                     *buf[HNS_ROCE_HEM_CHUNK_LEN];
 };
 
 struct hns_roce_hem {
index 8f719c00467b833e15a507e522ac05c7857c5940..8e18445714a96db307d6e4cdda09117333c567ae 100644 (file)
@@ -1126,9 +1126,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 {
        struct hns_roce_v2_mpt_entry *mpt_entry;
        struct scatterlist *sg;
+       u64 page_addr;
        u64 *pages;
+       int i, j;
+       int len;
        int entry;
-       int i;
 
        mpt_entry = mb_buf;
        memset(mpt_entry, 0, sizeof(*mpt_entry));
@@ -1186,14 +1188,20 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 
        i = 0;
        for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
-               pages[i] = ((u64)sg_dma_address(sg)) >> 6;
-
-               /* Record the first 2 entry directly to MTPT table */
-               if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
-                       break;
-               i++;
+               len = sg_dma_len(sg) >> PAGE_SHIFT;
+               for (j = 0; j < len; ++j) {
+                       page_addr = sg_dma_address(sg) +
+                                   (j << mr->umem->page_shift);
+                       pages[i] = page_addr >> 6;
+
+                       /* Record the first 2 entry directly to MTPT table */
+                       if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+                               goto found;
+                       i++;
+               }
        }
 
+found:
        mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
        roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
                       V2_MPT_BYTE_56_PA0_H_S,
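The MTPT fix stops assuming one page per scatterlist entry: a DMA-mapped entry may cover several pages, so the inner loop now steps through sg_dma_len(sg) in page-sized increments. Reduced to its essentials (fill_page() is a hypothetical stand-in for writing the pages[] array):

#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

static void walk_umem_pages(struct ib_umem *umem,
                            void (*fill_page)(u64 dma_addr))
{
        struct scatterlist *sg;
        u64 page_addr;
        int entry, j, npages;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                npages = sg_dma_len(sg) >> PAGE_SHIFT;
                for (j = 0; j < npages; j++) {
                        page_addr = sg_dma_address(sg) +
                                    (j << umem->page_shift);
                        fill_page(page_addr);
                }
        }
}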
index 493d6ef3d2d57e4f1e020fd680ab4fc009f8cb7e..77870f9e173684d91f99a5a0627d0490b14342e9 100644 (file)
@@ -1043,7 +1043,7 @@ negotiate_done:
  * i40iw_schedule_cm_timer
  * @@cm_node: connection's node
  * @sqbuf: buffer to send
- * @type: if it es send ot close
+ * @type: if it is send or close
  * @send_retrans: if rexmits to be done
  * @close_when_complete: is cm_node to be removed
  *
@@ -1067,7 +1067,8 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
 
        new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
        if (!new_send) {
-               i40iw_free_sqbuf(vsi, (void *)sqbuf);
+               if (type != I40IW_TIMER_TYPE_CLOSE)
+                       i40iw_free_sqbuf(vsi, (void *)sqbuf);
                return -ENOMEM;
        }
        new_send->retrycount = I40IW_DEFAULT_RETRYS;
@@ -1082,7 +1083,6 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
                new_send->timetosend += (HZ / 10);
                if (cm_node->close_entry) {
                        kfree(new_send);
-                       i40iw_free_sqbuf(vsi, (void *)sqbuf);
                        i40iw_pr_err("already close entry\n");
                        return -EINVAL;
                }
@@ -2947,8 +2947,6 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
                        loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
                        cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
                        loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
-                       loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
-                       i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
                }
                return cm_node;
        }
@@ -3689,11 +3687,16 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        cm_id->add_ref(cm_id);
        i40iw_add_ref(&iwqp->ibqp);
 
-       i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
-
        attr.qp_state = IB_QPS_RTS;
        cm_node->qhash_set = false;
        i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+
+       cm_node->accelerated = 1;
+       status =
+               i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
+       if (status)
+               i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - ESTABLISHED\n");
+
        if (cm_node->loopbackpartner) {
                cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
 
@@ -3704,7 +3707,6 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
        }
 
-       cm_node->accelerated = 1;
        if (cm_node->accept_pend) {
                atomic_dec(&cm_node->listener->pend_accepts_cnt);
                cm_node->accept_pend = 0;
@@ -3864,6 +3866,12 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                        goto err;
        }
 
+       if (cm_node->loopbackpartner) {
+               cm_node->loopbackpartner->state = I40IW_CM_STATE_MPAREQ_RCVD;
+               i40iw_create_event(cm_node->loopbackpartner,
+                                  I40IW_CM_EVENT_MPA_REQ);
+       }
+
        i40iw_debug(cm_node->dev,
                    I40IW_DEBUG_CM,
                    "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
@@ -4044,9 +4052,6 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
        dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
        if (iwqp->page)
                kunmap(iwqp->page);
-       status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
-       if (status)
-               i40iw_pr_err("send cm event\n");
 
        memset(&attr, 0, sizeof(attr));
        attr.qp_state = IB_QPS_RTS;
@@ -4054,6 +4059,10 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
        i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
 
        cm_node->accelerated = 1;
+       status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
+                                    0);
+       if (status)
+               i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - CONNECT_REPLY\n");
 
        return;
 
index d88c6cf47cf275565ea98aba66f9638387db702c..da9821a10e0dfe8e3cb2fcede8f34b92f8a8f41e 100644 (file)
@@ -513,7 +513,7 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
 
        ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
                                          &cqp->sdbuf,
-                                         128,
+                                         I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,
                                          I40IW_SD_BUF_ALIGNMENT);
 
        if (ret_code)
@@ -596,14 +596,15 @@ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
 }
 
 /**
- * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
- * @cqp: struct for cqp hw
- * @wqe_idx: we index of cqp ring
+ * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index
+ * @cqp: pointer to CQP structure
+ * @scratch: private data for CQP WQE
+ * @wqe_idx: WQE index for next WQE on CQP SQ
  */
-u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,
+                                              u64 scratch, u32 *wqe_idx)
 {
        u64 *wqe = NULL;
-       u32     wqe_idx;
        enum i40iw_status_code ret_code;
 
        if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
@@ -616,20 +617,32 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
                            cqp->sq_ring.size);
                return NULL;
        }
-       I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
+       I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
        cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
        if (ret_code)
                return NULL;
-       if (!wqe_idx)
+       if (!*wqe_idx)
                cqp->polarity = !cqp->polarity;
 
-       wqe = cqp->sq_base[wqe_idx].elem;
-       cqp->scratch_array[wqe_idx] = scratch;
+       wqe = cqp->sq_base[*wqe_idx].elem;
+       cqp->scratch_array[*wqe_idx] = scratch;
        I40IW_CQP_INIT_WQE(wqe);
 
        return wqe;
 }
 
+/**
+ * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
+ * @cqp: struct for cqp hw
+ * @scratch: private data for CQP WQE
+ */
+u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+{
+       u32 wqe_idx;
+
+       return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+}
+
 /**
  * i40iw_sc_cqp_destroy - destroy cqp during close
  * @cqp: struct for cqp hw
@@ -3587,8 +3600,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
        u64 *wqe;
        int mem_entries, wqe_entries;
        struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
+       u64 offset;
+       u32 wqe_idx;
 
-       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
 
@@ -3601,8 +3616,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
                 LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
 
        if (mem_entries) {
-               memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
-               data = sdbuf->pa;
+               offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;
+               memcpy((char *)sdbuf->va + offset, &info->entry[3],
+                      mem_entries << 4);
+               data = (u64)sdbuf->pa + offset;
        } else {
                data = 0;
        }
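The CQP change gives every SQ slot its own scatter buffer: the allocation grows to I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size and cqp_sds_wqe_fill() copies its entries at offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE (128 bytes per WQE, 0x80-aligned), so a later SD command posted on the ring can no longer overwrite the buffer of one the hardware has not yet consumed.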
index 65ec39e3746b42fe351e3c8efaf1fc4e9960f3fa..029083cb81d53d1809c3ea0967fa95a2230d3345 100644 (file)
 #define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)
 
 #define I40IWQPC_ARPIDX_SHIFT 48
-#define I40IWQPC_ARPIDX_MASK (0xfffULL << I40IWQPC_ARPIDX_SHIFT)
+#define I40IWQPC_ARPIDX_MASK (0xffffULL << I40IWQPC_ARPIDX_SHIFT)
 
 #define I40IWQPC_FLOWLABEL_SHIFT 0
 #define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT)
@@ -1526,7 +1526,7 @@ enum i40iw_alignment {
        I40IW_AEQ_ALIGNMENT =           0x100,
        I40IW_CEQ_ALIGNMENT =           0x100,
        I40IW_CQ0_ALIGNMENT =           0x100,
-       I40IW_SD_BUF_ALIGNMENT =        0x100
+       I40IW_SD_BUF_ALIGNMENT =        0x80
 };
 
 #define I40IW_WQE_SIZE_64      64
@@ -1534,6 +1534,8 @@ enum i40iw_alignment {
 #define I40IW_QP_WQE_MIN_SIZE  32
 #define I40IW_QP_WQE_MAX_SIZE  128
 
+#define I40IW_UPDATE_SD_BUF_SIZE 128
+
 #define I40IW_CQE_QTYPE_RQ 0
 #define I40IW_CQE_QTYPE_SQ 1
 
index 013049bcdb53d5fb95ca61d1ab9640aaa62b0756..caf490ab24c809e403f07bf2471bd2e1290e36b7 100644 (file)
@@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
                return (-EOPNOTSUPP);
        }
 
+       if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4      |
+                                         MLX4_IB_RX_HASH_DST_IPV4      |
+                                         MLX4_IB_RX_HASH_SRC_IPV6      |
+                                         MLX4_IB_RX_HASH_DST_IPV6      |
+                                         MLX4_IB_RX_HASH_SRC_PORT_TCP  |
+                                         MLX4_IB_RX_HASH_DST_PORT_TCP  |
+                                         MLX4_IB_RX_HASH_SRC_PORT_UDP  |
+                                         MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+               pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
+                        ucmd->rx_hash_fields_mask);
+               return (-EOPNOTSUPP);
+       }
+
        if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
            (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
                rss_ctx->flags = MLX4_RSS_IPV4;
@@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
                        return (-EOPNOTSUPP);
                }
 
-               if (rss_ctx->flags & MLX4_RSS_IPV4) {
+               if (rss_ctx->flags & MLX4_RSS_IPV4)
                        rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
-               } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+               if (rss_ctx->flags & MLX4_RSS_IPV6)
                        rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
-               } else {
+               if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
                        pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
                        return (-EOPNOTSUPP);
                }
@@ -707,15 +720,14 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
 
        if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
            (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
-               if (rss_ctx->flags & MLX4_RSS_IPV4) {
+               if (rss_ctx->flags & MLX4_RSS_IPV4)
                        rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
-               } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+               if (rss_ctx->flags & MLX4_RSS_IPV6)
                        rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
-               } else {
+               if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
                        pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
                        return (-EOPNOTSUPP);
                }
-
        } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
                   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
                pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
index 9beee9cef137719f4cdc9f092a4a6f982adfcec2..ee0ee1f9994b4fae933d8590b72ba138d29e176d 100644 (file)
@@ -642,9 +642,9 @@ err:
        return -ENOMEM;
 }
 
-static void delay_time_func(unsigned long ctx)
+static void delay_time_func(struct timer_list *t)
 {
-       struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
+       struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
 
        dev->fill_delay = 0;
 }
@@ -663,7 +663,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
                return -ENOMEM;
        }
 
-       setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
+       timer_setup(&dev->delay_timer, delay_time_func, 0);
        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];
                INIT_LIST_HEAD(&ent->head);
index f6474c24f193c4edb379dd37847093b7fb217321..ffb98eaaf1c2036e1bdea340a1c3c679bf1062e5 100644 (file)
@@ -130,9 +130,9 @@ static void handle_catas(struct mthca_dev *dev)
        spin_unlock_irqrestore(&catas_lock, flags);
 }
 
-static void poll_catas(unsigned long dev_ptr)
+static void poll_catas(struct timer_list *t)
 {
-       struct mthca_dev *dev = (struct mthca_dev *) dev_ptr;
+       struct mthca_dev *dev = from_timer(dev, t, catas_err.timer);
        int i;
 
        for (i = 0; i < dev->catas_err.size; ++i)
@@ -149,7 +149,7 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
 {
        phys_addr_t addr;
 
-       init_timer(&dev->catas_err.timer);
+       timer_setup(&dev->catas_err.timer, poll_catas, 0);
        dev->catas_err.map  = NULL;
 
        addr = pci_resource_start(dev->pdev, 0) +
@@ -164,8 +164,6 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
                return;
        }
 
-       dev->catas_err.timer.data     = (unsigned long) dev;
-       dev->catas_err.timer.function = poll_catas;
        dev->catas_err.timer.expires  = jiffies + MTHCA_CATAS_POLL_INTERVAL;
        INIT_LIST_HEAD(&dev->catas_err.list);
        add_timer(&dev->catas_err.timer);
index db46b7b53fb4f94a8727864120106fa1e01bbf52..162475aeeedd7ffd30ca0478e9bcbe0c16204e54 100644 (file)
@@ -3819,7 +3819,7 @@ void  nes_port_ibevent(struct nes_vnic *nesvnic)
        if (!nesvnic->event_timer.function) {
                ib_dispatch_event(&event);
                nesvnic->last_dispatched_event = event.event;
-               nesvnic->event_timer.function = (TIMER_FUNC_TYPE)nes_handle_delayed_event;
+               nesvnic->event_timer.function = nes_handle_delayed_event;
                nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
                add_timer(&nesvnic->event_timer);
        } else {
index 87f4bd99cdf7102e1e6e43c23a03615103d13b2c..2c13123bfd69499e3ac7661871d176c57979664b 100644 (file)
@@ -1145,6 +1145,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
        noio_flag = memalloc_noio_save();
        p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
        if (!p->tx_ring) {
+               memalloc_noio_restore(noio_flag);
                ret = -ENOMEM;
                goto err_tx;
        }
index cedc665364cd6e872ee8a86795b0de19913b8a28..73862a836062ed48b16def4f8ca31930a2227d26 100644 (file)
@@ -202,9 +202,9 @@ void gameport_stop_polling(struct gameport *gameport)
 }
 EXPORT_SYMBOL(gameport_stop_polling);
 
-static void gameport_run_poll_handler(unsigned long d)
+static void gameport_run_poll_handler(struct timer_list *t)
 {
-       struct gameport *gameport = (struct gameport *)d;
+       struct gameport *gameport = from_timer(gameport, t, poll_timer);
 
        gameport->poll_handler(gameport);
        if (gameport->poll_cnt)
@@ -542,8 +542,7 @@ static void gameport_init_port(struct gameport *gameport)
 
        INIT_LIST_HEAD(&gameport->node);
        spin_lock_init(&gameport->timer_lock);
-       setup_timer(&gameport->poll_timer, gameport_run_poll_handler,
-                   (unsigned long)gameport);
+       timer_setup(&gameport->poll_timer, gameport_run_poll_handler, 0);
 }
 
 /*
index 44916ef4a424391199b9738a2e499440cb4ad357..e30642db50d5208cc7f4fe6e3868f6a8a596321b 100644 (file)
@@ -2047,7 +2047,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
  */
 void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
 {
-       dev->timer.function = (TIMER_FUNC_TYPE)input_repeat_key;
+       dev->timer.function = input_repeat_key;
        dev->rep[REP_DELAY] = delay;
        dev->rep[REP_PERIOD] = period;
 }
index f4ad83eab67f66c7a6ebfa956976e02668e9e32a..de0dd4756c8408c7ee877c68b74a64e109bf5a39 100644 (file)
@@ -364,9 +364,9 @@ static int db9_saturn(int mode, struct parport *port, struct input_dev *devs[])
        return 0;
 }
 
-static void db9_timer(unsigned long private)
+static void db9_timer(struct timer_list *t)
 {
-       struct db9 *db9 = (void *) private;
+       struct db9 *db9 = from_timer(db9, t, timer);
        struct parport *port = db9->pd->port;
        struct input_dev *dev = db9->dev[0];
        struct input_dev *dev2 = db9->dev[1];
@@ -609,7 +609,7 @@ static void db9_attach(struct parport *pp)
        db9->pd = pd;
        db9->mode = mode;
        db9->parportno = pp->number;
-       setup_timer(&db9->timer, db9_timer, (long)db9);
+       timer_setup(&db9->timer, db9_timer, 0);
 
        for (i = 0; i < (min(db9_mode->n_pads, DB9_MAX_DEVICES)); i++) {
 
index ca734ea97e53e9fed5dca03f53a59e20607fe569..2ffb2e8bdc3bf456692754a7e6a625e2b572d671 100644 (file)
@@ -743,9 +743,9 @@ static void gc_psx_process_packet(struct gc *gc)
  * gc_timer() initiates reads of console pads data.
  */
 
-static void gc_timer(unsigned long private)
+static void gc_timer(struct timer_list *t)
 {
-       struct gc *gc = (void *) private;
+       struct gc *gc = from_timer(gc, t, timer);
 
 /*
  * N64 pads - must be read first, any read confuses them for 200 us
@@ -974,7 +974,7 @@ static void gc_attach(struct parport *pp)
        mutex_init(&gc->mutex);
        gc->pd = pd;
        gc->parportno = pp->number;
-       setup_timer(&gc->timer, gc_timer, (long) gc);
+       timer_setup(&gc->timer, gc_timer, 0);
 
        for (i = 0; i < n_pads && i < GC_MAX_DEVICES; i++) {
                if (!pads[i])
index a1fdc75a438d10f2d5a7cd9a0edcc75b2403190c..e2685753e460e252a6645ccc479f3f4b2cd4ec02 100644 (file)
@@ -89,9 +89,9 @@ static struct tgfx {
  * tgfx_timer() reads and analyzes TurboGraFX joystick data.
  */
 
-static void tgfx_timer(unsigned long private)
+static void tgfx_timer(struct timer_list *t)
 {
-       struct tgfx *tgfx = (void *) private;
+       struct tgfx *tgfx = from_timer(tgfx, t, timer);
        struct input_dev *dev;
        int data1, data2, i;
 
@@ -200,7 +200,7 @@ static void tgfx_attach(struct parport *pp)
        mutex_init(&tgfx->sem);
        tgfx->pd = pd;
        tgfx->parportno = pp->number;
-       setup_timer(&tgfx->timer, tgfx_timer, (long)tgfx);
+       timer_setup(&tgfx->timer, tgfx_timer, 0);
 
        for (i = 0; i < n_devs; i++) {
                if (n_buttons[i] < 1)
index d3265b6b58b8ebc06b69e8a40983d5303b938054..1173890f6719ca5c801276f3585d0e9ab6b67e0c 100644 (file)
@@ -102,7 +102,7 @@ static inline bool get_down(unsigned long data0, unsigned long data1)
                !(data1 & S3C2410_ADCDAT0_UPDOWN));
 }
 
-static void touch_timer_fire(unsigned long data)
+static void touch_timer_fire(struct timer_list *unused)
 {
        unsigned long data0;
        unsigned long data1;
index a0babdbf71460dda5dc156bfe7b7b9313e0da679..4a2de34895ec3177eb07082afe46a8921fa9f958 100644 (file)
@@ -2250,10 +2250,12 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                uint64_t tmp;
 
                if (!sg_res) {
+                       unsigned int pgoff = sg->offset & ~PAGE_MASK;
+
                        sg_res = aligned_nrpages(sg->offset, sg->length);
-                       sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
+                       sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
                        sg->dma_length = sg->length;
-                       pteval = page_to_phys(sg_page(sg)) | prot;
+                       pteval = (sg_phys(sg) - pgoff) | prot;
                        phys_pfn = pteval >> VTD_PAGE_SHIFT;
                }
 
@@ -3787,7 +3789,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
        for_each_sg(sglist, sg, nelems, i) {
                BUG_ON(!sg_page(sg));
-               sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+               sg->dma_address = sg_phys(sg);
                sg->dma_length = sg->length;
        }
        return nelems;
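sg_phys(sg) is page_to_phys(sg_page(sg)) + sg->offset, so the nontranslate path now reports the real physical address directly. In __domain_mapping() the offset is split as pgoff = sg->offset & ~PAGE_MASK; using pteval = (sg_phys(sg) - pgoff) | prot keeps the PTE page-aligned while still accounting for an sg->offset that spans one or more whole pages, which the old page_to_phys(sg_page(sg)) expression ignored.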
index 466aaa8ba841c3253226543adfecc030b4e5d6f6..83fe2621effe72bc1cbeecd80df4030235d87328 100644 (file)
@@ -36,7 +36,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
 static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
 static void fq_destroy_all_entries(struct iova_domain *iovad);
-static void fq_flush_timeout(unsigned long data);
+static void fq_flush_timeout(struct timer_list *t);
 
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -107,7 +107,7 @@ int init_iova_flush_queue(struct iova_domain *iovad,
                spin_lock_init(&fq->lock);
        }
 
-       setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
+       timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
        atomic_set(&iovad->fq_timer_on, 0);
 
        return 0;
@@ -519,9 +519,9 @@ static void fq_destroy_all_entries(struct iova_domain *iovad)
        }
 }
 
-static void fq_flush_timeout(unsigned long data)
+static void fq_flush_timeout(struct timer_list *t)
 {
-       struct iova_domain *iovad = (struct iova_domain *)data;
+       struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
        int cpu;
 
        atomic_set(&iovad->fq_timer_on, 0);
index 53380bd72ea4068ec394a60e371b093727a6e543..c70476b34a53f1c76f9bb470ac2a34269acfbf24 100644 (file)
@@ -41,8 +41,15 @@ config ARM_GIC_V3
 
 config ARM_GIC_V3_ITS
        bool
+       select GENERIC_MSI_IRQ_DOMAIN
+       default ARM_GIC_V3
+
+config ARM_GIC_V3_ITS_PCI
+       bool
+       depends on ARM_GIC_V3_ITS
        depends on PCI
        depends on PCI_MSI
+       default ARM_GIC_V3_ITS
 
 config ARM_NVIC
        bool
index dae7282bfdef31fc0ce7475854cdff995dbcae93..d2df34a54d38b1aa00fc6cdc71bf754b4ccecace 100644 (file)
@@ -30,7 +30,8 @@ obj-$(CONFIG_ARM_GIC_PM)              += irq-gic-pm.o
 obj-$(CONFIG_ARCH_REALVIEW)            += irq-gic-realview.o
 obj-$(CONFIG_ARM_GIC_V2M)              += irq-gic-v2m.o
 obj-$(CONFIG_ARM_GIC_V3)               += irq-gic-v3.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS)           += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS)           += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS_PCI)       += irq-gic-v3-its-pci-msi.o
 obj-$(CONFIG_PARTITION_PERCPU)         += irq-partition-percpu.o
 obj-$(CONFIG_HISILICON_IRQ_MBIGEN)     += irq-mbigen.o
 obj-$(CONFIG_ARM_NVIC)                 += irq-nvic.o
index 17221143f5057ce35f84f6021bf972f32b61bc48..b56c3e23f0af921142ded1e8bd5438e449bd7cc9 100644 (file)
@@ -1103,18 +1103,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
        int nr_parts;
        struct partition_affinity *parts;
 
-       parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
+       parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
        if (!parts_node)
                return;
 
        nr_parts = of_get_child_count(parts_node);
 
        if (!nr_parts)
-               return;
+               goto out_put_node;
 
        parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
        if (WARN_ON(!parts))
-               return;
+               goto out_put_node;
 
        for_each_child_of_node(parts_node, child_part) {
                struct partition_affinity *part;
@@ -1181,6 +1181,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
 
                gic_data.ppi_descs[i] = desc;
        }
+
+out_put_node:
+       of_node_put(parts_node);
 }
 
 static void __init gic_of_setup_kvm_info(struct device_node *node)
@@ -1523,7 +1526,7 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
 
        err = gic_validate_dist_version(acpi_data.dist_base);
        if (err) {
-               pr_err("No distributor detected at @%p, giving up",
+               pr_err("No distributor detected at @%p, giving up\n",
                       acpi_data.dist_base);
                goto out_dist_unmap;
        }
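of_find_node_by_name() searches the whole tree starting after the node it is given and drops a reference on that starting node, while of_get_child_by_name() only looks at direct children and leaves the parent's refcount alone; the child it returns must then be released with of_node_put() on every exit path, which is what the new out_put_node label provides. The bare pattern, with illustrative names:

#include <linux/of.h>

static void parse_ppi_partitions(struct device_node *gic_node)
{
        struct device_node *parts_node;

        parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
        if (!parts_node)
                return;

        /* ... walk the child nodes and build the partition descriptors ... */

        of_node_put(parts_node);        /* balance of_get_child_by_name() */
}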
index cd0bcc3b7e33709a472c1952c5ee7ccdf1ea0382..dba9d67cb9c138856aec6d5b6917408bf7e87355 100644 (file)
@@ -177,6 +177,7 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)
                        .map      = map,
                },
        };
+       int ret;
 
        /*
         * The host will never see that interrupt firing again, so it
@@ -184,7 +185,11 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)
         */
        irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 
-       return irq_set_vcpu_affinity(irq, &info);
+       ret = irq_set_vcpu_affinity(irq, &info);
+       if (ret)
+               irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
+       return ret;
 }
 
 int its_get_vlpi(int irq, struct its_vlpi_map *map)
index 1f59998e03f806e72ad48434885192ffacf98c70..e80263e16c4c8fe37f7fca956523d1a810567931 100644 (file)
@@ -325,7 +325,7 @@ static int pdc_intc_probe(struct platform_device *pdev)
 
        /* Ioremap the registers */
        priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start,
-                                     res_regs->end - res_regs->start);
+                                     resource_size(res_regs));
        if (!priv->pdc_base)
                return -EIO;
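resource_size(res) evaluates to res->end - res->start + 1, so the open-coded res_regs->end - res_regs->start mapped one byte less than the register window; the helper is both correct and clearer about the intent.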
 
index c25ce5af091addb2f23653feb8fcbac27d5c9180..ec0e6a8cdb7558433d933868520d8d72e7b28b2b 100644 (file)
@@ -156,7 +156,7 @@ static int s3c_irq_type(struct irq_data *data, unsigned int type)
                irq_set_handler(data->irq, handle_level_irq);
                break;
        default:
-               pr_err("No such irq type %d", type);
+               pr_err("No such irq type %d\n", type);
                return -EINVAL;
        }
 
@@ -204,7 +204,7 @@ static int s3c_irqext_type_set(void __iomem *gpcon_reg,
                        break;
 
                default:
-                       pr_err("No such irq type %d", type);
+                       pr_err("No such irq type %d\n", type);
                        return -EINVAL;
        }
 
index 1b6e2f7c59af67ba5e3bdbf1d2387e85ccee1d56..1927b2f36ff6e5e760fce6bd7b5776c51ff9179b 100644 (file)
@@ -196,8 +196,8 @@ static int __init exiu_init(struct device_node *node,
        }
 
        data->base = of_iomap(node, 0);
-       if (IS_ERR(data->base)) {
-               err = PTR_ERR(data->base);
+       if (!data->base) {
+               err = -ENODEV;
                goto out_free;
        }
 
index 6aa3ea4792148d057b9b5d766acd0a7dd5813979..f31265937439608314bf55d70665b55a299a4666 100644 (file)
@@ -238,7 +238,7 @@ static int __init combiner_probe(struct platform_device *pdev)
 {
        struct combiner *combiner;
        size_t alloc_sz;
-       u32 nregs;
+       int nregs;
        int err;
 
        nregs = count_registers(pdev);
index 89dd1303a98a13925c90a09d2089a2870c3ea928..49fef08858c5370e9a0ed67872a8f9ff9aca6cff 100644 (file)
@@ -2235,9 +2235,9 @@ static void send_listen(capidrv_contr *card)
        send_message(card, &cmdcmsg);
 }
 
-static void listentimerfunc(unsigned long x)
+static void listentimerfunc(struct timer_list *t)
 {
-       capidrv_contr *card = (capidrv_contr *)x;
+       capidrv_contr *card = from_timer(card, t, listentimer);
        if (card->state != ST_LISTEN_NONE && card->state != ST_LISTEN_ACTIVE)
                printk(KERN_ERR "%s: controller dead ??\n", card->name);
        send_listen(card);
@@ -2264,7 +2264,7 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
                return -1;
        }
        card->owner = THIS_MODULE;
-       setup_timer(&card->listentimer, listentimerfunc, (unsigned long)card);
+       timer_setup(&card->listentimer, listentimerfunc, 0);
        strcpy(card->name, id);
        card->contrnr = contr;
        card->nbchan = profp->nbchannel;
index 6f423bc49d0dcfa0a0bd4ef9321213dbd14de07e..5620fd2c6009dfb4e24664e17fad9ca2db3079d5 100644 (file)
@@ -55,10 +55,10 @@ DEFINE_SPINLOCK(divert_lock);
 /***************************/
 /* timer callback function */
 /***************************/
-static void deflect_timer_expire(ulong arg)
+static void deflect_timer_expire(struct timer_list *t)
 {
        unsigned long flags;
-       struct call_struc *cs = (struct call_struc *) arg;
+       struct call_struc *cs = from_timer(cs, t, timer);
 
        spin_lock_irqsave(&divert_lock, flags);
        del_timer(&cs->timer); /* delete active timer */
@@ -157,7 +157,7 @@ int cf_command(int drvid, int mode,
        /* allocate mem for information struct */
        if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
                return (-ENOMEM); /* no memory */
-       setup_timer(&cs->timer, deflect_timer_expire, (ulong)cs);
+       timer_setup(&cs->timer, deflect_timer_expire, 0);
        cs->info[0] = '\0';
        cs->ics.driver = drvid;
        cs->ics.command = ISDN_CMD_PROT_IO; /* protocol specific io */
@@ -450,8 +450,7 @@ static int isdn_divert_icall(isdn_ctrl *ic)
                                        return (0); /* no external deflection needed */
                        if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
                                return (0); /* no memory */
-                       setup_timer(&cs->timer, deflect_timer_expire,
-                                   (ulong)cs);
+                       timer_setup(&cs->timer, deflect_timer_expire, 0);
                        cs->info[0] = '\0';
 
                        cs->ics = *ic; /* copy incoming data */
index c61049585cbd7b67f24e057238244b8938500a9e..0033d74a72917e18dd58be87fa99f312ad7b9a70 100644 (file)
@@ -78,7 +78,7 @@ static unsigned int um_idi_poll(struct file *file, poll_table *wait);
 static int um_idi_open(struct inode *inode, struct file *file);
 static int um_idi_release(struct inode *inode, struct file *file);
 static int remove_entity(void *entity);
-static void diva_um_timer_function(unsigned long data);
+static void diva_um_timer_function(struct timer_list *t);
 
 /*
  * proc entry
@@ -300,8 +300,7 @@ static int um_idi_open_adapter(struct file *file, int adapter_nr)
        p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(e);
        init_waitqueue_head(&p_os->read_wait);
        init_waitqueue_head(&p_os->close_wait);
-       setup_timer(&p_os->diva_timer_id, (void *)diva_um_timer_function,
-                   (unsigned long)p_os);
+       timer_setup(&p_os->diva_timer_id, diva_um_timer_function, 0);
        p_os->aborted = 0;
        p_os->adapter_nr = adapter_nr;
        return (1);
@@ -457,9 +456,9 @@ void diva_os_wakeup_close(void *os_context)
 }
 
 static
-void diva_um_timer_function(unsigned long data)
+void diva_um_timer_function(struct timer_list *t)
 {
-       diva_um_idi_os_context_t *p_os = (diva_um_idi_os_context_t *) data;
+       diva_um_idi_os_context_t *p_os = from_timer(p_os, t, diva_timer_id);
 
        p_os->aborted = 1;
        wake_up_interruptible(&p_os->read_wait);
index 3cf07b8ced1c067c43c3aeae463e306ae1b22c99..4d85645c87f78721a83fcef94be1feb3bce8c094 100644 (file)
@@ -2855,7 +2855,7 @@ irq_notforus:
  */
 
 static void
-hfcmulti_dbusy_timer(struct hfc_multi *hc)
+hfcmulti_dbusy_timer(struct timer_list *t)
 {
 }
 
@@ -3877,8 +3877,7 @@ hfcmulti_initmode(struct dchannel *dch)
                if (hc->dnum[pt]) {
                        mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol,
                                      -1, 0, -1, 0);
-                       setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer,
-                                   (long)dch);
+                       timer_setup(&dch->timer, hfcmulti_dbusy_timer, 0);
                }
                for (i = 1; i <= 31; i++) {
                        if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
@@ -3984,8 +3983,7 @@ hfcmulti_initmode(struct dchannel *dch)
                hc->chan[i].slot_rx = -1;
                hc->chan[i].conf = -1;
                mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0);
-               setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer,
-                           (long)dch);
+               timer_setup(&dch->timer, hfcmulti_dbusy_timer, 0);
                hc->chan[i - 2].slot_tx = -1;
                hc->chan[i - 2].slot_rx = -1;
                hc->chan[i - 2].conf = -1;
index e4ebbee863a17442e96cd6e3e5bb3c9041e5d6da..34c93874af23bc43565119eab4b8b9b5d84a0815 100644 (file)
@@ -301,8 +301,9 @@ reset_hfcpci(struct hfc_pci *hc)
  * Timer function called when kernel timer expires
  */
 static void
-hfcpci_Timer(struct hfc_pci *hc)
+hfcpci_Timer(struct timer_list *t)
 {
+       struct hfc_pci *hc = from_timer(hc, t, hw.timer);
        hc->hw.timer.expires = jiffies + 75;
        /* WD RESET */
 /*
@@ -1241,7 +1242,7 @@ hfcpci_int(int intno, void *dev_id)
  * timer callback for D-chan busy resolution. Currently no function
  */
 static void
-hfcpci_dbusy_timer(struct hfc_pci *hc)
+hfcpci_dbusy_timer(struct timer_list *t)
 {
 }
 
@@ -1717,8 +1718,7 @@ static void
 inithfcpci(struct hfc_pci *hc)
 {
        printk(KERN_DEBUG "inithfcpci: entered\n");
-       setup_timer(&hc->dch.timer, (void *)hfcpci_dbusy_timer,
-                   (long)&hc->dch);
+       timer_setup(&hc->dch.timer, hfcpci_dbusy_timer, 0);
        hc->chanlimit = 2;
        mode_hfcpci(&hc->bch[0], 1, -1);
        mode_hfcpci(&hc->bch[1], 2, -1);
@@ -2043,7 +2043,7 @@ setup_hw(struct hfc_pci *hc)
        Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
        /* At this point the needed PCI config is done */
        /* fifos are still not enabled */
-       setup_timer(&hc->hw.timer, (void *)hfcpci_Timer, (long)hc);
+       timer_setup(&hc->hw.timer, hfcpci_Timer, 0);
        /* default PCM master */
        test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
        return 0;
index 5b078591b6ee846455ac25642c25c8d932bde99d..b791688d0228ccb8921b57775bf6330adb8a96ea 100644 (file)
@@ -1146,9 +1146,9 @@ mISDNisar_irq(struct isar_hw *isar)
 EXPORT_SYMBOL(mISDNisar_irq);
 
 static void
-ftimer_handler(unsigned long data)
+ftimer_handler(struct timer_list *t)
 {
-       struct isar_ch *ch = (struct isar_ch *)data;
+       struct isar_ch *ch = from_timer(ch, t, ftimer);
 
        pr_debug("%s: ftimer flags %lx\n", ch->is->name, ch->bch.Flags);
        test_and_clear_bit(FLG_FTI_RUN, &ch->bch.Flags);
@@ -1635,11 +1635,9 @@ init_isar(struct isar_hw *isar)
        }
        if (isar->version != 1)
                return -EINVAL;
-       setup_timer(&isar->ch[0].ftimer, &ftimer_handler,
-                   (long)&isar->ch[0]);
+       timer_setup(&isar->ch[0].ftimer, ftimer_handler, 0);
        test_and_set_bit(FLG_INITIALIZED, &isar->ch[0].bch.Flags);
-       setup_timer(&isar->ch[1].ftimer, &ftimer_handler,
-                   (long)&isar->ch[1]);
+       timer_setup(&isar->ch[1].ftimer, ftimer_handler, 0);
        test_and_set_bit(FLG_INITIALIZED, &isar->ch[1].bch.Flags);
        return 0;
 }
index 38a5bb764c7b55cb8b742639e49756e413b4ab26..8b03d618185e3c7c0c74c7dc8bad6fe1169f7e19 100644 (file)
@@ -231,7 +231,7 @@ static int isdn_timer_cnt2 = 0;
 static int isdn_timer_cnt3 = 0;
 
 static void
-isdn_timer_funct(ulong dummy)
+isdn_timer_funct(struct timer_list *unused)
 {
        int tf = dev->tflags;
        if (tf & ISDN_TIMER_FAST) {
@@ -2294,8 +2294,7 @@ static int __init isdn_init(void)
                printk(KERN_WARNING "isdn: Could not allocate device-struct.\n");
                return -EIO;
        }
-       init_timer(&dev->timer);
-       dev->timer.function = isdn_timer_funct;
+       timer_setup(&dev->timer, isdn_timer_funct, 0);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->timerlock);
 #ifdef MODULE
index f63a110b7bcb2d2257869484bd9b894d94b9b5d2..c138f66f26595bcfe714f1b3e1f0838ef4ab6c3f 100644 (file)
@@ -1509,9 +1509,9 @@ static int isdn_net_ioctl(struct net_device *dev,
 
 /* called via cisco_timer.function */
 static void
-isdn_net_ciscohdlck_slarp_send_keepalive(unsigned long data)
+isdn_net_ciscohdlck_slarp_send_keepalive(struct timer_list *t)
 {
-       isdn_net_local *lp = (isdn_net_local *) data;
+       isdn_net_local *lp = from_timer(lp, t, cisco_timer);
        struct sk_buff *skb;
        unsigned char *p;
        unsigned long last_cisco_myseq = lp->cisco_myseq;
@@ -1615,9 +1615,8 @@ isdn_net_ciscohdlck_connected(isdn_net_local *lp)
        /* send slarp request because interface/seq.no.s reset */
        isdn_net_ciscohdlck_slarp_send_request(lp);
 
-       init_timer(&lp->cisco_timer);
-       lp->cisco_timer.data = (unsigned long) lp;
-       lp->cisco_timer.function = isdn_net_ciscohdlck_slarp_send_keepalive;
+       timer_setup(&lp->cisco_timer,
+                   isdn_net_ciscohdlck_slarp_send_keepalive, 0);
        lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
        add_timer(&lp->cisco_timer);
 }
index cd2b3c69771a24b62a7952a8e7c626fde5459c6b..e07aefb9151ded8b057715ec689286d35ef09733 100644 (file)
@@ -50,7 +50,7 @@ static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is);
 static void isdn_ppp_ccp_reset_free(struct ippp_struct *is);
 static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
                                          unsigned char id);
-static void isdn_ppp_ccp_timer_callback(unsigned long closure);
+static void isdn_ppp_ccp_timer_callback(struct timer_list *t);
 static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is,
                                                                   unsigned char id);
 static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is,
@@ -2327,10 +2327,10 @@ static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
 
 /* The timer callback function which is called when a ResetReq has timed out,
    aka has never been answered by a ResetAck */
-static void isdn_ppp_ccp_timer_callback(unsigned long closure)
+static void isdn_ppp_ccp_timer_callback(struct timer_list *t)
 {
        struct ippp_ccp_reset_state *rs =
-               (struct ippp_ccp_reset_state *)closure;
+               from_timer(rs, t, timer);
 
        if (!rs) {
                printk(KERN_ERR "ippp_ccp: timer cb with zero closure.\n");
@@ -2376,8 +2376,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
                rs->state = CCPResetIdle;
                rs->is = is;
                rs->id = id;
-               setup_timer(&rs->timer, isdn_ppp_ccp_timer_callback,
-                           (unsigned long)rs);
+               timer_setup(&rs->timer, isdn_ppp_ccp_timer_callback, 0);
                is->reset->rs[id] = rs;
        }
        return rs;
index d30130c8d0f3d356b25aad643fc4c5736e24692e..960f26348bb58e00f81166444a53d7544f8e7f68 100644 (file)
@@ -541,9 +541,9 @@ isdn_tty_senddown(modem_info *info)
  * into the tty's buffer.
  */
 static void
-isdn_tty_modem_do_ncarrier(unsigned long data)
+isdn_tty_modem_do_ncarrier(struct timer_list *t)
 {
-       modem_info *info = (modem_info *) data;
+       modem_info *info = from_timer(info, t, nc_timer);
        isdn_tty_modem_result(RESULT_NO_CARRIER, info);
 }
 
@@ -1812,8 +1812,7 @@ isdn_tty_modem_init(void)
                info->isdn_channel = -1;
                info->drv_index = -1;
                info->xmit_size = ISDN_SERIAL_XMIT_SIZE;
-               setup_timer(&info->nc_timer, isdn_tty_modem_do_ncarrier,
-                           (unsigned long)info);
+               timer_setup(&info->nc_timer, isdn_tty_modem_do_ncarrier, 0);
                skb_queue_head_init(&info->xmit_queue);
 #ifdef CONFIG_ISDN_AUDIO
                skb_queue_head_init(&info->dtmf_queue);
index ce90213a42faea35805a5d964739a54c835a00d3..76516ee84e9adb63ecc6c670aad17f30edc72fb2 100644 (file)
@@ -270,9 +270,9 @@ static void pblk_write_kick(struct pblk *pblk)
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
 }
 
-void pblk_write_timer_fn(unsigned long data)
+void pblk_write_timer_fn(struct timer_list *t)
 {
-       struct pblk *pblk = (struct pblk *)data;
+       struct pblk *pblk = from_timer(pblk, t, wtimer);
 
        /* kick the write thread every tick to flush outstanding data */
        pblk_write_kick(pblk);
index 00d5698d64a9a58852b2f91dcc1d4bac6c395320..9c8e114c8a545ca45cbea56660a2838f4a67e812 100644 (file)
@@ -442,9 +442,9 @@ next_gc_group:
                goto next_gc_group;
 }
 
-static void pblk_gc_timer(unsigned long data)
+static void pblk_gc_timer(struct timer_list *t)
 {
-       struct pblk *pblk = (struct pblk *)data;
+       struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);
 
        pblk_gc_kick(pblk);
 }
@@ -601,7 +601,7 @@ int pblk_gc_init(struct pblk *pblk)
                goto fail_free_writer_kthread;
        }
 
-       setup_timer(&gc->gc_timer, pblk_gc_timer, (unsigned long)pblk);
+       timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
        mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
 
        gc->gc_active = 0;
index f62112ba5482a345e33fa4bddb537b25d3c2db95..695826a06b5d2f87150f63348db49c67d96f38eb 100644 (file)
@@ -866,7 +866,7 @@ fail:
 
 static int pblk_writer_init(struct pblk *pblk)
 {
-       setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
+       timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
 
        pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
index abae31fd434e5bd667e3ba761ace2189f250397a..dacc71922260b8bd2ccb80e2732338b09a73bb2d 100644 (file)
@@ -158,9 +158,9 @@ int pblk_rl_max_io(struct pblk_rl *rl)
        return rl->rb_max_io;
 }
 
-static void pblk_rl_u_timer(unsigned long data)
+static void pblk_rl_u_timer(struct timer_list *t)
 {
-       struct pblk_rl *rl = (struct pblk_rl *)data;
+       struct pblk_rl *rl = from_timer(rl, t, u_timer);
 
        /* Release user I/O state. Protect from GC */
        smp_store_release(&rl->rb_user_active, 0);
@@ -202,7 +202,7 @@ void pblk_rl_init(struct pblk_rl *rl, int budget)
        atomic_set(&rl->rb_gc_cnt, 0);
        atomic_set(&rl->rb_space, -1);
 
-       setup_timer(&rl->u_timer, pblk_rl_u_timer, (unsigned long)rl);
+       timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);
 
        rl->rb_user_active = 0;
        rl->rb_gc_active = 0;
index 90961033a79fcd2af5c8ec425aec3978c29e9fa5..59a64d461a5dcf1e25c07000293fab0a7642a7ee 100644 (file)
@@ -797,7 +797,7 @@ void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
  * pblk write thread
  */
 int pblk_write_ts(void *data);
-void pblk_write_timer_fn(unsigned long data);
+void pblk_write_timer_fn(struct timer_list *t);
 void pblk_write_should_kick(struct pblk *pblk);
 
 /*
index 267f01ae87e447b6fba7e2fe47f39a1a926416f4..0993c14be86011c63d91373bada4c9e8363865ec 100644 (file)
@@ -267,9 +267,9 @@ static void rrpc_gc_kick(struct rrpc *rrpc)
 /*
  * timed GC every interval.
  */
-static void rrpc_gc_timer(unsigned long data)
+static void rrpc_gc_timer(struct timer_list *t)
 {
-       struct rrpc *rrpc = (struct rrpc *)data;
+       struct rrpc *rrpc = from_timer(rrpc, t, gc_timer);
 
        rrpc_gc_kick(rrpc);
        mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
@@ -1063,7 +1063,7 @@ static int rrpc_gc_init(struct rrpc *rrpc)
        if (!rrpc->kgc_wq)
                return -ENOMEM;
 
-       setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
+       timer_setup(&rrpc->gc_timer, rrpc_gc_timer, 0);
 
        return 0;
 }
index a27d85232ce1343ce802576ab3584ed67be6f35d..a0cc1bc6d88445a3ced1e5dcd1906100b5e619c6 100644 (file)
@@ -490,7 +490,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                if (b == -1)
                        goto err;
 
-               k->ptr[i] = PTR(ca->buckets[b].gen,
+               k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
                                bucket_to_sector(c, b),
                                ca->sb.nr_this_dev);
 
index 11c5503d31dc3029df2cde14f8f9e9fc48514bc2..81e8dc3dbe5e30604438ca0658d39900cde7e2c8 100644 (file)
@@ -807,7 +807,10 @@ int bch_btree_cache_alloc(struct cache_set *c)
        c->shrink.scan_objects = bch_mca_scan;
        c->shrink.seeks = 4;
        c->shrink.batch = c->btree_pages * 2;
-       register_shrinker(&c->shrink);
+
+       if (register_shrinker(&c->shrink))
+               pr_warn("bcache: %s: could not register shrinker\n",
+                               __func__);
 
        return 0;
 }
index 41c238fc37338073632c017d705143b62e344e6f..f9d391711595fb87c479251fbf382459174d7e76 100644 (file)
@@ -585,7 +585,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
                return false;
 
        for (i = 0; i < KEY_PTRS(l); i++)
-               if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+               if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
                    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
                        return false;
 
index 02a98ddb592d3b7aaaf0f12cc31b6a5937988ffe..a87165c1d8e5262d01962eb706b60ff9fd02cb78 100644 (file)
@@ -170,6 +170,11 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
                 * find a sequence of buckets with valid journal entries
                 */
                for (i = 0; i < ca->sb.njournal_buckets; i++) {
+                       /*
+                        * We must try the index l with ZERO first for
+                        * correctness because the journal bucket is a
+                        * circular buffer which might have wrapped
+                        */
                        l = (i * 2654435769U) % ca->sb.njournal_buckets;
 
                        if (test_bit(l, bitmap))
@@ -507,7 +512,7 @@ static void journal_reclaim(struct cache_set *c)
                        continue;
 
                ja->cur_idx = next;
-               k->ptr[n++] = PTR(0,
+               k->ptr[n++] = MAKE_PTR(0,
                                  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
                                  ca->sb.nr_this_dev);
        }
index 3a7aed7282b2a0227e9f04cec6d82404bad55f23..643c3021624faa1fc6af0b84bdf49d07135d6238 100644 (file)
@@ -708,16 +708,15 @@ static void cached_dev_read_error(struct closure *cl)
 {
        struct search *s = container_of(cl, struct search, cl);
        struct bio *bio = &s->bio.bio;
-       struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
        /*
-        * If cache device is dirty (dc->has_dirty is non-zero), then
-        * recovery a failed read request from cached device may get a
-        * stale data back. So read failure recovery is only permitted
-        * when cache device is clean.
+        * If the read request hit dirty data (s->read_dirty_data is
+        * true), then recovering a failed read request from the cached
+        * device may return stale data. So read failure recovery is
+        * only permitted when the read request hit clean data in the
+        * cache device, or when a cache read race happened.
         */
-       if (s->recoverable &&
-           (dc && !atomic_read(&dc->has_dirty))) {
+       if (s->recoverable && !s->read_dirty_data) {
                /* Retry from the backing device: */
                trace_bcache_read_retry(s->orig_bio);
 
index b8ac591aaaa7070bfbd6d32c20993fb9130961f8..c546b567f3b50a3f43b0c074e9319ca908ec5971 100644 (file)
@@ -1611,7 +1611,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
        int l;
        struct dm_buffer *b, *tmp;
        unsigned long freed = 0;
-       unsigned long count = nr_to_scan;
+       unsigned long count = c->n_buffers[LIST_CLEAN] +
+                             c->n_buffers[LIST_DIRTY];
        unsigned long retain_target = get_retain_buffers(c);
 
        for (l = 0; l < LIST_SIZE; l++) {
@@ -1647,8 +1648,11 @@ static unsigned long
 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
        struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
+       unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
+                             READ_ONCE(c->n_buffers[LIST_DIRTY]);
+       unsigned long retain_target = get_retain_buffers(c);
 
-       return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
+       return (count < retain_target) ? 0 : (count - retain_target);
 }
 
 /*
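
In other words, dm_bufio_shrink_count() now reports to the shrinker only the buffers above the retain threshold: with, say, 600 clean and 200 dirty buffers and a retain target of 500, it returns 300 freeable objects rather than 800, and it returns 0 once the total is at or below the target (the numbers are illustrative, not taken from the patch).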
index cf23a14f9c6a6572955746f040802dcfd34d801d..47407e43b96a168eed8660a7284a60132284d189 100644 (file)
@@ -3472,18 +3472,18 @@ static int __init dm_cache_init(void)
 {
        int r;
 
-       r = dm_register_target(&cache_target);
-       if (r) {
-               DMERR("cache target registration failed: %d", r);
-               return r;
-       }
-
        migration_cache = KMEM_CACHE(dm_cache_migration, 0);
        if (!migration_cache) {
                dm_unregister_target(&cache_target);
                return -ENOMEM;
        }
 
+       r = dm_register_target(&cache_target);
+       if (r) {
+               DMERR("cache target registration failed: %d", r);
+               return r;
+       }
+
        return 0;
 }
 
index c8faa2b8584268f75a8177f39677b94edba55289..f7810cc869ac883e11b60e0ad3f29253444d411a 100644 (file)
@@ -457,6 +457,38 @@ do {                                                                       \
                 dm_noflush_suspending((m)->ti));                       \
 } while (0)
 
+/*
+ * Check whether bios must be queued in the device-mapper core rather
+ * than here in the target.
+ *
+ * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
+ * the same value then we are not between multipath_presuspend()
+ * and multipath_resume() calls and we have no need to check
+ * for the DMF_NOFLUSH_SUSPENDING flag.
+ */
+static bool __must_push_back(struct multipath *m, unsigned long flags)
+{
+       return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
+                test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
+               dm_noflush_suspending(m->ti));
+}
+
+/*
+ * The following functions use READ_ONCE to take a consistent snapshot
+ * of m->flags, avoiding the need to take the spinlock.
+ */
+static bool must_push_back_rq(struct multipath *m)
+{
+       unsigned long flags = READ_ONCE(m->flags);
+       return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
+}
+
+static bool must_push_back_bio(struct multipath *m)
+{
+       unsigned long flags = READ_ONCE(m->flags);
+       return __must_push_back(m, flags);
+}
+
 /*
  * Map cloned requests (request-based multipath)
  */
@@ -478,7 +510,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
                pgpath = choose_pgpath(m, nr_bytes);
 
        if (!pgpath) {
-               if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+               if (must_push_back_rq(m))
                        return DM_MAPIO_DELAY_REQUEUE;
                dm_report_EIO(m);       /* Failed */
                return DM_MAPIO_KILL;
@@ -553,7 +585,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
        }
 
        if (!pgpath) {
-               if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+               if (must_push_back_bio(m))
                        return DM_MAPIO_REQUEUE;
                dm_report_EIO(m);
                return DM_MAPIO_KILL;
@@ -651,8 +683,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
        assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
                   (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
                   (!save_old_value && queue_if_no_path));
-       assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
-                  queue_if_no_path || dm_noflush_suspending(m->ti));
+       assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
        spin_unlock_irqrestore(&m->lock, flags);
 
        if (!queue_if_no_path) {
@@ -1486,7 +1517,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
                        fail_path(pgpath);
 
                if (atomic_read(&m->nr_valid_paths) == 0 &&
-                   !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+                   !must_push_back_rq(m)) {
                        if (error == BLK_STS_IOERR)
                                dm_report_EIO(m);
                        /* complete with the original error */
@@ -1521,8 +1552,12 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
 
        if (atomic_read(&m->nr_valid_paths) == 0 &&
            !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-               dm_report_EIO(m);
-               *error = BLK_STS_IOERR;
+               if (must_push_back_bio(m)) {
+                       r = DM_ENDIO_REQUEUE;
+               } else {
+                       dm_report_EIO(m);
+                       *error = BLK_STS_IOERR;
+               }
                goto done;
        }
 
@@ -1957,13 +1992,6 @@ static int __init dm_multipath_init(void)
 {
        int r;
 
-       r = dm_register_target(&multipath_target);
-       if (r < 0) {
-               DMERR("request-based register failed %d", r);
-               r = -EINVAL;
-               goto bad_register_target;
-       }
-
        kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
        if (!kmultipathd) {
                DMERR("failed to create workqueue kmpathd");
@@ -1985,13 +2013,20 @@ static int __init dm_multipath_init(void)
                goto bad_alloc_kmpath_handlerd;
        }
 
+       r = dm_register_target(&multipath_target);
+       if (r < 0) {
+               DMERR("request-based register failed %d", r);
+               r = -EINVAL;
+               goto bad_register_target;
+       }
+
        return 0;
 
+bad_register_target:
+       destroy_workqueue(kmpath_handlerd);
 bad_alloc_kmpath_handlerd:
        destroy_workqueue(kmultipathd);
 bad_alloc_kmultipathd:
-       dm_unregister_target(&multipath_target);
-bad_register_target:
        return r;
 }
 
index 1113b42e1edae4029f550b71c635ea80c76a46b9..a0613bd8ed00efc17d545a3335c39cb6cfb83919 100644 (file)
@@ -2411,24 +2411,6 @@ static int __init dm_snapshot_init(void)
                return r;
        }
 
-       r = dm_register_target(&snapshot_target);
-       if (r < 0) {
-               DMERR("snapshot target register failed %d", r);
-               goto bad_register_snapshot_target;
-       }
-
-       r = dm_register_target(&origin_target);
-       if (r < 0) {
-               DMERR("Origin target register failed %d", r);
-               goto bad_register_origin_target;
-       }
-
-       r = dm_register_target(&merge_target);
-       if (r < 0) {
-               DMERR("Merge target register failed %d", r);
-               goto bad_register_merge_target;
-       }
-
        r = init_origin_hash();
        if (r) {
                DMERR("init_origin_hash failed.");
@@ -2449,19 +2431,37 @@ static int __init dm_snapshot_init(void)
                goto bad_pending_cache;
        }
 
+       r = dm_register_target(&snapshot_target);
+       if (r < 0) {
+               DMERR("snapshot target register failed %d", r);
+               goto bad_register_snapshot_target;
+       }
+
+       r = dm_register_target(&origin_target);
+       if (r < 0) {
+               DMERR("Origin target register failed %d", r);
+               goto bad_register_origin_target;
+       }
+
+       r = dm_register_target(&merge_target);
+       if (r < 0) {
+               DMERR("Merge target register failed %d", r);
+               goto bad_register_merge_target;
+       }
+
        return 0;
 
-bad_pending_cache:
-       kmem_cache_destroy(exception_cache);
-bad_exception_cache:
-       exit_origin_hash();
-bad_origin_hash:
-       dm_unregister_target(&merge_target);
 bad_register_merge_target:
        dm_unregister_target(&origin_target);
 bad_register_origin_target:
        dm_unregister_target(&snapshot_target);
 bad_register_snapshot_target:
+       kmem_cache_destroy(pending_cache);
+bad_pending_cache:
+       kmem_cache_destroy(exception_cache);
+bad_exception_cache:
+       exit_origin_hash();
+bad_origin_hash:
        dm_exception_store_exit();
 
        return r;
index 88130b5d95f909ead8441dec7f3fb5d80a7914c7..aaffd0c0ee9a76c71f23f9bb1074ec8057b8c6f7 100644 (file)
@@ -453,14 +453,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
 
                refcount_set(&dd->count, 1);
                list_add(&dd->list, &t->devices);
+               goto out;
 
        } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
                r = upgrade_mode(dd, mode, t->md);
                if (r)
                        return r;
-               refcount_inc(&dd->count);
        }
-
+       refcount_inc(&dd->count);
+out:
        *result = dd->dm_dev;
        return 0;
 }
index 89e5dff9b4cfc1b87049529238c5c01978345b81..f91d771fff4b6e9d9a488a7a67916326a1e85897 100644 (file)
@@ -4355,30 +4355,28 @@ static struct target_type thin_target = {
 
 static int __init dm_thin_init(void)
 {
-       int r;
+       int r = -ENOMEM;
 
        pool_table_init();
 
+       _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+       if (!_new_mapping_cache)
+               return r;
+
        r = dm_register_target(&thin_target);
        if (r)
-               return r;
+               goto bad_new_mapping_cache;
 
        r = dm_register_target(&pool_target);
        if (r)
-               goto bad_pool_target;
-
-       r = -ENOMEM;
-
-       _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
-       if (!_new_mapping_cache)
-               goto bad_new_mapping_cache;
+               goto bad_thin_target;
 
        return 0;
 
-bad_new_mapping_cache:
-       dm_unregister_target(&pool_target);
-bad_pool_target:
+bad_thin_target:
        dm_unregister_target(&thin_target);
+bad_new_mapping_cache:
+       kmem_cache_destroy(_new_mapping_cache);
 
        return r;
 }
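
The dm-cache, dm-multipath, dm-snapshot and dm-thin hunks above all apply the same correction: allocate the slab caches and workqueues the target depends on first, call dm_register_target() last, and unwind the error path in the reverse order of setup. Registering last closes the window in which a table load could invoke the target's constructor before its resources exist. A minimal sketch of the corrected shape, with hypothetical my_object/my_cache/my_target names:

#include <linux/device-mapper.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX "my-target"       /* DMERR() expects this to be defined */

/* Illustrative only: my_object, my_cache and my_target are hypothetical. */
struct my_object { int dummy; };
static struct kmem_cache *my_cache;
static struct target_type my_target;    /* .name, .ctr, .map, ... elided */

static int __init my_target_init(void)
{
        int r = -ENOMEM;

        my_cache = KMEM_CACHE(my_object, 0);    /* resources first */
        if (!my_cache)
                return r;

        r = dm_register_target(&my_target);     /* registration last */
        if (r) {
                DMERR("my target registration failed: %d", r);
                goto bad_register_target;
        }

        return 0;

bad_register_target:
        kmem_cache_destroy(my_cache);           /* unwind in reverse order */
        return r;
}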
index 41c050b59ec454f139b359fdcdc116d964103d61..4e4dee0ec2de336eba90e2400f1c051b2fff1733 100644 (file)
@@ -7605,7 +7605,9 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
                if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
                        /* Still cleaning up */
                        resync = max_sectors;
-       } else
+       } else if (resync > max_sectors)
+               resync = max_sectors;
+       else
                resync -= atomic_read(&mddev->recovery_active);
 
        if (resync == 0) {
index cc9d337a1ed37e84994622f5de76717734cca4a1..6df398e3a008801f91198b9d9556776d919deb41 100644 (file)
@@ -809,11 +809,15 @@ static void flush_pending_writes(struct r1conf *conf)
        spin_lock_irq(&conf->device_lock);
 
        if (conf->pending_bio_list.head) {
+               struct blk_plug plug;
                struct bio *bio;
+
                bio = bio_list_get(&conf->pending_bio_list);
                conf->pending_count = 0;
                spin_unlock_irq(&conf->device_lock);
+               blk_start_plug(&plug);
                flush_bio_list(conf, bio);
+               blk_finish_plug(&plug);
        } else
                spin_unlock_irq(&conf->device_lock);
 }
index b9edbc747a95932424b42c5d16c1e9d5d61e9491..c131835cf008c2df85e240259cac8182115e4e62 100644 (file)
@@ -894,10 +894,13 @@ static void flush_pending_writes(struct r10conf *conf)
        spin_lock_irq(&conf->device_lock);
 
        if (conf->pending_bio_list.head) {
+               struct blk_plug plug;
                struct bio *bio;
+
                bio = bio_list_get(&conf->pending_bio_list);
                conf->pending_count = 0;
                spin_unlock_irq(&conf->device_lock);
+               blk_start_plug(&plug);
                /* flush any pending bitmap writes to disk
                 * before proceeding w/ I/O */
                bitmap_unplug(conf->mddev->bitmap);
@@ -918,6 +921,7 @@ static void flush_pending_writes(struct r10conf *conf)
                                generic_make_request(bio);
                        bio = next;
                }
+               blk_finish_plug(&plug);
        } else
                spin_unlock_irq(&conf->device_lock);
 }
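
Both the raid1 and raid10 hunks above wrap submission of the pending bio list in a block plug, which lets the block layer batch the bios queued by this task and merge adjacent requests before they are dispatched to the member devices. Reduced to its essentials, the pattern is the following fragment (conf and the bio list come from the surrounding driver code, so this is not a standalone example):

        struct blk_plug plug;
        struct bio *bio = bio_list_get(&conf->pending_bio_list);

        blk_start_plug(&plug);
        while (bio) {
                struct bio *next = bio->bi_next;

                bio->bi_next = NULL;
                generic_make_request(bio);      /* queued under the plug */
                bio = next;
        }
        blk_finish_plug(&plug);                 /* dispatch the batched bios */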
index f1c86d938502e61c93d8951680767f1af5de98e5..39f31f07ffe9ece14d9764f0eb511ea00f812618 100644 (file)
@@ -2577,31 +2577,22 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
 int r5c_journal_mode_set(struct mddev *mddev, int mode)
 {
        struct r5conf *conf;
-       int err;
 
        if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
            mode > R5C_JOURNAL_MODE_WRITE_BACK)
                return -EINVAL;
 
-       err = mddev_lock(mddev);
-       if (err)
-               return err;
        conf = mddev->private;
-       if (!conf || !conf->log) {
-               mddev_unlock(mddev);
+       if (!conf || !conf->log)
                return -ENODEV;
-       }
 
        if (raid5_calc_degraded(conf) > 0 &&
-           mode == R5C_JOURNAL_MODE_WRITE_BACK) {
-               mddev_unlock(mddev);
+           mode == R5C_JOURNAL_MODE_WRITE_BACK)
                return -EINVAL;
-       }
 
        mddev_suspend(mddev);
        conf->log->r5c_journal_mode = mode;
        mddev_resume(mddev);
-       mddev_unlock(mddev);
 
        pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
                 mdname(mddev), mode, r5c_journal_mode_str[mode]);
@@ -2614,6 +2605,7 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
 {
        int mode = ARRAY_SIZE(r5c_journal_mode_str);
        size_t len = length;
+       int ret;
 
        if (len < 2)
                return -EINVAL;
@@ -2625,8 +2617,12 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
                if (strlen(r5c_journal_mode_str[mode]) == len &&
                    !strncmp(page, r5c_journal_mode_str[mode], len))
                        break;
-
-       return r5c_journal_mode_set(mddev, mode) ?: length;
+       ret = mddev_lock(mddev);
+       if (ret)
+               return ret;
+       ret = r5c_journal_mode_set(mddev, mode);
+       mddev_unlock(mddev);
+       return ret ?: length;
 }
 
 struct md_sysfs_entry
index 31dc25e2871ad3942a1fba66f1c7c999612ce83b..98ce4272ace9631045f6e00cf3fd7ea8f96b8d77 100644 (file)
@@ -2677,13 +2677,13 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
        pr_debug("raid456: error called\n");
 
        spin_lock_irqsave(&conf->device_lock, flags);
+       set_bit(Faulty, &rdev->flags);
        clear_bit(In_sync, &rdev->flags);
        mddev->degraded = raid5_calc_degraded(conf);
        spin_unlock_irqrestore(&conf->device_lock, flags);
        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 
        set_bit(Blocked, &rdev->flags);
-       set_bit(Faulty, &rdev->flags);
        set_mask_bits(&mddev->sb_flags, 0,
                      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
        pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
index ce8d78c137f05264763296fc45666c3cb230378c..e1d369b976edc836f4c4ac2b4326faf01eda24d6 100644 (file)
@@ -402,7 +402,7 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
                            sizeof(struct saa7146_buf),
                            file, &dev->v4l2_lock);
 
-       vv->vbi_read_timeout.function = (TIMER_FUNC_TYPE)vbi_read_timeout;
+       vv->vbi_read_timeout.function = vbi_read_timeout;
        vv->vbi_read_timeout_file = file;
 
        /* initialize the brs */
index e4ea2a0c7a240abb9b57deffc9bb988eda407bc7..c5c827e11b640dc40a762550be7677f5ac6db5b6 100644 (file)
@@ -521,13 +521,13 @@ static void list_add_locked(struct list_head *new, struct list_head *head,
        spin_unlock_irqrestore(lock, flags);
 }
 
-/**
+/*
  * register a client callback that called when device plugged in/unplugged
  * NOTE: if devices exist callback is called immediately for each device
  *
  * @param hotplug callback
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 int smscore_register_hotplug(hotplug_t hotplug)
 {
@@ -562,7 +562,7 @@ int smscore_register_hotplug(hotplug_t hotplug)
 }
 EXPORT_SYMBOL_GPL(smscore_register_hotplug);
 
-/**
+/*
  * unregister a client callback that called when device plugged in/unplugged
  *
  * @param hotplug callback
@@ -636,7 +636,7 @@ smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
        return cb;
 }
 
-/**
+/*
  * creates coredev object for a device, prepares buffers,
  * creates buffer mappings, notifies registered hotplugs about new device.
  *
@@ -644,7 +644,7 @@ smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
  *               and handlers
  * @param coredev pointer to a value that receives created coredev object
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 int smscore_register_device(struct smsdevice_params_t *params,
                            struct smscore_device_t **coredev,
@@ -764,10 +764,10 @@ static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev,
                        0 : -ETIME;
 }
 
-/**
+/*
  * Starts & enables IR operations
  *
- * @return 0 on success, < 0 on error.
+ * return: 0 on success, < 0 on error.
  */
 static int smscore_init_ir(struct smscore_device_t *coredev)
 {
@@ -812,13 +812,13 @@ static int smscore_init_ir(struct smscore_device_t *coredev)
        return 0;
 }
 
-/**
+/*
  * configures device features according to board configuration structure.
  *
  * @param coredev pointer to a coredev object returned by
  *                smscore_register_device
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 static int smscore_configure_board(struct smscore_device_t *coredev)
 {
@@ -861,13 +861,13 @@ static int smscore_configure_board(struct smscore_device_t *coredev)
        return 0;
 }
 
-/**
+/*
  * sets initial device mode and notifies client hotplugs that device is ready
  *
  * @param coredev pointer to a coredev object returned by
  *               smscore_register_device
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 int smscore_start_device(struct smscore_device_t *coredev)
 {
@@ -1087,7 +1087,7 @@ static char *smscore_fw_lkup[][DEVICE_MODE_MAX] = {
        },
 };
 
-/**
+/*
  * get firmware file name from one of the two mechanisms : sms_boards or
  * smscore_fw_lkup.
  * @param coredev pointer to a coredev object returned by
@@ -1096,7 +1096,7 @@ static char *smscore_fw_lkup[][DEVICE_MODE_MAX] = {
  * @param lookup if 1, always get the fw filename from smscore_fw_lkup
  *      table. if 0, try first to get from sms_boards
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
                              int mode)
@@ -1125,7 +1125,7 @@ static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
        return fw[mode];
 }
 
-/**
+/*
  * loads specified firmware into a buffer and calls device loadfirmware_handler
  *
  * @param coredev pointer to a coredev object returned by
@@ -1133,7 +1133,7 @@ static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
  * @param filename null-terminated string specifies firmware file name
  * @param loadfirmware_handler device handler that loads firmware
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
                                           int mode,
@@ -1182,14 +1182,14 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
        return rc;
 }
 
-/**
+/*
  * notifies all clients registered with the device, notifies hotplugs,
  * frees all buffers and coredev object
  *
  * @param coredev pointer to a coredev object returned by
  *                smscore_register_device
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 void smscore_unregister_device(struct smscore_device_t *coredev)
 {
@@ -1282,14 +1282,14 @@ static int smscore_detect_mode(struct smscore_device_t *coredev)
        return rc;
 }
 
-/**
+/*
  * send init device request and wait for response
  *
  * @param coredev pointer to a coredev object returned by
  *                smscore_register_device
  * @param mode requested mode of operation
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 static int smscore_init_device(struct smscore_device_t *coredev, int mode)
 {
@@ -1315,7 +1315,7 @@ static int smscore_init_device(struct smscore_device_t *coredev, int mode)
        return rc;
 }
 
-/**
+/*
  * calls device handler to change mode of operation
  * NOTE: stellar/usb may disconnect when changing mode
  *
@@ -1323,7 +1323,7 @@ static int smscore_init_device(struct smscore_device_t *coredev, int mode)
  *                smscore_register_device
  * @param mode requested mode of operation
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
 {
@@ -1411,13 +1411,13 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
        return rc;
 }
 
-/**
+/*
  * calls device handler to get current mode of operation
  *
  * @param coredev pointer to a coredev object returned by
  *                smscore_register_device
  *
- * @return current mode
+ * return: current mode
  */
 int smscore_get_device_mode(struct smscore_device_t *coredev)
 {
@@ -1425,7 +1425,7 @@ int smscore_get_device_mode(struct smscore_device_t *coredev)
 }
 EXPORT_SYMBOL_GPL(smscore_get_device_mode);
 
-/**
+/*
  * find client by response id & type within the clients list.
  * return client handle or NULL.
  *
@@ -1462,7 +1462,7 @@ found:
        return client;
 }
 
-/**
+/*
  * find client by response id/type, call clients onresponse handler
  * return buffer to pool on error
  *
@@ -1615,13 +1615,13 @@ void smscore_onresponse(struct smscore_device_t *coredev,
 }
 EXPORT_SYMBOL_GPL(smscore_onresponse);
 
-/**
+/*
  * return pointer to next free buffer descriptor from core pool
  *
  * @param coredev pointer to a coredev object returned by
  *                smscore_register_device
  *
- * @return pointer to descriptor on success, NULL on error.
+ * return: pointer to descriptor on success, NULL on error.
  */
 
 static struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
@@ -1648,7 +1648,7 @@ struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
 }
 EXPORT_SYMBOL_GPL(smscore_getbuffer);
 
-/**
+/*
  * return buffer descriptor to a pool
  *
  * @param coredev pointer to a coredev object returned by
@@ -1693,7 +1693,7 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
        return 0;
 }
 
-/**
+/*
  * creates smsclient object, check that id is taken by another client
  *
  * @param coredev pointer to a coredev object from clients hotplug
@@ -1705,7 +1705,7 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
  * @param context client-specific context
  * @param client pointer to a value that receives created smsclient object
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 int smscore_register_client(struct smscore_device_t *coredev,
                            struct smsclient_params_t *params,
@@ -1740,7 +1740,7 @@ int smscore_register_client(struct smscore_device_t *coredev,
 }
 EXPORT_SYMBOL_GPL(smscore_register_client);
 
-/**
+/*
  * frees smsclient object and all subclients associated with it
  *
  * @param client pointer to smsclient object returned by
@@ -1771,7 +1771,7 @@ void smscore_unregister_client(struct smscore_client_t *client)
 }
 EXPORT_SYMBOL_GPL(smscore_unregister_client);
 
-/**
+/*
  * verifies that source id is not taken by another client,
  * calls device handler to send requests to the device
  *
@@ -1780,7 +1780,7 @@ EXPORT_SYMBOL_GPL(smscore_unregister_client);
  * @param buffer pointer to a request buffer
  * @param size size (in bytes) of request buffer
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 int smsclient_sendrequest(struct smscore_client_t *client,
                          void *buffer, size_t size)
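
The smscoreapi and dvb-core hunks in this part of the series demote comments that are not valid kernel-doc from the /** opener to a plain /*, replace the '@return' markers in those comments with a plain 'return:' note, and, where real kernel-doc is kept, rename the documented parameters to match the actual arguments. For reference, a well-formed kernel-doc block looks roughly like this (the function name here is hypothetical):

/**
 * smscore_example_op() - one-line summary of what the helper does
 * @coredev: pointer to a coredev object returned by smscore_register_device
 * @mode: requested mode of operation
 *
 * Return: 0 on success, <0 on error.
 */
static int smscore_example_op(struct smscore_device_t *coredev, int mode);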
index 95b3723282f4623b725e1e1c22214f653fa65c74..d48b61eb01f4cdc7f11913208a6beef0136a7bff 100644 (file)
@@ -206,7 +206,7 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
  * @hlen: Number of bytes in haystack.
  * @needle: Buffer to find.
  * @nlen: Number of bytes in needle.
- * @return Pointer into haystack needle was found at, or NULL if not found.
+ * return: Pointer into haystack needle was found at, or NULL if not found.
  */
 static char *findstr(char *haystack, int hlen, char *needle, int nlen)
 {
@@ -226,7 +226,7 @@ static char *findstr(char *haystack, int hlen, char *needle, int nlen)
 /* ************************************************************************** */
 /* EN50221 physical interface functions */
 
-/**
+/*
  * dvb_ca_en50221_check_camstatus - Check CAM status.
  */
 static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot)
@@ -275,9 +275,9 @@ static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot)
  * @ca: CA instance.
  * @slot: Slot on interface.
  * @waitfor: Flags to wait for.
- * @timeout_ms: Timeout in milliseconds.
+ * @timeout_hz: Timeout in milliseconds.
  *
- * @return 0 on success, nonzero on error.
+ * return: 0 on success, nonzero on error.
  */
 static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
                                         u8 waitfor, int timeout_hz)
@@ -325,7 +325,7 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
  * @ca: CA instance.
  * @slot: Slot id.
  *
- * @return 0 on success, nonzero on failure.
+ * return: 0 on success, nonzero on failure.
  */
 static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
 {
@@ -397,11 +397,11 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
  * @ca: CA instance.
  * @slot: Slot id.
  * @address: Address to read from. Updated.
- * @tupleType: Tuple id byte. Updated.
- * @tupleLength: Tuple length. Updated.
+ * @tuple_type: Tuple id byte. Updated.
+ * @tuple_length: Tuple length. Updated.
  * @tuple: Dest buffer for tuple (must be 256 bytes). Updated.
  *
- * @return 0 on success, nonzero on error.
+ * return: 0 on success, nonzero on error.
  */
 static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot,
                                     int *address, int *tuple_type,
@@ -455,7 +455,7 @@ static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot,
  * @ca: CA instance.
  * @slot: Slot id.
  *
- * @return 0 on success, <0 on failure.
+ * return: 0 on success, <0 on failure.
  */
 static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
 {
@@ -632,10 +632,11 @@ static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot)
  * @ca: CA instance.
  * @slot: Slot to read from.
  * @ebuf: If non-NULL, the data will be written to this buffer. If NULL,
- * the data will be added into the buffering system as a normal fragment.
+ *       the data will be added into the buffering system as a normal
+ *       fragment.
  * @ecount: Size of ebuf. Ignored if ebuf is NULL.
  *
- * @return Number of bytes read, or < 0 on error
+ * return: Number of bytes read, or < 0 on error
  */
 static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
                                    u8 *ebuf, int ecount)
@@ -784,11 +785,11 @@ exit:
  *
  * @ca: CA instance.
  * @slot: Slot to write to.
- * @ebuf: The data in this buffer is treated as a complete link-level packet to
- * be written.
- * @count: Size of ebuf.
+ * @buf: The data in this buffer is treated as a complete link-level packet to
+ *      be written.
+ * @bytes_write: Size of buf.
  *
- * @return Number of bytes written, or < 0 on error.
+ * return: Number of bytes written, or < 0 on error.
  */
 static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
                                     u8 *buf, int bytes_write)
@@ -933,7 +934,7 @@ static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot)
 /**
  * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
  *
- * @ca: CA instance.
+ * @pubca: CA instance.
  * @slot: Slot concerned.
  * @change_type: One of the DVB_CA_CAMCHANGE_* values.
  */
@@ -963,7 +964,7 @@ EXPORT_SYMBOL(dvb_ca_en50221_camchange_irq);
 /**
  * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred.
  *
- * @ca: CA instance.
+ * @pubca: CA instance.
  * @slot: Slot concerned.
  */
 void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot)
@@ -983,7 +984,7 @@ EXPORT_SYMBOL(dvb_ca_en50221_camready_irq);
 /**
  * dvb_ca_en50221_frda_irq - An FR or DA IRQ has occurred.
  *
- * @ca: CA instance.
+ * @pubca: CA instance.
  * @slot: Slot concerned.
  */
 void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *pubca, int slot)
@@ -1091,7 +1092,7 @@ static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca)
  *
  * @ca: CA instance.
  * @slot: Slot to process.
- * @return: 0 .. no change
+ * return: 0 .. no change
  *          1 .. CAM state changed
  */
 
@@ -1296,7 +1297,7 @@ static void dvb_ca_en50221_thread_state_machine(struct dvb_ca_private *ca,
        mutex_unlock(&sl->slot_lock);
 }
 
-/**
+/*
  * Kernel thread which monitors CA slots for CAM changes, and performs data
  * transfers.
  */
@@ -1336,12 +1337,11 @@ static int dvb_ca_en50221_thread(void *data)
  * Real ioctl implementation.
  * NOTE: CA_SEND_MSG/CA_GET_MSG ioctls have userspace buffers passed to them.
  *
- * @inode: Inode concerned.
  * @file: File concerned.
  * @cmd: IOCTL command.
- * @arg: Associated argument.
+ * @parg: Associated argument.
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 static int dvb_ca_en50221_io_do_ioctl(struct file *file,
                                      unsigned int cmd, void *parg)
@@ -1420,12 +1420,11 @@ out_unlock:
 /**
  * Wrapper for ioctl implementation.
  *
- * @inode: Inode concerned.
  * @file: File concerned.
  * @cmd: IOCTL command.
  * @arg: Associated argument.
  *
- * @return 0 on success, <0 on error.
+ * return: 0 on success, <0 on error.
  */
 static long dvb_ca_en50221_io_ioctl(struct file *file,
                                    unsigned int cmd, unsigned long arg)
@@ -1441,7 +1440,7 @@ static long dvb_ca_en50221_io_ioctl(struct file *file,
  * @count: Size of source buffer.
  * @ppos: Position in file (ignored).
  *
- * @return Number of bytes read, or <0 on error.
+ * return: Number of bytes read, or <0 on error.
  */
 static ssize_t dvb_ca_en50221_io_write(struct file *file,
                                       const char __user *buf, size_t count,
@@ -1536,7 +1535,7 @@ exit:
        return status;
 }
 
-/**
+/*
  * Condition for waking up in dvb_ca_en50221_io_read_condition
  */
 static int dvb_ca_en50221_io_read_condition(struct dvb_ca_private *ca,
@@ -1593,7 +1592,7 @@ nextslot:
  * @count: Size of destination buffer.
  * @ppos: Position in file (ignored).
  *
- * @return Number of bytes read, or <0 on error.
+ * return: Number of bytes read, or <0 on error.
  */
 static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf,
                                      size_t count, loff_t *ppos)
@@ -1702,7 +1701,7 @@ exit:
  * @inode: Inode concerned.
  * @file: File concerned.
  *
- * @return 0 on success, <0 on failure.
+ * return: 0 on success, <0 on failure.
  */
 static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
 {
@@ -1752,7 +1751,7 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
  * @inode: Inode concerned.
  * @file: File concerned.
  *
- * @return 0 on success, <0 on failure.
+ * return: 0 on success, <0 on failure.
  */
 static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
 {
@@ -1781,7 +1780,7 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
  * @file: File concerned.
  * @wait: poll wait table.
  *
- * @return Standard poll mask.
+ * return: Standard poll mask.
  */
 static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
 {
@@ -1838,11 +1837,11 @@ static const struct dvb_device dvbdev_ca = {
  * Initialise a new DVB CA EN50221 interface device.
  *
  * @dvb_adapter: DVB adapter to attach the new CA device to.
- * @ca: The dvb_ca instance.
+ * @pubca: The dvb_ca instance.
  * @flags: Flags describing the CA device (DVB_CA_FLAG_*).
  * @slot_count: Number of slots supported.
  *
- * @return 0 on success, nonzero on failure
+ * return: 0 on success, nonzero on failure
  */
 int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
                        struct dvb_ca_en50221 *pubca, int flags, int slot_count)
@@ -1929,8 +1928,7 @@ EXPORT_SYMBOL(dvb_ca_en50221_init);
 /**
  * Release a DVB CA EN50221 interface device.
  *
- * @ca_dev: The dvb_device_t instance for the CA device.
- * @ca: The associated dvb_ca instance.
+ * @pubca: The associated dvb_ca instance.
  */
 void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
 {
index 3ad83359098bde793f9789aaeb800bebc76ddd31..2afaa82263421b3e729dd41ac5d951084d4f003c 100644 (file)
@@ -369,11 +369,14 @@ static void dvb_frontend_swzigzag_update_delay(struct dvb_frontend_private *fepr
 }
 
 /**
- * Performs automatic twiddling of frontend parameters.
+ * dvb_frontend_swzigzag_autotune - Performs automatic twiddling of frontend
+ *     parameters.
  *
- * @param fe The frontend concerned.
- * @param check_wrapped Checks if an iteration has completed. DO NOT SET ON THE FIRST ATTEMPT
- * @returns Number of complete iterations that have been performed.
+ * @fe: The frontend concerned.
+ * @check_wrapped: Checks if an iteration has completed.
+ *                DO NOT SET ON THE FIRST ATTEMPT.
+ *
+ * return: Number of complete iterations that have been performed.
  */
 static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wrapped)
 {
@@ -1253,7 +1256,7 @@ dtv_property_legacy_params_sync(struct dvb_frontend *fe,
  * dtv_get_frontend - calls a callback for retrieving DTV parameters
  * @fe:                struct dvb_frontend pointer
  * @c:         struct dtv_frontend_properties pointer (DVBv5 cache)
- * @p_out      struct dvb_frontend_parameters pointer (DVBv3 FE struct)
+ * @p_out:     struct dvb_frontend_parameters pointer (DVBv3 FE struct)
  *
  * This routine calls either the DVBv3 or DVBv5 get_frontend call.
  * If c is not null, it will update the DVBv5 cache struct pointed by it.
index 06b0dcc13695af9ff25292de21ef1c3859b732d5..c018e3c06d5df229a4dd23527fd18383ca9742bb 100644 (file)
@@ -125,7 +125,7 @@ struct dvb_net_priv {
 };
 
 
-/**
+/*
  *     Determine the packet's protocol ID. The rule here is that we
  *     assume 802.3 if the type field is short enough to be a length.
  *     This is normal practice and works for any 'now in use' protocol.
@@ -155,7 +155,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
 
        rawp = skb->data;
 
-       /**
+       /*
         *      This is a magic hack to spot IPX packets. Older Novell breaks
         *      the protocol design and runs IPX over 802.3 without an 802.2 LLC
         *      layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
@@ -164,7 +164,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
        if (*(unsigned short *)rawp == 0xFFFF)
                return htons(ETH_P_802_3);
 
-       /**
+       /*
         *      Real 802.2 LLC
         */
        return htons(ETH_P_802_2);
@@ -215,7 +215,8 @@ static int ule_exthdr_padding(struct dvb_net_priv *p)
        return 0;
 }
 
-/** Handle ULE extension headers.
+/*
+ * Handle ULE extension headers.
  *  Function is called after a successful CRC32 verification of an ULE SNDU to complete its decoding.
  *  Returns: >= 0: nr. of bytes consumed by next extension header
  *          -1:   Mandatory extension header that is not recognized or TEST SNDU; discard.
@@ -291,7 +292,7 @@ static int handle_ule_extensions( struct dvb_net_priv *p )
 }
 
 
-/** Prepare for a new ULE SNDU: reset the decoder state. */
+/* Prepare for a new ULE SNDU: reset the decoder state. */
 static inline void reset_ule( struct dvb_net_priv *p )
 {
        p->ule_skb = NULL;
@@ -304,7 +305,7 @@ static inline void reset_ule( struct dvb_net_priv *p )
        p->ule_bridged = 0;
 }
 
-/**
+/*
  * Decode ULE SNDUs according to draft-ietf-ipdvb-ule-03.txt from a sequence of
  * TS cells of a single PID.
  */
@@ -1005,7 +1006,7 @@ static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
 {
        struct net_device *dev = filter->priv;
 
-       /**
+       /*
         * we rely on the DVB API definition where exactly one complete
         * section is delivered in buffer1
         */
index 353274524f1b29d72fa06dc66cd0466e1b8189ef..a290722c04fd2167d7908e110cab56bb47a754e1 100644 (file)
  * @api_version: Firmware API version.
  * @gpio: GPIOs.
  * @get_dvb_frontend: Get DVB frontend callback.
+ *
+ * AF9013/5 GPIOs (mostly guessed):
+ *   * demod#1-gpio#0 - set demod#2 i2c-addr for dual devices
+ *   * demod#1-gpio#1 - xtal setting (?)
+ *   * demod#1-gpio#3 - tuner#1
+ *   * demod#2-gpio#0 - tuner#2
+ *   * demod#2-gpio#1 - xtal setting (?)
  */
 struct af9013_platform_data {
        /*
@@ -89,16 +96,15 @@ struct af9013_platform_data {
 #define AF9013_TS_PARALLEL  AF9013_TS_MODE_PARALLEL
 #define AF9013_TS_SERIAL    AF9013_TS_MODE_SERIAL
 
-/*
- * AF9013/5 GPIOs (mostly guessed)
- * demod#1-gpio#0 - set demod#2 i2c-addr for dual devices
- * demod#1-gpio#1 - xtal setting (?)
- * demod#1-gpio#3 - tuner#1
- * demod#2-gpio#0 - tuner#2
- * demod#2-gpio#1 - xtal setting (?)
- */
-
 #if IS_REACHABLE(CONFIG_DVB_AF9013)
+/**
+ * Attach an af9013 demod
+ *
+ * @config: pointer to &struct af9013_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *af9013_attach(const struct af9013_config *config,
        struct i2c_adapter *i2c);
 #else
index dc61bf7d1b0982e10b969976dbc5b014a8ee7ead..418c565baf83ff04b0037e60eb6f4a08b1de859a 100644 (file)
@@ -41,6 +41,15 @@ struct ascot2e_config {
 };
 
 #if IS_REACHABLE(CONFIG_DVB_ASCOT2E)
+/**
+ * Attach an ascot2e tuner
+ *
+ * @fe: frontend to be attached
+ * @config: pointer to &struct ascot2e_config with tuner configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
                                        const struct ascot2e_config *config,
                                        struct i2c_adapter *i2c);
index f3ff8f6eb3bb91ab7eab2695a7374acf1ab15641..a49400c0e28eac26d2a689080bb7eb209c855377 100644 (file)
@@ -49,7 +49,6 @@
  * @gpio_chip_base: GPIO.
  * @get_dvb_frontend: Get DVB frontend.
  */
-
 struct cxd2820r_platform_data {
        u8 ts_mode;
        bool ts_clk_inv;
@@ -62,6 +61,17 @@ struct cxd2820r_platform_data {
        bool attach_in_use;
 };
 
+/**
+ * struct cxd2820r_config - configuration for cxd2020r demod
+ *
+ * @i2c_address: Demodulator I2C address. Driver determines DVB-C slave I2C
+ *              address automatically from master address.
+ *              Default: none, must set. Values: 0x6c, 0x6d.
+ * @ts_mode:   TS output mode. Default: none, must set. Values: FIXME?
+ * @ts_clock_inv: TS clock inverted. Default: 0. Values: 0, 1.
+ * @if_agc_polarity: Default: 0. Values: 0, 1
+ * @spec_inv:  Spectrum inversion. Default: 0. Values: 0, 1.
+ */
 struct cxd2820r_config {
        /* Demodulator I2C address.
         * Driver determines DVB-C slave I2C address automatically from master
@@ -98,6 +108,18 @@ struct cxd2820r_config {
 
 
 #if IS_REACHABLE(CONFIG_DVB_CXD2820R)
+/**
+ * Attach a cxd2820r demod
+ *
+ * @config: pointer to &struct cxd2820r_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ * @gpio_chip_base: if zero, disables GPIO setting. Otherwise, if
+ *                 CONFIG_GPIOLIB is set, dynamically allocate the
+ *                 gpio base; if it is not set, use its value to
+ *                 set up the GPIO pins.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *cxd2820r_attach(
        const struct cxd2820r_config *config,
        struct i2c_adapter *i2c,
index 5b5421f703886a68da09c1e4a64a92f94d43ee0d..2b3af247a1f120d2e36c734d9e83167618bd8a48 100644 (file)
@@ -52,7 +52,7 @@ struct i2c_device_addr {
 };
 
 
-/**
+/*
 * \def IS_I2C_10BIT( addr )
 * \brief Determine if I2C address 'addr' is a 10 bits address or not.
 * \param addr The I2C address.
@@ -67,7 +67,7 @@ struct i2c_device_addr {
 Exported FUNCTIONS
 ------------------------------------------------------------------------------*/
 
-/**
+/*
 * \fn drxbsp_i2c_init()
 * \brief Initialize I2C communication module.
 * \return drx_status_t Return status.
@@ -76,7 +76,7 @@ Exported FUNCTIONS
 */
        drx_status_t drxbsp_i2c_init(void);
 
-/**
+/*
 * \fn drxbsp_i2c_term()
 * \brief Terminate I2C communication module.
 * \return drx_status_t Return status.
@@ -85,7 +85,7 @@ Exported FUNCTIONS
 */
        drx_status_t drxbsp_i2c_term(void);
 
-/**
+/*
 * \fn drx_status_t drxbsp_i2c_write_read( struct i2c_device_addr *w_dev_addr,
 *                                       u16 w_count,
 *                                       u8 *wData,
@@ -121,7 +121,7 @@ Exported FUNCTIONS
                                         struct i2c_device_addr *r_dev_addr,
                                         u16 r_count, u8 *r_data);
 
-/**
+/*
 * \fn drxbsp_i2c_error_text()
 * \brief Returns a human readable error.
 * Counter part of numerical drx_i2c_error_g.
@@ -130,7 +130,7 @@ Exported FUNCTIONS
 */
        char *drxbsp_i2c_error_text(void);
 
-/**
+/*
 * \var drx_i2c_error_g;
 * \brief I2C specific error codes, platform dependent.
 */
index cd69e187ba7a9c0eea95855531a82db3e09a528b..855685b6b38669d035d9b14b420033f4a1d1c487 100644 (file)
@@ -46,7 +46,7 @@ struct i2c_device_addr {
        void *user_data;                /* User data pointer */
 };
 
-/**
+/*
 * \def IS_I2C_10BIT( addr )
 * \brief Determine if I2C address 'addr' is a 10 bits address or not.
 * \param addr The I2C address.
@@ -61,7 +61,7 @@ struct i2c_device_addr {
 Exported FUNCTIONS
 ------------------------------------------------------------------------------*/
 
-/**
+/*
 * \fn drxbsp_i2c_init()
 * \brief Initialize I2C communication module.
 * \return int Return status.
@@ -70,7 +70,7 @@ Exported FUNCTIONS
 */
 int drxbsp_i2c_init(void);
 
-/**
+/*
 * \fn drxbsp_i2c_term()
 * \brief Terminate I2C communication module.
 * \return int Return status.
@@ -79,7 +79,7 @@ int drxbsp_i2c_init(void);
 */
 int drxbsp_i2c_term(void);
 
-/**
+/*
 * \fn int drxbsp_i2c_write_read( struct i2c_device_addr *w_dev_addr,
 *                                       u16 w_count,
 *                                       u8 * wData,
@@ -115,7 +115,7 @@ int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
                                        struct i2c_device_addr *r_dev_addr,
                                        u16 r_count, u8 *r_data);
 
-/**
+/*
 * \fn drxbsp_i2c_error_text()
 * \brief Returns a human readable error.
 * Counter part of numerical drx_i2c_error_g.
@@ -124,7 +124,7 @@ int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
 */
 char *drxbsp_i2c_error_text(void);
 
-/**
+/*
 * \var drx_i2c_error_g;
 * \brief I2C specific error codes, platform dependent.
 */
@@ -241,13 +241,13 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
                                                struct i2c_device_addr *r_dev_addr,
                                                u16 r_count, u8 *r_data);
 
-/**************
+/*************
 *
 * This section configures the DRX Data Access Protocols (DAPs).
 *
 **************/
 
-/**
+/*
 * \def DRXDAP_SINGLE_MASTER
 * \brief Enable I2C single or I2C multimaster mode on host.
 *
@@ -262,7 +262,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 #define DRXDAP_SINGLE_MASTER 1
 #endif
 
-/**
+/*
 * \def DRXDAP_MAX_WCHUNKSIZE
 * \brief Defines maximum chunksize of an i2c write action by host.
 *
@@ -282,7 +282,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 #define  DRXDAP_MAX_WCHUNKSIZE 60
 #endif
 
-/**
+/*
 * \def DRXDAP_MAX_RCHUNKSIZE
 * \brief Defines maximum chunksize of an i2c read action by host.
 *
@@ -297,13 +297,13 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 #define  DRXDAP_MAX_RCHUNKSIZE 60
 #endif
 
-/**************
+/*************
 *
 * This section describes drxdriver defines.
 *
 **************/
 
-/**
+/*
 * \def DRX_UNKNOWN
 * \brief Generic UNKNOWN value for DRX enumerated types.
 *
@@ -313,7 +313,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 #define DRX_UNKNOWN (254)
 #endif
 
-/**
+/*
 * \def DRX_AUTO
 * \brief Generic AUTO value for DRX enumerated types.
 *
@@ -324,104 +324,104 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 #define DRX_AUTO    (255)
 #endif
 
-/**************
+/*************
 *
 * This section describes flag definitions for the device capabilities.
 *
 **************/
 
-/**
+/*
 * \brief LNA capability flag
 *
 * Device has a Low Noise Amplifier
 *
 */
 #define DRX_CAPABILITY_HAS_LNA           (1UL <<  0)
-/**
+/*
 * \brief OOB-RX capability flag
 *
 * Device has OOB-RX
 *
 */
 #define DRX_CAPABILITY_HAS_OOBRX         (1UL <<  1)
-/**
+/*
 * \brief ATV capability flag
 *
 * Device has ATV
 *
 */
 #define DRX_CAPABILITY_HAS_ATV           (1UL <<  2)
-/**
+/*
 * \brief DVB-T capability flag
 *
 * Device has DVB-T
 *
 */
 #define DRX_CAPABILITY_HAS_DVBT          (1UL <<  3)
-/**
+/*
 * \brief  ITU-B capability flag
 *
 * Device has ITU-B
 *
 */
 #define DRX_CAPABILITY_HAS_ITUB          (1UL <<  4)
-/**
+/*
 * \brief  Audio capability flag
 *
 * Device has Audio
 *
 */
 #define DRX_CAPABILITY_HAS_AUD           (1UL <<  5)
-/**
+/*
 * \brief  SAW switch capability flag
 *
 * Device has SAW switch
 *
 */
 #define DRX_CAPABILITY_HAS_SAWSW         (1UL <<  6)
-/**
+/*
 * \brief  GPIO1 capability flag
 *
 * Device has GPIO1
 *
 */
 #define DRX_CAPABILITY_HAS_GPIO1         (1UL <<  7)
-/**
+/*
 * \brief  GPIO2 capability flag
 *
 * Device has GPIO2
 *
 */
 #define DRX_CAPABILITY_HAS_GPIO2         (1UL <<  8)
-/**
+/*
 * \brief  IRQN capability flag
 *
 * Device has IRQN
 *
 */
 #define DRX_CAPABILITY_HAS_IRQN          (1UL <<  9)
-/**
+/*
 * \brief  8VSB capability flag
 *
 * Device has 8VSB
 *
 */
 #define DRX_CAPABILITY_HAS_8VSB          (1UL << 10)
-/**
+/*
 * \brief  SMA-TX capability flag
 *
 * Device has SMATX
 *
 */
 #define DRX_CAPABILITY_HAS_SMATX         (1UL << 11)
-/**
+/*
 * \brief  SMA-RX capability flag
 *
 * Device has SMARX
 *
 */
 #define DRX_CAPABILITY_HAS_SMARX         (1UL << 12)
-/**
+/*
 * \brief  ITU-A/C capability flag
 *
 * Device has ITU-A/C
@@ -439,7 +439,7 @@ MACROS
         DRX_VERSIONSTRING_HELP(PATCH)
 #define DRX_VERSIONSTRING_HELP(NUM) #NUM
 
-/**
+/*
 * \brief Macro to create byte array elements from 16 bit integers.
 * This macro is used to create byte arrays for block writes.
 * Block writes speed up I2C traffic between host and demod.
@@ -449,7 +449,7 @@ MACROS
 #define DRX_16TO8(x) ((u8) (((u16)x) & 0xFF)), \
                        ((u8)((((u16)x)>>8)&0xFF))
 
-/**
+/*
 * \brief Macro to convert 16 bit register value to a s32
 */
 #define DRX_U16TODRXFREQ(x)   ((x & 0x8000) ? \
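
For illustration only (not part of this patch): how the DRX_16TO8() macro above
can be used to build an I2C block-write buffer. The buffer name and register
values are made up.

/* Hypothetical usage of DRX_16TO8() (not from this commit). */
u8 block_write_buf[] = {
	DRX_16TO8(0x1234),	/* expands to 0x34, 0x12 (LSB first) */
	DRX_16TO8(0xabcd),	/* expands to 0xcd, 0xab */
};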
@@ -461,191 +461,191 @@ MACROS
 ENUM
 -------------------------------------------------------------------------*/
 
-/**
+/*
 * \enum enum drx_standard
 * \brief Modulation standards.
 */
 enum drx_standard {
-       DRX_STANDARD_DVBT = 0, /**< Terrestrial DVB-T.               */
-       DRX_STANDARD_8VSB,     /**< Terrestrial 8VSB.                */
-       DRX_STANDARD_NTSC,     /**< Terrestrial\Cable analog NTSC.   */
+       DRX_STANDARD_DVBT = 0, /*< Terrestrial DVB-T.               */
+       DRX_STANDARD_8VSB,     /*< Terrestrial 8VSB.                */
+       DRX_STANDARD_NTSC,     /*< Terrestrial\Cable analog NTSC.   */
        DRX_STANDARD_PAL_SECAM_BG,
-                               /**< Terrestrial analog PAL/SECAM B/G */
+                               /*< Terrestrial analog PAL/SECAM B/G */
        DRX_STANDARD_PAL_SECAM_DK,
-                               /**< Terrestrial analog PAL/SECAM D/K */
+                               /*< Terrestrial analog PAL/SECAM D/K */
        DRX_STANDARD_PAL_SECAM_I,
-                               /**< Terrestrial analog PAL/SECAM I   */
+                               /*< Terrestrial analog PAL/SECAM I   */
        DRX_STANDARD_PAL_SECAM_L,
-                               /**< Terrestrial analog PAL/SECAM L
+                               /*< Terrestrial analog PAL/SECAM L
                                        with negative modulation        */
        DRX_STANDARD_PAL_SECAM_LP,
-                               /**< Terrestrial analog PAL/SECAM L
+                               /*< Terrestrial analog PAL/SECAM L
                                        with positive modulation        */
-       DRX_STANDARD_ITU_A,    /**< Cable ITU ANNEX A.               */
-       DRX_STANDARD_ITU_B,    /**< Cable ITU ANNEX B.               */
-       DRX_STANDARD_ITU_C,    /**< Cable ITU ANNEX C.               */
-       DRX_STANDARD_ITU_D,    /**< Cable ITU ANNEX D.               */
-       DRX_STANDARD_FM,       /**< Terrestrial\Cable FM radio       */
-       DRX_STANDARD_DTMB,     /**< Terrestrial DTMB standard (China)*/
+       DRX_STANDARD_ITU_A,    /*< Cable ITU ANNEX A.               */
+       DRX_STANDARD_ITU_B,    /*< Cable ITU ANNEX B.               */
+       DRX_STANDARD_ITU_C,    /*< Cable ITU ANNEX C.               */
+       DRX_STANDARD_ITU_D,    /*< Cable ITU ANNEX D.               */
+       DRX_STANDARD_FM,       /*< Terrestrial\Cable FM radio       */
+       DRX_STANDARD_DTMB,     /*< Terrestrial DTMB standard (China)*/
        DRX_STANDARD_UNKNOWN = DRX_UNKNOWN,
-                               /**< Standard unknown.                */
+                               /*< Standard unknown.                */
        DRX_STANDARD_AUTO = DRX_AUTO
-                               /**< Autodetect standard.             */
+                               /*< Autodetect standard.             */
 };
 
-/**
+/*
 * \enum enum drx_standard
 * \brief Modulation sub-standards.
 */
 enum drx_substandard {
-       DRX_SUBSTANDARD_MAIN = 0, /**< Main subvariant of standard   */
+       DRX_SUBSTANDARD_MAIN = 0, /*< Main subvariant of standard   */
        DRX_SUBSTANDARD_ATV_BG_SCANDINAVIA,
        DRX_SUBSTANDARD_ATV_DK_POLAND,
        DRX_SUBSTANDARD_ATV_DK_CHINA,
        DRX_SUBSTANDARD_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Sub-standard unknown.         */
+                                       /*< Sub-standard unknown.         */
        DRX_SUBSTANDARD_AUTO = DRX_AUTO
-                                       /**< Auto (default) sub-standard   */
+                                       /*< Auto (default) sub-standard   */
 };
 
-/**
+/*
 * \enum enum drx_bandwidth
 * \brief Channel bandwidth or channel spacing.
 */
 enum drx_bandwidth {
-       DRX_BANDWIDTH_8MHZ = 0,  /**< Bandwidth 8 MHz.   */
-       DRX_BANDWIDTH_7MHZ,      /**< Bandwidth 7 MHz.   */
-       DRX_BANDWIDTH_6MHZ,      /**< Bandwidth 6 MHz.   */
+       DRX_BANDWIDTH_8MHZ = 0,  /*< Bandwidth 8 MHz.   */
+       DRX_BANDWIDTH_7MHZ,      /*< Bandwidth 7 MHz.   */
+       DRX_BANDWIDTH_6MHZ,      /*< Bandwidth 6 MHz.   */
        DRX_BANDWIDTH_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Bandwidth unknown. */
+                                       /*< Bandwidth unknown. */
        DRX_BANDWIDTH_AUTO = DRX_AUTO
-                                       /**< Auto Set Bandwidth */
+                                       /*< Auto Set Bandwidth */
 };
 
-/**
+/*
 * \enum enum drx_mirror
 * \brief Indicate if channel spectrum is mirrored or not.
 */
 enum drx_mirror {
-       DRX_MIRROR_NO = 0,   /**< Spectrum is not mirrored.           */
-       DRX_MIRROR_YES,      /**< Spectrum is mirrored.               */
+       DRX_MIRROR_NO = 0,   /*< Spectrum is not mirrored.           */
+       DRX_MIRROR_YES,      /*< Spectrum is mirrored.               */
        DRX_MIRROR_UNKNOWN = DRX_UNKNOWN,
-                               /**< Unknown if spectrum is mirrored.    */
+                               /*< Unknown if spectrum is mirrored.    */
        DRX_MIRROR_AUTO = DRX_AUTO
-                               /**< Autodetect if spectrum is mirrored. */
+                               /*< Autodetect if spectrum is mirrored. */
 };
 
-/**
+/*
 * \enum enum drx_modulation
 * \brief Constellation type of the channel.
 */
 enum drx_modulation {
-       DRX_CONSTELLATION_BPSK = 0,  /**< Modulation is BPSK.       */
-       DRX_CONSTELLATION_QPSK,      /**< Constellation is QPSK.    */
-       DRX_CONSTELLATION_PSK8,      /**< Constellation is PSK8.    */
-       DRX_CONSTELLATION_QAM16,     /**< Constellation is QAM16.   */
-       DRX_CONSTELLATION_QAM32,     /**< Constellation is QAM32.   */
-       DRX_CONSTELLATION_QAM64,     /**< Constellation is QAM64.   */
-       DRX_CONSTELLATION_QAM128,    /**< Constellation is QAM128.  */
-       DRX_CONSTELLATION_QAM256,    /**< Constellation is QAM256.  */
-       DRX_CONSTELLATION_QAM512,    /**< Constellation is QAM512.  */
-       DRX_CONSTELLATION_QAM1024,   /**< Constellation is QAM1024. */
-       DRX_CONSTELLATION_QPSK_NR,   /**< Constellation is QPSK_NR  */
+       DRX_CONSTELLATION_BPSK = 0,  /*< Modulation is BPSK.       */
+       DRX_CONSTELLATION_QPSK,      /*< Constellation is QPSK.    */
+       DRX_CONSTELLATION_PSK8,      /*< Constellation is PSK8.    */
+       DRX_CONSTELLATION_QAM16,     /*< Constellation is QAM16.   */
+       DRX_CONSTELLATION_QAM32,     /*< Constellation is QAM32.   */
+       DRX_CONSTELLATION_QAM64,     /*< Constellation is QAM64.   */
+       DRX_CONSTELLATION_QAM128,    /*< Constellation is QAM128.  */
+       DRX_CONSTELLATION_QAM256,    /*< Constellation is QAM256.  */
+       DRX_CONSTELLATION_QAM512,    /*< Constellation is QAM512.  */
+       DRX_CONSTELLATION_QAM1024,   /*< Constellation is QAM1024. */
+       DRX_CONSTELLATION_QPSK_NR,   /*< Constellation is QPSK_NR  */
        DRX_CONSTELLATION_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Constellation unknown.    */
+                                       /*< Constellation unknown.    */
        DRX_CONSTELLATION_AUTO = DRX_AUTO
-                                       /**< Autodetect constellation. */
+                                       /*< Autodetect constellation. */
 };
 
-/**
+/*
 * \enum enum drx_hierarchy
 * \brief Hierarchy of the channel.
 */
 enum drx_hierarchy {
-       DRX_HIERARCHY_NONE = 0, /**< None hierarchical channel.     */
-       DRX_HIERARCHY_ALPHA1,   /**< Hierarchical channel, alpha=1. */
-       DRX_HIERARCHY_ALPHA2,   /**< Hierarchical channel, alpha=2. */
-       DRX_HIERARCHY_ALPHA4,   /**< Hierarchical channel, alpha=4. */
+       DRX_HIERARCHY_NONE = 0, /*< None hierarchical channel.     */
+       DRX_HIERARCHY_ALPHA1,   /*< Hierarchical channel, alpha=1. */
+       DRX_HIERARCHY_ALPHA2,   /*< Hierarchical channel, alpha=2. */
+       DRX_HIERARCHY_ALPHA4,   /*< Hierarchical channel, alpha=4. */
        DRX_HIERARCHY_UNKNOWN = DRX_UNKNOWN,
-                               /**< Hierarchy unknown.             */
+                               /*< Hierarchy unknown.             */
        DRX_HIERARCHY_AUTO = DRX_AUTO
-                               /**< Autodetect hierarchy.          */
+                               /*< Autodetect hierarchy.          */
 };
 
-/**
+/*
 * \enum enum drx_priority
 * \brief Channel priority in case of hierarchical transmission.
 */
 enum drx_priority {
-       DRX_PRIORITY_LOW = 0,  /**< Low priority channel.  */
-       DRX_PRIORITY_HIGH,     /**< High priority channel. */
+       DRX_PRIORITY_LOW = 0,  /*< Low priority channel.  */
+       DRX_PRIORITY_HIGH,     /*< High priority channel. */
        DRX_PRIORITY_UNKNOWN = DRX_UNKNOWN
-                               /**< Priority unknown.      */
+                               /*< Priority unknown.      */
 };
 
-/**
+/*
 * \enum enum drx_coderate
 * \brief Channel priority in case of hierarchical transmission.
 */
 enum drx_coderate {
-               DRX_CODERATE_1DIV2 = 0, /**< Code rate 1/2nd.      */
-               DRX_CODERATE_2DIV3,     /**< Code rate 2/3nd.      */
-               DRX_CODERATE_3DIV4,     /**< Code rate 3/4nd.      */
-               DRX_CODERATE_5DIV6,     /**< Code rate 5/6nd.      */
-               DRX_CODERATE_7DIV8,     /**< Code rate 7/8nd.      */
+               DRX_CODERATE_1DIV2 = 0, /*< Code rate 1/2nd.      */
+               DRX_CODERATE_2DIV3,     /*< Code rate 2/3nd.      */
+               DRX_CODERATE_3DIV4,     /*< Code rate 3/4nd.      */
+               DRX_CODERATE_5DIV6,     /*< Code rate 5/6nd.      */
+               DRX_CODERATE_7DIV8,     /*< Code rate 7/8nd.      */
                DRX_CODERATE_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Code rate unknown.    */
+                                       /*< Code rate unknown.    */
                DRX_CODERATE_AUTO = DRX_AUTO
-                                       /**< Autodetect code rate. */
+                                       /*< Autodetect code rate. */
 };
 
-/**
+/*
 * \enum enum drx_guard
 * \brief Guard interval of a channel.
 */
 enum drx_guard {
-       DRX_GUARD_1DIV32 = 0, /**< Guard interval 1/32nd.     */
-       DRX_GUARD_1DIV16,     /**< Guard interval 1/16th.     */
-       DRX_GUARD_1DIV8,      /**< Guard interval 1/8th.      */
-       DRX_GUARD_1DIV4,      /**< Guard interval 1/4th.      */
+       DRX_GUARD_1DIV32 = 0, /*< Guard interval 1/32nd.     */
+       DRX_GUARD_1DIV16,     /*< Guard interval 1/16th.     */
+       DRX_GUARD_1DIV8,      /*< Guard interval 1/8th.      */
+       DRX_GUARD_1DIV4,      /*< Guard interval 1/4th.      */
        DRX_GUARD_UNKNOWN = DRX_UNKNOWN,
-                               /**< Guard interval unknown.    */
+                               /*< Guard interval unknown.    */
        DRX_GUARD_AUTO = DRX_AUTO
-                               /**< Autodetect guard interval. */
+                               /*< Autodetect guard interval. */
 };
 
-/**
+/*
 * \enum enum drx_fft_mode
 * \brief FFT mode.
 */
 enum drx_fft_mode {
-       DRX_FFTMODE_2K = 0,    /**< 2K FFT mode.         */
-       DRX_FFTMODE_4K,        /**< 4K FFT mode.         */
-       DRX_FFTMODE_8K,        /**< 8K FFT mode.         */
+       DRX_FFTMODE_2K = 0,    /*< 2K FFT mode.         */
+       DRX_FFTMODE_4K,        /*< 4K FFT mode.         */
+       DRX_FFTMODE_8K,        /*< 8K FFT mode.         */
        DRX_FFTMODE_UNKNOWN = DRX_UNKNOWN,
-                               /**< FFT mode unknown.    */
+                               /*< FFT mode unknown.    */
        DRX_FFTMODE_AUTO = DRX_AUTO
-                               /**< Autodetect FFT mode. */
+                               /*< Autodetect FFT mode. */
 };
 
-/**
+/*
 * \enum enum drx_classification
 * \brief Channel classification.
 */
 enum drx_classification {
-       DRX_CLASSIFICATION_GAUSS = 0, /**< Gaussion noise.            */
-       DRX_CLASSIFICATION_HVY_GAUSS, /**< Heavy Gaussion noise.      */
-       DRX_CLASSIFICATION_COCHANNEL, /**< Co-channel.                */
-       DRX_CLASSIFICATION_STATIC,    /**< Static echo.               */
-       DRX_CLASSIFICATION_MOVING,    /**< Moving echo.               */
-       DRX_CLASSIFICATION_ZERODB,    /**< Zero dB echo.              */
+       DRX_CLASSIFICATION_GAUSS = 0, /*< Gaussion noise.            */
+       DRX_CLASSIFICATION_HVY_GAUSS, /*< Heavy Gaussion noise.      */
+       DRX_CLASSIFICATION_COCHANNEL, /*< Co-channel.                */
+       DRX_CLASSIFICATION_STATIC,    /*< Static echo.               */
+       DRX_CLASSIFICATION_MOVING,    /*< Moving echo.               */
+       DRX_CLASSIFICATION_ZERODB,    /*< Zero dB echo.              */
        DRX_CLASSIFICATION_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Unknown classification     */
+                                       /*< Unknown classification     */
        DRX_CLASSIFICATION_AUTO = DRX_AUTO
-                                       /**< Autodetect classification. */
+                                       /*< Autodetect classification. */
 };
 
-/**
+/*
 * /enum enum drx_interleave_mode
 * /brief Interleave modes
 */
@@ -673,80 +673,80 @@ enum drx_interleave_mode {
        DRX_INTERLEAVEMODE_B52_M48,
        DRX_INTERLEAVEMODE_B52_M0,
        DRX_INTERLEAVEMODE_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Unknown interleave mode    */
+                                       /*< Unknown interleave mode    */
        DRX_INTERLEAVEMODE_AUTO = DRX_AUTO
-                                       /**< Autodetect interleave mode */
+                                       /*< Autodetect interleave mode */
 };
 
-/**
+/*
 * \enum enum drx_carrier_mode
 * \brief Channel Carrier Mode.
 */
 enum drx_carrier_mode {
-       DRX_CARRIER_MULTI = 0,          /**< Multi carrier mode       */
-       DRX_CARRIER_SINGLE,             /**< Single carrier mode      */
+       DRX_CARRIER_MULTI = 0,          /*< Multi carrier mode       */
+       DRX_CARRIER_SINGLE,             /*< Single carrier mode      */
        DRX_CARRIER_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Carrier mode unknown.    */
-       DRX_CARRIER_AUTO = DRX_AUTO     /**< Autodetect carrier mode  */
+                                       /*< Carrier mode unknown.    */
+       DRX_CARRIER_AUTO = DRX_AUTO     /*< Autodetect carrier mode  */
 };
 
-/**
+/*
 * \enum enum drx_frame_mode
 * \brief Channel Frame Mode.
 */
 enum drx_frame_mode {
-       DRX_FRAMEMODE_420 = 0,   /**< 420 with variable PN  */
-       DRX_FRAMEMODE_595,       /**< 595                   */
-       DRX_FRAMEMODE_945,       /**< 945 with variable PN  */
+       DRX_FRAMEMODE_420 = 0,   /*< 420 with variable PN  */
+       DRX_FRAMEMODE_595,       /*< 595                   */
+       DRX_FRAMEMODE_945,       /*< 945 with variable PN  */
        DRX_FRAMEMODE_420_FIXED_PN,
-                                       /**< 420 with fixed PN     */
+                                       /*< 420 with fixed PN     */
        DRX_FRAMEMODE_945_FIXED_PN,
-                                       /**< 945 with fixed PN     */
+                                       /*< 945 with fixed PN     */
        DRX_FRAMEMODE_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Frame mode unknown.   */
+                                       /*< Frame mode unknown.   */
        DRX_FRAMEMODE_AUTO = DRX_AUTO
-                                       /**< Autodetect frame mode */
+                                       /*< Autodetect frame mode */
 };
 
-/**
+/*
 * \enum enum drx_tps_frame
 * \brief Frame number in current super-frame.
 */
 enum drx_tps_frame {
-       DRX_TPS_FRAME1 = 0,       /**< TPS frame 1.       */
-       DRX_TPS_FRAME2,           /**< TPS frame 2.       */
-       DRX_TPS_FRAME3,           /**< TPS frame 3.       */
-       DRX_TPS_FRAME4,           /**< TPS frame 4.       */
+       DRX_TPS_FRAME1 = 0,       /*< TPS frame 1.       */
+       DRX_TPS_FRAME2,           /*< TPS frame 2.       */
+       DRX_TPS_FRAME3,           /*< TPS frame 3.       */
+       DRX_TPS_FRAME4,           /*< TPS frame 4.       */
        DRX_TPS_FRAME_UNKNOWN = DRX_UNKNOWN
-                                       /**< TPS frame unknown. */
+                                       /*< TPS frame unknown. */
 };
 
-/**
+/*
 * \enum enum drx_ldpc
 * \brief TPS LDPC .
 */
 enum drx_ldpc {
-       DRX_LDPC_0_4 = 0,         /**< LDPC 0.4           */
-       DRX_LDPC_0_6,             /**< LDPC 0.6           */
-       DRX_LDPC_0_8,             /**< LDPC 0.8           */
+       DRX_LDPC_0_4 = 0,         /*< LDPC 0.4           */
+       DRX_LDPC_0_6,             /*< LDPC 0.6           */
+       DRX_LDPC_0_8,             /*< LDPC 0.8           */
        DRX_LDPC_UNKNOWN = DRX_UNKNOWN,
-                                       /**< LDPC unknown.      */
-       DRX_LDPC_AUTO = DRX_AUTO  /**< Autodetect LDPC    */
+                                       /*< LDPC unknown.      */
+       DRX_LDPC_AUTO = DRX_AUTO  /*< Autodetect LDPC    */
 };
 
-/**
+/*
 * \enum enum drx_pilot_mode
 * \brief Pilot modes in DTMB.
 */
 enum drx_pilot_mode {
-       DRX_PILOT_ON = 0,         /**< Pilot On             */
-       DRX_PILOT_OFF,            /**< Pilot Off            */
+       DRX_PILOT_ON = 0,         /*< Pilot On             */
+       DRX_PILOT_OFF,            /*< Pilot Off            */
        DRX_PILOT_UNKNOWN = DRX_UNKNOWN,
-                                       /**< Pilot unknown.       */
-       DRX_PILOT_AUTO = DRX_AUTO /**< Autodetect Pilot     */
+                                       /*< Pilot unknown.       */
+       DRX_PILOT_AUTO = DRX_AUTO /*< Autodetect Pilot     */
 };
 
-/**
+/*
  * enum drxu_code_action - indicate if firmware has to be uploaded or verified.
  * @UCODE_UPLOAD:      Upload the microcode image to device
  * @UCODE_VERIFY:      Compare microcode image with code on device
@@ -756,7 +756,7 @@ enum drxu_code_action {
        UCODE_VERIFY
 };
 
-/**
+/*
 * \enum enum drx_lock_status * \brief Used to reflect current lock status of demodulator.
 *
 * The generic lock states have device dependent semantics.
@@ -801,7 +801,7 @@ enum drx_lock_status {
        DRX_LOCKED
 };
 
-/**
+/*
 * \enum enum drx_uio* \brief Used to address a User IO (UIO).
 */
 enum drx_uio {
@@ -840,7 +840,7 @@ enum drx_uio {
        DRX_UIO_MAX = DRX_UIO32
 };
 
-/**
+/*
 * \enum enum drxuio_mode * \brief Used to configure the modus operandi of a UIO.
 *
 * DRX_UIO_MODE_FIRMWARE is an old uio mode.
@@ -850,37 +850,37 @@ enum drx_uio {
 */
 enum drxuio_mode {
        DRX_UIO_MODE_DISABLE = 0x01,
-                           /**< not used, pin is configured as input */
+                           /*< not used, pin is configured as input */
        DRX_UIO_MODE_READWRITE = 0x02,
-                           /**< used for read/write by application   */
+                           /*< used for read/write by application   */
        DRX_UIO_MODE_FIRMWARE = 0x04,
-                           /**< controlled by firmware, function 0   */
+                           /*< controlled by firmware, function 0   */
        DRX_UIO_MODE_FIRMWARE0 = DRX_UIO_MODE_FIRMWARE,
-                                           /**< same as above        */
+                                           /*< same as above        */
        DRX_UIO_MODE_FIRMWARE1 = 0x08,
-                           /**< controlled by firmware, function 1   */
+                           /*< controlled by firmware, function 1   */
        DRX_UIO_MODE_FIRMWARE2 = 0x10,
-                           /**< controlled by firmware, function 2   */
+                           /*< controlled by firmware, function 2   */
        DRX_UIO_MODE_FIRMWARE3 = 0x20,
-                           /**< controlled by firmware, function 3   */
+                           /*< controlled by firmware, function 3   */
        DRX_UIO_MODE_FIRMWARE4 = 0x40,
-                           /**< controlled by firmware, function 4   */
+                           /*< controlled by firmware, function 4   */
        DRX_UIO_MODE_FIRMWARE5 = 0x80
-                           /**< controlled by firmware, function 5   */
+                           /*< controlled by firmware, function 5   */
 };
 
-/**
+/*
 * \enum enum drxoob_downstream_standard * \brief Used to select OOB standard.
 *
 * Based on ANSI 55-1 and 55-2
 */
 enum drxoob_downstream_standard {
        DRX_OOB_MODE_A = 0,
-                      /**< ANSI 55-1   */
+                      /*< ANSI 55-1   */
        DRX_OOB_MODE_B_GRADE_A,
-                      /**< ANSI 55-2 A */
+                      /*< ANSI 55-2 A */
        DRX_OOB_MODE_B_GRADE_B
-                      /**< ANSI 55-2 B */
+                      /*< ANSI 55-2 B */
 };
 
 /*-------------------------------------------------------------------------
@@ -924,7 +924,7 @@ STRUCTS
 /*============================================================================*/
 /*============================================================================*/
 
-/**
+/*
 * struct drxu_code_info       Parameters for microcode upload and verify.
  *
  * @mc_file:   microcode file name
@@ -935,7 +935,7 @@ struct drxu_code_info {
        char                    *mc_file;
 };
 
-/**
+/*
 * \struct drx_mc_version_rec_t
 * \brief Microcode version record
 * Version numbers are stored in BCD format, as usual:
@@ -963,7 +963,7 @@ struct drx_mc_version_rec {
 
 /*========================================*/
 
-/**
+/*
 * \struct drx_filter_info_t
 * \brief Parameters for loading filter coefficients
 *
@@ -971,18 +971,18 @@ struct drx_mc_version_rec {
 */
 struct drx_filter_info {
        u8 *data_re;
-             /**< pointer to coefficients for RE */
+             /*< pointer to coefficients for RE */
        u8 *data_im;
-             /**< pointer to coefficients for IM */
+             /*< pointer to coefficients for IM */
        u16 size_re;
-             /**< size of coefficients for RE    */
+             /*< size of coefficients for RE    */
        u16 size_im;
-             /**< size of coefficients for IM    */
+             /*< size of coefficients for IM    */
 };
 
 /*========================================*/
 
-/**
+/*
 * \struct struct drx_channel * \brief The set of parameters describing a single channel.
 *
 * Used by DRX_CTRL_SET_CHANNEL and DRX_CTRL_GET_CHANNEL.
@@ -991,29 +991,29 @@ struct drx_filter_info {
 */
 struct drx_channel {
        s32 frequency;
-                               /**< frequency in kHz                 */
+                               /*< frequency in kHz                 */
        enum drx_bandwidth bandwidth;
-                               /**< bandwidth                        */
-       enum drx_mirror mirror; /**< mirrored or not on RF            */
+                               /*< bandwidth                        */
+       enum drx_mirror mirror; /*< mirrored or not on RF            */
        enum drx_modulation constellation;
-                               /**< constellation                    */
+                               /*< constellation                    */
        enum drx_hierarchy hierarchy;
-                               /**< hierarchy                        */
-       enum drx_priority priority;     /**< priority                         */
-       enum drx_coderate coderate;     /**< coderate                         */
-       enum drx_guard guard;   /**< guard interval                   */
-       enum drx_fft_mode fftmode;      /**< fftmode                          */
+                               /*< hierarchy                        */
+       enum drx_priority priority;     /*< priority                         */
+       enum drx_coderate coderate;     /*< coderate                         */
+       enum drx_guard guard;   /*< guard interval                   */
+       enum drx_fft_mode fftmode;      /*< fftmode                          */
        enum drx_classification classification;
-                               /**< classification                   */
+                               /*< classification                   */
        u32 symbolrate;
-                               /**< symbolrate in symbols/sec        */
+                               /*< symbolrate in symbols/sec        */
        enum drx_interleave_mode interleavemode;
-                               /**< interleaveMode QAM               */
-       enum drx_ldpc ldpc;             /**< ldpc                             */
-       enum drx_carrier_mode carrier;  /**< carrier                          */
+                               /*< interleaveMode QAM               */
+       enum drx_ldpc ldpc;             /*< ldpc                             */
+       enum drx_carrier_mode carrier;  /*< carrier                          */
        enum drx_frame_mode framemode;
-                               /**< frame mode                       */
-       enum drx_pilot_mode pilot;      /**< pilot mode                       */
+                               /*< frame mode                       */
+       enum drx_pilot_mode pilot;      /*< pilot mode                       */
 };
 
 /*========================================*/
@@ -1027,74 +1027,74 @@ enum drx_cfg_sqi_speed {
 
 /*========================================*/
 
-/**
+/*
 * \struct struct drx_complex * A complex number.
 *
 * Used by DRX_CTRL_CONSTEL.
 */
 struct drx_complex {
        s16 im;
-     /**< Imaginary part. */
+     /*< Imaginary part. */
        s16 re;
-     /**< Real part.      */
+     /*< Real part.      */
 };
 
 /*========================================*/
 
-/**
+/*
 * \struct struct drx_frequency_plan * Array element of a frequency plan.
 *
 * Used by DRX_CTRL_SCAN_INIT.
 */
 struct drx_frequency_plan {
        s32 first;
-                    /**< First centre frequency in this band        */
+                    /*< First centre frequency in this band        */
        s32 last;
-                    /**< Last centre frequency in this band         */
+                    /*< Last centre frequency in this band         */
        s32 step;
-                    /**< Stepping frequency in this band            */
+                    /*< Stepping frequency in this band            */
        enum drx_bandwidth bandwidth;
-                    /**< Bandwidth within this frequency band       */
+                    /*< Bandwidth within this frequency band       */
        u16 ch_number;
-                    /**< First channel number in this band, or first
+                    /*< First channel number in this band, or first
                            index in ch_names                         */
        char **ch_names;
-                    /**< Optional list of channel names in this
+                    /*< Optional list of channel names in this
                            band                                     */
 };
 
 /*========================================*/
 
-/**
+/*
 * \struct struct drx_scan_param * Parameters for channel scan.
 *
 * Used by DRX_CTRL_SCAN_INIT.
 */
 struct drx_scan_param {
        struct drx_frequency_plan *frequency_plan;
-                                 /**< Frequency plan (array)*/
-       u16 frequency_plan_size;  /**< Number of bands       */
-       u32 num_tries;            /**< Max channels tried    */
-       s32 skip;         /**< Minimum frequency step to take
+                                 /*< Frequency plan (array)*/
+       u16 frequency_plan_size;  /*< Number of bands       */
+       u32 num_tries;            /*< Max channels tried    */
+       s32 skip;         /*< Minimum frequency step to take
                                        after a channel is found */
-       void *ext_params;         /**< Standard specific params */
+       void *ext_params;         /*< Standard specific params */
 };
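
For illustration only (not part of this patch): one way the two structures
above could be filled for a single-band scan. Every value is a made-up
assumption.

/* Hypothetical scan setup (not from this commit). */
static struct drx_frequency_plan vhf_plan[] = {
	{
		.first     = 174000,		/* kHz */
		.last      = 230000,		/* kHz */
		.step      = 7000,		/* kHz */
		.bandwidth = DRX_BANDWIDTH_7MHZ,
		.ch_number = 5,			/* first channel number in this band */
	},
};

static struct drx_scan_param scan_param = {
	.frequency_plan      = vhf_plan,
	.frequency_plan_size = 1,		/* number of bands */
	.num_tries           = 1,
	.skip                = 0,
	.ext_params          = NULL,		/* no standard-specific params */
};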
 
 /*========================================*/
 
-/**
+/*
 * \brief Scan commands.
 * Used by scanning algorithms.
 */
 enum drx_scan_command {
-               DRX_SCAN_COMMAND_INIT = 0,/**< Initialize scanning */
-               DRX_SCAN_COMMAND_NEXT,    /**< Next scan           */
-               DRX_SCAN_COMMAND_STOP     /**< Stop scanning       */
+               DRX_SCAN_COMMAND_INIT = 0,/*< Initialize scanning */
+               DRX_SCAN_COMMAND_NEXT,    /*< Next scan           */
+               DRX_SCAN_COMMAND_STOP     /*< Stop scanning       */
 };
 
 /*========================================*/
 
-/**
+/*
 * \brief Inner scan function prototype.
 */
 typedef int(*drx_scan_func_t) (void *scan_context,
@@ -1104,77 +1104,77 @@ typedef int(*drx_scan_func_t) (void *scan_context,
 
 /*========================================*/
 
-/**
+/*
 * \struct struct drxtps_info * TPS information, DVB-T specific.
 *
 * Used by DRX_CTRL_TPS_INFO.
 */
        struct drxtps_info {
-               enum drx_fft_mode fftmode;      /**< Fft mode       */
-               enum drx_guard guard;   /**< Guard interval */
+               enum drx_fft_mode fftmode;      /*< Fft mode       */
+               enum drx_guard guard;   /*< Guard interval */
                enum drx_modulation constellation;
-                                       /**< Constellation  */
+                                       /*< Constellation  */
                enum drx_hierarchy hierarchy;
-                                       /**< Hierarchy      */
+                                       /*< Hierarchy      */
                enum drx_coderate high_coderate;
-                                       /**< High code rate */
+                                       /*< High code rate */
                enum drx_coderate low_coderate;
-                                       /**< Low cod rate   */
-               enum drx_tps_frame frame;       /**< Tps frame      */
-               u8 length;              /**< Length         */
-               u16 cell_id;            /**< Cell id        */
+                                       /*< Low cod rate   */
+               enum drx_tps_frame frame;       /*< Tps frame      */
+               u8 length;              /*< Length         */
+               u16 cell_id;            /*< Cell id        */
        };
 
 /*========================================*/
 
-/**
+/*
 * \brief Power mode of device.
 *
 * Used by DRX_CTRL_SET_POWER_MODE.
 */
        enum drx_power_mode {
                DRX_POWER_UP = 0,
-                        /**< Generic         , Power Up Mode   */
+                        /*< Generic         , Power Up Mode   */
                DRX_POWER_MODE_1,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
                DRX_POWER_MODE_2,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
                DRX_POWER_MODE_3,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
                DRX_POWER_MODE_4,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
                DRX_POWER_MODE_5,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
                DRX_POWER_MODE_6,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
                DRX_POWER_MODE_7,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
                DRX_POWER_MODE_8,
-                        /**< Device specific , Power Up Mode   */
+                        /*< Device specific , Power Up Mode   */
 
                DRX_POWER_MODE_9,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_MODE_10,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_MODE_11,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_MODE_12,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_MODE_13,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_MODE_14,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_MODE_15,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_MODE_16,
-                        /**< Device specific , Power Down Mode */
+                        /*< Device specific , Power Down Mode */
                DRX_POWER_DOWN = 255
-                        /**< Generic         , Power Down Mode */
+                        /*< Generic         , Power Down Mode */
        };
 
 /*========================================*/
 
-/**
+/*
 * \enum enum drx_module * \brief Software module identification.
 *
 * Used by DRX_CTRL_VERSION.
@@ -1191,93 +1191,93 @@ typedef int(*drx_scan_func_t) (void *scan_context,
                DRX_MODULE_UNKNOWN
        };
 
-/**
+/*
 * \enum struct drx_version * \brief Version information of one software module.
 *
 * Used by DRX_CTRL_VERSION.
 */
        struct drx_version {
                enum drx_module module_type;
-                              /**< Type identifier of the module */
+                              /*< Type identifier of the module */
                char *module_name;
-                              /**< Name or description of module */
-               u16 v_major;  /**< Major version number          */
-               u16 v_minor;  /**< Minor version number          */
-               u16 v_patch;  /**< Patch version number          */
-               char *v_string; /**< Version as text string        */
+                              /*< Name or description of module */
+               u16 v_major;  /*< Major version number          */
+               u16 v_minor;  /*< Minor version number          */
+               u16 v_patch;  /*< Patch version number          */
+               char *v_string; /*< Version as text string        */
        };
 
-/**
+/*
 * \enum struct drx_version_list * \brief List element of NULL terminated, linked list for version information.
 *
 * Used by DRX_CTRL_VERSION.
 */
 struct drx_version_list {
-       struct drx_version *version;/**< Version information */
+       struct drx_version *version;/*< Version information */
        struct drx_version_list *next;
-                             /**< Next list element   */
+                             /*< Next list element   */
 };
 
 /*========================================*/
 
-/**
+/*
 * \brief Parameters needed to configure a UIO.
 *
 * Used by DRX_CTRL_UIO_CFG.
 */
        struct drxuio_cfg {
                enum drx_uio uio;
-                      /**< UIO identifier       */
+                      /*< UIO identifier       */
                enum drxuio_mode mode;
-                      /**< UIO operational mode */
+                      /*< UIO operational mode */
        };
 
 /*========================================*/
 
-/**
+/*
 * \brief Parameters needed to read from or write to a UIO.
 *
 * Used by DRX_CTRL_UIO_READ and DRX_CTRL_UIO_WRITE.
 */
        struct drxuio_data {
                enum drx_uio uio;
-                  /**< UIO identifier              */
+                  /*< UIO identifier              */
                bool value;
-                  /**< UIO value (true=1, false=0) */
+                  /*< UIO value (true=1, false=0) */
        };
 
 /*========================================*/
 
-/**
+/*
 * \brief Parameters needed to configure OOB.
 *
 * Used by DRX_CTRL_SET_OOB.
 */
        struct drxoob {
-               s32 frequency;     /**< Frequency in kHz      */
+               s32 frequency;     /*< Frequency in kHz      */
                enum drxoob_downstream_standard standard;
-                                                  /**< OOB standard          */
-               bool spectrum_inverted;    /**< If true, then spectrum
+                                                  /*< OOB standard          */
+               bool spectrum_inverted;    /*< If true, then spectrum
                                                         is inverted          */
        };
 
 /*========================================*/
 
-/**
+/*
 * \brief Metrics from OOB.
 *
 * Used by DRX_CTRL_GET_OOB.
 */
        struct drxoob_status {
-               s32 frequency; /**< Frequency in Khz         */
-               enum drx_lock_status lock;        /**< Lock status              */
-               u32 mer;                  /**< MER                      */
-               s32 symbol_rate_offset;   /**< Symbolrate offset in ppm */
+               s32 frequency; /*< Frequency in Khz         */
+               enum drx_lock_status lock;        /*< Lock status              */
+               u32 mer;                  /*< MER                      */
+               s32 symbol_rate_offset;   /*< Symbolrate offset in ppm */
        };
 
 /*========================================*/
 
-/**
+/*
 * \brief Device dependent configuration data.
 *
 * Used by DRX_CTRL_SET_CFG and DRX_CTRL_GET_CFG.
@@ -1285,14 +1285,14 @@ struct drx_version_list {
 */
        struct drx_cfg {
                u32 cfg_type;
-                         /**< Function identifier */
+                         /*< Function identifier */
                void *cfg_data;
-                         /**< Function data */
+                         /*< Function data */
        };
 
 /*========================================*/
 
-/**
+/*
 * /struct DRXMpegStartWidth_t
 * MStart width [nr MCLK cycles] for serial MPEG output.
 */
@@ -1303,7 +1303,7 @@ struct drx_version_list {
        };
 
 /* CTRL CFG MPEG output */
-/**
+/*
 * \struct struct drx_cfg_mpeg_output * \brief Configuration parameters for MPEG output control.
 *
 * Used by DRX_CFG_MPEG_OUTPUT, in combination with DRX_CTRL_SET_CFG and
@@ -1311,29 +1311,29 @@ struct drx_version_list {
 */
 
        struct drx_cfg_mpeg_output {
-               bool enable_mpeg_output;/**< If true, enable MPEG output      */
-               bool insert_rs_byte;    /**< If true, insert RS byte          */
-               bool enable_parallel;   /**< If true, parallel out otherwise
+               bool enable_mpeg_output;/*< If true, enable MPEG output      */
+               bool insert_rs_byte;    /*< If true, insert RS byte          */
+               bool enable_parallel;   /*< If true, parallel out otherwise
                                                                     serial   */
-               bool invert_data;       /**< If true, invert DATA signals     */
-               bool invert_err;        /**< If true, invert ERR signal       */
-               bool invert_str;        /**< If true, invert STR signals      */
-               bool invert_val;        /**< If true, invert VAL signals      */
-               bool invert_clk;        /**< If true, invert CLK signals      */
-               bool static_clk;        /**< If true, static MPEG clockrate
+               bool invert_data;       /*< If true, invert DATA signals     */
+               bool invert_err;        /*< If true, invert ERR signal       */
+               bool invert_str;        /*< If true, invert STR signals      */
+               bool invert_val;        /*< If true, invert VAL signals      */
+               bool invert_clk;        /*< If true, invert CLK signals      */
+               bool static_clk;        /*< If true, static MPEG clockrate
                                             will be used, otherwise clockrate
                                             will adapt to the bitrate of the
                                             TS                               */
-               u32 bitrate;            /**< Maximum bitrate in b/s in case
+               u32 bitrate;            /*< Maximum bitrate in b/s in case
                                             static clockrate is selected     */
                enum drxmpeg_str_width width_str;
-                                       /**< MPEG start width                 */
+                                       /*< MPEG start width                 */
        };
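
For illustration only (not part of this patch): a sketch of filling struct
drx_cfg_mpeg_output for parallel TS output with an adaptive clock. The chosen
values are assumptions.

/* Hypothetical MPEG output configuration (not from this commit). */
static const struct drx_cfg_mpeg_output mpeg_cfg = {
	.enable_mpeg_output = true,
	.enable_parallel    = true,	/* parallel TS instead of serial */
	.insert_rs_byte     = false,
	.static_clk         = false,	/* clock rate follows the TS bitrate */
};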
 
 
 /*========================================*/
 
-/**
+/*
 * \struct struct drxi2c_data * \brief Data for I2C via 2nd or 3rd or etc I2C port.
 *
 * Used by DRX_CTRL_I2C_READWRITE.
@@ -1341,187 +1341,187 @@ struct drx_version_list {
 *
 */
        struct drxi2c_data {
-               u16 port_nr;    /**< I2C port number               */
+               u16 port_nr;    /*< I2C port number               */
                struct i2c_device_addr *w_dev_addr;
-                               /**< Write device address          */
-               u16 w_count;    /**< Size of write data in bytes   */
-               u8 *wData;      /**< Pointer to write data         */
+                               /*< Write device address          */
+               u16 w_count;    /*< Size of write data in bytes   */
+               u8 *wData;      /*< Pointer to write data         */
                struct i2c_device_addr *r_dev_addr;
-                               /**< Read device address           */
-               u16 r_count;    /**< Size of data to read in bytes */
-               u8 *r_data;     /**< Pointer to read buffer        */
+                               /*< Read device address           */
+               u16 r_count;    /*< Size of data to read in bytes */
+               u8 *r_data;     /*< Pointer to read buffer        */
        };
 
 /*========================================*/
 
-/**
+/*
 * \enum enum drx_aud_standard * \brief Audio standard identifier.
 *
 * Used by DRX_CTRL_SET_AUD.
 */
        enum drx_aud_standard {
-               DRX_AUD_STANDARD_BTSC,     /**< set BTSC standard (USA)       */
-               DRX_AUD_STANDARD_A2,       /**< set A2-Korea FM Stereo        */
-               DRX_AUD_STANDARD_EIAJ,     /**< set to Japanese FM Stereo     */
-               DRX_AUD_STANDARD_FM_STEREO,/**< set to FM-Stereo Radio        */
-               DRX_AUD_STANDARD_M_MONO,   /**< for 4.5 MHz mono detected     */
-               DRX_AUD_STANDARD_D_K_MONO, /**< for 6.5 MHz mono detected     */
-               DRX_AUD_STANDARD_BG_FM,    /**< set BG_FM standard            */
-               DRX_AUD_STANDARD_D_K1,     /**< set D_K1 standard             */
-               DRX_AUD_STANDARD_D_K2,     /**< set D_K2 standard             */
-               DRX_AUD_STANDARD_D_K3,     /**< set D_K3 standard             */
+               DRX_AUD_STANDARD_BTSC,     /*< set BTSC standard (USA)       */
+               DRX_AUD_STANDARD_A2,       /*< set A2-Korea FM Stereo        */
+               DRX_AUD_STANDARD_EIAJ,     /*< set to Japanese FM Stereo     */
+               DRX_AUD_STANDARD_FM_STEREO,/*< set to FM-Stereo Radio        */
+               DRX_AUD_STANDARD_M_MONO,   /*< for 4.5 MHz mono detected     */
+               DRX_AUD_STANDARD_D_K_MONO, /*< for 6.5 MHz mono detected     */
+               DRX_AUD_STANDARD_BG_FM,    /*< set BG_FM standard            */
+               DRX_AUD_STANDARD_D_K1,     /*< set D_K1 standard             */
+               DRX_AUD_STANDARD_D_K2,     /*< set D_K2 standard             */
+               DRX_AUD_STANDARD_D_K3,     /*< set D_K3 standard             */
                DRX_AUD_STANDARD_BG_NICAM_FM,
-                                          /**< set BG_NICAM_FM standard      */
+                                          /*< set BG_NICAM_FM standard      */
                DRX_AUD_STANDARD_L_NICAM_AM,
-                                          /**< set L_NICAM_AM standard       */
+                                          /*< set L_NICAM_AM standard       */
                DRX_AUD_STANDARD_I_NICAM_FM,
-                                          /**< set I_NICAM_FM standard       */
+                                          /*< set I_NICAM_FM standard       */
                DRX_AUD_STANDARD_D_K_NICAM_FM,
-                                          /**< set D_K_NICAM_FM standard     */
-               DRX_AUD_STANDARD_NOT_READY,/**< used to detect audio standard */
+                                          /*< set D_K_NICAM_FM standard     */
+               DRX_AUD_STANDARD_NOT_READY,/*< used to detect audio standard */
                DRX_AUD_STANDARD_AUTO = DRX_AUTO,
-                                          /**< Automatic Standard Detection  */
+                                          /*< Automatic Standard Detection  */
                DRX_AUD_STANDARD_UNKNOWN = DRX_UNKNOWN
-                                          /**< used as auto and for readback */
+                                          /*< used as auto and for readback */
        };
 
 /* CTRL_AUD_GET_STATUS    - struct drx_aud_status */
-/**
+/*
 * \enum enum drx_aud_nicam_status * \brief Status of NICAM carrier.
 */
        enum drx_aud_nicam_status {
                DRX_AUD_NICAM_DETECTED = 0,
-                                         /**< NICAM carrier detected         */
+                                         /*< NICAM carrier detected         */
                DRX_AUD_NICAM_NOT_DETECTED,
-                                         /**< NICAM carrier not detected     */
-               DRX_AUD_NICAM_BAD         /**< NICAM carrier bad quality      */
+                                         /*< NICAM carrier not detected     */
+               DRX_AUD_NICAM_BAD         /*< NICAM carrier bad quality      */
        };
 
-/**
+/*
 * \struct struct drx_aud_status * \brief Audio status characteristics.
 */
        struct drx_aud_status {
-               bool stereo;              /**< stereo detection               */
-               bool carrier_a;   /**< carrier A detected             */
-               bool carrier_b;   /**< carrier B detected             */
-               bool sap;                 /**< sap / bilingual detection      */
-               bool rds;                 /**< RDS data array present         */
+               bool stereo;              /*< stereo detection               */
+               bool carrier_a;   /*< carrier A detected             */
+               bool carrier_b;   /*< carrier B detected             */
+               bool sap;                 /*< sap / bilingual detection      */
+               bool rds;                 /*< RDS data array present         */
                enum drx_aud_nicam_status nicam_status;
-                                         /**< status of NICAM carrier        */
-               s8 fm_ident;              /**< FM Identification value        */
+                                         /*< status of NICAM carrier        */
+               s8 fm_ident;              /*< FM Identification value        */
        };
 
 /* CTRL_AUD_READ_RDS       - DRXRDSdata_t */
 
-/**
+/*
 * \struct DRXRDSdata_t
 * \brief Raw RDS data array.
 */
        struct drx_cfg_aud_rds {
-               bool valid;               /**< RDS data validation            */
-               u16 data[18];             /**< data from one RDS data array   */
+               bool valid;               /*< RDS data validation            */
+               u16 data[18];             /*< data from one RDS data array   */
        };
 
 /* DRX_CFG_AUD_VOLUME      - struct drx_cfg_aud_volume - set/get */
-/**
+/*
 * \enum DRXAudAVCDecayTime_t
 * \brief Automatic volume control configuration.
 */
        enum drx_aud_avc_mode {
-               DRX_AUD_AVC_OFF,          /**< Automatic volume control off   */
-               DRX_AUD_AVC_DECAYTIME_8S, /**< level volume in  8 seconds     */
-               DRX_AUD_AVC_DECAYTIME_4S, /**< level volume in  4 seconds     */
-               DRX_AUD_AVC_DECAYTIME_2S, /**< level volume in  2 seconds     */
-               DRX_AUD_AVC_DECAYTIME_20MS/**< level volume in 20 millisec    */
+               DRX_AUD_AVC_OFF,          /*< Automatic volume control off   */
+               DRX_AUD_AVC_DECAYTIME_8S, /*< level volume in  8 seconds     */
+               DRX_AUD_AVC_DECAYTIME_4S, /*< level volume in  4 seconds     */
+               DRX_AUD_AVC_DECAYTIME_2S, /*< level volume in  2 seconds     */
+               DRX_AUD_AVC_DECAYTIME_20MS/*< level volume in 20 millisec    */
        };
 
-/**
+/*
 * /enum DRXAudMaxAVCGain_t
 * /brief Automatic volume control max gain in audio baseband.
 */
        enum drx_aud_avc_max_gain {
-               DRX_AUD_AVC_MAX_GAIN_0DB, /**< maximum AVC gain  0 dB         */
-               DRX_AUD_AVC_MAX_GAIN_6DB, /**< maximum AVC gain  6 dB         */
-               DRX_AUD_AVC_MAX_GAIN_12DB /**< maximum AVC gain 12 dB         */
+               DRX_AUD_AVC_MAX_GAIN_0DB, /*< maximum AVC gain  0 dB         */
+               DRX_AUD_AVC_MAX_GAIN_6DB, /*< maximum AVC gain  6 dB         */
+               DRX_AUD_AVC_MAX_GAIN_12DB /*< maximum AVC gain 12 dB         */
        };
 
-/**
+/*
 * /enum DRXAudMaxAVCAtten_t
 * /brief Automatic volume control max attenuation in audio baseband.
 */
        enum drx_aud_avc_max_atten {
                DRX_AUD_AVC_MAX_ATTEN_12DB,
-                                         /**< maximum AVC attenuation 12 dB  */
+                                         /*< maximum AVC attenuation 12 dB  */
                DRX_AUD_AVC_MAX_ATTEN_18DB,
-                                         /**< maximum AVC attenuation 18 dB  */
-               DRX_AUD_AVC_MAX_ATTEN_24DB/**< maximum AVC attenuation 24 dB  */
+                                         /*< maximum AVC attenuation 18 dB  */
+               DRX_AUD_AVC_MAX_ATTEN_24DB/*< maximum AVC attenuation 24 dB  */
        };
-/**
+/*
 * \struct struct drx_cfg_aud_volume * \brief Audio volume configuration.
 */
        struct drx_cfg_aud_volume {
-               bool mute;                /**< mute overrides volume setting  */
-               s16 volume;               /**< volume, range -114 to 12 dB    */
-               enum drx_aud_avc_mode avc_mode;  /**< AVC auto volume control mode   */
-               u16 avc_ref_level;        /**< AVC reference level            */
+               bool mute;                /*< mute overrides volume setting  */
+               s16 volume;               /*< volume, range -114 to 12 dB    */
+               enum drx_aud_avc_mode avc_mode;  /*< AVC auto volume control mode   */
+               u16 avc_ref_level;        /*< AVC reference level            */
                enum drx_aud_avc_max_gain avc_max_gain;
-                                         /**< AVC max gain selection         */
+                                         /*< AVC max gain selection         */
                enum drx_aud_avc_max_atten avc_max_atten;
-                                         /**< AVC max attenuation selection  */
-               s16 strength_left;        /**< quasi-peak, left speaker       */
-               s16 strength_right;       /**< quasi-peak, right speaker      */
+                                         /*< AVC max attenuation selection  */
+               s16 strength_left;        /*< quasi-peak, left speaker       */
+               s16 strength_right;       /*< quasi-peak, right speaker      */
        };
 
 /* DRX_CFG_I2S_OUTPUT      - struct drx_cfg_i2s_output - set/get */
-/**
+/*
 * \enum enum drxi2s_mode * \brief I2S output mode.
 */
        enum drxi2s_mode {
-               DRX_I2S_MODE_MASTER,      /**< I2S is in master mode          */
-               DRX_I2S_MODE_SLAVE        /**< I2S is in slave mode           */
+               DRX_I2S_MODE_MASTER,      /*< I2S is in master mode          */
+               DRX_I2S_MODE_SLAVE        /*< I2S is in slave mode           */
        };
 
-/**
+/*
 * \enum enum drxi2s_word_length * \brief Width of I2S data.
 */
        enum drxi2s_word_length {
-               DRX_I2S_WORDLENGTH_32 = 0,/**< I2S data is 32 bit wide        */
-               DRX_I2S_WORDLENGTH_16 = 1 /**< I2S data is 16 bit wide        */
+               DRX_I2S_WORDLENGTH_32 = 0,/*< I2S data is 32 bit wide        */
+               DRX_I2S_WORDLENGTH_16 = 1 /*< I2S data is 16 bit wide        */
        };
 
-/**
+/*
 * \enum enum drxi2s_format * \brief Data wordstrobe alignment for I2S.
 */
        enum drxi2s_format {
                DRX_I2S_FORMAT_WS_WITH_DATA,
-                                   /**< I2S data and wordstrobe are aligned  */
+                                   /*< I2S data and wordstrobe are aligned  */
                DRX_I2S_FORMAT_WS_ADVANCED
-                                   /**< I2S data one cycle after wordstrobe  */
+                                   /*< I2S data one cycle after wordstrobe  */
        };
 
-/**
+/*
 * \enum enum drxi2s_polarity * \brief Polarity of I2S data.
 */
        enum drxi2s_polarity {
-               DRX_I2S_POLARITY_RIGHT,/**< wordstrobe - right high, left low */
-               DRX_I2S_POLARITY_LEFT  /**< wordstrobe - right low, left high */
+               DRX_I2S_POLARITY_RIGHT,/*< wordstrobe - right high, left low */
+               DRX_I2S_POLARITY_LEFT  /*< wordstrobe - right low, left high */
        };
 
-/**
+/*
 * \struct struct drx_cfg_i2s_output * \brief I2S output configuration.
 */
        struct drx_cfg_i2s_output {
-               bool output_enable;       /**< I2S output enable              */
-               u32 frequency;    /**< range from 8000-48000 Hz       */
-               enum drxi2s_mode mode;    /**< I2S mode, master or slave      */
+               bool output_enable;       /*< I2S output enable              */
+               u32 frequency;    /*< range from 8000-48000 Hz       */
+               enum drxi2s_mode mode;    /*< I2S mode, master or slave      */
                enum drxi2s_word_length word_length;
-                                         /**< I2S wordlength, 16 or 32 bits  */
-               enum drxi2s_polarity polarity;/**< I2S wordstrobe polarity        */
-               enum drxi2s_format format;        /**< I2S wordstrobe delay to data   */
+                                         /*< I2S wordlength, 16 or 32 bits  */
+               enum drxi2s_polarity polarity;/*< I2S wordstrobe polarity        */
+               enum drxi2s_format format;        /*< I2S wordstrobe delay to data   */
        };
 
 /* ------------------------------expert interface-----------------------------*/
-/**
+/*
 * /enum enum drx_aud_fm_deemphasis * setting for FM-Deemphasis in audio demodulator.
 *
 */
@@ -1531,7 +1531,7 @@ struct drx_version_list {
                DRX_AUD_FM_DEEMPH_OFF
        };
 
-/**
+/*
 * /enum DRXAudDeviation_t
 * setting for deviation mode in audio demodulator.
 *
@@ -1541,7 +1541,7 @@ struct drx_version_list {
                DRX_AUD_DEVIATION_HIGH
        };
 
-/**
+/*
 * /enum enum drx_no_carrier_option * setting for carrier, mute/noise.
 *
 */
@@ -1550,7 +1550,7 @@ struct drx_version_list {
                DRX_NO_CARRIER_NOISE
        };
 
-/**
+/*
 * \enum DRXAudAutoSound_t
 * \brief Automatic Sound
 */
@@ -1560,7 +1560,7 @@ struct drx_version_list {
                DRX_AUD_AUTO_SOUND_SELECT_ON_CHANGE_OFF
        };
 
-/**
+/*
 * \enum DRXAudASSThres_t
 * \brief Automatic Sound Select Thresholds
 */
@@ -1570,7 +1570,7 @@ struct drx_version_list {
                u16 nicam;      /* Nicam Threshold for ASS configuration */
        };
 
-/**
+/*
 * \struct struct drx_aud_carrier * \brief Carrier detection related parameters
 */
        struct drx_aud_carrier {
@@ -1580,7 +1580,7 @@ struct drx_version_list {
                s32 dco;        /* frequency adjustment (A) */
        };
 
-/**
+/*
 * \struct struct drx_cfg_aud_carriers * \brief combining carrier A & B to one struct
 */
        struct drx_cfg_aud_carriers {
@@ -1588,7 +1588,7 @@ struct drx_version_list {
                struct drx_aud_carrier b;
        };
 
-/**
+/*
 * /enum enum drx_aud_i2s_src * Selection of audio source
 */
        enum drx_aud_i2s_src {
@@ -1597,19 +1597,19 @@ struct drx_version_list {
                DRX_AUD_SRC_STEREO_OR_A,
                DRX_AUD_SRC_STEREO_OR_B};
 
-/**
+/*
 * \enum enum drx_aud_i2s_matrix * \brief Used for selecting I2S output.
 */
        enum drx_aud_i2s_matrix {
                DRX_AUD_I2S_MATRIX_A_MONO,
-                                       /**< A sound only, stereo or mono     */
+                                       /*< A sound only, stereo or mono     */
                DRX_AUD_I2S_MATRIX_B_MONO,
-                                       /**< B sound only, stereo or mono     */
+                                       /*< B sound only, stereo or mono     */
                DRX_AUD_I2S_MATRIX_STEREO,
-                                       /**< A+B sound, transparant           */
-               DRX_AUD_I2S_MATRIX_MONO /**< A+B mixed to mono sum, (L+R)/2   */};
+                                       /*< A+B sound, transparent           */
+               DRX_AUD_I2S_MATRIX_MONO /*< A+B mixed to mono sum, (L+R)/2   */};
 
-/**
+/*
 * /enum enum drx_aud_fm_matrix * setting for FM-Matrix in audio demodulator.
 *
 */
@@ -1620,7 +1620,7 @@ struct drx_version_list {
                DRX_AUD_FM_MATRIX_SOUND_A,
                DRX_AUD_FM_MATRIX_SOUND_B};
 
-/**
+/*
 * \struct DRXAudMatrices_t
 * \brief Mixer settings
 */
@@ -1630,22 +1630,22 @@ struct drx_cfg_aud_mixer {
        enum drx_aud_fm_matrix matrix_fm;
 };
 
-/**
+/*
 * \enum DRXI2SVidSync_t
 * \brief Audio/video synchronization, interacts with I2S mode.
 * AUTO_1 and AUTO_2 are for automatic video standard detection with preference
 * for NTSC or Monochrome, because the frequencies are too close (59.94 & 60 Hz)
 */
        enum drx_cfg_aud_av_sync {
-               DRX_AUD_AVSYNC_OFF,/**< audio/video synchronization is off   */
+               DRX_AUD_AVSYNC_OFF,/*< audio/video synchronization is off   */
                DRX_AUD_AVSYNC_NTSC,
-                                  /**< it is an NTSC system                 */
+                                  /*< it is an NTSC system                 */
                DRX_AUD_AVSYNC_MONOCHROME,
-                                  /**< it is a MONOCHROME system            */
+                                  /*< it is a MONOCHROME system            */
                DRX_AUD_AVSYNC_PAL_SECAM
-                                  /**< it is a PAL/SECAM system             */};
+                                  /*< it is a PAL/SECAM system             */};
 
-/**
+/*
 * \struct struct drx_cfg_aud_prescale * \brief Prescalers
 */
 struct drx_cfg_aud_prescale {
@@ -1653,7 +1653,7 @@ struct drx_cfg_aud_prescale {
        s16 nicam_gain;
 };
 
-/**
+/*
 * \struct struct drx_aud_beep * \brief Beep
 */
 struct drx_aud_beep {
@@ -1662,14 +1662,14 @@ struct drx_aud_beep {
        bool mute;
 };
 
-/**
+/*
 * \enum enum drx_aud_btsc_detect * \brief BTSC detection mode
 */
        enum drx_aud_btsc_detect {
                DRX_BTSC_STEREO,
                DRX_BTSC_MONO_AND_SAP};
 
-/**
+/*
 * \struct struct drx_aud_data * \brief Audio data structure
 */
 struct drx_aud_data {
@@ -1692,7 +1692,7 @@ struct drx_aud_data {
        bool rds_data_present;
 };
 
-/**
+/*
 * \enum enum drx_qam_lock_range * \brief QAM lock range mode
 */
        enum drx_qam_lock_range {
@@ -1782,7 +1782,7 @@ struct drx_aud_data {
                                                             u32 wdata, /* data to write               */
                                                             u32 *rdata);       /* data to read                */
 
-/**
+/*
 * \struct struct drx_access_func * \brief Interface to an access protocol.
 */
 struct drx_access_func {
@@ -1811,85 +1811,85 @@ struct drx_reg_dump {
 /*============================================================================*/
 /*============================================================================*/
 
-/**
+/*
 * \struct struct drx_common_attr * \brief Set of common attributes, shared by all DRX devices.
 */
        struct drx_common_attr {
                /* Microcode (firmware) attributes */
-               char *microcode_file;   /**<  microcode filename           */
+               char *microcode_file;   /*<  microcode filename           */
                bool verify_microcode;
-                                  /**< Use microcode verify or not.          */
+                                  /*< Use microcode verify or not.          */
                struct drx_mc_version_rec mcversion;
-                                  /**< Version record of microcode from file */
+                                  /*< Version record of microcode from file */
 
                /* Clocks and tuner attributes */
                s32 intermediate_freq;
-                                    /**< IF,if tuner instance not used. (kHz)*/
+                                    /*< IF, if tuner instance not used (kHz) */
                s32 sys_clock_freq;
-                                    /**< Systemclock frequency.  (kHz)       */
+                                    /*< Systemclock frequency.  (kHz)       */
                s32 osc_clock_freq;
-                                    /**< Oscillator clock frequency.  (kHz)  */
+                                    /*< Oscillator clock frequency.  (kHz)  */
                s16 osc_clock_deviation;
-                                    /**< Oscillator clock deviation.  (ppm)  */
+                                    /*< Oscillator clock deviation.  (ppm)  */
                bool mirror_freq_spect;
-                                    /**< Mirror IF frequency spectrum or not.*/
+                                    /*< Mirror IF frequency spectrum or not.*/
 
                /* Initial MPEG output attributes */
                struct drx_cfg_mpeg_output mpeg_cfg;
-                                    /**< MPEG configuration                  */
+                                    /*< MPEG configuration                  */
 
-               bool is_opened;     /**< if true instance is already opened. */
+               bool is_opened;     /*< if true instance is already opened. */
 
                /* Channel scan */
                struct drx_scan_param *scan_param;
-                                     /**< scan parameters                    */
+                                     /*< scan parameters                    */
                u16 scan_freq_plan_index;
-                                     /**< next index in freq plan            */
+                                     /*< next index in freq plan            */
                s32 scan_next_frequency;
-                                     /**< next freq to scan                  */
-               bool scan_ready;     /**< scan ready flag                    */
-               u32 scan_max_channels;/**< number of channels in freqplan     */
+                                     /*< next freq to scan                  */
+               bool scan_ready;     /*< scan ready flag                    */
+               u32 scan_max_channels;/*< number of channels in freqplan     */
                u32 scan_channels_scanned;
-                                       /**< number of channels scanned       */
+                                       /*< number of channels scanned       */
                /* Channel scan - inner loop: demod related */
                drx_scan_func_t scan_function;
-                                     /**< function to check channel          */
+                                     /*< function to check channel          */
                /* Channel scan - inner loop: SYSObj related */
-               void *scan_context;    /**< Context Pointer of SYSObj          */
+               void *scan_context;    /*< Context Pointer of SYSObj          */
                /* Channel scan - parameters for default DTV scan function in core driver  */
                u16 scan_demod_lock_timeout;
-                                        /**< millisecs to wait for lock      */
+                                        /*< millisecs to wait for lock      */
                enum drx_lock_status scan_desired_lock;
-                                     /**< lock requirement for channel found */
+                                     /*< lock requirement for channel found */
                /* scan_active can be used by SetChannel to decide how to program the tuner,
                   fast or slow (but stable). Usually fast during scan. */
-               bool scan_active;    /**< true when scan routines are active */
+               bool scan_active;    /*< true when scan routines are active */
 
                /* Power management */
                enum drx_power_mode current_power_mode;
-                                     /**< current power management mode      */
+                                     /*< current power management mode      */
 
                /* Tuner */
-               u8 tuner_port_nr;     /**< nr of I2C port to wich tuner is    */
+               u8 tuner_port_nr;     /*< nr of I2C port to which tuner is   */
                s32 tuner_min_freq_rf;
-                                     /**< minimum RF input frequency, in kHz */
+                                     /*< minimum RF input frequency, in kHz */
                s32 tuner_max_freq_rf;
-                                     /**< maximum RF input frequency, in kHz */
-               bool tuner_rf_agc_pol; /**< if true invert RF AGC polarity     */
-               bool tuner_if_agc_pol; /**< if true invert IF AGC polarity     */
-               bool tuner_slow_mode; /**< if true invert IF AGC polarity     */
+                                     /*< maximum RF input frequency, in kHz */
+               bool tuner_rf_agc_pol; /*< if true invert RF AGC polarity     */
+               bool tuner_if_agc_pol; /*< if true invert IF AGC polarity     */
+               bool tuner_slow_mode; /*< if true tuner is in slow mode      */
 
                struct drx_channel current_channel;
-                                     /**< current channel parameters         */
+                                     /*< current channel parameters         */
                enum drx_standard current_standard;
-                                     /**< current standard selection         */
+                                     /*< current standard selection         */
                enum drx_standard prev_standard;
-                                     /**< previous standard selection        */
+                                     /*< previous standard selection        */
                enum drx_standard di_cache_standard;
-                                     /**< standard in DI cache if available  */
-               bool use_bootloader; /**< use bootloader in open             */
-               u32 capabilities;   /**< capabilities flags                 */
-               u32 product_id;      /**< product ID inc. metal fix number   */};
+                                     /*< standard in DI cache if available  */
+               bool use_bootloader; /*< use bootloader in open             */
+               u32 capabilities;   /*< capabilities flags                 */
+               u32 product_id;      /*< product ID inc. metal fix number   */};
 
 /*
 * Generic functions for DRX devices.
@@ -1897,16 +1897,16 @@ struct drx_reg_dump {
 
 struct drx_demod_instance;
 
-/**
+/*
 * \struct struct drx_demod_instance * \brief Top structure of demodulator instance.
 */
 struct drx_demod_instance {
-                               /**< data access protocol functions       */
+                               /*< data access protocol functions       */
        struct i2c_device_addr *my_i2c_dev_addr;
-                               /**< i2c address and device identifier    */
+                               /*< i2c address and device identifier    */
        struct drx_common_attr *my_common_attr;
-                               /**< common DRX attributes                */
-       void *my_ext_attr;    /**< device specific attributes           */
+                               /*< common DRX attributes                */
+       void *my_ext_attr;    /*< device specific attributes           */
        /* generic demodulator data */
 
        struct i2c_adapter      *i2c;
@@ -2195,7 +2195,7 @@ Conversion from enum values to human readable form.
 Access macros
 -------------------------------------------------------------------------*/
 
-/**
+/*
 * \brief Create a compilable reference to the microcode attribute
 * \param d pointer to demod instance
 *
@@ -2229,7 +2229,7 @@ Access macros
 #define DRX_ATTR_I2CDEVID(d)        ((d)->my_i2c_dev_addr->i2c_dev_id)
 #define DRX_ISMCVERTYPE(x) ((x) == AUX_VER_RECORD)
 
-/**************************/
+/*************************/
 
 /* Macros with device-specific handling are converted to CFG functions */
 
@@ -2285,7 +2285,7 @@ Access macros
 #define DRX_GET_QAM_LOCKRANGE(d, x) DRX_ACCESSMACRO_GET((d), (x), \
         DRX_XS_CFG_QAM_LOCKRANGE, enum drx_qam_lock_range, DRX_UNKNOWN)
 
-/**
+/*
 * \brief Macro to check if std is an ATV standard
 * \retval true std is an ATV standard
 * \retval false std is not an ATV standard
@@ -2298,7 +2298,7 @@ Access macros
                              ((std) == DRX_STANDARD_NTSC) || \
                              ((std) == DRX_STANDARD_FM))
 
-/**
+/*
 * \brief Macro to check if std is a QAM standard
 * \retval true std is a QAM standard
 * \retval false std is not a QAM standard
@@ -2308,14 +2308,14 @@ Access macros
                              ((std) == DRX_STANDARD_ITU_C) || \
                              ((std) == DRX_STANDARD_ITU_D))
 
-/**
+/*
 * \brief Macro to check if std is VSB standard
 * \retval true std is VSB standard
 * \retval false std is not VSB standard
 */
 #define DRX_ISVSBSTD(std) ((std) == DRX_STANDARD_8VSB)
 
-/**
+/*
 * \brief Macro to check if std is DVBT standard
 * \retval true std is DVBT standard
 * \retval false std is not DVBT standard
index 499ccff557bff41469c2ccc4b85fdcec8bff1d7b..8cbd8cc21059d8a94112608170e386ebcf7866d5 100644 (file)
@@ -73,7 +73,7 @@ INCLUDE FILES
 
 #define DRX39XX_MAIN_FIRMWARE "dvb-fe-drxj-mc-1.0.8.fw"
 
-/**
+/*
 * \brief Maximum u32 value.
 */
 #ifndef MAX_U32
@@ -100,8 +100,8 @@ INCLUDE FILES
 #ifndef OOB_DRX_DRIVE_STRENGTH
 #define OOB_DRX_DRIVE_STRENGTH 0x02
 #endif
-/**** START DJCOMBO patches to DRXJ registermap constants *********************/
-/**** registermap 200706071303 from drxj **************************************/
+/*** START DJCOMBO patches to DRXJ registermap constants *********************/
+/*** registermap 200706071303 from drxj **************************************/
 #define   ATV_TOP_CR_AMP_TH_FM                                              0x0
 #define   ATV_TOP_CR_AMP_TH_L                                               0xA
 #define   ATV_TOP_CR_AMP_TH_LP                                              0xA
@@ -188,7 +188,7 @@ INCLUDE FILES
 #define     IQM_RC_ADJ_SEL_B_OFF                                            0x0
 #define     IQM_RC_ADJ_SEL_B_QAM                                            0x1
 #define     IQM_RC_ADJ_SEL_B_VSB                                            0x2
-/**** END DJCOMBO patches to DRXJ registermap *********************************/
+/*** END DJCOMBO patches to DRXJ registermap *********************************/
 
 #include "drx_driver_version.h"
 
@@ -208,25 +208,25 @@ DEFINES
 #define DRXJ_WAKE_UP_KEY (demod->my_i2c_dev_addr->i2c_addr)
 #endif
 
-/**
+/*
 * \def DRXJ_DEF_I2C_ADDR
 * \brief Default I2C address of a demodulator instance.
 */
 #define DRXJ_DEF_I2C_ADDR (0x52)
 
-/**
+/*
 * \def DRXJ_DEF_DEMOD_DEV_ID
 * \brief Default device identifier of a demodulator instance.
 */
 #define DRXJ_DEF_DEMOD_DEV_ID      (1)
 
-/**
+/*
 * \def DRXJ_SCAN_TIMEOUT
 * \brief Timeout value for waiting on demod lock during channel scan (millisec).
 */
 #define DRXJ_SCAN_TIMEOUT    1000
 
-/**
+/*
 * \def HI_I2C_DELAY
 * \brief HI timing delay for I2C timing (in nanoseconds)
 *
@@ -234,7 +234,7 @@ DEFINES
 */
 #define HI_I2C_DELAY    42
 
-/**
+/*
 * \def HI_I2C_BRIDGE_DELAY
 * \brief HI timing delay for I2C timing (in nanoseconds)
 *
@@ -242,13 +242,13 @@ DEFINES
 */
 #define HI_I2C_BRIDGE_DELAY   750
 
-/**
+/*
 * \brief Time Window for MER and SER Measurement in Units of Segment duration.
 */
 #define VSB_TOP_MEASUREMENT_PERIOD  64
 #define SYMBOLS_PER_SEGMENT         832
 
-/**
+/*
 * \brief bit rate and segment rate constants used for SER and BER.
 */
 /* values taken from the QAM microcode */
@@ -260,21 +260,21 @@ DEFINES
 #define DRXJ_QAM_SL_SIG_POWER_QAM64       43008
 #define DRXJ_QAM_SL_SIG_POWER_QAM128      20992
 #define DRXJ_QAM_SL_SIG_POWER_QAM256      43520
-/**
+/*
 * \brief Min supported symbolrates.
 */
 #ifndef DRXJ_QAM_SYMBOLRATE_MIN
 #define DRXJ_QAM_SYMBOLRATE_MIN          (520000)
 #endif
 
-/**
+/*
 * \brief Max supported symbolrates.
 */
 #ifndef DRXJ_QAM_SYMBOLRATE_MAX
 #define DRXJ_QAM_SYMBOLRATE_MAX         (7233000)
 #endif
 
-/**
+/*
 * \def DRXJ_QAM_MAX_WAITTIME
 * \brief Maximal wait time for QAM auto constellation in ms
 */
@@ -290,7 +290,7 @@ DEFINES
 #define DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME 200
 #endif
 
-/**
+/*
 * \def SCU status and results
 * \brief SCU
 */
@@ -299,7 +299,7 @@ DEFINES
 #define FEC_RS_MEASUREMENT_PERIOD   12894      /* 1 sec */
 #define FEC_RS_MEASUREMENT_PRESCALE 1  /* n sec */
 
-/**
+/*
 * \def DRX_AUD_MAX_DEVIATION
 * \brief Needed for calculation of prescale feature in AUD
 */
@@ -307,14 +307,14 @@ DEFINES
 #define DRXJ_AUD_MAX_FM_DEVIATION  100 /* kHz */
 #endif
 
-/**
+/*
 * \brief Needed for calculation of NICAM prescale feature in AUD
 */
 #ifndef DRXJ_AUD_MAX_NICAM_PRESCALE
 #define DRXJ_AUD_MAX_NICAM_PRESCALE  (9)       /* dB */
 #endif
 
-/**
+/*
 * \brief Needed for calculation of NICAM prescale feature in AUD
 */
 #ifndef DRXJ_AUD_MAX_WAITTIME
@@ -371,21 +371,21 @@ DEFINES
 /*============================================================================*/
 /*=== GLOBAL VARIABLEs =======================================================*/
 /*============================================================================*/
-/**
+/*
 */
 
-/**
+/*
 * \brief Temporary register definitions.
 *        (register definitions that are not yet available in register master)
 */
 
-/******************************************************************************/
+/*****************************************************************************/
 /* Audio block 0x103 is write only. To avoid shadowing in driver accessing    */
 /* RAM addresses directly. This must be READ ONLY to avoid problems.          */
 /* Writing to the interface addresses is more than only writing the RAM       */
 /* locations                                                                  */
-/******************************************************************************/
-/**
+/*****************************************************************************/
+/*
 * \brief RAM location of MODUS registers
 */
 #define AUD_DEM_RAM_MODUS_HI__A              0x10204A3
@@ -394,13 +394,13 @@ DEFINES
 #define AUD_DEM_RAM_MODUS_LO__A              0x10204A4
 #define AUD_DEM_RAM_MODUS_LO__M              0x0FFF
 
-/**
+/*
 * \brief RAM location of I2S config registers
 */
 #define AUD_DEM_RAM_I2S_CONFIG1__A           0x10204B1
 #define AUD_DEM_RAM_I2S_CONFIG2__A           0x10204B2
 
-/**
+/*
 * \brief RAM location of DCO config registers
 */
 #define AUD_DEM_RAM_DCO_B_HI__A              0x1020461
@@ -408,20 +408,20 @@ DEFINES
 #define AUD_DEM_RAM_DCO_A_HI__A              0x1020463
 #define AUD_DEM_RAM_DCO_A_LO__A              0x1020464
 
-/**
+/*
 * \brief RAM location of Threshold registers
 */
 #define AUD_DEM_RAM_NICAM_THRSHLD__A         0x102045A
 #define AUD_DEM_RAM_A2_THRSHLD__A            0x10204BB
 #define AUD_DEM_RAM_BTSC_THRSHLD__A          0x10204A6
 
-/**
+/*
 * \brief RAM location of Carrier Threshold registers
 */
 #define AUD_DEM_RAM_CM_A_THRSHLD__A          0x10204AF
 #define AUD_DEM_RAM_CM_B_THRSHLD__A          0x10204B0
 
-/**
+/*
 * \brief FM Matrix register fix
 */
 #ifdef AUD_DEM_WR_FM_MATRIX__A
@@ -430,7 +430,7 @@ DEFINES
 #define AUD_DEM_WR_FM_MATRIX__A              0x105006F
 
 /*============================================================================*/
-/**
+/*
 * \brief Defines required for audio
 */
 #define AUD_VOLUME_ZERO_DB                      115
@@ -443,14 +443,14 @@ DEFINES
 #define AUD_I2S_FREQUENCY_MIN                   12000UL
 #define AUD_RDS_ARRAY_SIZE                      18
 
-/**
+/*
 * \brief Needed for calculation of prescale feature in AUD
 */
 #ifndef DRX_AUD_MAX_FM_DEVIATION
 #define DRX_AUD_MAX_FM_DEVIATION  (100)        /* kHz */
 #endif
 
-/**
+/*
 * \brief Needed for calculation of NICAM prescale feature in AUD
 */
 #ifndef DRX_AUD_MAX_NICAM_PRESCALE
@@ -478,7 +478,7 @@ DEFINES
 /*=== REGISTER ACCESS MACROS =================================================*/
 /*============================================================================*/
 
-/**
+/*
 * This macro is used to create byte arrays for block writes.
 * Block writes speed up I2C traffic between host and demod.
 * The macro takes care of the required byte order in a 16-bit word.
@@ -486,7 +486,7 @@ DEFINES
 */
 #define DRXJ_16TO8(x) ((u8) (((u16)x) & 0xFF)), \
                       ((u8)((((u16)x)>>8)&0xFF))
-/**
+/*
 * This macro is used to convert a byte array to a 16-bit register value for block reads.
 * Block reads speed up I2C traffic between host and demod.
 * The macro takes care of the required byte order in a 16-bit word.
@@ -501,7 +501,7 @@ DEFINES
 /*=== HI COMMAND RELATED DEFINES =============================================*/
 /*============================================================================*/
 
-/**
+/*
 * \brief General maximum number of retries for ucode command interfaces
 */
 #define DRXJ_MAX_RETRIES (100)
@@ -807,7 +807,7 @@ static struct drxj_data drxj_data_g = {
         },
 };
 
-/**
+/*
 * \var drxj_default_addr_g
 * \brief Default I2C address and device identifier.
 */
@@ -816,7 +816,7 @@ static struct i2c_device_addr drxj_default_addr_g = {
        DRXJ_DEF_DEMOD_DEV_ID   /* device id */
 };
 
-/**
+/*
 * \var drxj_default_comm_attr_g
 * \brief Default common attributes of a drxj demodulator instance.
 */
@@ -887,7 +887,7 @@ static struct drx_common_attr drxj_default_comm_attr_g = {
        0                       /* mfx */
 };
 
-/**
+/*
 * \var drxj_default_demod_g
 * \brief Default drxj demodulator instance.
 */
@@ -897,7 +897,7 @@ static struct drx_demod_instance drxj_default_demod_g = {
        &drxj_data_g            /* demod device specific attributes */
 };
 
-/**
+/*
 * \brief Default audio data structure for DRXK demodulator instance.
 *
 * This structure is DRXK specific.
@@ -997,7 +997,7 @@ struct drxj_hi_cmd {
 /*=== MICROCODE RELATED STRUCTURES ===========================================*/
 /*============================================================================*/
 
-/**
+/*
  * struct drxu_code_block_hdr - Structure of the microcode block headers
  *
  * @addr:      Destination address of the data in this block
@@ -1086,7 +1086,7 @@ static u32 frac28(u32 N, u32 D)
        return Q1;
 }
 
-/**
+/*
 * \fn u32 log1_times100( u32 x)
 * \brief Compute: 100*log10(x)
 * \param x 32 bits
@@ -1198,7 +1198,7 @@ static u32 log1_times100(u32 x)
 
 }
 
-/**
+/*
 * \fn u32 frac_times1e6( u16 N, u32 D)
 * \brief Compute: (N/D) * 1000000.
 * \param N numerator 16-bits.
@@ -1235,7 +1235,7 @@ static u32 frac_times1e6(u32 N, u32 D)
 /*============================================================================*/
 
 
-/**
+/*
 * \brief Values for NICAM prescaler gain. Computed from dB to integer
 *        and rounded. Formula used: 16*10^(prescaleGain[dB]/20).
 *
@@ -1280,7 +1280,7 @@ static const u16 nicam_presc_table_val[43] = {
 #define DRXJ_DAP_AUDTRIF_TIMEOUT 80    /* millisec */
 /*============================================================================*/
 
-/**
+/*
 * \fn bool is_handled_by_aud_tr_if( u32 addr )
 * \brief Check if this address is handled by the audio token ring interface.
 * \param addr
@@ -1386,7 +1386,7 @@ int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
 
 /*============================================================================*/
 
-/******************************
+/*****************************
 *
 * int drxdap_fasi_read_block (
 *      struct i2c_device_addr *dev_addr,      -- address of I2C device
@@ -1498,7 +1498,7 @@ static int drxdap_fasi_read_block(struct i2c_device_addr *dev_addr,
 }
 
 
-/******************************
+/*****************************
 *
 * int drxdap_fasi_read_reg16 (
 *     struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1531,7 +1531,7 @@ static int drxdap_fasi_read_reg16(struct i2c_device_addr *dev_addr,
        return rc;
 }
 
-/******************************
+/*****************************
 *
 * int drxdap_fasi_read_reg32 (
 *     struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1566,7 +1566,7 @@ static int drxdap_fasi_read_reg32(struct i2c_device_addr *dev_addr,
        return rc;
 }
 
-/******************************
+/*****************************
 *
 * int drxdap_fasi_write_block (
 *      struct i2c_device_addr *dev_addr,    -- address of I2C device
@@ -1705,7 +1705,7 @@ static int drxdap_fasi_write_block(struct i2c_device_addr *dev_addr,
        return first_err;
 }
 
-/******************************
+/*****************************
 *
 * int drxdap_fasi_write_reg16 (
 *     struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1734,7 +1734,7 @@ static int drxdap_fasi_write_reg16(struct i2c_device_addr *dev_addr,
        return drxdap_fasi_write_block(dev_addr, addr, sizeof(data), buf, flags);
 }
 
-/******************************
+/*****************************
 *
 * int drxdap_fasi_read_modify_write_reg16 (
 *      struct i2c_device_addr *dev_addr,   -- address of I2C device
@@ -1778,7 +1778,7 @@ static int drxdap_fasi_read_modify_write_reg16(struct i2c_device_addr *dev_addr,
        return rc;
 }
 
-/******************************
+/*****************************
 *
 * int drxdap_fasi_write_reg32 (
 *     struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1811,7 +1811,7 @@ static int drxdap_fasi_write_reg32(struct i2c_device_addr *dev_addr,
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int drxj_dap_rm_write_reg16short
 * \brief Read modify write 16 bits audio register using short format only.
 * \param dev_addr
@@ -1890,7 +1890,7 @@ static int drxj_dap_read_modify_write_reg16(struct i2c_device_addr *dev_addr,
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int drxj_dap_read_aud_reg16
 * \brief Read 16 bits audio register
 * \param dev_addr
@@ -1997,7 +1997,7 @@ static int drxj_dap_read_reg16(struct i2c_device_addr *dev_addr,
 }
 /*============================================================================*/
 
-/**
+/*
 * \fn int drxj_dap_write_aud_reg16
 * \brief Write 16 bits audio register
 * \param dev_addr
@@ -2086,7 +2086,7 @@ static int drxj_dap_write_reg16(struct i2c_device_addr *dev_addr,
 #define DRXJ_HI_ATOMIC_READ      SIO_HI_RA_RAM_PAR_3_ACP_RW_READ
 #define DRXJ_HI_ATOMIC_WRITE     SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE
 
-/**
+/*
 * \fn int drxj_dap_atomic_read_write_block()
 * \brief Basic access routine for atomic read or write access
 * \param dev_addr  pointer to i2c dev address
@@ -2168,7 +2168,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int drxj_dap_atomic_read_reg32()
 * \brief Atomic read of 32 bits words
 */
@@ -2215,7 +2215,7 @@ int drxj_dap_atomic_read_reg32(struct i2c_device_addr *dev_addr,
 /*============================================================================*/
 /*============================================================================*/
 
-/**
+/*
 * \fn int hi_cfg_command()
 * \brief Configure HI with settings stored in the demod structure.
 * \param demod Demodulator.
@@ -2258,7 +2258,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int hi_command()
 * \brief Send a command to the HI (host interface).
 * \param dev_addr I2C address.
@@ -2369,7 +2369,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int init_hi( const struct drx_demod_instance *demod )
 * \brief Initialise and configure HI.
 * \param demod pointer to demod data.
@@ -2450,7 +2450,7 @@ rw_error:
 /*============================================================================*/
 /*============================================================================*/
 
-/**
+/*
 * \fn int get_device_capabilities()
 * \brief Get and store device capabilities.
 * \param demod  Pointer to demodulator instance.
@@ -2656,7 +2656,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int power_up_device()
 * \brief Power up device.
 * \param demod  Pointer to demodulator instance.
@@ -2710,7 +2710,7 @@ static int power_up_device(struct drx_demod_instance *demod)
 /*----------------------------------------------------------------------------*/
 /* MPEG Output Configuration Functions - begin                                */
 /*----------------------------------------------------------------------------*/
-/**
+/*
 * \fn int ctrl_set_cfg_mpeg_output()
 * \brief Set MPEG output configuration of the device.
 * \param devmod  Pointer to demodulator instance.
@@ -3356,7 +3356,7 @@ rw_error:
 /* miscellaneous configurations - begin                           */
 /*----------------------------------------------------------------------------*/
 
-/**
+/*
 * \fn int set_mpegtei_handling()
 * \brief Activate MPEG TEI handling settings.
 * \param devmod  Pointer to demodulator instance.
@@ -3429,7 +3429,7 @@ rw_error:
 }
 
 /*----------------------------------------------------------------------------*/
-/**
+/*
 * \fn int bit_reverse_mpeg_output()
 * \brief Set MPEG output bit-endian settings.
 * \param devmod  Pointer to demodulator instance.
@@ -3472,7 +3472,7 @@ rw_error:
 }
 
 /*----------------------------------------------------------------------------*/
-/**
+/*
 * \fn int set_mpeg_start_width()
 * \brief Set MPEG start width.
 * \param devmod  Pointer to demodulator instance.
@@ -3522,7 +3522,7 @@ rw_error:
 /*----------------------------------------------------------------------------*/
 /* UIO Configuration Functions - begin                                        */
 /*----------------------------------------------------------------------------*/
-/**
+/*
 * \fn int ctrl_set_uio_cfg()
 * \brief Configure the UIO modus operandi.
 * \param demod Pointer to demodulator instance.
@@ -3659,7 +3659,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int ctrl_uio_write()
 * \brief Write to a UIO.
 * \param demod Pointer to demodulator instance.
@@ -3868,7 +3868,7 @@ rw_error:
 /*----------------------------------------------------------------------------*/
 /* I2C Bridge Functions - begin                                               */
 /*----------------------------------------------------------------------------*/
-/**
+/*
 * \fn int ctrl_i2c_bridge()
 * \brief Open or close the I2C switch to tuner.
 * \param demod Pointer to demodulator instance.
@@ -3903,7 +3903,7 @@ ctrl_i2c_bridge(struct drx_demod_instance *demod, bool *bridge_closed)
 /*----------------------------------------------------------------------------*/
 /* Smart antenna Functions - begin                                            */
 /*----------------------------------------------------------------------------*/
-/**
+/*
 * \fn int smart_ant_init()
 * \brief Initialize Smart Antenna.
 * \param pointer to struct drx_demod_instance.
@@ -4116,7 +4116,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int DRXJ_DAP_SCUAtomicReadWriteBlock()
 * \brief Basic access routine for SCU atomic read or write access
 * \param dev_addr  pointer to i2c dev address
@@ -4188,7 +4188,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int DRXJ_DAP_AtomicReadReg16()
 * \brief Atomic read of 16 bits words
 */
@@ -4216,7 +4216,7 @@ int drxj_dap_scu_atomic_read_reg16(struct i2c_device_addr *dev_addr,
 }
 
 /*============================================================================*/
-/**
+/*
 * \fn int drxj_dap_scu_atomic_write_reg16()
 * \brief Atomic write of 16 bits words
 */
@@ -4237,7 +4237,7 @@ int drxj_dap_scu_atomic_write_reg16(struct i2c_device_addr *dev_addr,
 }
 
 /* -------------------------------------------------------------------------- */
-/**
+/*
 * \brief Measure result of ADC synchronisation
 * \param demod demod instance
 * \param count (returned) count
@@ -4297,7 +4297,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \brief Synchronize analog and digital clock domains
 * \param demod demod instance
 * \return int.
@@ -4365,7 +4365,7 @@ rw_error:
 /*==                8VSB & QAM COMMON DATAPATH FUNCTIONS                    ==*/
 /*============================================================================*/
 /*============================================================================*/
-/**
+/*
 * \fn int init_agc ()
 * \brief Initialize AGC for all standards.
 * \param demod instance of demodulator.
@@ -4741,7 +4741,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int set_frequency ()
 * \brief Set frequency shift.
 * \param demod instance of demodulator.
@@ -4839,7 +4839,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int get_acc_pkt_err()
 * \brief Retrieve the accumulated packet error count.
 * \param demod Pointer to demod instance
@@ -4891,7 +4891,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int set_agc_rf ()
 * \brief Configure RF AGC
 * \param demod instance of demodulator.
@@ -5105,7 +5105,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int set_agc_if ()
 * \brief Configure IF AGC
 * \param demod instance of demodulator.
@@ -5334,7 +5334,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int set_iqm_af ()
 * \brief Configure IQM AF registers
 * \param demod instance of demodulator.
@@ -5380,7 +5380,7 @@ rw_error:
 /*============================================================================*/
 /*============================================================================*/
 
-/**
+/*
 * \fn int power_down_vsb ()
 * \brief Power down VSB related blocks.
 * \param demod instance of demodulator.
@@ -5478,7 +5478,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int set_vsb_leak_n_gain ()
 * \brief Set ATSC demod.
 * \param demod instance of demodulator.
@@ -5694,7 +5694,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int set_vsb()
 * \brief Set 8VSB demod.
 * \param demod instance of demodulator.
@@ -6200,7 +6200,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn static short get_vsb_post_rs_pck_err(struct i2c_device_addr *dev_addr, u16 *PckErrs)
 * \brief Get the values of packet error in 8VSB mode
 * \return Error code
@@ -6239,7 +6239,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn static short GetVSBBer(struct i2c_device_addr *dev_addr, u32 *ber)
 * \brief Get the values of ber in VSB mode
 * \return Error code
@@ -6284,7 +6284,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn static short get_vs_bpre_viterbi_ber(struct i2c_device_addr *dev_addr, u32 *ber)
 * \brief Get the values of ber in VSB mode
 * \return Error code
@@ -6306,7 +6306,7 @@ static int get_vs_bpre_viterbi_ber(struct i2c_device_addr *dev_addr,
        return 0;
 }
 
-/**
+/*
 * \fn static int get_vsbmer(struct i2c_device_addr *dev_addr, u16 *mer)
 * \brief Get the values of MER
 * \return Error code
@@ -6340,7 +6340,7 @@ rw_error:
 /*============================================================================*/
 /*============================================================================*/
 
-/**
+/*
 * \fn int power_down_qam ()
 * \brief Power down QAM related blocks.
 * \param demod instance of demodulator.
@@ -6444,7 +6444,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int set_qam_measurement ()
 * \brief Setup of the QAM Measurement intervals for signal quality
 * \param demod instance of demod.
@@ -6656,7 +6656,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int set_qam16 ()
 * \brief QAM16 specific setup
 * \param demod instance of demod.
@@ -6891,7 +6891,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int set_qam32 ()
 * \brief QAM32 specific setup
 * \param demod instance of demod.
@@ -7126,7 +7126,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int set_qam64 ()
 * \brief QAM64 specific setup
 * \param demod instance of demod.
@@ -7362,7 +7362,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int set_qam128 ()
 * \brief QAM128 specific setup
 * \param demod: instance of demod.
@@ -7597,7 +7597,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int set_qam256 ()
 * \brief QAM256 specific setup
 * \param demod: instance of demod.
@@ -7835,7 +7835,7 @@ rw_error:
 #define QAM_SET_OP_CONSTELLATION 0x2
 #define QAM_SET_OP_SPECTRUM 0X4
 
-/**
+/*
 * \fn int set_qam ()
 * \brief Set QAM demod.
 * \param demod:   instance of demod.
@@ -8845,7 +8845,7 @@ rw_error:
 #define  DEMOD_LOCKED   0x1
 #define  SYNC_FLIPPED   0x2
 #define  SPEC_MIRRORED  0x4
-/**
+/*
 * \fn int qam64auto ()
 * \brief Automatically do sync pattern switching and mirroring.
 * \param demod:   instance of demod.
@@ -8993,7 +8993,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int qam256auto ()
 * \brief Automatically do sync pattern switching and mirroring.
 * \param demod:   instance of demod.
@@ -9077,7 +9077,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int set_qam_channel ()
 * \brief Set QAM channel according to the requested constellation.
 * \param demod:   instance of demod.
@@ -9284,7 +9284,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn static short get_qamrs_err_count(struct i2c_device_addr *dev_addr)
 * \brief Get RS error count in QAM mode (used for post RS BER calculation)
 * \return Error code
@@ -9355,7 +9355,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
  * \fn int get_sig_strength()
  * \brief Retrieve signal strength for VSB and QAM.
  * \param demod Pointer to demod instance
@@ -9435,7 +9435,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int ctrl_get_qam_sig_quality()
 * \brief Retrieve QAM signal quality from device.
 * \param devmod Pointer to demodulator instance.
@@ -9721,7 +9721,7 @@ rw_error:
 */
 /* -------------------------------------------------------------------------- */
 
-/**
+/*
 * \fn int power_down_atv ()
 * \brief Power down ATV.
 * \param demod instance of demodulator
@@ -9822,7 +9822,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Power up AUD.
 * \param demod instance of demodulator
 * \return int.
@@ -9850,7 +9850,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int set_orx_nsu_aox()
 * \brief Configure OrxNsuAox for OOB
 * \param demod instance of demodulator.
@@ -9884,7 +9884,7 @@ rw_error:
        return rc;
 }
 
-/**
+/*
 * \fn int ctrl_set_oob()
 * \brief Set OOB channel to be used.
 * \param demod instance of demodulator
@@ -9986,9 +9986,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
                    20;
        }
 
-   /*********/
+   /********/
        /* Stop  */
-   /*********/
+   /********/
        rc = drxj_dap_write_reg16(dev_addr, ORX_COMM_EXEC__A, ORX_COMM_EXEC_STOP, 0);
        if (rc != 0) {
                pr_err("error %d\n", rc);
@@ -10004,9 +10004,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
                pr_err("error %d\n", rc);
                goto rw_error;
        }
-   /*********/
+   /********/
        /* Reset */
-   /*********/
+   /********/
        scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
            | SCU_RAM_COMMAND_CMD_DEMOD_RESET;
        scu_cmd.parameter_len = 0;
@@ -10017,9 +10017,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
                pr_err("error %d\n", rc);
                goto rw_error;
        }
-   /***********/
+   /**********/
        /* SET_ENV */
-   /***********/
+   /**********/
        /* set frequency, spectrum inversion and data rate */
        scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
            | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV;
@@ -10376,9 +10376,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
                pr_err("error %d\n", rc);
                goto rw_error;
        }
-       /*********/
+       /********/
        /* Start */
-       /*********/
+       /********/
        scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
            | SCU_RAM_COMMAND_CMD_DEMOD_START;
        scu_cmd.parameter_len = 0;
@@ -10419,7 +10419,7 @@ rw_error:
 /*=============================================================================
   ===== ctrl_set_channel() ==========================================================
   ===========================================================================*/
-/**
+/*
 * \fn int ctrl_set_channel()
 * \brief Select a new transmission channel.
 * \param demod instance of demod.
@@ -10652,7 +10652,7 @@ rw_error:
   ===== SigQuality() ==========================================================
   ===========================================================================*/
 
-/**
+/*
 * \fn int ctrl_sig_quality()
 * \brief Retrieve signal quality from the device.
 * \param devmod Pointer to demodulator instance.
@@ -10768,7 +10768,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int ctrl_lock_status()
 * \brief Retrieve lock status.
 * \param dev_addr Pointer to demodulator device address.
@@ -10856,7 +10856,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int ctrl_set_standard()
 * \brief Set modulation standard to be used.
 * \param standard Modulation standard.
@@ -11012,7 +11012,7 @@ static void drxj_reset_mode(struct drxj_data *ext_attr)
        ext_attr->vsb_pre_saw_cfg.use_pre_saw = true;
 }
 
-/**
+/*
 * \fn int ctrl_power_mode()
 * \brief Set the power mode of the device to the specified power mode
 * \param demod Pointer to demodulator instance.
@@ -11171,7 +11171,7 @@ rw_error:
 /*== CTRL Set/Get Config related functions ===================================*/
 /*============================================================================*/
 
-/**
+/*
 * \fn int ctrl_set_cfg_pre_saw()
 * \brief Set Pre-saw reference.
 * \param demod demod instance
@@ -11234,7 +11234,7 @@ rw_error:
 
 /*============================================================================*/
 
-/**
+/*
 * \fn int ctrl_set_cfg_afe_gain()
 * \brief Set AFE Gain.
 * \param demod demod instance
@@ -11324,7 +11324,7 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
                       enum drxu_code_action action);
 static int drxj_set_lna_state(struct drx_demod_instance *demod, bool state);
 
-/**
+/*
 * \fn drxj_open()
 * \brief Open the demod instance, configure device, configure drxdriver
 * \return Status_t Return status.
@@ -11543,7 +11543,7 @@ rw_error:
 }
 
 /*============================================================================*/
-/**
+/*
 * \fn drxj_close()
 * \brief Close the demod instance, power down the device
 * \return Status_t Return status.
@@ -11594,7 +11594,7 @@ rw_error:
  * Microcode related functions
  */
 
-/**
+/*
  * drx_u_code_compute_crc      - Compute CRC of block of microcode data.
  * @block_data: Pointer to microcode data.
  * @nr_words:   Size of microcode block (number of 16 bits words).
@@ -11622,7 +11622,7 @@ static u16 drx_u_code_compute_crc(u8 *block_data, u16 nr_words)
        return (u16)(crc_word >> 16);
 }
 
-/**
+/*
  * drx_check_firmware - checks if the loaded firmware is valid
  *
  * @demod:     demod structure
@@ -11708,7 +11708,7 @@ eof:
        return -EINVAL;
 }
 
-/**
+/*
  * drx_ctrl_u_code - Handle microcode upload or verify.
  * @dev_addr: Address of device.
  * @mc_info:  Pointer to information about microcode data.
index 6c5b8f78f9f63bf7d4d938c49c300be7ab14ca0e..d3ee1c23bb2f94619b24e9f58f2bd0dd1a92e0ab 100644 (file)
@@ -69,15 +69,15 @@ TYPEDEFS
 
        struct drxjscu_cmd {
                u16 command;
-                       /**< Command number */
+                       /*< Command number */
                u16 parameter_len;
-                       /**< Data length in byte */
+                       /*< Data length in byte */
                u16 result_len;
-                       /**< result length in byte */
+                       /*< result length in byte */
                u16 *parameter;
-                       /**< General purpous param */
+                       /*< General purpose param */
                u16 *result;
-                       /**< General purpous param */};
+                       /*< General purpose param */};
 
 /*============================================================================*/
 /*============================================================================*/
@@ -130,7 +130,7 @@ TYPEDEFS
 
                DRXJ_CFG_MAX    /* dummy, never to be used */};
 
-/**
+/*
 * /struct enum drxj_cfg_smart_ant_io * smart antenna i/o.
 */
 enum drxj_cfg_smart_ant_io {
@@ -138,7 +138,7 @@ enum drxj_cfg_smart_ant_io {
        DRXJ_SMT_ANT_INPUT
 };
 
-/**
+/*
 * /struct struct drxj_cfg_smart_ant * Set smart antenna.
 */
        struct drxj_cfg_smart_ant {
@@ -146,7 +146,7 @@ enum drxj_cfg_smart_ant_io {
                u16 ctrl_data;
        };
 
-/**
+/*
 * /struct DRXJAGCSTATUS_t
 * AGC status information from the DRXJ-IQM-AF.
 */
@@ -158,7 +158,7 @@ struct drxj_agc_status {
 
 /* DRXJ_CFG_AGC_RF, DRXJ_CFG_AGC_IF */
 
-/**
+/*
 * /struct enum drxj_agc_ctrl_mode * Available AGCs modes in the DRXJ.
 */
        enum drxj_agc_ctrl_mode {
@@ -166,7 +166,7 @@ struct drxj_agc_status {
                DRX_AGC_CTRL_USER,
                DRX_AGC_CTRL_OFF};
 
-/**
+/*
 * /struct struct drxj_cfg_agc * Generic interface for all AGCs present on the DRXJ.
 */
        struct drxj_cfg_agc {
@@ -182,7 +182,7 @@ struct drxj_agc_status {
 
 /* DRXJ_CFG_PRE_SAW */
 
-/**
+/*
 * /struct struct drxj_cfg_pre_saw * Interface to configure pre SAW sense.
 */
        struct drxj_cfg_pre_saw {
@@ -192,14 +192,14 @@ struct drxj_agc_status {
 
 /* DRXJ_CFG_AFE_GAIN */
 
-/**
+/*
 * /struct struct drxj_cfg_afe_gain * Interface to configure gain of AFE (LNA + PGA).
 */
        struct drxj_cfg_afe_gain {
                enum drx_standard standard;     /* standard to which these settings apply */
                u16 gain;       /* gain in 0.1 dB steps, DRXJ range 140 .. 335 */};
 
-/**
+/*
 * /struct drxjrs_errors
 * Available failure information in DRXJ_FEC_RS.
 *
@@ -208,25 +208,25 @@ struct drxj_agc_status {
 */
        struct drxjrs_errors {
                u16 nr_bit_errors;
-                               /**< no of pre RS bit errors          */
+                               /*< no of pre RS bit errors          */
                u16 nr_symbol_errors;
-                               /**< no of pre RS symbol errors       */
+                               /*< no of pre RS symbol errors       */
                u16 nr_packet_errors;
-                               /**< no of pre RS packet errors       */
+                               /*< no of pre RS packet errors       */
                u16 nr_failures;
-                               /**< no of post RS failures to decode */
+                               /*< no of post RS failures to decode */
                u16 nr_snc_par_fail_count;
-                               /**< no of post RS bit erros          */
+                               /*< no of post RS bit errors         */
        };
 
-/**
+/*
 * /struct struct drxj_cfg_vsb_misc * symbol error rate
 */
        struct drxj_cfg_vsb_misc {
                u32 symb_error;
-                             /**< symbol error rate sps */};
+                             /*< symbol error rate sps */};
 
-/**
+/*
 * /enum enum drxj_mpeg_output_clock_rate * Mpeg output clock rate.
 *
 */
@@ -234,7 +234,7 @@ struct drxj_agc_status {
                DRXJ_MPEG_START_WIDTH_1CLKCYC,
                DRXJ_MPEG_START_WIDTH_8CLKCYC};
 
-/**
+/*
 * /enum enum drxj_mpeg_output_clock_rate * Mpeg output clock rate.
 *
 */
@@ -247,20 +247,20 @@ struct drxj_agc_status {
                DRXJ_MPEGOUTPUT_CLOCK_RATE_25313K,
                DRXJ_MPEGOUTPUT_CLOCK_RATE_21696K};
 
-/**
+/*
 * /struct DRXJCfgMisc_t
 * Change TEI bit of MPEG output
 * reverse MPEG output bit order
 * set MPEG output clock rate
 */
        struct drxj_cfg_mpeg_output_misc {
-               bool disable_tei_handling;            /**< if true pass (not change) TEI bit */
-               bool bit_reverse_mpeg_outout;         /**< if true, parallel: msb on MD0; serial: lsb out first */
+               bool disable_tei_handling;            /*< if true pass (not change) TEI bit */
+               bool bit_reverse_mpeg_outout;         /*< if true, parallel: msb on MD0; serial: lsb out first */
                enum drxj_mpeg_output_clock_rate mpeg_output_clock_rate;
-                                                     /**< set MPEG output clock rate that overwirtes the derived one from symbol rate */
-               enum drxj_mpeg_start_width mpeg_start_width;  /**< set MPEG output start width */};
+                                                     /*< set MPEG output clock rate that overwrites the derived one from symbol rate */
+               enum drxj_mpeg_start_width mpeg_start_width;  /*< set MPEG output start width */};
 
-/**
+/*
 * /enum enum drxj_xtal_freq * Supported external crystal reference frequency.
 */
        enum drxj_xtal_freq {
@@ -269,21 +269,21 @@ struct drxj_agc_status {
                DRXJ_XTAL_FREQ_20P25MHZ,
                DRXJ_XTAL_FREQ_4MHZ};
 
-/**
+/*
 * /enum enum drxj_xtal_freq * Supported external crystal reference frequency.
 */
        enum drxji2c_speed {
                DRXJ_I2C_SPEED_400KBPS,
                DRXJ_I2C_SPEED_100KBPS};
 
-/**
+/*
 * /struct struct drxj_cfg_hw_cfg * Get hw configuration, such as crystal reference frequency, I2C speed, etc...
 */
        struct drxj_cfg_hw_cfg {
                enum drxj_xtal_freq xtal_freq;
-                                  /**< crystal reference frequency */
+                                  /*< crystal reference frequency */
                enum drxji2c_speed i2c_speed;
-                                  /**< 100 or 400 kbps */};
+                                  /*< 100 or 400 kbps */};
 
 /*
  *  DRXJ_CFG_ATV_MISC
@@ -352,7 +352,7 @@ struct drxj_cfg_oob_misc {
  *  DRXJ_CFG_ATV_OUTPUT
  */
 
-/**
+/*
 * /enum DRXJAttenuation_t
 * Attenuation setting for SIF AGC.
 *
@@ -363,7 +363,7 @@ struct drxj_cfg_oob_misc {
                DRXJ_SIF_ATTENUATION_6DB,
                DRXJ_SIF_ATTENUATION_9DB};
 
-/**
+/*
 * /struct struct drxj_cfg_atv_output * SIF attenuation setting.
 *
 */
@@ -398,7 +398,7 @@ struct drxj_cfg_atv_output {
 /*============================================================================*/
 
 /*========================================*/
-/**
+/*
 * /struct struct drxj_data * DRXJ specific attributes.
 *
 * Global data container for DRXJ specific data.
@@ -406,93 +406,93 @@ struct drxj_cfg_atv_output {
 */
        struct drxj_data {
                /* device capabilties (determined during drx_open()) */
-               bool has_lna;             /**< true if LNA (aka PGA) present */
-               bool has_oob;             /**< true if OOB supported */
-               bool has_ntsc;            /**< true if NTSC supported */
-               bool has_btsc;            /**< true if BTSC supported */
-               bool has_smatx;   /**< true if mat_tx is available */
-               bool has_smarx;   /**< true if mat_rx is available */
-               bool has_gpio;            /**< true if GPIO is available */
-               bool has_irqn;            /**< true if IRQN is available */
+               bool has_lna;             /*< true if LNA (aka PGA) present */
+               bool has_oob;             /*< true if OOB supported */
+               bool has_ntsc;            /*< true if NTSC supported */
+               bool has_btsc;            /*< true if BTSC supported */
+               bool has_smatx;   /*< true if mat_tx is available */
+               bool has_smarx;   /*< true if mat_rx is available */
+               bool has_gpio;            /*< true if GPIO is available */
+               bool has_irqn;            /*< true if IRQN is available */
                /* A1/A2/A... */
-               u8 mfx;           /**< metal fix */
+               u8 mfx;           /*< metal fix */
 
                /* tuner settings */
-               bool mirror_freq_spect_oob;/**< tuner inversion (true = tuner mirrors the signal */
+               bool mirror_freq_spect_oob;/*< tuner inversion (true = tuner mirrors the signal) */
 
                /* standard/channel settings */
-               enum drx_standard standard;       /**< current standard information                     */
+               enum drx_standard standard;       /*< current standard information                     */
                enum drx_modulation constellation;
-                                         /**< current constellation                            */
-               s32 frequency; /**< center signal frequency in KHz                   */
+                                         /*< current constellation                            */
+               s32 frequency; /*< center signal frequency in KHz                   */
                enum drx_bandwidth curr_bandwidth;
-                                         /**< current channel bandwidth                        */
-               enum drx_mirror mirror;   /**< current channel mirror                           */
+                                         /*< current channel bandwidth                        */
+               enum drx_mirror mirror;   /*< current channel mirror                           */
 
                /* signal quality information */
-               u32 fec_bits_desired;     /**< BER accounting period                            */
-               u16 fec_vd_plen;          /**< no of trellis symbols: VD SER measurement period */
-               u16 qam_vd_prescale;      /**< Viterbi Measurement Prescale                     */
-               u16 qam_vd_period;        /**< Viterbi Measurement period                       */
-               u16 fec_rs_plen;          /**< defines RS BER measurement period                */
-               u16 fec_rs_prescale;      /**< ReedSolomon Measurement Prescale                 */
-               u16 fec_rs_period;        /**< ReedSolomon Measurement period                   */
-               bool reset_pkt_err_acc;   /**< Set a flag to reset accumulated packet error     */
-               u16 pkt_err_acc_start;    /**< Set a flag to reset accumulated packet error     */
+               u32 fec_bits_desired;     /*< BER accounting period                            */
+               u16 fec_vd_plen;          /*< no of trellis symbols: VD SER measurement period */
+               u16 qam_vd_prescale;      /*< Viterbi Measurement Prescale                     */
+               u16 qam_vd_period;        /*< Viterbi Measurement period                       */
+               u16 fec_rs_plen;          /*< defines RS BER measurement period                */
+               u16 fec_rs_prescale;      /*< ReedSolomon Measurement Prescale                 */
+               u16 fec_rs_period;        /*< ReedSolomon Measurement period                   */
+               bool reset_pkt_err_acc;   /*< Set a flag to reset accumulated packet error     */
+               u16 pkt_err_acc_start;    /*< Set a flag to reset accumulated packet error     */
 
                /* HI configuration */
-               u16 hi_cfg_timing_div;    /**< HI Configure() parameter 2                       */
-               u16 hi_cfg_bridge_delay;          /**< HI Configure() parameter 3                       */
-               u16 hi_cfg_wake_up_key;   /**< HI Configure() parameter 4                       */
-               u16 hi_cfg_ctrl;          /**< HI Configure() parameter 5                       */
-               u16 hi_cfg_transmit;      /**< HI Configure() parameter 6                       */
+               u16 hi_cfg_timing_div;    /*< HI Configure() parameter 2                       */
+               u16 hi_cfg_bridge_delay;          /*< HI Configure() parameter 3                       */
+               u16 hi_cfg_wake_up_key;   /*< HI Configure() parameter 4                       */
+               u16 hi_cfg_ctrl;          /*< HI Configure() parameter 5                       */
+               u16 hi_cfg_transmit;      /*< HI Configure() parameter 6                       */
 
                /* UIO configuration */
-               enum drxuio_mode uio_sma_rx_mode;/**< current mode of SmaRx pin                        */
-               enum drxuio_mode uio_sma_tx_mode;/**< current mode of SmaTx pin                        */
-               enum drxuio_mode uio_gpio_mode; /**< current mode of ASEL pin                         */
-               enum drxuio_mode uio_irqn_mode; /**< current mode of IRQN pin                         */
+               enum drxuio_mode uio_sma_rx_mode;/*< current mode of SmaRx pin                        */
+               enum drxuio_mode uio_sma_tx_mode;/*< current mode of SmaTx pin                        */
+               enum drxuio_mode uio_gpio_mode; /*< current mode of ASEL pin                         */
+               enum drxuio_mode uio_irqn_mode; /*< current mode of IRQN pin                         */
 
                /* IQM fs frequency shift and inversion */
-               u32 iqm_fs_rate_ofs;       /**< frequency shifter setting after setchannel      */
-               bool pos_image;    /**< Ture: positive image                            */
+               u32 iqm_fs_rate_ofs;       /*< frequency shifter setting after setchannel      */
+               bool pos_image;    /*< True: positive image                            */
                /* IQM RC frequency shift */
-               u32 iqm_rc_rate_ofs;       /**< frequency shifter setting after setchannel      */
+               u32 iqm_rc_rate_ofs;       /*< frequency shifter setting after setchannel      */
 
                /* ATV configuration */
-               u32 atv_cfg_changed_flags; /**< flag: flags cfg changes */
-               s16 atv_top_equ0[DRXJ_COEF_IDX_MAX];         /**< shadow of ATV_TOP_EQU0__A */
-               s16 atv_top_equ1[DRXJ_COEF_IDX_MAX];         /**< shadow of ATV_TOP_EQU1__A */
-               s16 atv_top_equ2[DRXJ_COEF_IDX_MAX];         /**< shadow of ATV_TOP_EQU2__A */
-               s16 atv_top_equ3[DRXJ_COEF_IDX_MAX];         /**< shadow of ATV_TOP_EQU3__A */
-               bool phase_correction_bypass;/**< flag: true=bypass */
-               s16 atv_top_vid_peak;     /**< shadow of ATV_TOP_VID_PEAK__A */
-               u16 atv_top_noise_th;     /**< shadow of ATV_TOP_NOISE_TH__A */
-               bool enable_cvbs_output;  /**< flag CVBS ouput enable */
-               bool enable_sif_output;   /**< flag SIF ouput enable */
+               u32 atv_cfg_changed_flags; /*< flag: flags cfg changes */
+               s16 atv_top_equ0[DRXJ_COEF_IDX_MAX];         /*< shadow of ATV_TOP_EQU0__A */
+               s16 atv_top_equ1[DRXJ_COEF_IDX_MAX];         /*< shadow of ATV_TOP_EQU1__A */
+               s16 atv_top_equ2[DRXJ_COEF_IDX_MAX];         /*< shadow of ATV_TOP_EQU2__A */
+               s16 atv_top_equ3[DRXJ_COEF_IDX_MAX];         /*< shadow of ATV_TOP_EQU3__A */
+               bool phase_correction_bypass;/*< flag: true=bypass */
+               s16 atv_top_vid_peak;     /*< shadow of ATV_TOP_VID_PEAK__A */
+               u16 atv_top_noise_th;     /*< shadow of ATV_TOP_NOISE_TH__A */
+               bool enable_cvbs_output;  /*< flag CVBS output enable */
+               bool enable_sif_output;   /*< flag SIF output enable */
                 enum drxjsif_attenuation sif_attenuation;
-                                         /**< current SIF att setting */
+                                         /*< current SIF att setting */
                /* Agc configuration for QAM and VSB */
-               struct drxj_cfg_agc qam_rf_agc_cfg; /**< qam RF AGC config */
-               struct drxj_cfg_agc qam_if_agc_cfg; /**< qam IF AGC config */
-               struct drxj_cfg_agc vsb_rf_agc_cfg; /**< vsb RF AGC config */
-               struct drxj_cfg_agc vsb_if_agc_cfg; /**< vsb IF AGC config */
+               struct drxj_cfg_agc qam_rf_agc_cfg; /*< qam RF AGC config */
+               struct drxj_cfg_agc qam_if_agc_cfg; /*< qam IF AGC config */
+               struct drxj_cfg_agc vsb_rf_agc_cfg; /*< vsb RF AGC config */
+               struct drxj_cfg_agc vsb_if_agc_cfg; /*< vsb IF AGC config */
 
                /* PGA gain configuration for QAM and VSB */
-               u16 qam_pga_cfg;          /**< qam PGA config */
-               u16 vsb_pga_cfg;          /**< vsb PGA config */
+               u16 qam_pga_cfg;          /*< qam PGA config */
+               u16 vsb_pga_cfg;          /*< vsb PGA config */
 
                /* Pre SAW configuration for QAM and VSB */
                struct drxj_cfg_pre_saw qam_pre_saw_cfg;
-                                         /**< qam pre SAW config */
+                                         /*< qam pre SAW config */
                struct drxj_cfg_pre_saw vsb_pre_saw_cfg;
-                                         /**< qam pre SAW config */
+                                         /*< vsb pre SAW config */
 
                /* Version information */
-               char v_text[2][12];       /**< allocated text versions */
-               struct drx_version v_version[2]; /**< allocated versions structs */
+               char v_text[2][12];       /*< allocated text versions */
+               struct drx_version v_version[2]; /*< allocated versions structs */
                struct drx_version_list v_list_elements[2];
-                                         /**< allocated version list */
+                                         /*< allocated version list */
 
                /* smart antenna configuration */
                bool smart_ant_inverted;
@@ -502,25 +502,25 @@ struct drxj_cfg_atv_output {
                bool oob_power_on;
 
                /* MPEG static bitrate setting */
-               u32 mpeg_ts_static_bitrate;  /**< bitrate static MPEG output */
-               bool disable_te_ihandling;  /**< MPEG TS TEI handling */
-               bool bit_reverse_mpeg_outout;/**< MPEG output bit order */
+               u32 mpeg_ts_static_bitrate;  /*< bitrate static MPEG output */
+               bool disable_te_ihandling;  /*< MPEG TS TEI handling */
+               bool bit_reverse_mpeg_outout;/*< MPEG output bit order */
                 enum drxj_mpeg_output_clock_rate mpeg_output_clock_rate;
-                                           /**< MPEG output clock rate */
+                                           /*< MPEG output clock rate */
                 enum drxj_mpeg_start_width mpeg_start_width;
-                                           /**< MPEG Start width */
+                                           /*< MPEG Start width */
 
                /* Pre SAW & Agc configuration for ATV */
                struct drxj_cfg_pre_saw atv_pre_saw_cfg;
-                                         /**< atv pre SAW config */
-               struct drxj_cfg_agc atv_rf_agc_cfg; /**< atv RF AGC config */
-               struct drxj_cfg_agc atv_if_agc_cfg; /**< atv IF AGC config */
-               u16 atv_pga_cfg;          /**< atv pga config    */
+                                         /*< atv pre SAW config */
+               struct drxj_cfg_agc atv_rf_agc_cfg; /*< atv RF AGC config */
+               struct drxj_cfg_agc atv_if_agc_cfg; /*< atv IF AGC config */
+               u16 atv_pga_cfg;          /*< atv pga config    */
 
                u32 curr_symbol_rate;
 
                /* pin-safe mode */
-               bool pdr_safe_mode;         /**< PDR safe mode activated      */
+               bool pdr_safe_mode;         /*< PDR safe mode activated      */
                u16 pdr_safe_restore_val_gpio;
                u16 pdr_safe_restore_val_v_sync;
                u16 pdr_safe_restore_val_sma_rx;
@@ -531,12 +531,12 @@ struct drxj_cfg_atv_output {
                enum drxj_cfg_oob_lo_power oob_lo_pow;
 
                struct drx_aud_data aud_data;
-                                   /**< audio storage                  */};
+                                   /*< audio storage                  */};
 
 /*-------------------------------------------------------------------------
 Access MACROS
 -------------------------------------------------------------------------*/
-/**
+/*
 * \brief Compilable references to attributes
 * \param d pointer to demod instance
 *
@@ -554,7 +554,7 @@ Access MACROS
 DEFINES
 -------------------------------------------------------------------------*/
 
-/**
+/*
 * \def DRXJ_NTSC_CARRIER_FREQ_OFFSET
 * \brief Offset from picture carrier to centre frequency in kHz, in RF domain
 *
@@ -569,7 +569,7 @@ DEFINES
 */
 #define DRXJ_NTSC_CARRIER_FREQ_OFFSET           ((s32)(1750))
 
-/**
+/*
 * \def DRXJ_PAL_SECAM_BG_CARRIER_FREQ_OFFSET
 * \brief Offset from picture carrier to centre frequency in kHz, in RF domain
 *
@@ -585,7 +585,7 @@ DEFINES
 */
 #define DRXJ_PAL_SECAM_BG_CARRIER_FREQ_OFFSET   ((s32)(2375))
 
-/**
+/*
 * \def DRXJ_PAL_SECAM_DKIL_CARRIER_FREQ_OFFSET
 * \brief Offset from picture carrier to centre frequency in kHz, in RF domain
 *
@@ -601,7 +601,7 @@ DEFINES
 */
 #define DRXJ_PAL_SECAM_DKIL_CARRIER_FREQ_OFFSET ((s32)(2775))
 
-/**
+/*
 * \def DRXJ_PAL_SECAM_LP_CARRIER_FREQ_OFFSET
 * \brief Offset from picture carrier to centre frequency in kHz, in RF domain
 *
@@ -616,7 +616,7 @@ DEFINES
 */
 #define DRXJ_PAL_SECAM_LP_CARRIER_FREQ_OFFSET   ((s32)(-3255))
 
-/**
+/*
 * \def DRXJ_FM_CARRIER_FREQ_OFFSET
 * \brief Offset from sound carrier to centre frequency in kHz, in RF domain
 *
index eb9bdc9f59c4661bef12b63d8d5f8ef41ccb5b14..b16fedbb53a3a23b80f643b737418e182dc7c84f 100644 (file)
  * @antenna_dvbt:      GPIO bit for changing antenna to DVB-C. A value of 1
  *                     means that 1=DVBC, 0 = DVBT. Zero means the opposite.
  * @mpeg_out_clk_strength: DRXK Mpeg output clock drive strength.
+ * @chunk_size:                maximum size for I2C messages
  * @microcode_name:    Name of the firmware file with the microcode
  * @qam_demod_parameter_count: The number of parameters used for the command
  *                             to set the demodulator parameters. All
  *                             firmwares are using the 2-parameter command.
- *                             An exception is the "drxk_a3.mc" firmware,
+ *                             An exception is the ``drxk_a3.mc`` firmware,
  *                             which uses the 4-parameter command.
  *                             A value of 0 (default) or lower indicates that
  *                             the correct number of parameters will be
  *                             automatically detected.
  *
- * On the *_gpio vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is
+ * On the ``*_gpio`` vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is
  * UIO-3.
  */
 struct drxk_config {
@@ -52,6 +53,14 @@ struct drxk_config {
 };
 
 #if IS_REACHABLE(CONFIG_DVB_DRXK)
+/**
+ * Attach a drxk demod
+ *
+ * @config: pointer to &struct drxk_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *drxk_attach(const struct drxk_config *config,
                                        struct i2c_adapter *i2c);
 #else
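
For illustration, a minimal sketch of how a bridge driver might call the attach helper documented above; the wrapper function and adapter pointer are assumptions, only the firmware name comes from the kernel-doc, and dvb_attach() is the usual module-refcounting wrapper from dvb_frontend.h:

	#include "dvb_frontend.h"
	#include "drxk.h"

	static int example_attach_drxk(struct i2c_adapter *i2c)
	{
		static const struct drxk_config cfg = {
			.microcode_name = "drxk_a3.mc",	/* firmware named in the kernel-doc above */
			/* remaining drxk_config fields (I2C address, etc.) omitted for brevity */
		};
		struct dvb_frontend *fe;

		fe = dvb_attach(drxk_attach, &cfg, i2c);
		if (!fe)
			return -ENODEV;	/* attach failed or driver disabled by Kconfig */
		return 0;
	}
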
index 48a8aad47a74d00879cac9b328b18e33b175b3be..f59ac2e91c5995fa205d12d428575619323b61e4 100644 (file)
@@ -207,9 +207,9 @@ static inline u32 log10times100(u32 value)
        return (100L * intlog10(value)) >> 24;
 }
 
-/****************************************************************************/
+/***************************************************************************/
 /* I2C **********************************************************************/
-/****************************************************************************/
+/***************************************************************************/
 
 static int drxk_i2c_lock(struct drxk_state *state)
 {
@@ -3444,7 +3444,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Activate DVBT specific presets
 * \param demod instance of demodulator.
 * \return DRXStatus_t.
@@ -3484,7 +3484,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Initialize channelswitch-independent settings for DVBT.
 * \param demod instance of demodulator.
 * \return DRXStatus_t.
@@ -3696,7 +3696,7 @@ error:
 }
 
 /*============================================================================*/
-/**
+/*
 * \brief start dvbt demodulating for channel.
 * \param demod instance of demodulator.
 * \return DRXStatus_t.
@@ -3732,7 +3732,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Set up dvbt demodulator for channel.
 * \param demod instance of demodulator.
 * \return DRXStatus_t.
@@ -4086,7 +4086,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Retrieve lock status .
 * \param demod    Pointer to demodulator instance.
 * \param lockStat Pointer to lock status structure.
@@ -4148,7 +4148,7 @@ static int power_up_qam(struct drxk_state *state)
 }
 
 
-/** Power Down QAM */
+/* Power Down QAM */
 static int power_down_qam(struct drxk_state *state)
 {
        u16 data = 0;
@@ -4186,7 +4186,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Setup of the QAM Measurement intervals for signal quality
 * \param demod instance of demod.
 * \param modulation current modulation.
@@ -4461,7 +4461,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief QAM32 specific setup
 * \param demod instance of demod.
 * \return DRXStatus_t.
@@ -4657,7 +4657,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief QAM64 specific setup
 * \param demod instance of demod.
 * \return DRXStatus_t.
@@ -4852,7 +4852,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief QAM128 specific setup
 * \param demod: instance of demod.
 * \return DRXStatus_t.
@@ -5049,7 +5049,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief QAM256 specific setup
 * \param demod: instance of demod.
 * \return DRXStatus_t.
@@ -5244,7 +5244,7 @@ error:
 
 
 /*============================================================================*/
-/**
+/*
 * \brief Reset QAM block.
 * \param demod:   instance of demod.
 * \param channel: pointer to channel data.
@@ -5272,7 +5272,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Set QAM symbolrate.
 * \param demod:   instance of demod.
 * \param channel: pointer to channel data.
@@ -5341,7 +5341,7 @@ error:
 
 /*============================================================================*/
 
-/**
+/*
 * \brief Get QAM lock status.
 * \param demod:   instance of demod.
 * \param channel: pointer to channel data.
index 6aaa9c6bff9c253122c9b4d43b4fde449db7765e..212e0730f1549014029dfa7028a27d6724a4fd14 100644 (file)
 #define DVB_PLL_TDEE4                 18
 #define DVB_PLL_THOMSON_DTT7520X       19
 
+#if IS_REACHABLE(CONFIG_DVB_PLL)
 /**
  * Attach a dvb-pll to the supplied frontend structure.
  *
- * @param fe Frontend to attach to.
- * @param pll_addr i2c address of the PLL (if used).
- * @param i2c i2c adapter to use (set to NULL if not used).
- * @param pll_desc_id dvb_pll_desc to use.
- * @return Frontend pointer on success, NULL on failure
+ * @fe: Frontend to attach to.
+ * @pll_addr: i2c address of the PLL (if used).
+ * @i2c: i2c adapter to use (set to NULL if not used).
+ * @pll_desc_id: dvb_pll_desc to use.
+ *
+ * return: Frontend pointer on success, NULL on failure
  */
-#if IS_REACHABLE(CONFIG_DVB_PLL)
 extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe,
                                           int pll_addr,
                                           struct i2c_adapter *i2c,
index 333615491d9e8ee8ce1725e82dead3888c0cdb0f..c9fc81c7e4e7e0449daa6ba8d4485614bcb3f803 100644 (file)
@@ -38,6 +38,7 @@ enum helene_xtal {
  * @set_tuner_priv:    Callback function private context
  * @set_tuner_callback:        Callback function that notifies the parent driver
  *                     which tuner is active now
+ * @xtal: Crystal frequency as described by &enum helene_xtal
  */
 struct helene_config {
        u8      i2c_address;
@@ -48,9 +49,31 @@ struct helene_config {
 };
 
 #if IS_REACHABLE(CONFIG_DVB_HELENE)
+/**
+ * Attach a helene tuner (terrestrial and cable standards)
+ *
+ * @fe: frontend to be attached
+ * @config: pointer to &struct helene_config with tuner configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
                                        const struct helene_config *config,
                                        struct i2c_adapter *i2c);
+
+/**
+ * Attach a helene tuner (satellite standards)
+ *
+ * @fe: frontend to be attached
+ * @config: pointer to &struct helene_config with tuner configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
+extern struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
+                                       const struct helene_config *config,
+                                       struct i2c_adapter *i2c);
 #else
 static inline struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
                                        const struct helene_config *config,
@@ -59,13 +82,6 @@ static inline struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
        pr_warn("%s: driver disabled by Kconfig\n", __func__);
        return NULL;
 }
-#endif
-
-#if IS_REACHABLE(CONFIG_DVB_HELENE)
-extern struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
-                                       const struct helene_config *config,
-                                       struct i2c_adapter *i2c);
-#else
 static inline struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
                                        const struct helene_config *config,
                                        struct i2c_adapter *i2c)
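
A hedged sketch of the two attach variants now documented in this header; the 0x19 address is an assumption, and the xtal and callback fields are board specific and therefore omitted:

	#include "dvb_frontend.h"
	#include "helene.h"

	static struct helene_config helene_cfg = {
		.i2c_address = 0x19,	/* assumed tuner I2C address */
		/* .xtal and the set_tuner callbacks depend on the board */
	};

	static int example_attach_helene(struct dvb_frontend *fe,
					 struct i2c_adapter *i2c, bool satellite)
	{
		struct dvb_frontend *ret;

		/* helene_attach() covers terrestrial/cable, helene_attach_s() satellite */
		ret = satellite ? dvb_attach(helene_attach_s, fe, &helene_cfg, i2c)
				: dvb_attach(helene_attach, fe, &helene_cfg, i2c);
		return ret ? 0 : -ENODEV;
	}
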
index 672a556df71a184f1a2a21a0d302378500ac7bfe..9157fd037e2fea82cfa0d3149ecb769d5f936244 100644 (file)
@@ -41,6 +41,15 @@ struct horus3a_config {
 };
 
 #if IS_REACHABLE(CONFIG_DVB_HORUS3A)
+/**
+ * Attach a horus3a tuner
+ *
+ * @fe: frontend to be attached
+ * @config: pointer to &struct horus3a_config with tuner configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
                                        const struct horus3a_config *config,
                                        struct i2c_adapter *i2c);
index 534b24fa2b95ae021dd4989091c312b8d1f4081e..965012ad5c596b4335abfaefcfde9f28aa2826a0 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * Driver for Sharp IX2505V (marked B0017) DVB-S silicon tuner
  *
  * Copyright (C) 2010 Malcolm Priestley
@@ -36,7 +36,7 @@ struct ix2505v_state {
        u32 frequency;
 };
 
-/**
+/*
  *  Data read format of the Sharp IX2505V B0017
  *
  *  byte1:   1   |   1   |   0   |   0   |   0   |  MA1  |  MA0  |  1
@@ -99,7 +99,7 @@ static void ix2505v_release(struct dvb_frontend *fe)
 
 }
 
-/**
+/*
  *  Data write format of the Sharp IX2505V B0017
  *
  *  byte1:   1   |   1   |   0   |   0   |   0   | 0(MA1)| 0(MA0)|  0
index 0b0a431c74f61a404c7c3c38dce035bc90a826b6..49ed93e754edbfea23d028ef041c863687c2ea3e 100644 (file)
 #include "dvb_frontend.h"
 
 /**
- * Attach a ix2505v tuner to the supplied frontend structure.
+ * struct ix2505v_config - ix2505v attachment configuration
  *
- * @param fe Frontend to attach to.
- * @param config ix2505v_config structure
- * @return FE pointer on success, NULL on failure.
+ * @tuner_address: tuner address
+ * @tuner_gain: Baseband AMP gain control 0/1=0dB(default) 2=-2dB 3=-4dB
+ * @tuner_chargepump: Charge pump output +/- 0=120 1=260 2=555 3=1200(default)
+ * @min_delay_ms: delay after tune
+ * @tuner_write_only: disables reads
  */
-
 struct ix2505v_config {
        u8 tuner_address;
-
-       /*Baseband AMP gain control 0/1=0dB(default) 2=-2bB 3=-4dB */
        u8 tuner_gain;
-
-       /*Charge pump output +/- 0=120 1=260 2=555 3=1200(default) */
        u8 tuner_chargepump;
-
-       /* delay after tune */
        int min_delay_ms;
-
-       /* disables reads*/
        u8 tuner_write_only;
 
 };
 
 #if IS_REACHABLE(CONFIG_DVB_IX2505V)
+/**
+ * Attach a ix2505v tuner to the supplied frontend structure.
+ *
+ * @fe: Frontend to attach to.
+ * @config: pointer to &struct ix2505v_config
+ * @i2c: pointer to &struct i2c_adapter.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
        const struct ix2505v_config *config, struct i2c_adapter *i2c);
 #else
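
To make the reshuffled documentation concrete, a small attach sketch; the address and delay values are illustrative, and the field meanings are those listed in the struct comment above:

	#include "dvb_frontend.h"
	#include "ix2505v.h"

	static const struct ix2505v_config ix2505v_cfg = {
		.tuner_address	  = 0x60,	/* assumed I2C address */
		.tuner_gain	  = 0,		/* 0 dB (default) */
		.tuner_chargepump = 3,		/* 1200 (default) */
		.min_delay_ms	  = 100,	/* assumed post-tune delay */
		.tuner_write_only = 1,		/* skip read-back */
	};

	static int example_attach_ix2505v(struct dvb_frontend *fe, struct i2c_adapter *i2c)
	{
		return dvb_attach(ix2505v_attach, fe, &ix2505v_cfg, i2c) ? 0 : -ENODEV;
	}
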
index 68923c84679a2ea54aca1d35c6cc559c389c8c5c..e5a6c176666432d3e1c5c18a4c33233626f10e02 100644 (file)
@@ -517,7 +517,7 @@ struct dvb_frontend* l64781_attach(const struct l64781_config* config,
        state->i2c = i2c;
        state->first = 1;
 
-       /**
+       /*
         *  the L64781 won't show up before we send the reset_and_configure()
         *  broadcast. If nothing responds there is no L64781 on the bus...
         */
index 04b355a005fb539a7285a24a610a77e15ac1a321..1a8964a2265d7ef321a6c311d698b70f11fa4611 100644 (file)
  * 0x68,
  */
 
+/**
+ * enum m88ds3103_ts_mode - TS connection mode
+ * @M88DS3103_TS_SERIAL:       TS output pin D0, normal
+ * @M88DS3103_TS_SERIAL_D7:    TS output pin D7
+ * @M88DS3103_TS_PARALLEL:     TS Parallel mode
+ * @M88DS3103_TS_CI:           TS CI Mode
+ */
+enum m88ds3103_ts_mode {
+       M88DS3103_TS_SERIAL,
+       M88DS3103_TS_SERIAL_D7,
+       M88DS3103_TS_PARALLEL,
+       M88DS3103_TS_CI
+};
+
+/**
+ * enum m88ds3103_clock_out
+ * @M88DS3103_CLOCK_OUT_DISABLED:      Clock output is disabled
+ * @M88DS3103_CLOCK_OUT_ENABLED:       Clock output is enabled with crystal
+ *                                     clock.
+ * @M88DS3103_CLOCK_OUT_ENABLED_DIV2:  Clock output is enabled with half
+ *                                     crystal clock.
+ */
+enum m88ds3103_clock_out {
+       M88DS3103_CLOCK_OUT_DISABLED,
+       M88DS3103_CLOCK_OUT_ENABLED,
+       M88DS3103_CLOCK_OUT_ENABLED_DIV2
+};
+
 /**
  * struct m88ds3103_platform_data - Platform data for the m88ds3103 driver
  * @clk: Clock frequency.
  * @get_dvb_frontend: Get DVB frontend.
  * @get_i2c_adapter: Get I2C adapter.
  */
-
 struct m88ds3103_platform_data {
        u32 clk;
        u16 i2c_wr_max;
-#define M88DS3103_TS_SERIAL             0 /* TS output pin D0, normal */
-#define M88DS3103_TS_SERIAL_D7          1 /* TS output pin D7 */
-#define M88DS3103_TS_PARALLEL           2 /* TS Parallel mode */
-#define M88DS3103_TS_CI                 3 /* TS CI Mode */
-       u8 ts_mode:2;
+       enum m88ds3103_ts_mode ts_mode;
        u32 ts_clk;
+       enum m88ds3103_clock_out clk_out;
        u8 ts_clk_pol:1;
        u8 spec_inv:1;
        u8 agc;
        u8 agc_inv:1;
-#define M88DS3103_CLOCK_OUT_DISABLED        0
-#define M88DS3103_CLOCK_OUT_ENABLED         1
-#define M88DS3103_CLOCK_OUT_ENABLED_DIV2    2
-       u8 clk_out:2;
        u8 envelope_mode:1;
        u8 lnb_hv_pol:1;
        u8 lnb_en_pol:1;
@@ -73,105 +93,60 @@ struct m88ds3103_platform_data {
        u8 attach_in_use:1;
 };
 
-/*
- * Do not add new m88ds3103_attach() users! Use I2C bindings instead.
+/**
+ * struct m88ds3103_config - m88ds3103 configuration
+ *
+ * @i2c_addr:  I2C address. Default: none, must set. Example: 0x68, ...
+ * @clock:     Device's clock. Default: none, must set. Example: 27000000
+ * @i2c_wr_max: Max bytes I2C provider is asked to write at once.
+ *             Default: none, must set. Example: 33, 65, ...
+ * @ts_mode:   TS output mode, as defined by &enum m88ds3103_ts_mode.
+ *             Default: M88DS3103_TS_SERIAL.
+ * @ts_clk:    TS clk in KHz. Default: 0.
+ * @ts_clk_pol:        TS clk polarity. Default: 0.
+ *             1-active at falling edge; 0-active at rising edge.
+ * @spec_inv:  Spectrum inversion. Default: 0.
+ * @agc_inv:   AGC polarity. Default: 0.
+ * @clock_out: Clock output, as defined by &enum m88ds3103_clock_out.
+ *             Default: M88DS3103_CLOCK_OUT_DISABLED.
+ * @envelope_mode: DiSEqC envelope mode. Default: 0.
+ * @agc:       AGC configuration. Default: none, must set.
+ * @lnb_hv_pol:        LNB H/V pin polarity. Default: 0. Values:
+ *             1: pin high set to VOLTAGE_13, pin low to set VOLTAGE_18;
+ *             0: pin high set to VOLTAGE_18, pin low to set VOLTAGE_13.
+ * @lnb_en_pol:        LNB enable pin polarity. Default: 0. Values:
+ *             1: pin high to enable, pin low to disable;
+ *             0: pin high to disable, pin low to enable.
  */
 struct m88ds3103_config {
-       /*
-        * I2C address
-        * Default: none, must set
-        * 0x68, ...
-        */
        u8 i2c_addr;
-
-       /*
-        * clock
-        * Default: none, must set
-        * 27000000
-        */
        u32 clock;
-
-       /*
-        * max bytes I2C provider is asked to write at once
-        * Default: none, must set
-        * 33, 65, ...
-        */
        u16 i2c_wr_max;
-
-       /*
-        * TS output mode
-        * Default: M88DS3103_TS_SERIAL
-        */
-#define M88DS3103_TS_SERIAL             0 /* TS output pin D0, normal */
-#define M88DS3103_TS_SERIAL_D7          1 /* TS output pin D7 */
-#define M88DS3103_TS_PARALLEL           2 /* TS Parallel mode */
-#define M88DS3103_TS_CI                 3 /* TS CI Mode */
        u8 ts_mode;
-
-       /*
-        * TS clk in KHz
-        * Default: 0.
-        */
        u32 ts_clk;
-
-       /*
-        * TS clk polarity.
-        * Default: 0. 1-active at falling edge; 0-active at rising edge.
-        */
        u8 ts_clk_pol:1;
-
-       /*
-        * spectrum inversion
-        * Default: 0
-        */
        u8 spec_inv:1;
-
-       /*
-        * AGC polarity
-        * Default: 0
-        */
        u8 agc_inv:1;
-
-       /*
-        * clock output
-        * Default: M88DS3103_CLOCK_OUT_DISABLED
-        */
-#define M88DS3103_CLOCK_OUT_DISABLED        0
-#define M88DS3103_CLOCK_OUT_ENABLED         1
-#define M88DS3103_CLOCK_OUT_ENABLED_DIV2    2
        u8 clock_out;
-
-       /*
-        * DiSEqC envelope mode
-        * Default: 0
-        */
        u8 envelope_mode:1;
-
-       /*
-        * AGC configuration
-        * Default: none, must set
-        */
        u8 agc;
-
-       /*
-        * LNB H/V pin polarity
-        * Default: 0.
-        * 1: pin high set to VOLTAGE_13, pin low to set VOLTAGE_18.
-        * 0: pin high set to VOLTAGE_18, pin low to set VOLTAGE_13.
-        */
        u8 lnb_hv_pol:1;
-
-       /*
-        * LNB enable pin polarity
-        * Default: 0.
-        * 1: pin high to enable, pin low to disable.
-        * 0: pin high to disable, pin low to enable.
-        */
        u8 lnb_en_pol:1;
 };
 
 #if defined(CONFIG_DVB_M88DS3103) || \
                (defined(CONFIG_DVB_M88DS3103_MODULE) && defined(MODULE))
+/**
+ * Attach a m88ds3103 demod
+ *
+ * @config: pointer to &struct m88ds3103_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ * @tuner_i2c: on success, returns the I2C adapter associated with
+ *             m88ds3103 tuner.
+ *
+ * return: FE pointer on success, NULL on failure.
+ * Note: Do not add new m88ds3103_attach() users! Use I2C bindings instead.
+ */
 extern struct dvb_frontend *m88ds3103_attach(
                const struct m88ds3103_config *config,
                struct i2c_adapter *i2c,
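
For clients that bind through I2C (the path the note above recommends), the new enums slot into the platform data roughly as follows; the clock, write-chunk and TS settings are illustrative assumptions, and the 0x68 address is the one mentioned near the top of this header:

	#include <linux/i2c.h>
	#include "m88ds3103.h"

	static struct m88ds3103_platform_data m88ds3103_pdata = {
		.clk	    = 27000000,
		.i2c_wr_max = 33,
		.ts_mode    = M88DS3103_TS_PARALLEL,		/* replaces the old raw value 2 */
		.clk_out    = M88DS3103_CLOCK_OUT_DISABLED,	/* replaces the old raw value 0 */
	};

	static const struct i2c_board_info m88ds3103_board_info = {
		I2C_BOARD_INFO("m88ds3103", 0x68),
		.platform_data = &m88ds3103_pdata,
	};
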
index dfb02db2126c1061b00225c698d3305c8d3c9220..05c9725d1c5f94f0b234681c8351f4efc6a98016 100644 (file)
@@ -26,7 +26,6 @@
  * @demod_address:     the demodulator's i2c address
  * @is_serial:         if true, TS is serial. Otherwise, TS is parallel
  */
-
 struct mb86a20s_config {
        u32     fclk;
        u8      demod_address;
@@ -34,9 +33,17 @@ struct mb86a20s_config {
 };
 
 #if IS_REACHABLE(CONFIG_DVB_MB86A20S)
+/**
+ * Attach a mb86a20s demod
+ *
+ * @config: pointer to &struct mb86a20s_config with demod configuration.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
                                           struct i2c_adapter *i2c);
-extern struct i2c_adapter *mb86a20s_get_tuner_i2c_adapter(struct dvb_frontend *);
+
 #else
 static inline struct dvb_frontend *mb86a20s_attach(
        const struct mb86a20s_config *config, struct i2c_adapter *i2c)
@@ -44,12 +51,6 @@ static inline struct dvb_frontend *mb86a20s_attach(
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
        return NULL;
 }
-static inline struct i2c_adapter *
-       mb86a20s_get_tuner_i2c_adapter(struct dvb_frontend *fe)
-{
-       printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
-       return NULL;
-}
 #endif
 
 #endif /* MB86A20S */
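
A brief sketch of the attach call just documented; the clock, address and TS mode values are assumptions for illustration only:

	#include "dvb_frontend.h"
	#include "mb86a20s.h"

	static const struct mb86a20s_config mb86a20s_cfg = {
		.fclk	       = 32571428,	/* assumed master clock, in Hz */
		.demod_address = 0x10,		/* assumed demod I2C address */
		.is_serial     = false,		/* parallel TS */
	};

	static int example_attach_mb86a20s(struct i2c_adapter *i2c,
					   struct dvb_frontend **fe)
	{
		*fe = dvb_attach(mb86a20s_attach, &mb86a20s_cfg, i2c);
		return *fe ? 0 : -ENODEV;
	}
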
index 323632523876c6e7a97a4abf7c7b1f2e73b64fc2..8cd5ef61903b986b477eaea36a74d45e782d0655 100644 (file)
 
 #include <linux/dvb/frontend.h>
 
+/* Define old names for backward compatibility */
+#define VARIABLE_TS_CLOCK   MN88472_TS_CLK_VARIABLE
+#define FIXED_TS_CLOCK      MN88472_TS_CLK_FIXED
+#define SERIAL_TS_MODE      MN88472_TS_MODE_SERIAL
+#define PARALLEL_TS_MODE    MN88472_TS_MODE_PARALLEL
+
 /**
  * struct mn88472_config - Platform data for the mn88472 driver
  * @xtal: Clock frequency.
  * @ts_mode: TS mode.
  * @ts_clock: TS clock config.
  * @i2c_wr_max: Max number of bytes driver writes to I2C at once.
- * @get_dvb_frontend: Get DVB frontend.
+ * @fe: pointer to a frontend pointer
+ * @get_dvb_frontend: Get DVB frontend callback.
  */
-
-/* Define old names for backward compatibility */
-#define VARIABLE_TS_CLOCK   MN88472_TS_CLK_VARIABLE
-#define FIXED_TS_CLOCK      MN88472_TS_CLK_FIXED
-#define SERIAL_TS_MODE      MN88472_TS_MODE_SERIAL
-#define PARALLEL_TS_MODE    MN88472_TS_MODE_PARALLEL
-
 struct mn88472_config {
        unsigned int xtal;
 
index 0cde151e66089c7b3319f5ea1c3ea39860986d36..458ac94e8a8b45da8ae4cde06a2837b4faf3a2fc 100644 (file)
@@ -32,7 +32,6 @@
  * @pid_filter: Set PID to PID filter.
  * @pid_filter_ctrl: Control PID filter.
  */
-
 struct rtl2830_platform_data {
        u32 clk;
        bool spec_inv;
index 03c0de039fa902e5d9ba524e1ee6e0770caf241a..6a124ff71c2b8694f430983210572d810645fba9 100644 (file)
@@ -35,7 +35,6 @@
  * @pid_filter: Set PID to PID filter.
  * @pid_filter_ctrl: Control PID filter.
  */
-
 struct rtl2832_platform_data {
        u32 clk;
        /*
index d8fc7e7212e34317d58897cb159dee8191f51c4c..8f88c2fb86273606fac1db738b6ed63c3bd7ce87 100644 (file)
  * struct rtl2832_sdr_platform_data - Platform data for the rtl2832_sdr driver
  * @clk: Clock frequency (4000000, 16000000, 25000000, 28800000).
  * @tuner: Used tuner model.
- * @i2c_client: rtl2832 demod driver I2C client.
- * @bulk_read: rtl2832 driver private I/O interface.
- * @bulk_write: rtl2832 driver private I/O interface.
- * @update_bits: rtl2832 driver private I/O interface.
+ * @regmap: pointer to &struct regmap.
  * @dvb_frontend: rtl2832 DVB frontend.
  * @v4l2_subdev: Tuner v4l2 controls.
  * @dvb_usb_device: DVB USB interface for USB streaming.
  */
-
 struct rtl2832_sdr_platform_data {
        u32 clk;
        /*
index 7c511c3cd4ca5f1286c6d8f8e9184ed8176d30ba..d2c402b52c6ef9c18b9f88340c489b9b850d0720 100644 (file)
@@ -57,7 +57,7 @@ static int sp887x_writereg (struct sp887x_state* state, u16 reg, u16 data)
        int ret;
 
        if ((ret = i2c_transfer(state->i2c, &msg, 1)) != 1) {
-               /**
+               /*
                 *  in case of soft reset we ignore ACK errors...
                 */
                if (!(reg == 0xf1a && data == 0x000 &&
@@ -130,7 +130,7 @@ static void sp887x_setup_agc (struct sp887x_state* state)
 
 #define BLOCKSIZE 30
 #define FW_SIZE 0x4000
-/**
+/*
  *  load firmware and setup MPEG interface...
  */
 static int sp887x_initial_setup (struct dvb_frontend* fe, const struct firmware *fw)
@@ -279,7 +279,7 @@ static int configure_reg0xc05(struct dtv_frontend_properties *p, u16 *reg0xc05)
        return 0;
 }
 
-/**
+/*
  *  estimates division of two 24bit numbers,
  *  derived from the ves1820/stv0299 driver code
  */
index 78e75dfc317f271e92ff786c34ffa9183bae6830..e94a3d5facf67b60d21f4b1a685805ba4b770c7d 100644 (file)
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
+#if IS_REACHABLE(CONFIG_DVB_STB6000)
 /**
  * Attach a stb6000 tuner to the supplied frontend structure.
  *
- * @param fe Frontend to attach to.
- * @param addr i2c address of the tuner.
- * @param i2c i2c adapter to use.
- * @return FE pointer on success, NULL on failure.
+ * @fe: Frontend to attach to.
+ * @addr: i2c address of the tuner.
+ * @i2c: i2c adapter to use.
+ *
+ * return: FE pointer on success, NULL on failure.
  */
-#if IS_REACHABLE(CONFIG_DVB_STB6000)
 extern struct dvb_frontend *stb6000_attach(struct dvb_frontend *fe, int addr,
                                           struct i2c_adapter *i2c);
 #else
index b36b21a1320142c59a9716f5d1d01161b57d35ec..b1f3d675d316d289c3269e30340a67380d4c390c 100644 (file)
@@ -368,7 +368,7 @@ static int stv0299_set_voltage(struct dvb_frontend *fe,
        reg0x08 = stv0299_readreg (state, 0x08);
        reg0x0c = stv0299_readreg (state, 0x0c);
 
-       /**
+       /*
         *  H/V switching over OP0, OP1 and OP2 are LNB power enable bits
         */
        reg0x0c &= 0x0f;
index 8f184026ee11058b6baef5d5cc20198353a4b038..da1a87bc160378d58a4c249a579f05c62c63f2af 100644 (file)
@@ -38,7 +38,6 @@
  * @tuner_i2c_addr: CX24118A tuner I2C address (0x14, 0x54, ...).
  * @get_dvb_frontend: Get DVB frontend.
  */
-
 struct tda10071_platform_data {
        u32 clk;
        u16 i2c_wr_max;
index 81abe1aebe9fba8d6746a2a34bf8430b1a4332ad..6a7bed12e741b1935486d2348c92e3ebea2dcd44 100644 (file)
 /**
  * Attach a tda826x tuner to the supplied frontend structure.
  *
- * @param fe Frontend to attach to.
- * @param addr i2c address of the tuner.
- * @param i2c i2c adapter to use.
- * @param has_loopthrough Set to 1 if the card has a loopthrough RF connector.
- * @return FE pointer on success, NULL on failure.
+ * @fe: Frontend to attach to.
+ * @addr: i2c address of the tuner.
+ * @i2c: i2c adapter to use.
+ * @has_loopthrough: Set to 1 if the card has a loopthrough RF connector.
+ *
+ * return: FE pointer on success, NULL on failure.
  */
 #if IS_REACHABLE(CONFIG_DVB_TDA826X)
 extern struct dvb_frontend* tda826x_attach(struct dvb_frontend *fe, int addr,
index 18e6d4c5be21ce75c0d6afbf714cf5d11406b67a..1d41abd47f043432c01c6e937347f67699fc39cc 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * Driver for Infineon tua6100 pll.
  *
  * (c) 2006 Andrew de Quincey
index 9f15cbdfdeca98b85ec4b28245ca13c1ed557a4c..6c098a894ea65ce6a7b34d74515f2aebe51359d7 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * Driver for Infineon tua6100 PLL.
  *
  * (c) 2006 Andrew de Quincey
index ceb2e05e873cf9824e7d9283a2a8efa0cda4ddb3..6cd8f6f9c415632700101efecc131fdc59bc916a 100644 (file)
@@ -27,7 +27,6 @@
  * @reg_read: Register read callback.
  * @reg_write: Register write callback.
  */
-
 struct zd1301_demod_platform_data {
        void *reg_priv;
        int (*reg_read)(void *, u16, u8 *);
@@ -41,8 +40,7 @@ struct zd1301_demod_platform_data {
  *
  * Return: Pointer to DVB frontend which given platform device owns.
  */
-
-struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *);
+struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *pdev);
 
 /**
  * zd1301_demod_get_i2c_adapter() - Get pointer to I2C adapter
@@ -50,11 +48,16 @@ struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *);
  *
  * Return: Pointer to I2C adapter which given platform device owns.
  */
-
-struct i2c_adapter *zd1301_demod_get_i2c_adapter(struct platform_device *);
+struct i2c_adapter *zd1301_demod_get_i2c_adapter(struct platform_device *pdev);
 
 #else
 
+/**
+ * zd1301_demod_get_dvb_frontend() - Attach a zd1301 frontend
+ * @dev: Pointer to platform device
+ *
+ * Return: Pointer to %struct dvb_frontend or NULL if attach fails.
+ */
 static inline struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *dev)
 {
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
index 062282739ce56136f8abd0c2d30d8c4b0a94fcab..89dd65ae88ad479f4c50474437e7dd4a6314221a 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * Driver for Zarlink zl10036 DVB-S silicon tuner
  *
  * Copyright (C) 2006 Tino Reichardt
@@ -157,7 +157,7 @@ static int zl10036_sleep(struct dvb_frontend *fe)
        return ret;
 }
 
-/**
+/*
  * register map of the ZL10036/ZL10038
  *
  * reg[default] content
@@ -219,7 +219,7 @@ static int zl10036_set_bandwidth(struct zl10036_state *state, u32 fbw)
        if (fbw <= 28820) {
                br = _BR_MAXIMUM;
        } else {
-               /**
+               /*
                 *  f(bw)=34,6MHz f(xtal)=10.111MHz
                 *  br = (10111/34600) * 63 * 1/K = 14;
                 */
@@ -315,7 +315,7 @@ static int zl10036_set_params(struct dvb_frontend *fe)
        ||  (frequency > fe->ops.info.frequency_max))
                return -EINVAL;
 
-       /**
+       /*
         * alpha = 1.35 for dvb-s
         * fBW = (alpha*symbolrate)/(2*0.8)
         * 1.35 / (2*0.8) = 27 / 32
index 88751adfecf7c6f5d8bf824f9f7ba1b2a7d4b4dc..ec90ca9277392bba1b8cec2d7b05cb4b32f6a84b 100644 (file)
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
-/**
- * Attach a zl10036 tuner to the supplied frontend structure.
- *
- * @param fe Frontend to attach to.
- * @param config zl10036_config structure
- * @return FE pointer on success, NULL on failure.
- */
-
 struct zl10036_config {
        u8 tuner_address;
        int rf_loop_enable;
 };
 
 #if IS_REACHABLE(CONFIG_DVB_ZL10036)
+/**
+ * Attach a zl10036 tuner to the supplied frontend structure.
+ *
+ * @fe: Frontend to attach to.
+ * @config: zl10036_config structure.
+ * @i2c: pointer to struct i2c_adapter.
+ * return: FE pointer on success, NULL on failure.
+ */
 extern struct dvb_frontend *zl10036_attach(struct dvb_frontend *fe,
        const struct zl10036_config *config, struct i2c_adapter *i2c);
 #else
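
Mirroring the relocated kernel-doc, a minimal attach sketch; the address is an assumption and rf_loop_enable simply reflects whether the board wires up the RF loopthrough:

	#include "dvb_frontend.h"
	#include "zl10036.h"

	static const struct zl10036_config zl10036_cfg = {
		.tuner_address	= 0x60,	/* assumed I2C address */
		.rf_loop_enable	= 1,
	};

	static int example_attach_zl10036(struct dvb_frontend *fe, struct i2c_adapter *i2c)
	{
		return dvb_attach(zl10036_attach, fe, &zl10036_cfg, i2c) ? 0 : -ENODEV;
	}
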
index 3c6d6428f5258c9f82e9d033c1d8540bc11283f4..cb5d7ff829151ca0940ce14d1372c0747233eccf 100644 (file)
@@ -676,6 +676,7 @@ config VIDEO_OV13858
        tristate "OmniVision OV13858 sensor support"
        depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
        depends on MEDIA_CAMERA_SUPPORT
+       select V4L2_FWNODE
        ---help---
          This is a Video4Linux2 sensor-level driver for the OmniVision
          OV13858 camera.
index 14399365ad7f93421b32971c14a826721a2cc993..9fe409e956662b1dcc07bb900696b46a722d3bfc 100644 (file)
@@ -1,6 +1,7 @@
 config VIDEO_ET8EK8
        tristate "ET8EK8 camera sensor support"
        depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       select V4L2_FWNODE
        ---help---
          This is a driver for the Toshiba ET8EK8 5 MP camera sensor.
          It is used for example in Nokia N900 (RX-51).
index 800b9bf9cdd31c430572f8e0a9490913379aaac0..2f71af2f90bf98978165579f28a5c01680def0c1 100644 (file)
@@ -1770,8 +1770,7 @@ static int imx274_probe(struct i2c_client *client,
        return 0;
 
 err_ctrls:
-       v4l2_async_unregister_subdev(sd);
-       v4l2_ctrl_handler_free(sd->ctrl_handler);
+       v4l2_ctrl_handler_free(&imx274->ctrls.handler);
 err_me:
        media_entity_cleanup(&sd->entity);
 err_regmap:
@@ -1788,7 +1787,7 @@ static int imx274_remove(struct i2c_client *client)
        imx274_write_table(imx274, mode_table[IMX274_MODE_STOP_STREAM]);
 
        v4l2_async_unregister_subdev(sd);
-       v4l2_ctrl_handler_free(sd->ctrl_handler);
+       v4l2_ctrl_handler_free(&imx274->ctrls.handler);
        media_entity_cleanup(&sd->entity);
        mutex_destroy(&imx274->lock);
        return 0;
index 251a2aaf98c3b2665061266f31a248268d2f051e..b600e03aa94b06dd531f88b78adffd33b03ec051 100644 (file)
@@ -50,6 +50,7 @@ enum led_enable {
 /**
  * struct lm3560_flash
  *
+ * @dev: pointer to &struct device
  * @pdata: platform data
  * @regmap: reg. map for i2c
  * @lock: mutex for serial access.
index a0cd6dc32eb0d75e55c58866686db9af87f31cf0..0fb457f579957445252ff8dc6cb1ef86146c2cd2 100644 (file)
 
 /**
  * m5mols_read_rational - I2C read of a rational number
+ * @sd: sub-device, as pointed by struct v4l2_subdev
+ * @addr_num: numerator register
+ * @addr_den: denominator register
+ * @val: place to store the division result
  *
  * Read numerator and denominator from registers @addr_num and @addr_den
  * respectively and return the division result in @val.
@@ -53,6 +57,7 @@ static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num,
 
 /**
  * m5mols_capture_info - Gather captured image information
+ * @info: M-5MOLS driver data structure
  *
  * For now it gathers only EXIF information and file size.
  */
index c2218c0a9e6fa11c43401265d065671c086e6cac..82eab7c2bc8cb59735db17529730a52c94c879e8 100644 (file)
@@ -126,6 +126,7 @@ static struct m5mols_scenemode m5mols_default_scenemode[] = {
 
 /**
  * m5mols_do_scenemode() - Change current scenemode
+ * @info: M-5MOLS driver data structure
  * @mode:      Desired mode of the scenemode
  *
  * WARNING: The execution order is important. Do not change the order.
index 463534d44756e81052967822881de9a1ae68e8ba..12e79f9e32d53c090a001bc1036398a209cb9de1 100644 (file)
@@ -114,7 +114,8 @@ static const struct m5mols_resolution m5mols_reg_res[] = {
 
 /**
 * m5mols_swap_byte - a byte array to integer conversion function
- * @size: size in bytes of I2C packet defined in the M-5MOLS datasheet
+ * @data: byte array
+ * @length: size in bytes of I2C packet defined in the M-5MOLS datasheet
  *
  * Convert I2C data byte array with performing any required byte
  * reordering to assure proper values for each data type, regardless
@@ -132,8 +133,9 @@ static u32 m5mols_swap_byte(u8 *data, u8 length)
 
 /**
  * m5mols_read -  I2C read function
- * @reg: combination of size, category and command for the I2C packet
+ * @sd: sub-device, as pointed by struct v4l2_subdev
  * @size: desired size of I2C packet
+ * @reg: combination of size, category and command for the I2C packet
  * @val: read value
  *
  * Returns 0 on success, or else negative errno.
@@ -232,6 +234,7 @@ int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg, u32 *val)
 
 /**
  * m5mols_write - I2C command write function
+ * @sd: sub-device, as pointed by struct v4l2_subdev
  * @reg: combination of size, category and command for the I2C packet
  * @val: value to write
  *
@@ -284,6 +287,7 @@ int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
 
 /**
  * m5mols_busy_wait - Busy waiting with I2C register polling
+ * @sd: sub-device, as pointed by struct v4l2_subdev
  * @reg: the I2C_REG() address of an 8-bit status register to check
  * @value: expected status register value
  * @mask: bit mask for the read status register value
@@ -316,6 +320,8 @@ int m5mols_busy_wait(struct v4l2_subdev *sd, u32 reg, u32 value, u32 mask,
 
 /**
  * m5mols_enable_interrupt - Clear interrupt pending bits and unmask interrupts
+ * @sd: sub-device, as pointed by struct v4l2_subdev
+ * @reg: combination of size, category and command for the I2C packet
  *
  * Before writing desired interrupt value the INT_FACTOR register should
  * be read to clear pending interrupts.
@@ -349,6 +355,8 @@ int m5mols_wait_interrupt(struct v4l2_subdev *sd, u8 irq_mask, u32 timeout)
 
 /**
  * m5mols_reg_mode - Write the mode and check busy status
+ * @sd: sub-device, as pointed by struct v4l2_subdev
+ * @mode: the required operation mode
  *
  * It always accompanies a little delay changing the M-5MOLS mode, so it is
  * needed checking current busy status to guarantee right mode.
@@ -364,6 +372,7 @@ static int m5mols_reg_mode(struct v4l2_subdev *sd, u8 mode)
 
 /**
  * m5mols_set_mode - set the M-5MOLS controller mode
+ * @info: M-5MOLS driver data structure
  * @mode: the required operation mode
  *
  * The commands of M-5MOLS are grouped into specific modes. Each functionality
@@ -421,6 +430,7 @@ int m5mols_set_mode(struct m5mols_info *info, u8 mode)
 
 /**
  * m5mols_get_version - retrieve full revisions information of M-5MOLS
+ * @sd: sub-device, as pointed by struct v4l2_subdev
  *
  * The version information includes revisions of hardware and firmware,
 * AutoFocus algorithm version and the version string.
@@ -489,6 +499,7 @@ static enum m5mols_restype __find_restype(u32 code)
 
 /**
  * __find_resolution - Lookup preset and type of M-5MOLS's resolution
+ * @sd: sub-device, as pointed by struct v4l2_subdev
  * @mf: pixel format to find/negotiate the resolution preset for
  * @type: M-5MOLS resolution type
  * @resolution:        M-5MOLS resolution preset register value
@@ -662,6 +673,7 @@ static const struct v4l2_subdev_pad_ops m5mols_pad_ops = {
 
 /**
  * m5mols_restore_controls - Apply current control values to the registers
+ * @info: M-5MOLS driver data structure
  *
  * m5mols_do_scenemode() handles all parameters for which there is yet no
  * individual control. It should be replaced at some point by setting each
@@ -686,6 +698,7 @@ int m5mols_restore_controls(struct m5mols_info *info)
 
 /**
  * m5mols_start_monitor - Start the monitor mode
+ * @info: M-5MOLS driver data structure
  *
  * Before applying the controls setup the resolution and frame rate
  * in PARAMETER mode, and then switch over to MONITOR mode.
@@ -789,6 +802,7 @@ int __attribute__ ((weak)) m5mols_update_fw(struct v4l2_subdev *sd,
 
 /**
  * m5mols_fw_start - M-5MOLS internal ARM controller initialization
+ * @sd: sub-device, as pointed by struct v4l2_subdev
  *
  * Execute the M-5MOLS internal ARM controller initialization sequence.
  * This function should be called after the supply voltage has been
@@ -844,6 +858,8 @@ static int m5mols_auto_focus_stop(struct m5mols_info *info)
 
 /**
  * m5mols_s_power - Main sensor power control function
+ * @sd: sub-device, as pointed by struct v4l2_subdev
+ * @on: if true, powers on the device; powers off otherwise.
  *
  * To prevent breaking the lens when the sensor is powered off the Soft-Landing
  * algorithm is called where available. The Soft-Landing algorithm availability
index 34179d232a35d47dcc22f65c6ff1e8f673f4877e..da39c49de503c260b3cc78bed1c7a13c57472bf0 100644 (file)
@@ -428,8 +428,8 @@ static int ov5647_sensor_set_register(struct v4l2_subdev *sd,
 }
 #endif
 
-/**
- * @short Subdev core operations registration
+/*
+ * Subdev core operations registration
  */
 static const struct v4l2_subdev_core_ops ov5647_subdev_core_ops = {
        .s_power                = ov5647_sensor_power,
index 67dcca76f981cc5a38a23f6a098cc71dc37622e9..2e140272794b2589f40ecf45628e325c06678af1 100644 (file)
@@ -53,6 +53,9 @@ enum {
  * @gpio_reset: GPIO connected to the sensor's reset pin
  * @lock: mutex protecting the structure's members below
  * @format: media bus format at the sensor's source pad
+ * @clock: pointer to &struct clk.
+ * @clock_frequency: clock frequency
+ * @power_count: stores state if device is powered
  */
 struct s5k6a3 {
        struct device *dev;
index 9fd254a8e20d2771c2b92cfe29ba1494af1e2d1f..13c10b5e2b451ccedaae039e1d92a9ecc56d9155 100644 (file)
@@ -421,6 +421,7 @@ static int s5k6aa_set_ahb_address(struct i2c_client *client)
 
 /**
  * s5k6aa_configure_pixel_clock - apply ISP main clock/PLL configuration
+ * @s5k6aa: pointer to &struct s5k6aa describing the device
  *
  * Configure the internal ISP PLL for the required output frequency.
  * Locking: called with s5k6aa.lock mutex held.
@@ -669,6 +670,7 @@ static int s5k6aa_set_input_params(struct s5k6aa *s5k6aa)
 
 /**
  * s5k6aa_configure_video_bus - configure the video output interface
+ * @s5k6aa: pointer to &struct s5k6aa describing the device
  * @bus_type: video bus type: parallel or MIPI-CSI
  * @nlanes: number of MIPI lanes to be used (MIPI-CSI only)
  *
@@ -724,6 +726,8 @@ static int s5k6aa_new_config_sync(struct i2c_client *client, int timeout,
 
 /**
  * s5k6aa_set_prev_config - write user preview register set
+ * @s5k6aa: pointer to &struct s5k6aa describing the device
+ * @preset: s5kaa preset to be applied
  *
 * Configure output resolution and color format, pixel clock
  * frequency range, device frame rate type and frame period range.
@@ -777,6 +781,7 @@ static int s5k6aa_set_prev_config(struct s5k6aa *s5k6aa,
 
 /**
  * s5k6aa_initialize_isp - basic ISP MCU initialization
+ * @sd: pointer to V4L2 sub-device descriptor
  *
  * Configure AHB addresses for registers read/write; configure PLLs for
  * required output pixel clock. The ISP power supply needs to be already
index ad2df998f9c563dc9274d182429ad66eaed45251..d575b3e7e835efe60c94756f94027e55dd30da41 100644 (file)
@@ -86,6 +86,7 @@ static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable);
 /**
  * struct tvp514x_decoder - TVP5146/47 decoder object
  * @sd: Subdevice Slave handle
+ * @hdl: embedded &struct v4l2_ctrl_handler
  * @tvp514x_regs: copy of hw's regs with preset values.
  * @pdata: Board specific
  * @ver: Chip version
@@ -98,6 +99,9 @@ static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable);
  * @std_list: Standards list
  * @input: Input routing at chip level
  * @output: Output routing at chip level
+ * @pad: subdev media pad associated with the decoder
+ * @format: media bus frame format
+ * @int_seq: driver's register init sequence
  */
 struct tvp514x_decoder {
        struct v4l2_subdev sd;
@@ -211,7 +215,7 @@ static struct tvp514x_reg tvp514x_reg_list_default[] = {
        {TOK_TERM, 0, 0},
 };
 
-/**
+/*
  * List of image formats supported by TVP5146/47 decoder
  * Currently we are using 8 bit mode only, but can be
  * extended to 10/20 bit mode.
@@ -226,7 +230,7 @@ static const struct v4l2_fmtdesc tvp514x_fmt_list[] = {
        },
 };
 
-/**
+/*
  * Supported standards -
  *
  * Currently supports two standards only, need to add support for rest of the
@@ -931,7 +935,7 @@ static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
  * tvp514x_set_pad_format() - V4L2 decoder interface handler for set pad format
  * @sd: pointer to standard V4L2 sub-device structure
  * @cfg: pad configuration
- * @format: pointer to v4l2_subdev_format structure
+ * @fmt: pointer to v4l2_subdev_format structure
  *
  * Set pad format for the output pad
  */
@@ -1199,7 +1203,7 @@ static const struct tvp514x_reg tvp514xm_init_reg_seq[] = {
        {TOK_TERM, 0, 0},
 };
 
-/**
+/*
  * I2C Device Table -
  *
  * name - Name of the actual device/chip.
index 11829c0fa138050df4a80be72da20d422e6f9c54..509d69e6ca4adc27e7d26d05367f9d75d96e656d 100644 (file)
@@ -82,11 +82,11 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
  * @start_addr_lo:     DMA ring buffer start address, lower part
  * @start_addr_hi:     DMA ring buffer start address, higher part
  * @size:              DMA ring buffer size register
-                      Bits [0-7]:     DMA packet size, 188 bytes
-                      Bits [16-23]:   packets count in block, 128 packets
-                      Bits [24-31]:   blocks count, 8 blocks
+ *                     * Bits [0-7]:   DMA packet size, 188 bytes
+ *                     * Bits [16-23]: packets count in block, 128 packets
+ *                     * Bits [24-31]: blocks count, 8 blocks
  * @timeout:           DMA timeout in units of 8ns
-                      For example, value of 375000000 equals to 3 sec
+ *                     For example, value of 375000000 equals to 3 sec
  * @curr_addr_lo:      Current ring buffer head address, lower part
  * @curr_addr_hi:      Current ring buffer head address, higher part
  * @stat_pkt_received: Statistic register, not tested
index d28211bb967420c4470ad45896b55cbb9ea1a516..58d6b5131dd045a78c9a4d960d1df6423c7c291c 100644 (file)
@@ -175,7 +175,7 @@ out:
        return 0;
 }
 
-/**
+/*
  * Set channel Quality Profile (0-3).
  */
 void solo_s_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch,
index eb5a9eae7c8ef7046fc08333bff244621959d732..dd199bfc1d457ad36dda7bccbf59ce66862f82e4 100644 (file)
@@ -404,6 +404,7 @@ static const struct v4l2_file_operations vip_fops = {
  * vidioc_querycap - return capabilities of device
  * @file: descriptor of device
  * @cap: contains return values
+ * @priv: unused
  *
  * the capabilities of the device are returned
  *
@@ -429,6 +430,7 @@ static int vidioc_querycap(struct file *file, void *priv,
  * vidioc_s_std - set video standard
  * @file: descriptor of device
  * @std: contains standard to be set
+ * @priv: unused
  *
  * the video standard is set
  *
@@ -466,6 +468,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id std)
 /**
  * vidioc_g_std - get video standard
  * @file: descriptor of device
+ * @priv: unused
  * @std: contains return values
  *
  * the current video standard is returned
@@ -483,6 +486,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std)
 /**
  * vidioc_querystd - get possible video standards
  * @file: descriptor of device
+ * @priv: unused
  * @std: contains return values
  *
  * all possible video standards are returned
@@ -512,6 +516,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
 /**
  * vidioc_s_input - set input line
  * @file: descriptor of device
+ * @priv: unused
  * @i: new input line number
  *
  * the current active input line is set
@@ -538,6 +543,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
 /**
  * vidioc_g_input - return input line
  * @file: descriptor of device
+ * @priv: unused
  * @i: returned input line number
  *
  * the current active input line is returned
@@ -554,6 +560,8 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
 
 /**
  * vidioc_enum_fmt_vid_cap - return video capture format
+ * @file: descriptor of device
+ * @priv: unused
  * @f: returned format information
  *
  * returns name and format of video capture
@@ -577,6 +585,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
 /**
  * vidioc_try_fmt_vid_cap - set video capture format
  * @file: descriptor of device
+ * @priv: unused
  * @f: new format
  *
  * new video format is set which includes width and
@@ -639,6 +648,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
 /**
  * vidioc_s_fmt_vid_cap - set current video format parameters
  * @file: descriptor of device
+ * @priv: unused
  * @f: returned format information
  *
  * set new capture format
@@ -706,6 +716,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
 /**
  * vidioc_g_fmt_vid_cap - get current video format parameters
  * @file: descriptor of device
+ * @priv: unused
  * @f: contains format information
  *
  * returns current video format parameters
index 7439db212a69d2f401eacd5586505f59b85657b2..82ff9c9494f3d99a371495a60010ed17874679ed 100644 (file)
 #include "tw68.h"
 
 /**
- *  @rp                pointer to current risc program position
- *  @sglist    pointer to "scatter-gather list" of buffer pointers
- *  @offset    offset to target memory buffer
- *  @sync_line 0 -> no sync, 1 -> odd sync, 2 -> even sync
- *  @bpl       number of bytes per scan line
- *  @padding   number of bytes of padding to add
- *  @lines     number of lines in field
- *  @jump      insert a jump at the start
+ * tw68_risc_field
+ *  @rp:       pointer to current risc program position
+ *  @sglist:   pointer to "scatter-gather list" of buffer pointers
+ *  @offset:   offset to target memory buffer
+ *  @sync_line:        0 -> no sync, 1 -> odd sync, 2 -> even sync
+ *  @bpl:      number of bytes per scan line
+ *  @padding:  number of bytes of padding to add
+ *  @lines:    number of lines in field
+ *  @jump:     insert a jump at the start
  */
 static __le32 *tw68_risc_field(__le32 *rp, struct scatterlist *sglist,
                            unsigned int offset, u32 sync_line,
@@ -120,18 +121,18 @@ static __le32 *tw68_risc_field(__le32 *rp, struct scatterlist *sglist,
  *     memory for the dma controller "program" and then fills in that
  *     memory with the appropriate "instructions".
  *
- *     @pci_dev        structure with info about the pci
+ *     @pci:           structure with info about the pci
  *                     slot which our device is in.
- *     @risc           structure with info about the memory
+ *     @buf:           structure with info about the memory
  *                     used for our controller program.
- *     @sglist         scatter-gather list entry
- *     @top_offset     offset within the risc program area for the
+ *     @sglist:        scatter-gather list entry
+ *     @top_offset:    offset within the risc program area for the
  *                     first odd frame line
- *     @bottom_offset  offset within the risc program area for the
+ *     @bottom_offset: offset within the risc program area for the
  *                     first even frame line
- *     @bpl            number of data bytes per scan line
- *     @padding        number of extra bytes to add at end of line
- *     @lines          number of scan lines
+ *     @bpl:           number of data bytes per scan line
+ *     @padding:       number of extra bytes to add at end of line
+ *     @lines:         number of scan lines
  */
 int tw68_risc_buffer(struct pci_dev *pci,
                        struct tw68_buf *buf,
index 07e89a4985a6064a982b7d39cb6667613c05984a..16352e2263d2d2efcf165aabded6d10e3f216e02 100644 (file)
@@ -47,8 +47,9 @@ EXPORT_SYMBOL_GPL(vpif_lock);
 void __iomem *vpif_base;
 EXPORT_SYMBOL_GPL(vpif_base);
 
-/**
+/*
  * vpif_ch_params: video standard configuration parameters for vpif
+ *
  * The table must include all presets from supported subdevices.
  */
 const struct vpif_channel_config_params vpif_ch_params[] = {
index a89367ab1e068a593396c808c892ca0c8bf5d70d..fca4dc829f73eeb0eb9a1c191fc6daae940587a9 100644 (file)
@@ -109,7 +109,7 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
  * @vq: vb2_queue ptr
  * @nbuffers: ptr to number of buffers requested by application
  * @nplanes: contains number of distinct video planes needed to hold a frame
- * @sizes[]: contains the size (in bytes) of each plane.
+ * @sizes: contains the size (in bytes) of each plane.
  * @alloc_devs: ptr to allocation context
  *
  * This callback function is called when reqbuf() is called to adjust
@@ -167,7 +167,7 @@ static void vpif_buffer_queue(struct vb2_buffer *vb)
 
 /**
  * vpif_start_streaming : Starts the DMA engine for streaming
- * @vb: ptr to vb2_buffer
+ * @vq: ptr to vb2_queue
  * @count: number of buffers
  */
 static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -629,7 +629,7 @@ static void vpif_calculate_offsets(struct channel_obj *ch)
 
 /**
  * vpif_get_default_field() - Get default field type based on interface
- * @vpif_params - ptr to vpif params
+ * @iface: ptr to vpif interface
  */
 static inline enum v4l2_field vpif_get_default_field(
                                struct vpif_interface *iface)
@@ -640,8 +640,8 @@ static inline enum v4l2_field vpif_get_default_field(
 
 /**
  * vpif_config_addr() - function to configure buffer address in vpif
- * @ch - channel ptr
- * @muxmode - channel mux mode
+ * @ch: channel ptr
+ * @muxmode: channel mux mode
  */
 static void vpif_config_addr(struct channel_obj *ch, int muxmode)
 {
@@ -661,9 +661,9 @@ static void vpif_config_addr(struct channel_obj *ch, int muxmode)
 
 /**
  * vpif_input_to_subdev() - Maps input to sub device
- * @vpif_cfg - global config ptr
- * @chan_cfg - channel config ptr
- * @input_index - Given input index from application
+ * @vpif_cfg: global config ptr
+ * @chan_cfg: channel config ptr
+ * @input_index: Given input index from application
  *
  * lookup the sub device information for a given input index.
  * we report all the inputs to application. inputs table also
@@ -699,9 +699,9 @@ static int vpif_input_to_subdev(
 
 /**
  * vpif_set_input() - Select an input
- * @vpif_cfg - global config ptr
- * @ch - channel
- * @_index - Given input index from application
+ * @vpif_cfg: global config ptr
+ * @ch: channel
+ * @index: Given input index from application
  *
  * Select the given input.
  */
@@ -792,7 +792,7 @@ static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
  * vpif_g_std() - get STD handler
  * @file: file ptr
  * @priv: file handle
- * @std_id: ptr to std id
+ * @std: ptr to std id
  */
 static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
 {
@@ -933,7 +933,7 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
  * vpif_enum_fmt_vid_cap() - ENUM_FMT handler
  * @file: file ptr
  * @priv: file handle
- * @index: input index
+ * @fmt: ptr to V4L2 format descriptor
  */
 static int vpif_enum_fmt_vid_cap(struct file *file, void  *priv,
                                        struct v4l2_fmtdesc *fmt)
@@ -1745,6 +1745,7 @@ static int vpif_remove(struct platform_device *device)
 #ifdef CONFIG_PM_SLEEP
 /**
  * vpif_suspend: vpif device suspend
+ * @dev: pointer to &struct device
  */
 static int vpif_suspend(struct device *dev)
 {
index ff2f75a328c98451e8c238bba2ae228319b962ce..7be636237acf8ec3fbb116f260ced51dbdd586ac 100644 (file)
@@ -102,7 +102,7 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
  * @vq: vb2_queue ptr
  * @nbuffers: ptr to number of buffers requested by application
  * @nplanes: contains number of distinct video planes needed to hold a frame
- * @sizes[]: contains the size (in bytes) of each plane.
+ * @sizes: contains the size (in bytes) of each plane.
  * @alloc_devs: ptr to allocation context
  *
  * This callback function is called when reqbuf() is called to adjust
@@ -158,7 +158,7 @@ static void vpif_buffer_queue(struct vb2_buffer *vb)
 
 /**
  * vpif_start_streaming : Starts the DMA engine for streaming
- * @vb: ptr to vb2_buffer
+ * @vq: ptr to vb2_queue
  * @count: number of buffers
  */
 static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -766,9 +766,9 @@ static int vpif_enum_output(struct file *file, void *fh,
 
 /**
  * vpif_output_to_subdev() - Maps output to sub device
- * @vpif_cfg - global config ptr
- * @chan_cfg - channel config ptr
- * @index - Given output index from application
+ * @vpif_cfg: global config ptr
+ * @chan_cfg: channel config ptr
+ * @index: Given output index from application
  *
  * lookup the sub device information for a given output index.
  * we report all the output to application. output table also
@@ -802,9 +802,9 @@ vpif_output_to_subdev(struct vpif_display_config *vpif_cfg,
 
 /**
  * vpif_set_output() - Select an output
- * @vpif_cfg - global config ptr
- * @ch - channel
- * @index - Given output index from application
+ * @vpif_cfg: global config ptr
+ * @ch: channel
+ * @index: Given output index from application
  *
  * Select the given output.
  */
index 948fe01f6c96cc1745e96bccee0bde7283fc0abc..ed9302caa00484c55e4e75474acd204fa77df404 100644 (file)
@@ -146,6 +146,7 @@ static int fimc_stop_capture(struct fimc_dev *fimc, bool suspend)
 
 /**
  * fimc_capture_config_update - apply the camera interface configuration
+ * @ctx: FIMC capture context
  *
  * To be called from within the interrupt handler with fimc.slock
  * spinlock held. It updates the camera pixel crop, rotation and
@@ -858,6 +859,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
  * fimc_get_sensor_frame_desc - query the sensor for media bus frame parameters
  * @sensor: pointer to the sensor subdev
  * @plane_fmt: provides plane sizes corresponding to the frame layout entries
+ * @num_planes: number of planes
  * @try: true to set the frame parameters, false to query only
  *
  * This function is used by this driver only for compressed/blob data formats.
@@ -1101,6 +1103,7 @@ static int fimc_cap_g_input(struct file *file, void *priv, unsigned int *i)
 /**
  * fimc_pipeline_validate - check for formats inconsistencies
  *                          between source and sink pad of each link
+ * @fimc:      the FIMC device this context applies to
  *
  * Return 0 if all formats match or -EPIPE otherwise.
  */
index c15596b56dc97af7bfb4ef5079c5161a2434b69f..0ef583cfc424f61671d0374909aacdaaf4c67fe0 100644 (file)
@@ -60,6 +60,7 @@ static void __setup_sensor_notification(struct fimc_md *fmd,
 
 /**
  * fimc_pipeline_prepare - update pipeline information with subdevice pointers
+ * @p: fimc pipeline
  * @me: media entity terminating the pipeline
  *
  * Caller holds the graph mutex.
@@ -151,8 +152,8 @@ static int __subdev_set_power(struct v4l2_subdev *sd, int on)
 
 /**
  * fimc_pipeline_s_power - change power state of all pipeline subdevs
- * @fimc: fimc device terminating the pipeline
- * @state: true to power on, false to power off
+ * @p: fimc device terminating the pipeline
+ * @on: true to power on, false to power off
  *
  * Needs to be called with the graph mutex held.
  */
@@ -219,6 +220,7 @@ static int __fimc_pipeline_enable(struct exynos_media_pipeline *ep,
 /**
  * __fimc_pipeline_open - update the pipeline information, enable power
  *                        of all pipeline subdevs and the sensor clock
+ * @ep: fimc device terminating the pipeline
  * @me: media entity to start graph walk with
  * @prepare: true to walk the current pipeline and acquire all subdevs
  *
@@ -252,7 +254,7 @@ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
 
 /**
  * __fimc_pipeline_close - disable the sensor clock and pipeline power
- * @fimc: fimc device terminating the pipeline
+ * @ep: fimc device terminating the pipeline
  *
  * Disable power of all subdevs and turn the external sensor clock off.
  */
@@ -281,7 +283,7 @@ static int __fimc_pipeline_close(struct exynos_media_pipeline *ep)
 
 /**
  * __fimc_pipeline_s_stream - call s_stream() on pipeline subdevs
- * @pipeline: video pipeline structure
+ * @ep: video pipeline structure
  * @on: passed as the s_stream() callback argument
  */
 static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on)
@@ -902,6 +904,7 @@ static int __fimc_md_create_fimc_is_links(struct fimc_md *fmd)
 
 /**
  * fimc_md_create_links - create default links between registered entities
+ * @fmd: fimc media device
  *
  * Parallel interface sensor entities are connected directly to FIMC capture
  * entities. The sensors using MIPI CSIS bus are connected through immutable
index 560aadabcb111c3de8aac551c1e4809f741de245..cba46a65633836d5632c61107e7e8dbecdf1b39b 100644 (file)
@@ -189,7 +189,7 @@ struct csis_drvdata {
  * @irq: requested s5p-mipi-csis irq number
  * @interrupt_mask: interrupt mask of the all used interrupts
  * @flags: the state variable for power and streaming control
- * @clock_frequency: device bus clock frequency
+ * @clk_frequency: device bus clock frequency
  * @hs_settle: HS-RX settle time
  * @num_lanes: number of MIPI-CSI data lanes used
  * @max_num_lanes: maximum number of MIPI-CSI data lanes supported
index fb43025df57379834a99e1be3d6124c827cd91a9..dba21215dc845df2aec191127bf59502bdc903cd 100644 (file)
@@ -339,9 +339,9 @@ static int restart_video_queue(struct viu_dmaqueue *vidq)
        }
 }
 
-static void viu_vid_timeout(unsigned long data)
+static void viu_vid_timeout(struct timer_list *t)
 {
-       struct viu_dev *dev = (struct viu_dev *)data;
+       struct viu_dev *dev = from_timer(dev, t, vidq.timeout);
        struct viu_buf *buf;
        struct viu_dmaqueue *vidq = &dev->vidq;
 
@@ -1466,8 +1466,7 @@ static int viu_of_probe(struct platform_device *op)
        viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
                        "saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
 
-       setup_timer(&viu_dev->vidq.timeout, viu_vid_timeout,
-                   (unsigned long)viu_dev);
+       timer_setup(&viu_dev->vidq.timeout, viu_vid_timeout, 0);
        viu_dev->std = V4L2_STD_NTSC_M;
        viu_dev->first = 1;
 
index b7731b18ecae1741ebee3be1bf9188659bfeb398..aa3ce41898bc82c194c32e4055d5facdfa789043 100644 (file)
@@ -59,6 +59,7 @@ struct h264_fb {
  * @read_idx  : read index
  * @write_idx : write index
  * @count     : buffer count in list
+ * @reserved  : for 8 bytes alignment
  */
 struct h264_ring_fb_list {
        struct h264_fb fb_list[H264_MAX_FB_NUM];
index b9fad6a488799ebc7fad8b12b6990b9c33d7c60b..3e84a761db3a9188500610a2a83a7c80d61580da 100644 (file)
@@ -155,7 +155,6 @@ struct vdec_vp8_vpu_inst {
  * @reg_base              : HW register base address
  * @frm_cnt               : decode frame count
  * @ctx                           : V4L2 context
- * @dev                           : platform device
  * @vpu                           : VPU instance for decoder
  * @vsi                           : VPU share information
  */
index 4eb3be37ba143980291f33e0de40835a352f42ad..6cf31b366aad2d94df0c80bd0b0b300ba854fa48 100644 (file)
@@ -34,7 +34,7 @@ static const char h264_filler_marker[] = {0x0, 0x0, 0x0, 0x1, 0xc};
 #define H264_FILLER_MARKER_SIZE ARRAY_SIZE(h264_filler_marker)
 #define VENC_PIC_BITSTREAM_BYTE_CNT 0x0098
 
-/**
+/*
  * enum venc_h264_vpu_work_buf - h264 encoder buffer index
  */
 enum venc_h264_vpu_work_buf {
@@ -50,7 +50,7 @@ enum venc_h264_vpu_work_buf {
        VENC_H264_VPU_WORK_BUF_MAX,
 };
 
-/**
+/*
  * enum venc_h264_bs_mode - for bs_mode argument in h264_enc_vpu_encode
  */
 enum venc_h264_bs_mode {
index acb639c4abd2b65c3b59e244c671f6981b9ef790..957420dd60de245260e39d5aef04a8efa45c882c 100644 (file)
@@ -34,7 +34,7 @@
 /* This ac_tag is vp8 frame tag. */
 #define MAX_AC_TAG_SIZE 10
 
-/**
+/*
  * enum venc_vp8_vpu_work_buf - vp8 encoder buffer index
  */
 enum venc_vp8_vpu_work_buf {
index 853d598937f69d4667ce5a28d078e53365c2b134..1ff6a93262b7a674b718a44a17e1d686c21314bb 100644 (file)
@@ -181,6 +181,7 @@ struct share_obj {
  * @extmem:            VPU extended memory information
  * @reg:               VPU TCM and configuration registers
  * @run:               VPU initialization status
+ * @wdt:               VPU watchdog workqueue
  * @ipi_desc:          VPU IPI descriptor
  * @recv_buf:          VPU DTCM share buffer for receiving. The
  *                     receive buffer is only accessed in interrupt context.
@@ -194,7 +195,7 @@ struct share_obj {
  *                     suppose a client is using VPU to decode VP8.
  *                     If the other client wants to encode VP8,
  *                     it has to wait until VP8 decode completes.
- * @wdt_refcnt         WDT reference count to make sure the watchdog can be
+ * @wdt_refcnt:                WDT reference count to make sure the watchdog can be
  *                     disabled if no other client is using VPU service
  * @ack_wq:            The wait queue for each codec and mdp. When sleeping
  *                     processes wake up, they will check the condition
index 9d3f0cb1d95ae25519045fb501dc955b67123d65..295f34ad10800b1019a172e90dc805589138f234 100644 (file)
@@ -235,6 +235,7 @@ enum pxa_mbus_layout {
  *                     stored in memory in the following way:
  * @packing:           Type of sample-packing, that has to be used
  * @order:             Sample order when storing in memory
+ * @layout:            Planes layout in memory
  * @bits_per_sample:   How many bits the bridge has to sample
  */
 struct pxa_mbus_pixelfmt {
@@ -852,10 +853,10 @@ static void pxa_camera_dma_irq_v(void *data)
 /**
  * pxa_init_dma_channel - init dma descriptors
  * @pcdev: pxa camera device
- * @vb: videobuffer2 buffer
- * @dma: dma video buffer
+ * @buf: pxa camera buffer
  * @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V')
- * @cibr: camera Receive Buffer Register
+ * @sg: dma scatter list
+ * @sglen: dma scatter list length
  *
  * Prepares the pxa dma descriptors to transfer one camera channel.
  *
@@ -1010,6 +1011,8 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
 /**
  * pxa_camera_check_link_miss - check missed DMA linking
  * @pcdev: camera device
+ * @last_submitted: an opaque DMA cookie for last submitted
+ * @last_issued: an opaque DMA cookie for last issued
  *
  * The DMA chaining is done with DMA running. This means a tiny temporal window
  * remains, where a buffer is queued on the chain, while the chain is already
index 3245bc45f4a0636581599b01529ec34fae445c44..b13dec3081e55266486156bd8d2d8d398955b8d0 100644 (file)
@@ -1132,7 +1132,7 @@ static int fdp1_device_process(struct fdp1_ctx *ctx)
  * mem2mem callbacks
  */
 
-/**
+/*
  * job_ready() - check whether an instance is ready to be scheduled to run
  */
 static int fdp1_m2m_job_ready(void *priv)
index 070bac36d766891dabeb01e1f1bf064eb989de58..f6092ae459129478aebb628f39685d9d35669532 100644 (file)
@@ -257,7 +257,7 @@ struct jpu_fmt {
 };
 
 /**
- * jpu_q_data - parameters of one queue
+ * struct jpu_q_data - parameters of one queue
  * @fmtinfo: driver-specific format of this queue
  * @format: multiplanar format of this queue
  * @sequence: sequence number
@@ -269,7 +269,7 @@ struct jpu_q_data {
 };
 
 /**
- * jpu_ctx - the device context data
+ * struct jpu_ctx - the device context data
  * @jpu: JPEG IP device for this context
  * @encoder: compression (encode) operation or decompression (decode)
  * @compr_quality: destination image quality in compression (encode) mode
index c4ab63986c8f08df7d33e15fdf2765a7e7d8d099..79bc0ef6bb413d8cd15aaaea3662444d2b047635 100644 (file)
@@ -103,6 +103,7 @@ static const struct camif_fmt camif_formats[] = {
 
 /**
  * s3c_camif_find_format() - lookup camif color format by fourcc or an index
+ * @vp: video path (DMA) description (codec/preview)
  * @pixelformat: fourcc to match, ignored if null
  * @index: index to the camif_formats array, ignored if negative
  */
index 1839a86cc2a557dde0dd590eda4dadf1bc5d0815..bc68dbbcaec1667e29088ecb35f17944ec1ba85f 100644 (file)
@@ -145,9 +145,9 @@ void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq)
        }
 }
 
-static void s5p_mfc_watchdog(unsigned long arg)
+static void s5p_mfc_watchdog(struct timer_list *t)
 {
-       struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;
+       struct s5p_mfc_dev *dev = from_timer(dev, t, watchdog_timer);
 
        if (test_bit(0, &dev->hw_lock))
                atomic_inc(&dev->watchdog_cnt);
@@ -1314,9 +1314,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
        dev->hw_lock = 0;
        INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
        atomic_set(&dev->watchdog_cnt, 0);
-       init_timer(&dev->watchdog_timer);
-       dev->watchdog_timer.data = (unsigned long)dev;
-       dev->watchdog_timer.function = s5p_mfc_watchdog;
+       timer_setup(&dev->watchdog_timer, s5p_mfc_watchdog, 0);
 
        ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
        if (ret)
index 15a562af13c774711c8478cf682491e916b8784b..dedc1b024f6f1ef692f6c41f0aec892521b51035 100644 (file)
@@ -267,7 +267,7 @@ static void sh_veu_process(struct sh_veu_dev *veu,
        sh_veu_reg_write(veu, VEU_EIER, 1); /* enable interrupt in VEU */
 }
 
-/**
+/*
  * sh_veu_device_run() - prepares and starts the device
  *
  * This will be called by the framework when it decides to schedule a particular
index 0116097c0c0faf8ce7db5a42032213a92ba615a2..270ec613c27ca04ea68041ab13009f6b3c4b58ac 100644 (file)
@@ -306,16 +306,17 @@ update_cache:
 }
 
 /**
- * @icd                - soc-camera device
- * @rect       - camera cropping window
- * @subrect    - part of rect, sent to the user
- * @mf         - in- / output camera output window
- * @width      - on input: max host input width
- *               on output: user width, mapped back to input
- * @height     - on input: max host input height
- *               on output: user height, mapped back to input
- * @host_can_scale - host can scale this pixel format
- * @shift      - shift, used for scaling
+ * soc_camera_client_scale
+ * @icd:               soc-camera device
+ * @rect:              camera cropping window
+ * @subrect:           part of rect, sent to the user
+ * @mf:                        in- / output camera output window
+ * @width:             on input: max host input width;
+ *                     on output: user width, mapped back to input
+ * @height:            on input: max host input height;
+ *                     on output: user height, mapped back to input
+ * @host_can_scale:    host can scale this pixel format
+ * @shift:             shift, used for scaling
  */
 int soc_camera_client_scale(struct soc_camera_device *icd,
                        struct v4l2_rect *rect, struct v4l2_rect *subrect,
index 59280ac319374c11103a30ec6123a8f3192a1577..a0acee7671b16170ab9f19f430349d1aec00aeba 100644 (file)
@@ -61,9 +61,9 @@ static int load_c8sectpfe_fw(struct c8sectpfei *fei);
 
 #define FIFO_LEN 1024
 
-static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
+static void c8sectpfe_timer_interrupt(struct timer_list *t)
 {
-       struct c8sectpfei *fei = (struct c8sectpfei *)ac8sectpfei;
+       struct c8sectpfei *fei = from_timer(fei, t, timer);
        struct channel_info *channel;
        int chan_num;
 
@@ -865,8 +865,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
        }
 
        /* Setup timer interrupt */
-       setup_timer(&fei->timer, c8sectpfe_timer_interrupt,
-                   (unsigned long)fei);
+       timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);
 
        mutex_init(&fei->lock);
 
index a7e5eed17adae6487307b1670f79be64535c61cd..17f1eb0ba957faea55d4df7909cb0b1a84384a29 100644 (file)
@@ -134,7 +134,7 @@ enum hva_h264_sei_payload_type {
        SEI_FRAME_PACKING_ARRANGEMENT = 45
 };
 
-/**
+/*
  * stereo Video Info struct
  */
 struct hva_h264_stereo_video_sei {
@@ -146,7 +146,9 @@ struct hva_h264_stereo_video_sei {
        u8 right_view_self_contained_flag;
 };
 
-/**
+/*
+ * struct hva_h264_td
+ *
  * @frame_width: width in pixels of the buffer containing the input frame
  * @frame_height: height in pixels of the buffer containing the input frame
  * @frame_num: the parameter to be written in the slice header
@@ -352,7 +354,9 @@ struct hva_h264_td {
        u32 addr_brc_in_out_parameter;
 };
 
-/**
+/*
+ * struct hva_h264_slice_po
+ *
  * @ slice_size: slice size
  * @ slice_start_time: start time
  * @ slice_stop_time: stop time
@@ -365,7 +369,9 @@ struct hva_h264_slice_po {
        u32 slice_num;
 };
 
-/**
+/*
+ * struct hva_h264_po
+ *
  * @ bitstream_size: bitstream size
  * @ dct_bitstream_size: dtc bitstream size
  * @ stuffing_bits: number of stuffing bits inserted by the encoder
@@ -391,7 +397,9 @@ struct hva_h264_task {
        struct hva_h264_po po;
 };
 
-/**
+/*
+ * struct hva_h264_ctx
+ *
  * @seq_info:  sequence information buffer
  * @ref_frame: reference frame buffer
  * @rec_frame: reconstructed frame buffer
index 45bd10544189e9a3c2144c11b1887e6dd091a2a8..e395aa85c8ad66341f9e17b9d488120cfe43feed 100644 (file)
@@ -926,7 +926,7 @@ static struct vpe_ctx *file2ctx(struct file *file)
  * mem2mem callbacks
  */
 
-/**
+/*
  * job_ready() - check whether an instance is ready to be scheduled to run
  */
 static int job_ready(void *priv)
index b01fba020d5f7dba97a6e23bda9763b411976a8d..065483e62db4d7ede0893c61a2fc3d099e9750ad 100644 (file)
@@ -343,7 +343,7 @@ static void schedule_irq(struct vim2m_dev *dev, int msec_timeout)
  * mem2mem callbacks
  */
 
-/**
+/*
  * job_ready() - check whether an instance is ready to be scheduled to run
  */
 static int job_ready(void *priv)
@@ -388,9 +388,9 @@ static void device_run(void *priv)
        schedule_irq(dev, ctx->transtime);
 }
 
-static void device_isr(unsigned long priv)
+static void device_isr(struct timer_list *t)
 {
-       struct vim2m_dev *vim2m_dev = (struct vim2m_dev *)priv;
+       struct vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer);
        struct vim2m_ctx *curr_ctx;
        struct vb2_v4l2_buffer *src_vb, *dst_vb;
        unsigned long flags;
@@ -1024,7 +1024,7 @@ static int vim2m_probe(struct platform_device *pdev)
        v4l2_info(&dev->v4l2_dev,
                        "Device registered as /dev/video%d\n", vfd->num);
 
-       setup_timer(&dev->timer, device_isr, (long)dev);
+       timer_setup(&dev->timer, device_isr, 0);
        platform_set_drvdata(pdev, dev);
 
        dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
index 8b5cbb6b7a704651117960d2ff7bd95a24ca4e7d..4257451f1bd8c107d89778fd68d8b435ccb28fb6 100644 (file)
@@ -70,6 +70,7 @@ struct vsp1_dl_body {
  * @dma: DMA address for the header
  * @body0: first display list body
  * @fragments: list of extra display list bodies
+ * @has_chain: if true, indicates that there's a partition chain
  * @chain: entry in the display list partition chain
  */
 struct vsp1_dl_list {
index 271f725b17e85c77017f08ac9181510369f16872..540ac887a63cdd562213bf377fbf05574fb27143 100644 (file)
@@ -158,7 +158,7 @@ enum si476x_ctrl_idx {
 };
 static struct v4l2_ctrl_config si476x_ctrls[] = {
 
-       /**
+       /*
         * SI476X during its station seeking (or tuning) process uses several
         * parameters to determine if "the station" is valid:
         *
@@ -197,7 +197,7 @@ static struct v4l2_ctrl_config si476x_ctrls[] = {
                .step   = 2,
        },
 
-       /**
+       /*
         * #V4L2_CID_SI476X_HARMONICS_COUNT -- number of harmonics
         * built-in power-line noise suppression filter is to reject
         * during AM-mode operation.
@@ -213,7 +213,7 @@ static struct v4l2_ctrl_config si476x_ctrls[] = {
                .step   = 1,
        },
 
-       /**
+       /*
         * #V4L2_CID_SI476X_DIVERSITY_MODE -- configuration which
         * two tuners working in diversity mode are to work in.
         *
@@ -237,7 +237,7 @@ static struct v4l2_ctrl_config si476x_ctrls[] = {
                .max    = ARRAY_SIZE(phase_diversity_modes) - 1,
        },
 
-       /**
+       /*
         * #V4L2_CID_SI476X_INTERCHIP_LINK -- inter-chip link in
         * diversity mode indicator. Allows user to determine if two
         * chips working in diversity mode have established a link
@@ -296,11 +296,15 @@ struct si476x_radio_ops {
 /**
  * struct si476x_radio - radio device
  *
- * @core: Pointer to underlying core device
+ * @v4l2dev: Pointer to V4L2 device created by V4L2 subsystem
  * @videodev: Pointer to video device created by V4L2 subsystem
+ * @ctrl_handler: V4L2 controls handler
+ * @core: Pointer to underlying core device
  * @ops: Vtable of functions. See struct si476x_radio_ops for details
- * @kref: Reference counter
- * @core_lock: An r/w semaphore to brebvent the deletion of underlying
+ * @debugfs: pointer to &struct dentry for debugfs
+ * @audmode: audio mode, as defined for the rxsubchans field
+ *          at videodev2.h
+ *
  * core structure if the radio device is being used
  */
 struct si476x_radio {
index 903fcd5e99c07e3928c0ddf658cf146cc0a568e6..3cbdc085c65df309dc5e43ec97e95806af21aa99 100644 (file)
@@ -1330,7 +1330,7 @@ static int wl1273_fm_vidioc_s_input(struct file *file, void *priv,
 
 /**
  * wl1273_fm_set_tx_power() -  Set the transmission power value.
- * @core:                      A pointer to the device struct.
+ * @radio:                     A pointer to the device struct.
  * @power:                     The new power value.
  */
 static int wl1273_fm_set_tx_power(struct wl1273_device *radio, u16 power)
index f54bc5d23893da0b5ce0e1e66592f1d0ab30746b..ec4ded84cd170b93d231eeff298c6a9d0a5b56c9 100644 (file)
@@ -339,7 +339,7 @@ static void img_ir_decoder_preprocess(struct img_ir_decoder *decoder)
 /**
  * img_ir_decoder_convert() - Generate internal timings in decoder.
  * @decoder:   Decoder to be converted to internal timings.
- * @timings:   Timing register values.
+ * @reg_timings: Timing register values.
  * @clock_hz:  IR clock rate in Hz.
  *
  * Fills out the repeat timings and timing register values for a specific clock
index b25b35b3f6da530b1869ae4844a0e689f22ed186..eb943e862515dc55800ef080ebf75860737e7033 100644 (file)
@@ -492,7 +492,7 @@ static void free_imon_context(struct imon_context *ictx)
        dev_dbg(dev, "%s: iMON context freed\n", __func__);
 }
 
-/**
+/*
  * Called when the Display device (e.g. /dev/lcd0)
  * is opened by the application.
  */
@@ -542,7 +542,7 @@ exit:
        return retval;
 }
 
-/**
+/*
  * Called when the display device (e.g. /dev/lcd0)
  * is closed by the application.
  */
@@ -575,7 +575,7 @@ static int display_close(struct inode *inode, struct file *file)
        return retval;
 }
 
-/**
+/*
  * Sends a packet to the device -- this function must be called with
  * ictx->lock held, or its unlock/lock sequence while waiting for tx
  * to complete can/will lead to a deadlock.
@@ -664,7 +664,7 @@ static int send_packet(struct imon_context *ictx)
        return retval;
 }
 
-/**
+/*
  * Sends an associate packet to the iMON 2.4G.
  *
  * This might not be such a good idea, since it has an id collision with
@@ -694,7 +694,7 @@ static int send_associate_24g(struct imon_context *ictx)
        return retval;
 }
 
-/**
+/*
  * Sends packets to setup and show clock on iMON display
  *
  * Arguments: year - last 2 digits of year, month - 1..12,
@@ -781,7 +781,7 @@ static int send_set_imon_clock(struct imon_context *ictx,
        return retval;
 }
 
-/**
+/*
  * These are the sysfs functions to handle the association on the iMON 2.4G LT.
  */
 static ssize_t show_associate_remote(struct device *d,
@@ -823,7 +823,7 @@ static ssize_t store_associate_remote(struct device *d,
        return count;
 }
 
-/**
+/*
  * sysfs functions to control internal imon clock
  */
 static ssize_t show_imon_clock(struct device *d,
@@ -923,7 +923,7 @@ static const struct attribute_group imon_rf_attr_group = {
        .attrs = imon_rf_sysfs_entries
 };
 
-/**
+/*
  * Writes data to the VFD.  The iMON VFD is 2x16 characters
  * and requires data in 5 consecutive USB interrupt packets,
  * each packet but the last carrying 7 bytes.
@@ -1008,7 +1008,7 @@ exit:
        return (!retval) ? n_bytes : retval;
 }
 
-/**
+/*
  * Writes data to the LCD.  The iMON OEM LCD screen expects 8-byte
  * packets. We accept data as 16 hexadecimal digits, followed by a
  * newline (to make it easy to drive the device from a command-line
@@ -1066,7 +1066,7 @@ exit:
        return (!retval) ? n_bytes : retval;
 }
 
-/**
+/*
  * Callback function for USB core API: transmit data
  */
 static void usb_tx_callback(struct urb *urb)
@@ -1087,7 +1087,7 @@ static void usb_tx_callback(struct urb *urb)
        complete(&ictx->tx.finished);
 }
 
-/**
+/*
  * report touchscreen input
  */
 static void imon_touch_display_timeout(struct timer_list *t)
@@ -1103,7 +1103,7 @@ static void imon_touch_display_timeout(struct timer_list *t)
        input_sync(ictx->touch);
 }
 
-/**
+/*
  * iMON IR receivers support two different signal sets -- those used by
  * the iMON remotes, and those used by the Windows MCE remotes (which is
  * really just RC-6), but only one or the other at a time, as the signals
@@ -1191,7 +1191,7 @@ static inline int tv2int(const struct timeval *a, const struct timeval *b)
        return sec;
 }
 
-/**
+/*
  * The directional pad behaves a bit differently, depending on whether this is
  * one of the older ffdc devices or a newer device. Newer devices appear to
  * have a higher resolution matrix for more precise mouse movement, but it
@@ -1543,7 +1543,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
        }
 }
 
-/**
+/*
  * figure out if these is a press or a release. We don't actually
  * care about repeats, as those will be auto-generated within the IR
  * subsystem for repeating scancodes.
@@ -1592,10 +1592,10 @@ static int imon_parse_press_type(struct imon_context *ictx,
        return press_type;
 }
 
-/**
+/*
  * Process the incoming packet
  */
-/**
+/*
  * Convert bit count to time duration (in us) and submit
  * the value to lirc_dev.
  */
@@ -1608,7 +1608,7 @@ static void submit_data(struct imon_context *context)
        ir_raw_event_store_with_filter(context->rdev, &ev);
 }
 
-/**
+/*
  * Process the incoming packet
  */
 static void imon_incoming_ir_raw(struct imon_context *context,
@@ -1831,7 +1831,7 @@ not_input_data:
        }
 }
 
-/**
+/*
  * Callback function for USB core API: receive data
  */
 static void usb_rx_callback_intf0(struct urb *urb)
@@ -2485,7 +2485,7 @@ static void imon_init_display(struct imon_context *ictx,
 
 }
 
-/**
+/*
  * Callback function for USB core API: Probe
  */
 static int imon_probe(struct usb_interface *interface,
@@ -2583,7 +2583,7 @@ fail:
        return ret;
 }
 
-/**
+/*
  * Callback function for USB core API: disconnect
  */
 static void imon_disconnect(struct usb_interface *interface)
index e2bd68c42edfa8ab870ebaf294ac06eef8a4e1a0..22c8aee3df4fee44ec294da322e0afc66c57651c 100644 (file)
@@ -39,7 +39,7 @@ enum jvc_state {
 /**
  * ir_jvc_decode() - Decode one JVC pulse or space
  * @dev:       the struct rc_dev descriptor of the device
- * @duration:   the struct ir_raw_event descriptor of the pulse/space
+ * @ev:   the struct ir_raw_event descriptor of the pulse/space
  *
  * This function returns -EINVAL if the pulse violates the state machine
  */
index 8f2f37412fc5e24e90ebe763e16628ca8f7f6483..4fd4521693d95b417643c4efa5af3a82e86c4428 100644 (file)
@@ -25,8 +25,8 @@
 /**
  * ir_lirc_decode() - Send raw IR data to lirc_dev to be relayed to the
  *                   lircd userspace daemon for decoding.
- * @input_dev: the struct rc_dev descriptor of the device
- * @duration:  the struct ir_raw_event descriptor of the pulse/space
+ * @dev:       the struct rc_dev descriptor of the device
+ * @ev:                the struct ir_raw_event descriptor of the pulse/space
  *
  * This function returns -EINVAL if the lirc interfaces aren't wired up.
  */
index a95d09acc22a56a81f63260a8fecb7ecb216fe6c..6880c190dcd258041e506cd3275282930d22d287 100644 (file)
@@ -41,7 +41,7 @@ enum nec_state {
 /**
  * ir_nec_decode() - Decode one NEC pulse or space
  * @dev:       the struct rc_dev descriptor of the device
- * @duration:  the struct ir_raw_event descriptor of the pulse/space
+ * @ev:                the struct ir_raw_event descriptor of the pulse/space
  *
  * This function returns -EINVAL if the pulse violates the state machine
  */
@@ -183,7 +183,6 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
  * ir_nec_scancode_to_raw() - encode an NEC scancode ready for modulation.
  * @protocol:  specific protocol to use
  * @scancode:  a single NEC scancode.
- * @raw:       raw data to be modulated.
  */
 static u32 ir_nec_scancode_to_raw(enum rc_proto protocol, u32 scancode)
 {
index 758c60956850fc3ac1c0f25aea4645d9518a21fc..d94e07b02f3b0b475f1c2910be408cdd3ea14e68 100644 (file)
@@ -48,7 +48,7 @@ enum sanyo_state {
 /**
  * ir_sanyo_decode() - Decode one SANYO pulse or space
  * @dev:       the struct rc_dev descriptor of the device
- * @duration:  the struct ir_raw_event descriptor of the pulse/space
+ * @ev:                the struct ir_raw_event descriptor of the pulse/space
  *
  * This function returns -EINVAL if the pulse violates the state machine
  */
index 129b558acc9214abda6ce249eb3d8e2095591196..7140dd6160eee2c4de9e7b4448c961117f9e9a0d 100644 (file)
@@ -39,7 +39,7 @@ enum sharp_state {
 /**
  * ir_sharp_decode() - Decode one Sharp pulse or space
  * @dev:       the struct rc_dev descriptor of the device
- * @duration:  the struct ir_raw_event descriptor of the pulse/space
+ * @ev:                the struct ir_raw_event descriptor of the pulse/space
  *
  * This function returns -EINVAL if the pulse violates the state machine
  */
index 6f464be1c8d7af2db47a4684dda6bc10a7ed56f7..712bc6d76e9283944adbabe06f54f9ea536948ed 100644 (file)
@@ -35,7 +35,7 @@ enum xmp_state {
 /**
  * ir_xmp_decode() - Decode one XMP pulse or space
  * @dev:       the struct rc_dev descriptor of the device
- * @duration:  the struct ir_raw_event descriptor of the pulse/space
+ * @ev:                the struct ir_raw_event descriptor of the pulse/space
  *
  * This function returns -EINVAL if the pulse violates the state machine
  */
index f6e5ba4fbb49f5b46b0c06cc9da09cb16265eaca..d78483a504c9f4921d70765a6ccf000d9eefc979 100644 (file)
@@ -128,7 +128,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
 /**
  * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
  * @dev:       the struct rc_dev device descriptor
- * @type:      the type of the event that has occurred
+ * @ev:                the event that has occurred
  *
  * This routine (which may be called from an interrupt context) works
  * in similar manner to ir_raw_event_store_edge.
index 17950e29d4e3ec72a18e4ad2bee2286c99ab99e3..c144b77eac987b5e5431b7e49cb27941002d6b04 100644 (file)
@@ -39,41 +39,41 @@ static const struct {
        [RC_PROTO_UNKNOWN] = { .name = "unknown", .repeat_period = 250 },
        [RC_PROTO_OTHER] = { .name = "other", .repeat_period = 250 },
        [RC_PROTO_RC5] = { .name = "rc-5",
-               .scancode_bits = 0x1f7f, .repeat_period = 164 },
+               .scancode_bits = 0x1f7f, .repeat_period = 250 },
        [RC_PROTO_RC5X_20] = { .name = "rc-5x-20",
-               .scancode_bits = 0x1f7f3f, .repeat_period = 164 },
+               .scancode_bits = 0x1f7f3f, .repeat_period = 250 },
        [RC_PROTO_RC5_SZ] = { .name = "rc-5-sz",
-               .scancode_bits = 0x2fff, .repeat_period = 164 },
+               .scancode_bits = 0x2fff, .repeat_period = 250 },
        [RC_PROTO_JVC] = { .name = "jvc",
                .scancode_bits = 0xffff, .repeat_period = 250 },
        [RC_PROTO_SONY12] = { .name = "sony-12",
-               .scancode_bits = 0x1f007f, .repeat_period = 100 },
+               .scancode_bits = 0x1f007f, .repeat_period = 250 },
        [RC_PROTO_SONY15] = { .name = "sony-15",
-               .scancode_bits = 0xff007f, .repeat_period = 100 },
+               .scancode_bits = 0xff007f, .repeat_period = 250 },
        [RC_PROTO_SONY20] = { .name = "sony-20",
-               .scancode_bits = 0x1fff7f, .repeat_period = 100 },
+               .scancode_bits = 0x1fff7f, .repeat_period = 250 },
        [RC_PROTO_NEC] = { .name = "nec",
-               .scancode_bits = 0xffff, .repeat_period = 160 },
+               .scancode_bits = 0xffff, .repeat_period = 250 },
        [RC_PROTO_NECX] = { .name = "nec-x",
-               .scancode_bits = 0xffffff, .repeat_period = 160 },
+               .scancode_bits = 0xffffff, .repeat_period = 250 },
        [RC_PROTO_NEC32] = { .name = "nec-32",
-               .scancode_bits = 0xffffffff, .repeat_period = 160 },
+               .scancode_bits = 0xffffffff, .repeat_period = 250 },
        [RC_PROTO_SANYO] = { .name = "sanyo",
                .scancode_bits = 0x1fffff, .repeat_period = 250 },
        [RC_PROTO_MCIR2_KBD] = { .name = "mcir2-kbd",
-               .scancode_bits = 0xffff, .repeat_period = 150 },
+               .scancode_bits = 0xffff, .repeat_period = 250 },
        [RC_PROTO_MCIR2_MSE] = { .name = "mcir2-mse",
-               .scancode_bits = 0x1fffff, .repeat_period = 150 },
+               .scancode_bits = 0x1fffff, .repeat_period = 250 },
        [RC_PROTO_RC6_0] = { .name = "rc-6-0",
-               .scancode_bits = 0xffff, .repeat_period = 164 },
+               .scancode_bits = 0xffff, .repeat_period = 250 },
        [RC_PROTO_RC6_6A_20] = { .name = "rc-6-6a-20",
-               .scancode_bits = 0xfffff, .repeat_period = 164 },
+               .scancode_bits = 0xfffff, .repeat_period = 250 },
        [RC_PROTO_RC6_6A_24] = { .name = "rc-6-6a-24",
-               .scancode_bits = 0xffffff, .repeat_period = 164 },
+               .scancode_bits = 0xffffff, .repeat_period = 250 },
        [RC_PROTO_RC6_6A_32] = { .name = "rc-6-6a-32",
-               .scancode_bits = 0xffffffff, .repeat_period = 164 },
+               .scancode_bits = 0xffffffff, .repeat_period = 250 },
        [RC_PROTO_RC6_MCE] = { .name = "rc-6-mce",
-               .scancode_bits = 0xffff7fff, .repeat_period = 164 },
+               .scancode_bits = 0xffff7fff, .repeat_period = 250 },
        [RC_PROTO_SHARP] = { .name = "sharp",
                .scancode_bits = 0x1fff, .repeat_period = 250 },
        [RC_PROTO_XMP] = { .name = "xmp", .repeat_period = 250 },
@@ -170,10 +170,11 @@ static struct rc_map_list empty_map = {
  * @name:      name to assign to the table
  * @rc_proto:  ir type to assign to the new table
  * @size:      initial size of the table
- * @return:    zero on success or a negative error code
  *
  * This routine will initialize the rc_map and will allocate
  * memory to hold at least the specified number of elements.
+ *
+ * return:     zero on success or a negative error code
  */
 static int ir_create_table(struct rc_map *rc_map,
                           const char *name, u64 rc_proto, size_t size)
@@ -216,10 +217,11 @@ static void ir_free_table(struct rc_map *rc_map)
  * ir_resize_table() - resizes a scancode table if necessary
  * @rc_map:    the rc_map to resize
  * @gfp_flags: gfp flags to use when allocating memory
- * @return:    zero on success or a negative error code
  *
  * This routine will shrink the rc_map if it has lots of
  * unused entries and grow it if it is full.
+ *
+ * return:     zero on success or a negative error code
  */
 static int ir_resize_table(struct rc_map *rc_map, gfp_t gfp_flags)
 {
@@ -265,11 +267,13 @@ static int ir_resize_table(struct rc_map *rc_map, gfp_t gfp_flags)
  * @dev:       the struct rc_dev device descriptor
  * @rc_map:    scancode table to be adjusted
  * @index:     index of the mapping that needs to be updated
- * @keycode:   the desired keycode
- * @return:    previous keycode assigned to the mapping
+ * @new_keycode: the desired keycode
  *
  * This routine is used to update scancode->keycode mapping at given
  * position.
+ *
+ * return:     previous keycode assigned to the mapping
+ *
  */
 static unsigned int ir_update_mapping(struct rc_dev *dev,
                                      struct rc_map *rc_map,
@@ -320,12 +324,13 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
  * @scancode:  the desired scancode
  * @resize:    controls whether we allowed to resize the table to
  *             accommodate not yet present scancodes
- * @return:    index of the mapping containing scancode in question
- *             or -1U in case of failure.
  *
  * This routine is used to locate given scancode in rc_map.
  * If scancode is not yet present the routine will allocate a new slot
  * for it.
+ *
+ * return:     index of the mapping containing scancode in question
+ *             or -1U in case of failure.
  */
 static unsigned int ir_establish_scancode(struct rc_dev *dev,
                                          struct rc_map *rc_map,
@@ -375,11 +380,12 @@ static unsigned int ir_establish_scancode(struct rc_dev *dev,
 /**
  * ir_setkeycode() - set a keycode in the scancode->keycode table
  * @idev:      the struct input_dev device descriptor
- * @scancode:  the desired scancode
- * @keycode:   result
- * @return:    -EINVAL if the keycode could not be inserted, otherwise zero.
+ * @ke:                Input keymap entry
+ * @old_keycode: result
  *
  * This routine is used to handle evdev EVIOCSKEY ioctl.
+ *
+ * return:     -EINVAL if the keycode could not be inserted, otherwise zero.
  */
 static int ir_setkeycode(struct input_dev *idev,
                         const struct input_keymap_entry *ke,
@@ -422,11 +428,11 @@ out:
 /**
  * ir_setkeytable() - sets several entries in the scancode->keycode table
  * @dev:       the struct rc_dev device descriptor
- * @to:                the struct rc_map to copy entries to
  * @from:      the struct rc_map to copy entries from
- * @return:    -ENOMEM if all keycodes could not be inserted, otherwise zero.
  *
  * This routine is used to handle table initialization.
+ *
+ * return:     -ENOMEM if all keycodes could not be inserted, otherwise zero.
  */
 static int ir_setkeytable(struct rc_dev *dev,
                          const struct rc_map *from)
@@ -474,10 +480,11 @@ static int rc_map_cmp(const void *key, const void *elt)
  * ir_lookup_by_scancode() - locate mapping by scancode
  * @rc_map:    the struct rc_map to search
  * @scancode:  scancode to look for in the table
- * @return:    index in the table, -1U if not found
  *
  * This routine performs binary search in RC keykeymap table for
  * given scancode.
+ *
+ * return:     index in the table, -1U if not found
  */
 static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map,
                                          unsigned int scancode)
@@ -495,11 +502,11 @@ static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map,
 /**
  * ir_getkeycode() - get a keycode from the scancode->keycode table
  * @idev:      the struct input_dev device descriptor
- * @scancode:  the desired scancode
- * @keycode:   used to return the keycode, if found, or KEY_RESERVED
- * @return:    always returns zero.
+ * @ke:                Input keymap entry
  *
  * This routine is used to handle evdev EVIOCGKEY ioctl.
+ *
+ * return:     always returns zero.
  */
 static int ir_getkeycode(struct input_dev *idev,
                         struct input_keymap_entry *ke)
@@ -556,11 +563,12 @@ out:
  * rc_g_keycode_from_table() - gets the keycode that corresponds to a scancode
  * @dev:       the struct rc_dev descriptor of the device
  * @scancode:  the scancode to look for
- * @return:    the corresponding keycode, or KEY_RESERVED
  *
  * This routine is used by drivers which need to convert a scancode to a
  * keycode. Normally it should not be used since drivers should have no
  * interest in keycodes.
+ *
+ * return:     the corresponding keycode, or KEY_RESERVED
  */
 u32 rc_g_keycode_from_table(struct rc_dev *dev, u32 scancode)
 {
@@ -625,7 +633,8 @@ EXPORT_SYMBOL_GPL(rc_keyup);
 
 /**
  * ir_timer_keyup() - generates a keyup event after a timeout
- * @cookie:    a pointer to the struct rc_dev for the device
+ *
+ * @t:         a pointer to the struct timer_list
  *
  * This routine will generate a keyup event some time after a keydown event
  * is generated when no further activity has been detected.
@@ -780,7 +789,8 @@ EXPORT_SYMBOL_GPL(rc_keydown_notimeout);
  *                       provides sensible defaults
  * @dev:       the struct rc_dev descriptor of the device
  * @filter:    the scancode and mask
- * @return:    0 or -EINVAL if the filter is not valid
+ *
+ * return:     0 or -EINVAL if the filter is not valid
  */
 static int rc_validate_filter(struct rc_dev *dev,
                              struct rc_scancode_filter *filter)
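
Every rc-main.c hunk above applies a single rule: kernel-doc has no "@return:" parameter, so the return value moves out of the argument list into its own section after the description. A sketch of the resulting shape, for a hypothetical helper and with the lowercase "return:" spelling these hunks use:

    /**
     * example_lookup() - locate an entry by scancode
     * @map:      zero-terminated table to search
     * @scancode: scancode to look for
     *
     * This routine performs a linear search of @map.
     *
     * return:    index of the matching entry, or -1U if not found
     */
    static unsigned int example_lookup(const unsigned int *map,
                                       unsigned int scancode)
    {
            unsigned int i;

            for (i = 0; map[i] != 0; i++)
                    if (map[i] == scancode)
                            return i;

            return -1U;
    }
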
index 76120664b700ad6f8f5044e9f87cf1a92ad455ac..9ee2c9196b4d81fd64c90937eccaac7a69252383 100644 (file)
@@ -57,7 +57,7 @@ static void add_read_queue(int flag, unsigned long val);
 static irqreturn_t sir_interrupt(int irq, void *dev_id);
 static void send_space(unsigned long len);
 static void send_pulse(unsigned long len);
-static void init_hardware(void);
+static int init_hardware(void);
 static void drop_hardware(void);
 /* Initialisation */
 
@@ -263,11 +263,36 @@ static void send_pulse(unsigned long len)
        }
 }
 
-static void init_hardware(void)
+static int init_hardware(void)
 {
+       u8 scratch, scratch2, scratch3;
        unsigned long flags;
 
        spin_lock_irqsave(&hardware_lock, flags);
+
+       /*
+        * This is a simple port existence test, borrowed from the autoconfig
+        * function in drivers/tty/serial/8250/8250_port.c
+        */
+       scratch = sinp(UART_IER);
+       soutp(UART_IER, 0);
+#ifdef __i386__
+       outb(0xff, 0x080);
+#endif
+       scratch2 = sinp(UART_IER) & 0x0f;
+       soutp(UART_IER, 0x0f);
+#ifdef __i386__
+       outb(0x00, 0x080);
+#endif
+       scratch3 = sinp(UART_IER) & 0x0f;
+       soutp(UART_IER, scratch);
+       if (scratch2 != 0 || scratch3 != 0x0f) {
+               /* we fail, there's nothing here */
+               spin_unlock_irqrestore(&hardware_lock, flags);
+               pr_err("port existence test failed, cannot continue\n");
+               return -ENODEV;
+       }
+
        /* reset UART */
        outb(0, io + UART_MCR);
        outb(0, io + UART_IER);
@@ -285,6 +310,8 @@ static void init_hardware(void)
        /* turn on UART */
        outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR);
        spin_unlock_irqrestore(&hardware_lock, flags);
+
+       return 0;
 }
 
 static void drop_hardware(void)
@@ -334,14 +361,19 @@ static int sir_ir_probe(struct platform_device *dev)
                pr_err("IRQ %d already in use.\n", irq);
                return retval;
        }
+
+       retval = init_hardware();
+       if (retval) {
+               del_timer_sync(&timerlist);
+               return retval;
+       }
+
        pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
 
        retval = devm_rc_register_device(&sir_ir_dev->dev, rcdev);
        if (retval < 0)
                return retval;
 
-       init_hardware();
-
        return 0;
 }
 
index a8e39c635f3400025e6ac69ff2358206b511482a..d2efd7b2c3bcc71ecac638168903ec697868a6d3 100644 (file)
@@ -49,7 +49,7 @@ struct st_rc_device {
 #define IRB_RX_NOISE_SUPPR      0x5c   /* noise suppression  */
 #define IRB_RX_POLARITY_INV     0x68   /* polarity inverter  */
 
-/**
+/*
  * IRQ set: Enable full FIFO                 1  -> bit  3;
  *          Enable overrun IRQ               1  -> bit  2;
  *          Enable last symbol IRQ           1  -> bit  1:
@@ -72,7 +72,7 @@ static void st_rc_send_lirc_timeout(struct rc_dev *rdev)
        ir_raw_event_store(rdev, &ev);
 }
 
-/**
+/*
  * RX graphical example to better understand the difference between ST IR block
  * output and standard definition used by LIRC (and most of the world!)
  *
@@ -317,7 +317,7 @@ static int st_rc_probe(struct platform_device *pdev)
        device_init_wakeup(dev, true);
        dev_pm_set_wake_irq(dev, rc_dev->irq);
 
-       /**
+       /*
         * for LIRC_MODE_MODE2 or LIRC_MODE_PULSE or LIRC_MODE_RAW
         * lircd expects a long space first before a signal train to sync.
         */
index 4eebfcfc10f34bc7f49869a6bfee265b5842b15a..c9a70fda88a88bcd02b45137581896857c612bb9 100644 (file)
@@ -191,7 +191,7 @@ static void sz_push_half_space(struct streamzap_ir *sz,
        sz_push_full_space(sz, value & SZ_SPACE_MASK);
 }
 
-/**
+/*
  * streamzap_callback - usb IRQ handler callback
  *
  * This procedure is invoked on reception of data from
@@ -321,7 +321,7 @@ out:
        return NULL;
 }
 
-/**
+/*
  *     streamzap_probe
  *
  *     Called by usb-core to associate with a candidate device
@@ -450,7 +450,7 @@ free_sz:
        return retval;
 }
 
-/**
+/*
  * streamzap_disconnect
  *
  * Called by the usb core when the device is removed from the system.
index 8b39d8dc97a075972c5f1824e2a286a6c675f4fd..5c87c5c6a455691a1366b623ceedd21b9e6b5120 100644 (file)
@@ -1397,9 +1397,9 @@ static u32 MT2063_Round_fLO(u32 f_LO, u32 f_LO_Step, u32 f_ref)
  *                        risk of overflow.  It accurately calculates
  *                        f_ref * num / denom to within 1 HZ with fixed math.
  *
- * @num :      Fractional portion of the multiplier
+ * @f_ref:     SRO frequency.
+ * @num:       Fractional portion of the multiplier
  * @denom:     denominator portion of the ratio
- * @f_Ref:     SRO frequency.
  *
  * This calculation handles f_ref as two separate 14-bit fields.
  * Therefore, a maximum value of 2^28-1 may safely be used for f_ref.
@@ -1464,8 +1464,6 @@ static u32 MT2063_CalcLO1Mult(u32 *Div,
  * @f_LO:      desired LO frequency.
  * @f_LO_Step: Minimum step size for the LO (in Hz).
  * @f_Ref:     SRO frequency.
- * @f_Avoid:   Range of PLL frequencies to avoid near
- *             integer multiples of f_Ref (in Hz).
  *
  * Returns: Recalculated LO frequency.
  */
index 34dc7e062471c651192f7d5a01ff2780fc28f64e..d9093a3c57c5b275ad4e0956c17832f31e023e64 100644 (file)
@@ -105,9 +105,9 @@ static struct tda18271_config hauppauge_woodbury_tunerconfig = {
 
 static void au0828_restart_dvb_streaming(struct work_struct *work);
 
-static void au0828_bulk_timeout(unsigned long data)
+static void au0828_bulk_timeout(struct timer_list *t)
 {
-       struct au0828_dev *dev = (struct au0828_dev *) data;
+       struct au0828_dev *dev = from_timer(dev, t, bulk_timeout);
 
        dprintk(1, "%s called\n", __func__);
        dev->bulk_timeout_running = 0;
@@ -648,9 +648,7 @@ int au0828_dvb_register(struct au0828_dev *dev)
                return ret;
        }
 
-       dev->bulk_timeout.function = au0828_bulk_timeout;
-       dev->bulk_timeout.data = (unsigned long) dev;
-       init_timer(&dev->bulk_timeout);
+       timer_setup(&dev->bulk_timeout, au0828_bulk_timeout, 0);
 
        return 0;
 }
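
This hunk (and the au0828-video, memstick, rtsx_usb, sm_ftl, mmc host and caif_hsi hunks further down) is the 4.15 timer-API conversion: timer_setup() replaces init_timer()/setup_timer(), and the callback receives the struct timer_list pointer and recovers its container with from_timer() instead of casting an unsigned long. A minimal sketch of the pattern with made-up struct and field names:

  #include <linux/timer.h>
  #include <linux/jiffies.h>

  struct my_dev {
          struct timer_list timeout;   /* timer embedded in the device struct */
          /* ... */
  };

  static void my_timeout_fn(struct timer_list *t)
  {
          /* recover the containing struct from the timer_list pointer */
          struct my_dev *dev = from_timer(dev, t, timeout);

          /* handle the timeout for dev ... */
  }

  static void my_dev_start(struct my_dev *dev)
  {
          timer_setup(&dev->timeout, my_timeout_fn, 0);
          mod_timer(&dev->timeout, jiffies + msecs_to_jiffies(100));
  }
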
index 654f67c258635152cf4d4ca7f6f12228f52cdc8e..a240153821e0cd803652804a25f0e998c207305a 100644 (file)
@@ -954,9 +954,9 @@ int au0828_analog_unregister(struct au0828_dev *dev)
 /* This function ensures that video frames continue to be delivered even if
    the ITU-656 input isn't receiving any data (thereby preventing applications
    such as tvtime from hanging) */
-static void au0828_vid_buffer_timeout(unsigned long data)
+static void au0828_vid_buffer_timeout(struct timer_list *t)
 {
-       struct au0828_dev *dev = (struct au0828_dev *) data;
+       struct au0828_dev *dev = from_timer(dev, t, vid_timeout);
        struct au0828_dmaqueue *dma_q = &dev->vidq;
        struct au0828_buffer *buf;
        unsigned char *vid_data;
@@ -978,9 +978,9 @@ static void au0828_vid_buffer_timeout(unsigned long data)
        spin_unlock_irqrestore(&dev->slock, flags);
 }
 
-static void au0828_vbi_buffer_timeout(unsigned long data)
+static void au0828_vbi_buffer_timeout(struct timer_list *t)
 {
-       struct au0828_dev *dev = (struct au0828_dev *) data;
+       struct au0828_dev *dev = from_timer(dev, t, vbi_timeout);
        struct au0828_dmaqueue *dma_q = &dev->vbiq;
        struct au0828_buffer *buf;
        unsigned char *vbi_data;
@@ -1953,10 +1953,8 @@ int au0828_analog_register(struct au0828_dev *dev,
        INIT_LIST_HEAD(&dev->vidq.active);
        INIT_LIST_HEAD(&dev->vbiq.active);
 
-       setup_timer(&dev->vid_timeout, au0828_vid_buffer_timeout,
-                   (unsigned long)dev);
-       setup_timer(&dev->vbi_timeout, au0828_vbi_buffer_timeout,
-                   (unsigned long)dev);
+       timer_setup(&dev->vid_timeout, au0828_vid_buffer_timeout, 0);
+       timer_setup(&dev->vbi_timeout, au0828_vbi_buffer_timeout, 0);
 
        dev->width = NTSC_STD_W;
        dev->height = NTSC_STD_H;
index f9772ad0a2a5c114ebbae1c70f381b84f6525ea5..5a2f81311fb7767926f160b8ed3d167455ae52f6 100644 (file)
@@ -26,7 +26,7 @@
 #include "cinergyT2.h"
 
 
-/**
+/*
  *  convert linux-dvb frontend parameter set into TPS.
  *  See ETSI ETS-300744, section 4.6.2, table 9 for details.
  *
index 92098c1b78e51c69b4cba8179d56f24f2a8410fe..366b055299157e617020ee00d9c356cf0758732f 100644 (file)
@@ -1677,10 +1677,10 @@ static int dib8096_set_param_override(struct dvb_frontend *fe)
                return -EINVAL;
        }
 
-       /** Update PLL if needed ratio **/
+       /* Update PLL if needed ratio */
        state->dib8000_ops.update_pll(fe, &dib8090_pll_config_12mhz, fe->dtv_property_cache.bandwidth_hz / 1000, 0);
 
-       /** Get optimize PLL ratio to remove spurious **/
+       /* Get optimize PLL ratio to remove spurious */
        pll_ratio = dib8090_compute_pll_parameters(fe);
        if (pll_ratio == 17)
                timf = 21387946;
@@ -1691,7 +1691,7 @@ static int dib8096_set_param_override(struct dvb_frontend *fe)
        else
                timf = 18179756;
 
-       /** Update ratio **/
+       /* Update ratio */
        state->dib8000_ops.update_pll(fe, &dib8090_pll_config_12mhz, fe->dtv_property_cache.bandwidth_hz / 1000, pll_ratio);
 
        state->dib8000_ops.ctrl_timf(fe, DEMOD_TIMF_SET, timf);
@@ -3357,7 +3357,7 @@ static int novatd_sleep_override(struct dvb_frontend* fe)
        return state->sleep(fe);
 }
 
-/**
+/*
  * novatd_frontend_attach - Nova-TD specific attach
  *
  * Nova-TD has GPIO0, 1 and 2 for LEDs. So do not fiddle with them except for
index 8207e6900656bef61de4f0df10b37b5cc6110f98..bcacb0f220282d9c3537e8847b178d7f0ec71727 100644 (file)
@@ -223,8 +223,20 @@ EXPORT_SYMBOL(dibusb_i2c_algo);
 
 int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
 {
-       u8 wbuf[1] = { offs };
-       return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1);
+       u8 *buf;
+       int rc;
+
+       buf = kmalloc(2, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       buf[0] = offs;
+
+       rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1);
+       *val = buf[1];
+       kfree(buf);
+
+       return rc;
 }
 EXPORT_SYMBOL(dibusb_read_eeprom_byte);
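
The rewrite above exists because the buffers handed to dibusb_i2c_msg() end up in a USB transfer, and USB/DMA buffers must not live on the stack; a single kmalloc() covers both the one write byte and the one read byte. A generic sketch of the idiom, with a hypothetical do_transfer() standing in for the dvb-usb helper:

  #include <linux/slab.h>
  #include <linux/types.h>
  #include <linux/errno.h>

  /* hypothetical helper that passes buf to a DMA-capable transfer */
  int do_transfer(u8 *wbuf, int wlen, u8 *rbuf, int rlen);

  int read_one_byte(u8 offs, u8 *val)
  {
          u8 *buf;
          int rc;

          buf = kmalloc(2, GFP_KERNEL);   /* heap memory is DMA-safe */
          if (!buf)
                  return -ENOMEM;

          buf[0] = offs;                              /* write: the offset */
          rc = do_transfer(&buf[0], 1, &buf[1], 1);   /* read: the result  */
          *val = buf[1];

          kfree(buf);
          return rc;
  }
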
 
index 41261317bd5cc95b97a26896aa67c5669a7552f1..b6046e0e07f6845c150d1894d3ad1c34f04b6b40 100644 (file)
@@ -297,7 +297,7 @@ static int jdvbt90502_set_frontend(struct dvb_frontend *fe)
 }
 
 
-/**
+/*
  * (reg, val) command list to initialize this module.
  *  captured on a Windows box.
  */
index 62abe6c43a324588af7f269d36a55e601ae6c962..16875945e662d0f303930e9993706564bb75485a 100644 (file)
@@ -21,7 +21,7 @@ MODULE_PARM_DESC(debug,
 
 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
 
-/**
+/*
  * Indirect I2C access to the PLL via FE.
  * whole I2C protocol data to the PLL is sent via the FE's I2C register.
  * This is done by a control msg to the FE with the I2C data accompanied, and
index f1537daf4e2ea4f3c6cb32c4cf675621f37a4d9c..1b30434b72ef2932f4cd1c57c527ade6b06ebdac 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * OV519 driver
  *
  * Copyright (C) 2008-2011 Jean-François Moine <moinejf@free.fr>
index 3792fedff9515e85734200d8ffb22aa1ed15bb93..1283b3bd9800cd63bb33ce2f460d11c616a0fd33 100644 (file)
@@ -649,11 +649,10 @@ static void DecompressBand23(struct pwc_dec23_private *pdec,
 }
 
 /**
- *
  * Uncompress a pwc23 buffer.
- *
- * src: raw data
- * dst: image output
+ * @pdev: pointer to pwc device's internal struct
+ * @src: raw data
+ * @dst: image output
  */
 void pwc_dec23_decompress(struct pwc_device *pdev,
                          const void *src,
index 8c1f926567ec04425f36fd9fd68d80cf37df98a4..d07349cf94898550972e069cf30d143a261aaa97 100644 (file)
@@ -74,7 +74,7 @@ struct smsusb_device_t {
 static int smsusb_submit_urb(struct smsusb_device_t *dev,
                             struct smsusb_urb_t *surb);
 
-/**
+/*
  * Completing URB's callback handler - bottom half (process context)
  * submits the URB prepared on smsusb_onresponse()
  */
@@ -86,7 +86,7 @@ static void do_submit_urb(struct work_struct *work)
        smsusb_submit_urb(dev, surb);
 }
 
-/**
+/*
  * Completing URB's callback handler - top half (interrupt context)
  * adds completing sms urb to the global surbs list and activates the worker
  * thread to handle the surb
index b842f367249f3361bd46ae73dd1aacfc7b500100..a142b9dc0feb2bb99be8cc728dc369710a5825a4 100644 (file)
@@ -76,7 +76,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
 #define TTUSB_REV_2_2  0x22
 #define TTUSB_BUDGET_NAME "ttusb_stc_fw"
 
-/**
+/*
  *  since we're casting (struct ttusb*) <-> (struct dvb_demux*) around
  *  the dvb_demux field must be the first in struct!!
  */
@@ -713,7 +713,7 @@ static void ttusb_process_frame(struct ttusb *ttusb, u8 * data, int len)
                                        }
                                }
 
-                       /**
+                       /*
                         * if length is valid and we reached the end:
                         * goto next muxpack
                         */
@@ -729,7 +729,7 @@ static void ttusb_process_frame(struct ttusb *ttusb, u8 * data, int len)
                                        /* maximum bytes, until we know the length */
                                        ttusb->muxpack_len = 2;
 
-                               /**
+                               /*
                                 * no muxpacks left?
                                 * return to search-sync state
                                 */
index b55b79b8e921c9cd9717ccbad251f9b8837f6765..127f8a0c098bd417801587cecc4ea208ecd5725c 100644 (file)
@@ -144,6 +144,7 @@ static void usbtv_disconnect(struct usb_interface *intf)
 
 static const struct usb_device_id usbtv_id_table[] = {
        { USB_DEVICE(0x1b71, 0x3002) },
+       { USB_DEVICE(0x1f71, 0x3301) },
        {}
 };
 MODULE_DEVICE_TABLE(usb, usbtv_id_table);
index 8db45dfc271b2823d97da42ecbf44c02d6322152..82852f23a3b6af07085754960cd7d059eda2f5f9 100644 (file)
@@ -239,7 +239,7 @@ static const struct analog_demod_ops tuner_analog_ops = {
  * @type:              type of the tuner (e. g. tuner number)
  * @new_mode_mask:     Indicates if tuner supports TV and/or Radio
  * @new_config:                an optional parameter used by a few tuners to adjust
- *                     internal parameters, like LNA mode
+ *                     internal parameters, like LNA mode
  * @tuner_callback:    an optional function to be called when switching
  *                     to analog mode
  *
@@ -750,6 +750,7 @@ static int tuner_remove(struct i2c_client *client)
 /**
  * check_mode - Verify if tuner supports the requested mode
  * @t: a pointer to the module's internal struct_tuner
+ * @mode: mode of the tuner, as defined by &enum v4l2_tuner_type.
  *
  * This function checks if the tuner is capable of tuning analog TV,
  * digital TV or radio, depending on what the caller wants. If the
@@ -757,6 +758,7 @@ static int tuner_remove(struct i2c_client *client)
  * returns 0.
  * This function is needed for boards that have a separate tuner for
  * radio (like devices with tea5767).
+ *
  * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
  *       select a TV frequency. So, t_mode = T_ANALOG_TV could actually
  *      be used to represent a Digital TV too.
index a7c3464976f2436133cbe729a2a1ef507c72c926..e5acfab470a5ee6bd3dc9bea4c333abe44c7a6f5 100644 (file)
@@ -558,8 +558,7 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
                if (!asd)
                        continue;
 
-               ret = v4l2_async_match_notify(notifier, notifier->v4l2_dev, sd,
-                                             asd);
+               ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
                if (ret)
                        goto err_unbind;
 
index 5c8c49d240d14cf959a83de8d9f365873c907e95..930f9c53a64e4247b74459f1e1a465b2dee13db8 100644 (file)
@@ -245,11 +245,11 @@ EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cea861_vic);
 
 /**
  * v4l2_match_dv_timings - check if two timings match
- * @t1 - compare this v4l2_dv_timings struct...
- * @t2 - with this struct.
- * @pclock_delta - the allowed pixelclock deviation.
- * @match_reduced_fps - if true, then fail if V4L2_DV_FL_REDUCED_FPS does not
- * match.
+ * @t1: compare this v4l2_dv_timings struct...
+ * @t2: with this struct.
+ * @pclock_delta: the allowed pixelclock deviation.
+ * @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not
+ *     match.
  *
  * Compare t1 with t2 with a given margin of error for the pixelclock.
  */
index 681b192420d922f7677d32802672c9af2c810e86..fb72c7ac04d48298148a08e4d7c23c35c9451478 100644 (file)
@@ -458,11 +458,6 @@ static int __v4l2_async_notifier_parse_fwnode_endpoints(
                if (!is_available)
                        continue;
 
-               if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
-                       ret = -EINVAL;
-                       break;
-               }
-
                if (has_port) {
                        struct fwnode_endpoint ep;
 
@@ -474,6 +469,11 @@ static int __v4l2_async_notifier_parse_fwnode_endpoints(
                                continue;
                }
 
+               if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
+                       ret = -EINVAL;
+                       break;
+               }
+
                ret = v4l2_async_notifier_fwnode_parse_endpoint(
                        dev, notifier, fwnode, asd_struct_size, parse_endpoint);
                if (ret < 0)
index f62e68aa04c42712060a9898a55ae51511c2cd55..bc580fbe18fac9d9567f491f963b6ca4d9725d36 100644 (file)
@@ -183,6 +183,7 @@ EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
 
 /**
  * v4l2_m2m_try_run() - select next job to perform and run it if possible
+ * @m2m_dev: per-device context
  *
  * Get next transaction (if present) from the waiting jobs list and run it.
  */
@@ -281,6 +282,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
 
 /**
  * v4l2_m2m_cancel_job() - cancel pending jobs for the context
+ * @m2m_ctx: m2m context with jobs to be canceled
  *
  * In case of streamoff or release called on any context,
  * 1] If the context is currently running, then abort job will be called
index 1dbf6f7785bba98c881be37bf355dbc43a71dd05..e87fb13b22dc7f4b7bd986596bdfc3f08091b14c 100644 (file)
@@ -222,7 +222,7 @@ int videobuf_queue_is_busy(struct videobuf_queue *q)
 }
 EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
 
-/**
+/*
  * __videobuf_free() - free all the buffers and their control structures
  *
  * This function can only be called if streaming/reading is off, i.e. no buffers
index 0b5c43f7e020da59c939369ebd9d0a27a116ce2b..f412429cf5ba586958a3693a35eee48ad7043c51 100644 (file)
@@ -185,12 +185,13 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
        dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
                data, size, dma->nr_pages);
 
-       err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+       err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages,
                             flags, dma->pages, NULL);
 
        if (err != dma->nr_pages) {
                dma->nr_pages = (err >= 0) ? err : 0;
-               dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
+               dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err,
+                       dma->nr_pages);
                return err < 0 ? err : -EINVAL;
        }
        return 0;
index cb115ba6a1d280e339247f14759cfe0c0a6fab6a..a8589d96ef723f04fc6a6a6fc63d5b794aa0cd70 100644 (file)
@@ -188,7 +188,7 @@ module_param(debug, int, 0644);
 static void __vb2_queue_cancel(struct vb2_queue *q);
 static void __enqueue_in_driver(struct vb2_buffer *vb);
 
-/**
+/*
  * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
  */
 static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
@@ -229,7 +229,7 @@ free:
        return ret;
 }
 
-/**
+/*
  * __vb2_buf_mem_free() - free memory of the given buffer
  */
 static void __vb2_buf_mem_free(struct vb2_buffer *vb)
@@ -243,7 +243,7 @@ static void __vb2_buf_mem_free(struct vb2_buffer *vb)
        }
 }
 
-/**
+/*
  * __vb2_buf_userptr_put() - release userspace memory associated with
  * a USERPTR buffer
  */
@@ -258,7 +258,7 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
        }
 }
 
-/**
+/*
  * __vb2_plane_dmabuf_put() - release memory associated with
  * a DMABUF shared plane
  */
@@ -277,7 +277,7 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
        p->dbuf_mapped = 0;
 }
 
-/**
+/*
  * __vb2_buf_dmabuf_put() - release memory associated with
  * a DMABUF shared buffer
  */
@@ -289,7 +289,7 @@ static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
                __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
 }
 
-/**
+/*
  * __setup_offsets() - setup unique offsets ("cookies") for every plane in
  * the buffer.
  */
@@ -317,7 +317,7 @@ static void __setup_offsets(struct vb2_buffer *vb)
        }
 }
 
-/**
+/*
  * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
  * video buffer memory for all buffers/planes on the queue and initializes the
  * queue
@@ -386,7 +386,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
        return buffer;
 }
 
-/**
+/*
  * __vb2_free_mem() - release all video buffer memory for a given queue
  */
 static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
@@ -410,7 +410,7 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
        }
 }
 
-/**
+/*
  * __vb2_queue_free() - free buffers at the end of the queue - video memory and
  * related information, if no buffers are left return the queue to an
  * uninitialized state. Might be called even if the queue has already been freed.
@@ -544,7 +544,7 @@ bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
 }
 EXPORT_SYMBOL(vb2_buffer_in_use);
 
-/**
+/*
  * __buffers_in_use() - return true if any buffers on the queue are in use and
  * the queue cannot be freed (by the means of REQBUFS(0)) call
  */
@@ -564,7 +564,7 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
 }
 EXPORT_SYMBOL_GPL(vb2_core_querybuf);
 
-/**
+/*
  * __verify_userptr_ops() - verify that all memory operations required for
  * USERPTR queue type have been provided
  */
@@ -577,7 +577,7 @@ static int __verify_userptr_ops(struct vb2_queue *q)
        return 0;
 }
 
-/**
+/*
  * __verify_mmap_ops() - verify that all memory operations required for
  * MMAP queue type have been provided
  */
@@ -590,7 +590,7 @@ static int __verify_mmap_ops(struct vb2_queue *q)
        return 0;
 }
 
-/**
+/*
  * __verify_dmabuf_ops() - verify that all memory operations required for
  * DMABUF queue type have been provided
  */
@@ -953,7 +953,7 @@ void vb2_discard_done(struct vb2_queue *q)
 }
 EXPORT_SYMBOL_GPL(vb2_discard_done);
 
-/**
+/*
  * __prepare_mmap() - prepare an MMAP buffer
  */
 static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
@@ -966,7 +966,7 @@ static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
        return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
 }
 
-/**
+/*
  * __prepare_userptr() - prepare a USERPTR buffer
  */
 static int __prepare_userptr(struct vb2_buffer *vb, const void *pb)
@@ -1082,7 +1082,7 @@ err:
        return ret;
 }
 
-/**
+/*
  * __prepare_dmabuf() - prepare a DMABUF buffer
  */
 static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb)
@@ -1215,7 +1215,7 @@ err:
        return ret;
 }
 
-/**
+/*
  * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
  */
 static void __enqueue_in_driver(struct vb2_buffer *vb)
@@ -1298,7 +1298,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
 }
 EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
 
-/**
+/*
  * vb2_start_streaming() - Attempt to start streaming.
  * @q:         videobuf2 queue
  *
@@ -1427,7 +1427,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
 }
 EXPORT_SYMBOL_GPL(vb2_core_qbuf);
 
-/**
+/*
  * __vb2_wait_for_done_vb() - wait for a buffer to become available
  * for dequeuing
  *
@@ -1502,7 +1502,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
        return 0;
 }
 
-/**
+/*
  * __vb2_get_done_vb() - get a buffer ready for dequeuing
  *
  * Will sleep if required for nonblocking == false.
@@ -1553,7 +1553,7 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q)
 }
 EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
 
-/**
+/*
  * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
  */
 static void __vb2_dqbuf(struct vb2_buffer *vb)
@@ -1625,7 +1625,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
 }
 EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
 
-/**
+/*
  * __vb2_queue_cancel() - cancel and stop (pause) streaming
  *
  * Removes all queued buffers from driver's queue and all buffers queued by
@@ -1773,7 +1773,7 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
 }
 EXPORT_SYMBOL_GPL(vb2_core_streamoff);
 
-/**
+/*
  * __find_plane_by_offset() - find plane associated with the given offset off
  */
 static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
@@ -2104,7 +2104,7 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
 }
 EXPORT_SYMBOL_GPL(vb2_core_poll);
 
-/**
+/*
  * struct vb2_fileio_buf - buffer context used by file io emulator
  *
  * vb2 provides a compatibility layer and emulator of file io (read and
@@ -2118,7 +2118,7 @@ struct vb2_fileio_buf {
        unsigned int queued:1;
 };
 
-/**
+/*
  * struct vb2_fileio_data - queue context used by file io emulator
  *
  * @cur_index: the index of the buffer currently being read from or
@@ -2155,7 +2155,7 @@ struct vb2_fileio_data {
        unsigned write_immediately:1;
 };
 
-/**
+/*
  * __vb2_init_fileio() - initialize file io emulator
  * @q:         videobuf2 queue
  * @read:      mode selector (1 means read, 0 means write)
@@ -2274,7 +2274,7 @@ err_kfree:
        return ret;
 }
 
-/**
+/*
  * __vb2_cleanup_fileio() - free resources used by file io emulator
  * @q:         videobuf2 queue
  */
@@ -2293,7 +2293,7 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q)
        return 0;
 }
 
-/**
+/*
  * __vb2_perform_fileio() - perform a single file io (read or write) operation
  * @q:         videobuf2 queue
  * @data:      pointer to target userspace buffer
index 4bb8424114ce6d12e34d28b1620a1fafeeafa5ca..89e51989332bb0916e98e355767ce5518324bd7a 100644 (file)
@@ -120,7 +120,7 @@ static void vb2_common_vm_close(struct vm_area_struct *vma)
        h->put(h->arg);
 }
 
-/**
+/*
  * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
  * video buffers
  */
index 0c0669976bdc1c1424340edbc7c5d68c3c2ec8b6..4075314a698933ab9f0f25c60064a7c2d2cea98a 100644 (file)
@@ -49,7 +49,7 @@ module_param(debug, int, 0644);
 #define V4L2_BUFFER_OUT_FLAGS  (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
                                 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
 
-/**
+/*
  * __verify_planes_array() - verify that the planes array passed in struct
  * v4l2_buffer from userspace can be safely used
  */
@@ -78,7 +78,7 @@ static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
        return __verify_planes_array(vb, pb);
 }
 
-/**
+/*
  * __verify_length() - Verify that the bytesused value for each plane fits in
  * the plane length and that the data offset doesn't exceed the bytesused value.
  */
@@ -181,7 +181,7 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
        return __verify_planes_array(q->bufs[b->index], b);
 }
 
-/**
+/*
  * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
  * returned to userspace
  */
@@ -286,7 +286,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
                q->last_buffer_dequeued = true;
 }
 
-/**
+/*
  * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
  * v4l2_buffer by the userspace. It also verifies that struct
  * v4l2_buffer has a valid number of planes.
@@ -446,7 +446,7 @@ static const struct vb2_buf_ops v4l2_buf_ops = {
        .copy_timestamp         = __copy_timestamp,
 };
 
-/**
+/*
  * vb2_querybuf() - query video buffer information
  * @q:         videobuf queue
  * @b:         buffer struct passed from userspace to vidioc_querybuf handler
index 22de7f5ed03236cda482dc8e994471522b6b39b5..57b13dfbd21e2d3ba14d46b6d7288fc12ca23898 100644 (file)
@@ -1492,9 +1492,9 @@ static int msb_ftl_scan(struct msb_data *msb)
        return 0;
 }
 
-static void msb_cache_flush_timer(unsigned long data)
+static void msb_cache_flush_timer(struct timer_list *t)
 {
-       struct msb_data *msb = (struct msb_data *)data;
+       struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
        msb->need_flush_cache = true;
        queue_work(msb->io_queue, &msb->io_work);
 }
@@ -1514,8 +1514,7 @@ static void msb_cache_discard(struct msb_data *msb)
 
 static int msb_cache_init(struct msb_data *msb)
 {
-       setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
-               (unsigned long)msb);
+       timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
 
        if (!msb->cache)
                msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
index c9714072e22465d4b23d8101038f782b084b2dca..59c82cdcf48d8a508613dbc7b1c98654285de28f 100644 (file)
@@ -377,6 +377,7 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
        u8 *ptr;
        u8 *rx_buf;
        u8 sum;
+       u8 rx_byte;
        int ret = 0, final_ret;
 
        len = cros_ec_prepare_tx(ec_dev, ec_msg);
@@ -421,25 +422,22 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
        if (!ret) {
                /* Verify that EC can process command */
                for (i = 0; i < len; i++) {
-                       switch (rx_buf[i]) {
-                       case EC_SPI_PAST_END:
-                       case EC_SPI_RX_BAD_DATA:
-                       case EC_SPI_NOT_READY:
-                               ret = -EAGAIN;
-                               ec_msg->result = EC_RES_IN_PROGRESS;
-                       default:
+                       rx_byte = rx_buf[i];
+                       if (rx_byte == EC_SPI_PAST_END  ||
+                           rx_byte == EC_SPI_RX_BAD_DATA ||
+                           rx_byte == EC_SPI_NOT_READY) {
+                               ret = -EREMOTEIO;
                                break;
                        }
-                       if (ret)
-                               break;
                }
-               if (!ret)
-                       ret = cros_ec_spi_receive_packet(ec_dev,
-                                       ec_msg->insize + sizeof(*response));
-       } else {
-               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
        }
 
+       if (!ret)
+               ret = cros_ec_spi_receive_packet(ec_dev,
+                               ec_msg->insize + sizeof(*response));
+       else
+               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
        final_ret = terminate_request(ec_dev);
 
        spi_bus_unlock(ec_spi->spi->master);
@@ -508,6 +506,7 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
        int i, len;
        u8 *ptr;
        u8 *rx_buf;
+       u8 rx_byte;
        int sum;
        int ret = 0, final_ret;
 
@@ -544,25 +543,22 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
        if (!ret) {
                /* Verify that EC can process command */
                for (i = 0; i < len; i++) {
-                       switch (rx_buf[i]) {
-                       case EC_SPI_PAST_END:
-                       case EC_SPI_RX_BAD_DATA:
-                       case EC_SPI_NOT_READY:
-                               ret = -EAGAIN;
-                               ec_msg->result = EC_RES_IN_PROGRESS;
-                       default:
+                       rx_byte = rx_buf[i];
+                       if (rx_byte == EC_SPI_PAST_END  ||
+                           rx_byte == EC_SPI_RX_BAD_DATA ||
+                           rx_byte == EC_SPI_NOT_READY) {
+                               ret = -EREMOTEIO;
                                break;
                        }
-                       if (ret)
-                               break;
                }
-               if (!ret)
-                       ret = cros_ec_spi_receive_response(ec_dev,
-                                       ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
-       } else {
-               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
        }
 
+       if (!ret)
+               ret = cros_ec_spi_receive_response(ec_dev,
+                               ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
+       else
+               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
        final_ret = terminate_request(ec_dev);
 
        spi_bus_unlock(ec_spi->spi->master);
@@ -667,6 +663,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
                           sizeof(struct ec_response_get_protocol_info);
        ec_dev->dout_size = sizeof(struct ec_host_request);
 
+       ec_spi->last_transfer_ns = ktime_get_ns();
 
        err = cros_ec_register(ec_dev);
        if (err) {
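
Both hunks in this file drop a fall-through switch (which set -EAGAIN and then slid into the default case) in favour of an explicit byte comparison that fails the transfer with -EREMOTEIO as soon as the EC reports it cannot take the command, and the probe hunk seeds last_transfer_ns, presumably so the very first message also honours the EC's inter-transfer delay. A small user-space illustration of the receive-buffer scan, with assumed sentinel values that only mirror the EC_SPI_* codes in spirit:

  #include <stdio.h>
  #include <errno.h>

  /* assumed sentinel values, not the real EC_SPI_* definitions */
  #define PAST_END  0xec
  #define BAD_DATA  0xfb
  #define NOT_READY 0xfa

  static int check_rx(const unsigned char *rx, int len)
  {
          for (int i = 0; i < len; i++) {
                  unsigned char b = rx[i];

                  if (b == PAST_END || b == BAD_DATA || b == NOT_READY)
                          return -EREMOTEIO;   /* EC cannot take the command */
          }
          return 0;   /* looks fine, go on and read the response */
  }

  int main(void)
  {
          unsigned char ok[]  = { 0x01, 0x02, 0x03 };
          unsigned char bad[] = { 0x01, NOT_READY, 0x03 };

          printf("%d %d\n", check_rx(ok, 3), check_rx(bad, 3));
          return 0;
  }
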
index 691dab791f7af81d91ed3892a945c74c1f0d3fa1..59d61b04c197b64e1735c8641a673a19fefc0c7e 100644 (file)
@@ -40,9 +40,9 @@ static const struct mfd_cell rtsx_usb_cells[] = {
        },
 };
 
-static void rtsx_usb_sg_timed_out(unsigned long data)
+static void rtsx_usb_sg_timed_out(struct timer_list *t)
 {
-       struct rtsx_ucr *ucr = (struct rtsx_ucr *)data;
+       struct rtsx_ucr *ucr = from_timer(ucr, t, sg_timer);
 
        dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__);
        usb_sg_cancel(&ucr->current_sg);
@@ -663,7 +663,7 @@ static int rtsx_usb_probe(struct usb_interface *intf,
                goto out_init_fail;
 
        /* initialize USB SG transfer timer */
-       setup_timer(&ucr->sg_timer, rtsx_usb_sg_timed_out, (unsigned long) ucr);
+       timer_setup(&ucr->sg_timer, rtsx_usb_sg_timed_out, 0);
 
        ret = mfd_add_hotplug_devices(&intf->dev, rtsx_usb_cells,
                                      ARRAY_SIZE(rtsx_usb_cells));
index da16bf45fab43ee9a946beef340f4cd2a224156e..dc94ffc6321a84dd25ce08d0f1a9374d40d4cead 100644 (file)
@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void)
 EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
 
 static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
-                             struct device_node *node)
+                             struct device_node *parent)
 {
+       struct device_node *node;
+
        if (pdata && pdata->codec)
                return true;
 
-       if (of_find_node_by_name(node, "codec"))
+       node = of_get_child_by_name(parent, "codec");
+       if (node) {
+               of_node_put(node);
                return true;
+       }
 
        return false;
 }
index d66502d36ba0b3202d1c15c08540fa8aade42a32..dd19f17a1b637543965dd94e64d0d44b9178f64c 100644 (file)
@@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = {
 };
 
 
-static bool twl6040_has_vibra(struct device_node *node)
+static bool twl6040_has_vibra(struct device_node *parent)
 {
-#ifdef CONFIG_OF
-       if (of_find_node_by_name(node, "vibra"))
+       struct device_node *node;
+
+       node = of_get_child_by_name(parent, "vibra");
+       if (node) {
+               of_node_put(node);
                return true;
-#endif
+       }
+
        return false;
 }
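
The twl4030 and twl6040 hunks swap of_find_node_by_name() for of_get_child_by_name(): the former walks the whole tree starting from the given node (so it can match an unrelated "codec" or "vibra" node elsewhere) and also drops a reference on its starting point, while the latter looks only at direct children and returns the child with a reference held that the caller must release. A minimal sketch of the resulting idiom, with a hypothetical child name:

  #include <linux/of.h>

  /* true if @parent has a direct child node called "codec" */
  static bool has_codec_child(struct device_node *parent)
  {
          struct device_node *node;

          node = of_get_child_by_name(parent, "codec");
          if (node) {
                  of_node_put(node);   /* balance the reference we were handed */
                  return true;
          }
          return false;
  }
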
 
index bb7fd3f4edab7f4ce4a45854dade29a0b113ae14..19969ee86d6f781c64f2acf632781a600c647c0d 100644 (file)
@@ -2083,6 +2083,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
        /* There should only be one entry, but go through the list
         * anyway
         */
+       if (afu->phb == NULL)
+               return result;
+
        list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
                if (!afu_dev->driver)
                        continue;
@@ -2124,8 +2127,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
                         * Tell the AFU drivers; but we don't care what they
                         * say, we're going away.
                         */
-                       if (afu->phb != NULL)
-                               cxl_vphb_error_detected(afu, state);
+                       cxl_vphb_error_detected(afu, state);
                }
                return PCI_ERS_RESULT_DISCONNECT;
        }
@@ -2265,6 +2267,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
                if (cxl_afu_select_best_mode(afu))
                        goto err;
 
+               if (afu->phb == NULL)
+                       continue;
+
                list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
                        /* Reset the device context.
                         * TODO: make this less disruptive
@@ -2327,6 +2332,9 @@ static void cxl_pci_resume(struct pci_dev *pdev)
        for (i = 0; i < adapter->slices; i++) {
                afu = adapter->afu[i];
 
+               if (afu->phb == NULL)
+                       continue;
+
                list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
                        if (afu_dev->driver && afu_dev->driver->err_handler &&
                            afu_dev->driver->err_handler->resume)
index e0b4b36ef01052ea1dd368b33e2170be83bda714..4d63ac8a82e0022f10f424b4bab4dc85820d0981 100644 (file)
@@ -425,7 +425,8 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf,
        memset(msg, 0, sizeof(msg));
        msg[0].addr = client->addr;
        msg[0].buf = addrbuf;
-       addrbuf[0] = 0x90 + offset;
+       /* EUI-48 starts from 0x9a, EUI-64 from 0x98 */
+       addrbuf[0] = 0xa0 - at24->chip.byte_len + offset;
        msg[0].len = 1;
        msg[1].addr = client->addr;
        msg[1].flags = I2C_M_RD;
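
The hunk above replaces the hard-coded 0x90 base with an end-anchored one: the MAC area ends at 0xa0, so with chip.byte_len == 6 (EUI-48) the read starts at 0xa0 - 6 = 0x9a and with byte_len == 8 (EUI-64) at 0xa0 - 8 = 0x98, exactly as the new comment says; this is also why a later hunk bumps byte_len from 4 to 6 for AT24_FLAG_MAC parts. A throwaway check of the arithmetic:

  #include <stdio.h>

  int main(void)
  {
          int lens[] = { 6, 8 };   /* EUI-48 and EUI-64 byte lengths */

          for (int i = 0; i < 2; i++)
                  printf("byte_len=%d -> start=0x%02x\n",
                         lens[i], 0xa0 - lens[i]);
          /* prints 0x9a and 0x98, matching the in-tree comment */
          return 0;
  }
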
@@ -561,18 +562,19 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
 static int at24_read(void *priv, unsigned int off, void *val, size_t count)
 {
        struct at24_data *at24 = priv;
-       struct i2c_client *client;
+       struct device *dev = &at24->client[0]->dev;
        char *buf = val;
        int ret;
 
        if (unlikely(!count))
                return count;
 
-       client = at24_translate_offset(at24, &off);
+       if (off + count > at24->chip.byte_len)
+               return -EINVAL;
 
-       ret = pm_runtime_get_sync(&client->dev);
+       ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
-               pm_runtime_put_noidle(&client->dev);
+               pm_runtime_put_noidle(dev);
                return ret;
        }
 
@@ -588,7 +590,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
                status = at24->read_func(at24, buf, off, count);
                if (status < 0) {
                        mutex_unlock(&at24->lock);
-                       pm_runtime_put(&client->dev);
+                       pm_runtime_put(dev);
                        return status;
                }
                buf += status;
@@ -598,7 +600,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
 
        mutex_unlock(&at24->lock);
 
-       pm_runtime_put(&client->dev);
+       pm_runtime_put(dev);
 
        return 0;
 }
@@ -606,18 +608,19 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
 static int at24_write(void *priv, unsigned int off, void *val, size_t count)
 {
        struct at24_data *at24 = priv;
-       struct i2c_client *client;
+       struct device *dev = &at24->client[0]->dev;
        char *buf = val;
        int ret;
 
        if (unlikely(!count))
                return -EINVAL;
 
-       client = at24_translate_offset(at24, &off);
+       if (off + count > at24->chip.byte_len)
+               return -EINVAL;
 
-       ret = pm_runtime_get_sync(&client->dev);
+       ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
-               pm_runtime_put_noidle(&client->dev);
+               pm_runtime_put_noidle(dev);
                return ret;
        }
 
@@ -633,7 +636,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
                status = at24->write_func(at24, buf, off, count);
                if (status < 0) {
                        mutex_unlock(&at24->lock);
-                       pm_runtime_put(&client->dev);
+                       pm_runtime_put(dev);
                        return status;
                }
                buf += status;
@@ -643,7 +646,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
 
        mutex_unlock(&at24->lock);
 
-       pm_runtime_put(&client->dev);
+       pm_runtime_put(dev);
 
        return 0;
 }
@@ -730,6 +733,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
                dev_warn(&client->dev,
                        "page_size looks suspicious (no power of 2)!\n");
 
+       /*
+        * REVISIT: the size of the EUI-48 byte array is 6 in at24mac402, while
+        * the call to ilog2() in AT24_DEVICE_MAGIC() rounds it down to 4.
+        *
+        * Eventually we'll get rid of the magic values altogether in favor of
+        * real structs, but for now just manually set the right size.
+        */
+       if (chip.flags & AT24_FLAG_MAC && chip.byte_len == 4)
+               chip.byte_len = 6;
+
        /* Use I2C operations unless we're stuck with SMBus extensions. */
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
                if (chip.flags & AT24_FLAG_ADDR16)
@@ -863,7 +876,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
        at24->nvmem_config.reg_read = at24_read;
        at24->nvmem_config.reg_write = at24_write;
        at24->nvmem_config.priv = at24;
-       at24->nvmem_config.stride = 4;
+       at24->nvmem_config.stride = 1;
        at24->nvmem_config.word_size = 1;
        at24->nvmem_config.size = chip.byte_len;
 
index eda38cbe85307edd67863122e24451d269d66866..41f2a9f6851d9e74a58fd06030cb2f00517ea8d0 100644 (file)
@@ -32,7 +32,7 @@
 #include <linux/pci.h>
 #include <linux/mutex.h>
 #include <linux/miscdevice.h>
-#include <linux/pti.h>
+#include <linux/intel-pti.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
index ea80ff4cd7f99bc27aab2951ea8d3f9d67624912..ccfa98af1dd3fc93fdaa52b6ced6db3dc0f9db79 100644 (file)
@@ -122,6 +122,10 @@ struct mmc_blk_data {
        struct device_attribute force_ro;
        struct device_attribute power_ro_lock;
        int     area_type;
+
+       /* debugfs files (only in main mmc_blk_data) */
+       struct dentry *status_dentry;
+       struct dentry *ext_csd_dentry;
 };
 
 /* Device type for RPMB character devices */
@@ -233,9 +237,14 @@ static ssize_t power_ro_lock_store(struct device *dev,
 
        /* Dispatch locking to the block layer */
        req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
+       if (IS_ERR(req)) {
+               count = PTR_ERR(req);
+               goto out_put;
+       }
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
        blk_execute_rq(mq->queue, NULL, req, 0);
        ret = req_to_mmc_queue_req(req)->drv_op_result;
+       blk_put_request(req);
 
        if (!ret) {
                pr_info("%s: Locking boot partition ro until next power on\n",
@@ -248,7 +257,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
                                set_disk_ro(part_md->disk, 1);
                        }
        }
-
+out_put:
        mmc_blk_put(md);
        return count;
 }
@@ -624,6 +633,10 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
        req = blk_get_request(mq->queue,
                idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
                __GFP_RECLAIM);
+       if (IS_ERR(req)) {
+               err = PTR_ERR(req);
+               goto cmd_done;
+       }
        idatas[0] = idata;
        req_to_mmc_queue_req(req)->drv_op =
                rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
@@ -691,6 +704,10 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
        req = blk_get_request(mq->queue,
                idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
                __GFP_RECLAIM);
+       if (IS_ERR(req)) {
+               err = PTR_ERR(req);
+               goto cmd_err;
+       }
        req_to_mmc_queue_req(req)->drv_op =
                rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
        req_to_mmc_queue_req(req)->drv_op_data = idata;
@@ -2550,6 +2567,8 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
 
        /* Ask the block layer about the card status */
        req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
        blk_execute_rq(mq->queue, NULL, req, 0);
        ret = req_to_mmc_queue_req(req)->drv_op_result;
@@ -2557,6 +2576,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
                *val = ret;
                ret = 0;
        }
+       blk_put_request(req);
 
        return ret;
 }
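
blk_get_request() returns an ERR_PTR(), never NULL, on failure, so the call sites in this file now check IS_ERR()/PTR_ERR() before touching the request and, once the driver-op result has been read back, release it with blk_put_request(). A condensed sketch of the pattern, with a hypothetical helper and the result handling elided:

  #include <linux/blkdev.h>
  #include <linux/err.h>

  /* hypothetical: issue one driver-private request and tear it down again */
  static int run_drv_op(struct request_queue *q)
  {
          struct request *req;
          int ret;

          req = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
          if (IS_ERR(req))
                  return PTR_ERR(req);     /* allocation failed or queue dying */

          blk_execute_rq(q, NULL, req, 0); /* run it synchronously */
          ret = 0;                         /* real code reads the op result here */

          blk_put_request(req);            /* always balance blk_get_request() */
          return ret;
  }
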
@@ -2583,10 +2603,15 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
 
        /* Ask the block layer for the EXT CSD */
        req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+       if (IS_ERR(req)) {
+               err = PTR_ERR(req);
+               goto out_free;
+       }
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
        req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
        blk_execute_rq(mq->queue, NULL, req, 0);
        err = req_to_mmc_queue_req(req)->drv_op_result;
+       blk_put_request(req);
        if (err) {
                pr_err("FAILED %d\n", err);
                goto out_free;
@@ -2632,7 +2657,7 @@ static const struct file_operations mmc_dbg_ext_csd_fops = {
        .llseek         = default_llseek,
 };
 
-static int mmc_blk_add_debugfs(struct mmc_card *card)
+static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
 {
        struct dentry *root;
 
@@ -2642,28 +2667,53 @@ static int mmc_blk_add_debugfs(struct mmc_card *card)
        root = card->debugfs_root;
 
        if (mmc_card_mmc(card) || mmc_card_sd(card)) {
-               if (!debugfs_create_file("status", S_IRUSR, root, card,
-                                        &mmc_dbg_card_status_fops))
+               md->status_dentry =
+                       debugfs_create_file("status", S_IRUSR, root, card,
+                                           &mmc_dbg_card_status_fops);
+               if (!md->status_dentry)
                        return -EIO;
        }
 
        if (mmc_card_mmc(card)) {
-               if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
-                                        &mmc_dbg_ext_csd_fops))
+               md->ext_csd_dentry =
+                       debugfs_create_file("ext_csd", S_IRUSR, root, card,
+                                           &mmc_dbg_ext_csd_fops);
+               if (!md->ext_csd_dentry)
                        return -EIO;
        }
 
        return 0;
 }
 
+static void mmc_blk_remove_debugfs(struct mmc_card *card,
+                                  struct mmc_blk_data *md)
+{
+       if (!card->debugfs_root)
+               return;
+
+       if (!IS_ERR_OR_NULL(md->status_dentry)) {
+               debugfs_remove(md->status_dentry);
+               md->status_dentry = NULL;
+       }
+
+       if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
+               debugfs_remove(md->ext_csd_dentry);
+               md->ext_csd_dentry = NULL;
+       }
+}
 
 #else
 
-static int mmc_blk_add_debugfs(struct mmc_card *card)
+static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
 {
        return 0;
 }
 
+static void mmc_blk_remove_debugfs(struct mmc_card *card,
+                                  struct mmc_blk_data *md)
+{
+}
+
 #endif /* CONFIG_DEBUG_FS */
 
 static int mmc_blk_probe(struct mmc_card *card)
@@ -2703,7 +2753,7 @@ static int mmc_blk_probe(struct mmc_card *card)
        }
 
        /* Add two debugfs entries */
-       mmc_blk_add_debugfs(card);
+       mmc_blk_add_debugfs(card, md);
 
        pm_runtime_set_autosuspend_delay(&card->dev, 3000);
        pm_runtime_use_autosuspend(&card->dev);
@@ -2729,6 +2779,7 @@ static void mmc_blk_remove(struct mmc_card *card)
 {
        struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
 
+       mmc_blk_remove_debugfs(card, md);
        mmc_blk_remove_parts(card, md);
        pm_runtime_get_sync(&card->dev);
        mmc_claim_host(card->host);
index a4b49e25fe963b135d71c0532ce0bf5c8951a3cd..7586ff2ad1f17274f764a62d3622f9c8d2209c0b 100644 (file)
@@ -157,6 +157,9 @@ static int mmc_bus_suspend(struct device *dev)
                return ret;
 
        ret = host->bus_ops->suspend(host);
+       if (ret)
+               pm_generic_resume(dev);
+
        return ret;
 }
 
index f06cd91964ce970b0c623dd662c335eb0fc4dadb..79a5b985ccf5ee8fe5ba06b5aec717f36799794e 100644 (file)
@@ -75,9 +75,11 @@ struct mmc_fixup {
 #define EXT_CSD_REV_ANY (-1u)
 
 #define CID_MANFID_SANDISK      0x2
+#define CID_MANFID_ATP          0x9
 #define CID_MANFID_TOSHIBA      0x11
 #define CID_MANFID_MICRON       0x13
 #define CID_MANFID_SAMSUNG      0x15
+#define CID_MANFID_APACER       0x27
 #define CID_MANFID_KINGSTON     0x70
 #define CID_MANFID_HYNIX       0x90
 
index 01e459a34f3321046498110a6815a126b27a8a5e..0f4a7d7b26261486e6ff43bea47d756ad5870843 100644 (file)
@@ -314,4 +314,5 @@ err:
 void mmc_remove_card_debugfs(struct mmc_card *card)
 {
        debugfs_remove_recursive(card->debugfs_root);
+       card->debugfs_root = NULL;
 }
index 35a9e4fd1a9f514ae61e4ec77455915e4870009e..64b03d6eaf184a30c133440299f5820943c8b2bf 100644 (file)
@@ -160,9 +160,9 @@ out:
        return err;
 }
 
-static void mmc_retune_timer(unsigned long data)
+static void mmc_retune_timer(struct timer_list *t)
 {
-       struct mmc_host *host = (struct mmc_host *)data;
+       struct mmc_host *host = from_timer(host, t, retune_timer);
 
        mmc_retune_needed(host);
 }
@@ -389,7 +389,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
        init_waitqueue_head(&host->wq);
        INIT_DELAYED_WORK(&host->detect, mmc_rescan);
        INIT_DELAYED_WORK(&host->sdio_irq_work, sdio_irq_work);
-       setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);
+       timer_setup(&host->retune_timer, mmc_retune_timer, 0);
 
        /*
         * By default, hosts do not support SGIO or large requests.
index a552f61060d2127d2539f73abf0f3690829c0898..208a762b87ef2876914d641b38fc61858429a22e 100644 (file)
@@ -781,7 +781,7 @@ MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
 MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
-MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
+MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
 MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
        card->ext_csd.device_life_time_est_typ_a,
        card->ext_csd.device_life_time_est_typ_b);
@@ -791,7 +791,7 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
 MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
 
 static ssize_t mmc_fwrev_show(struct device *dev,
@@ -1290,7 +1290,7 @@ out_err:
 
 static void mmc_select_driver_type(struct mmc_card *card)
 {
-       int card_drv_type, drive_strength, drv_type;
+       int card_drv_type, drive_strength, drv_type = 0;
        int fixed_drv_type = card->host->fixed_drv_type;
 
        card_drv_type = card->ext_csd.raw_driver_strength |
index f664e9cbc9f8b66aa9f4f3c8fdd55882d99c8c79..75d317623852dc9f55586e41a176311a48144e1d 100644 (file)
@@ -52,6 +52,14 @@ static const struct mmc_fixup mmc_blk_fixups[] = {
        MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),
 
+       /*
+        * Some SD cards lockup while using CMD23 multiblock transfers.
+        */
+       MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd,
+                 MMC_QUIRK_BLK_NO_CMD23),
+       MMC_FIXUP("APUSD", CID_MANFID_APACER, 0x5048, add_quirk_sd,
+                 MMC_QUIRK_BLK_NO_CMD23),
+
        /*
         * Some MMC cards need longer data read timeout than indicated in CSD.
         */
index 45bf78f327163e009d6dc2abd15b0c69ca06071c..62b84dd8f9fe3467d4c69c925c0082efb1231a22 100644 (file)
@@ -675,7 +675,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
 
 
 static ssize_t mmc_dsr_show(struct device *dev,
index 3fb7d2eec93f4d72d63dbc5983e47a2b8dbc710d..c283291db705238dab53ee0104808a198aa4288e 100644 (file)
@@ -29,6 +29,9 @@
 #define CORE_VERSION_MAJOR_MASK                (0xf << CORE_VERSION_MAJOR_SHIFT)
 #define CORE_VERSION_MINOR_MASK                0xff
 
+#define CORE_MCI_GENERICS              0x70
+#define SWITCHABLE_SIGNALING_VOLTAGE   BIT(29)
+
 #define CORE_HC_MODE           0x78
 #define HC_MODE_EN             0x1
 #define CORE_POWER             0x0
@@ -1028,11 +1031,22 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        bool done = false;
+       u32 val;
 
        pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
                        mmc_hostname(host->mmc), __func__, req_type,
                        msm_host->curr_pwr_state, msm_host->curr_io_level);
 
+       /*
+        * The power interrupt will not be generated for signal voltage
+        * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
+        */
+       val = readl(msm_host->core_mem + CORE_MCI_GENERICS);
+       if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
+           !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
+               return;
+       }
+
        /*
         * The IRQ for request type IO High/LOW will be generated when -
         * there is a state change in 1.8V enable bit (bit 3) of
index 2f14334e42df91135fc88fdc474c0d318c4dfad3..e9290a3439d54c2a82ffd2533fc61e4d54637296 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
+#include <linux/swiotlb.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
@@ -3650,23 +3651,30 @@ int sdhci_setup_host(struct sdhci_host *host)
 
        spin_lock_init(&host->lock);
 
+       /*
+        * Maximum number of sectors in one transfer. Limited by SDMA boundary
+        * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
+        * is less anyway.
+        */
+       mmc->max_req_size = 524288;
+
        /*
         * Maximum number of segments. Depends on if the hardware
         * can do scatter/gather or not.
         */
-       if (host->flags & SDHCI_USE_ADMA)
+       if (host->flags & SDHCI_USE_ADMA) {
                mmc->max_segs = SDHCI_MAX_SEGS;
-       else if (host->flags & SDHCI_USE_SDMA)
+       } else if (host->flags & SDHCI_USE_SDMA) {
                mmc->max_segs = 1;
-       else /* PIO */
+               if (swiotlb_max_segment()) {
+                       unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
+                                               IO_TLB_SEGSIZE;
+                       mmc->max_req_size = min(mmc->max_req_size,
+                                               max_req_size);
+               }
+       } else { /* PIO */
                mmc->max_segs = SDHCI_MAX_SEGS;
-
-       /*
-        * Maximum number of sectors in one transfer. Limited by SDMA boundary
-        * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
-        * is less anyway.
-        */
-       mmc->max_req_size = 524288;
+       }
 
        /*
         * Maximum segment size. Could be one segment with the maximum number
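
When only SDMA is usable and a transfer may have to bounce through swiotlb, the request size is now clamped to one swiotlb segment, (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE; with the usual values (IO_TLB_SHIFT == 11 and IO_TLB_SEGSIZE == 128, an assumption about the constants rather than something shown in this diff) that is 256 KiB, half of the 512 KiB SDMA default set just above. A quick check of that arithmetic:

  #include <stdio.h>

  #define IO_TLB_SHIFT   11    /* assumed: 2 KiB swiotlb slabs          */
  #define IO_TLB_SEGSIZE 128   /* assumed: slabs per contiguous segment */

  int main(void)
  {
          unsigned int seg = (1u << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
          unsigned int sdma = 524288u;

          printf("swiotlb segment: %u bytes (%u KiB)\n", seg, seg / 1024);
          printf("clamped max_req_size: %u\n", seg < sdma ? seg : sdma);
          return 0;
  }
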
index f80e911b8843819db8dcd1956c76ce2bf60b5ab8..73b6055774474e322b07cda4144c48b5b235a55c 100644 (file)
@@ -1114,7 +1114,7 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
        if (!ops->oobbuf)
                ops->ooblen = 0;
 
-       if (offs < 0 || offs + ops->len >= mtd->size)
+       if (offs < 0 || offs + ops->len > mtd->size)
                return -EINVAL;
 
        if (ops->ooblen) {
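
The relaxed bound lets an OOB operation end exactly at the device size: the valid condition for a range is offs + len <= size, so the old '>=' wrongly rejected accesses that touch the last bytes of the device. A trivial illustration of the boundary:

  #include <stdio.h>

  static int check(unsigned long long offs, unsigned long long len,
                   unsigned long long size)
  {
          /* reject only ranges that actually run past the end */
          return (offs + len > size) ? -1 : 0;
  }

  int main(void)
  {
          unsigned long long size = 1024;

          printf("%d\n", check(1020, 4, size));  /* 0: ends exactly at size   */
          printf("%d\n", check(1021, 4, size));  /* -1: one byte past the end */
          return 0;
  }
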
index e43fea896d1ed8437a426a5fe0db010ceff7508a..d58a61c093047d80425e6b8c75083b57646a46b4 100644 (file)
@@ -79,14 +79,14 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
        pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
              mtd->index, mtd->name);
 
-       ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+       ret = fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
        if (ret < 0) {
                deactivate_locked_super(sb);
                return ERR_PTR(ret);
        }
 
        /* go */
-       sb->s_flags |= MS_ACTIVE;
+       sb->s_flags |= SB_ACTIVE;
        return dget(sb->s_root);
 
        /* new mountpoint for an already mounted superblock */
@@ -202,7 +202,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
 not_an_MTD_device:
 #endif /* CONFIG_BLOCK */
 
-       if (!(flags & MS_SILENT))
+       if (!(flags & SB_SILENT))
                printk(KERN_NOTICE
                       "MTD: Attempt to mount non-MTD device \"%s\"\n",
                       dev_name);
index e0eb51d8c0129937b35157ccdc107e5ef54c038a..dd56a671ea4285af0f5079bc652ecf4a32410272 100644 (file)
@@ -1763,7 +1763,7 @@ try_dmaread:
                        err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
                                                              addr);
                        /* erased page bitflips corrected */
-                       if (err > 0)
+                       if (err >= 0)
                                return err;
                }
 
index 484f7fbc3f7d2d11cd66fc3416e64ab38d47f852..a8bde6665c24f7e20e6103959ceee16c5d3ec5c8 100644 (file)
@@ -253,9 +253,9 @@ static int gpio_nand_probe(struct platform_device *pdev)
                goto out_ce;
        }
 
-       gpiomtd->nwp = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
-       if (IS_ERR(gpiomtd->nwp)) {
-               ret = PTR_ERR(gpiomtd->nwp);
+       gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiomtd->ale)) {
+               ret = PTR_ERR(gpiomtd->ale);
                goto out_ce;
        }
 
index 50f8d4a1b9832326070045d0c294d22393001fbd..d4d824ef64e9fb395af3bc549daae72b96731e16 100644 (file)
@@ -1067,9 +1067,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                return ret;
        }
 
-       /* handle the block mark swapping */
-       block_mark_swapping(this, payload_virt, auxiliary_virt);
-
        /* Loop over status bytes, accumulating ECC status. */
        status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
 
@@ -1158,6 +1155,9 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                max_bitflips = max_t(unsigned int, max_bitflips, *status);
        }
 
+       /* handle the block mark swapping */
+       block_mark_swapping(this, buf, auxiliary_virt);
+
        if (oob_required) {
                /*
                 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
index 3692dd5478799f044bbf56895e467b144074b00a..4237c7cebf0210dfe2d2b0684b5fc9b8fbea56d6 100644 (file)
@@ -989,9 +989,9 @@ restart:
 
 
 /* flush timer, runs a second after last write */
-static void sm_cache_flush_timer(unsigned long data)
+static void sm_cache_flush_timer(struct timer_list *t)
 {
-       struct sm_ftl *ftl = (struct sm_ftl *)data;
+       struct sm_ftl *ftl = from_timer(ftl, t, timer);
        queue_work(cache_flush_workqueue, &ftl->flush_work);
 }
 
@@ -1139,7 +1139,7 @@ static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
 
 
        mutex_init(&ftl->mutex);
-       setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
+       timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
        INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
        init_completion(&ftl->erase_completion);
 
index fed75e75207a2472c341695b227e3daad738ada2..b8029ea03307f75322647be894bae56176da3aef 100644 (file)
@@ -66,9 +66,9 @@ static const struct cfhsi_config  hsi_default_config = {
 
 static LIST_HEAD(cfhsi_list);
 
-static void cfhsi_inactivity_tout(unsigned long arg)
+static void cfhsi_inactivity_tout(struct timer_list *t)
 {
-       struct cfhsi *cfhsi = (struct cfhsi *)arg;
+       struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
 
        netdev_dbg(cfhsi->ndev, "%s.\n",
                __func__);
@@ -737,9 +737,9 @@ out_of_sync:
        schedule_work(&cfhsi->out_of_sync_work);
 }
 
-static void cfhsi_rx_slowpath(unsigned long arg)
+static void cfhsi_rx_slowpath(struct timer_list *t)
 {
-       struct cfhsi *cfhsi = (struct cfhsi *)arg;
+       struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
 
        netdev_dbg(cfhsi->ndev, "%s.\n",
                __func__);
@@ -997,9 +997,9 @@ static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
        wake_up_interruptible(&cfhsi->wake_down_wait);
 }
 
-static void cfhsi_aggregation_tout(unsigned long arg)
+static void cfhsi_aggregation_tout(struct timer_list *t)
 {
-       struct cfhsi *cfhsi = (struct cfhsi *)arg;
+       struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
 
        netdev_dbg(cfhsi->ndev, "%s.\n",
                __func__);
@@ -1211,14 +1211,11 @@ static int cfhsi_open(struct net_device *ndev)
        init_waitqueue_head(&cfhsi->flush_fifo_wait);
 
        /* Setup the inactivity timer. */
-       setup_timer(&cfhsi->inactivity_timer, cfhsi_inactivity_tout,
-                   (unsigned long)cfhsi);
+       timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
        /* Setup the slowpath RX timer. */
-       setup_timer(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath,
-                   (unsigned long)cfhsi);
+       timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
        /* Setup the aggregation timer. */
-       setup_timer(&cfhsi->aggregation_timer, cfhsi_aggregation_tout,
-                   (unsigned long)cfhsi);
+       timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);
 
        /* Activate HSI interface. */
        res = cfhsi->ops->cfhsi_up(cfhsi->ops);
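
The sm_ftl and cfhsi hunks above, like many of the timer hunks that follow, apply the same kernel timer API conversion: the callback takes a struct timer_list * and recovers its container with from_timer() (a container_of() wrapper), and setup_timer(..., (unsigned long)ptr) becomes timer_setup(..., 0). A generic, illustrative sketch of the pattern, with names invented for the example:

struct example_dev {
	struct timer_list poll_timer;
	/* ... other driver state ... */
};

static void example_poll(struct timer_list *t)
{
	/* recover the enclosing structure from the embedded timer */
	struct example_dev *dev = from_timer(dev, t, poll_timer);

	/* ... periodic work ... */
	mod_timer(&dev->poll_timer, jiffies + HZ);
}

static void example_init(struct example_dev *dev)
{
	/* no more casting the device pointer through an unsigned long */
	timer_setup(&dev->poll_timer, example_poll, 0);
	mod_timer(&dev->poll_timer, jiffies + HZ);
}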
index a13a4896a8bddad19ae48f8c58bbaf2f3c8dce84..0626dcfd1f3d83ceaad91968cffd65370189cfac 100644 (file)
  * Below is some version info we got:
  *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
  *                                Filter? connected?  Passive detection  ception in MB
- *   MX25  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX25  FlexCAN2  03.00.00.00     no        no        no       no        no
  *   MX28  FlexCAN2  03.00.04.00    yes       yes        no       no        no
- *   MX35  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX35  FlexCAN2  03.00.00.00     no        no        no       no        no
  *   MX53  FlexCAN2  03.00.00.00    yes        no        no       no        no
  *   MX6s  FlexCAN3  10.00.12.00    yes       yes        no       no       yes
- *   VF610 FlexCAN3  ?               no       yes         ?      yes       yes?
+ *   VF610 FlexCAN3  ?               no       yes        no      yes       yes?
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
@@ -297,7 +297,8 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
 
 static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
        .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
-               FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+               FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
+               FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
 
 static const struct can_bittiming_const flexcan_bittiming_const = {
index 85268be0c913df5f5dc595405ade48efbd8add0a..55513411a82e68e11d6b1ca30e90ea4337a0f2ee 100644 (file)
@@ -258,21 +258,18 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
        /* if this frame is an echo, */
        if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
            !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
-               int n;
                unsigned long flags;
 
                spin_lock_irqsave(&priv->echo_lock, flags);
-               n = can_get_echo_skb(priv->ndev, msg->client);
+               can_get_echo_skb(priv->ndev, msg->client);
                spin_unlock_irqrestore(&priv->echo_lock, flags);
 
                /* count bytes of the echo instead of skb */
                stats->tx_bytes += cf_len;
                stats->tx_packets++;
 
-               if (n) {
-                       /* restart tx queue only if a slot is free */
-                       netif_wake_queue(priv->ndev);
-               }
+               /* restart tx queue (a slot is free) */
+               netif_wake_queue(priv->ndev);
 
                return 0;
        }
index b4efd711f824ccd1c832af8817e09bf2e00b2b5c..788c3464a3b0e95aaa101591750b9de493a34a18 100644 (file)
@@ -825,7 +825,10 @@ err_release_regions:
 err_disable_pci:
        pci_disable_device(pdev);
 
-       return err;
+       /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
+        * the probe() function must return a negative errno in case of failure
+        * (err is unchanged if negative) */
+       return pcibios_err_to_errno(err);
 }
 
 /* free the board structure object, as well as its resources: */
index 131026fbc2d77cbc3ccb5903daa10f8920f8ae17..5adc95c922eef2d9f968a2dea3bac7c2dd3bfda2 100644 (file)
@@ -717,7 +717,10 @@ failure_release_regions:
 failure_disable_pci:
        pci_disable_device(pdev);
 
-       return err;
+       /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
+        * the probe() function must return a negative errno in case of failure
+        * (err is unchanged if negative) */
+       return pcibios_err_to_errno(err);
 }
 
 static void peak_pci_remove(struct pci_dev *pdev)
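
Both PEAK PCI hunks apply the same pattern: pci_read_config_word() and related accessors return positive PCIBIOS_* codes, while a probe() routine must return a negative errno, so the exit path converts with pcibios_err_to_errno(), which leaves values that are already zero or negative unchanged. An illustrative sketch with a hypothetical probe function:

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	u16 cmd;
	int err;

	err = pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (err)
		goto fail;

	/* ... rest of the probe ... */
	return 0;

fail:
	/* positive PCIBIOS_* codes become negative errnos; negatives pass through */
	return pcibios_err_to_errno(err);
}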
index 4d4941469cfc06bfff3aeafaa0c3562b63702730..db6ea936dc3fc3fca00c939b2db6a938cf5011dc 100644 (file)
@@ -637,6 +637,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
                mbx_mask = hecc_read(priv, HECC_CANMIM);
                mbx_mask |= HECC_TX_MBOX_MASK;
                hecc_write(priv, HECC_CANMIM, mbx_mask);
+       } else {
+               /* repoll is done only if the whole budget is used */
+               num_pkts = quota;
        }
 
        return num_pkts;
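
The ti_hecc change above leans on the NAPI contract: a poll handler that returns its full budget is polled again, while returning less than the budget must be paired with completing NAPI and re-enabling interrupts. A generic, illustrative sketch of that contract (the helpers are hypothetical):

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_clean_rx(napi, budget);	/* hypothetical RX cleanup */

	if (work_done < budget) {
		/* all pending work handled: stop polling and re-arm the interrupt */
		napi_complete_done(napi, work_done);
		example_enable_rx_irq(napi);		/* hypothetical helper */
	}

	/* returning the full budget keeps the NAPI instance scheduled */
	return work_done;
}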
index b3d02759c226bbde64ea3bdcdd3314ca646b743b..b00358297424604634489a9b106bf313c06b7fc9 100644 (file)
@@ -288,6 +288,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
 
        case -ECONNRESET: /* unlink */
        case -ENOENT:
+       case -EPIPE:
+       case -EPROTO:
        case -ESHUTDOWN:
                return;
 
index 9fdb0f0bfa06a00a2ade5e74daef15798ab7d4a1..c6dcf93675c00585bd960f579988ea1c2d348d50 100644 (file)
@@ -393,6 +393,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
                break;
 
        case -ENOENT:
+       case -EPIPE:
+       case -EPROTO:
        case -ESHUTDOWN:
                return;
 
index 9b18d96ef52633ab34bb5ff39f4f643023dc308a..63587b8e6825add0dadc75b6e446981935d18adb 100644 (file)
@@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
                        }
 
                        if (pos + tmp->len > actual_len) {
-                               dev_err(dev->udev->dev.parent,
-                                       "Format error\n");
+                               dev_err_ratelimited(dev->udev->dev.parent,
+                                                   "Format error\n");
                                break;
                        }
 
@@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
        if (err) {
                netdev_err(netdev, "Error transmitting URB\n");
                usb_unanchor_urb(urb);
+               kfree(buf);
                usb_free_urb(urb);
                return err;
        }
@@ -1325,6 +1326,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
        case 0:
                break;
        case -ENOENT:
+       case -EPIPE:
+       case -EPROTO:
        case -ESHUTDOWN:
                return;
        default:
@@ -1333,7 +1336,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
                goto resubmit_urb;
        }
 
-       while (pos <= urb->actual_length - MSG_HEADER_LEN) {
+       while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
                msg = urb->transfer_buffer + pos;
 
                /* The Kvaser firmware can only read and write messages that
@@ -1352,7 +1355,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
                }
 
                if (pos + msg->len > urb->actual_length) {
-                       dev_err(dev->udev->dev.parent, "Format error\n");
+                       dev_err_ratelimited(dev->udev->dev.parent,
+                                           "Format error\n");
                        break;
                }
 
@@ -1768,6 +1772,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
 
                usb_unanchor_urb(urb);
+               kfree(buf);
 
                stats->tx_dropped++;
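
The kvaser_usb hunks add the kfree() of the transfer buffer that was missing on the usb_submit_urb() failure path; usb_free_urb() does not free a separately kmalloc'ed buffer unless URB_FREE_BUFFER was set. A hedged sketch of the submit error path, with device specifics invented for the example:

static int example_submit(struct example_usb_dev *dev, unsigned int pipe, void *ctx)
{
	struct urb *urb;
	void *buf;
	int err;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	buf = kmalloc(EXAMPLE_MSG_LEN, GFP_ATOMIC);	/* EXAMPLE_MSG_LEN is illustrative */
	if (!buf) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	usb_fill_bulk_urb(urb, dev->udev, pipe, buf, EXAMPLE_MSG_LEN,
			  example_write_complete, ctx);	/* hypothetical completion */
	usb_anchor_urb(urb, &dev->tx_submitted);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		usb_unanchor_urb(urb);
		kfree(buf);		/* the URB does not own this buffer */
		usb_free_urb(urb);
		return err;
	}

	usb_free_urb(urb);		/* drop our reference; the core holds its own */
	return 0;
}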
 
index 7f0272558befe9ecdeaf110d2b95f8754cea8939..8d8c2086424d09b2c93a377be756dead26dc7979 100644 (file)
@@ -592,6 +592,8 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
                break;
 
        case -ENOENT:
+       case -EPIPE:
+       case -EPROTO:
        case -ESHUTDOWN:
                return;
 
@@ -862,7 +864,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
                goto cleanup_unregister_candev;
        }
 
-       dev_info(&intf->dev, "Microchip CAN BUS analizer connected\n");
+       dev_info(&intf->dev, "Microchip CAN BUS Analyzer connected\n");
 
        return 0;
 
index d000cb62d6ae8c68233e123bc63686d3ab71dd67..27861c417c9404c9c3df841daac95015978f3a69 100644 (file)
@@ -524,6 +524,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb)
                break;
 
        case -ENOENT:
+       case -EPIPE:
+       case -EPROTO:
        case -ESHUTDOWN:
                return;
 
index b6e2bfd7d2d6aae5f05794c71f818e14a844a6b4..8b1a859f5140c3ebbd9bd5e190112a7835afbe2c 100644 (file)
@@ -165,9 +165,16 @@ static unsigned int network_rec_config_shadow = 0;
 
 static unsigned int network_tr_ctrl_shadow = 0;
 
+/* Timers */
+static void e100_check_speed(struct timer_list *unused);
+static void e100_clear_network_leds(struct timer_list *unused);
+static void e100_check_duplex(struct timer_list *unused);
+static DEFINE_TIMER(speed_timer, e100_check_speed);
+static DEFINE_TIMER(clear_led_timer, e100_clear_network_leds);
+static DEFINE_TIMER(duplex_timer, e100_check_duplex);
+static struct net_device *timer_dev;
+
 /* Network speed indication. */
-static DEFINE_TIMER(speed_timer, NULL);
-static DEFINE_TIMER(clear_led_timer, NULL);
 static int current_speed; /* Speed read from transceiver */
 static int current_speed_selection; /* Speed selected by user */
 static unsigned long led_next_time;
@@ -175,7 +182,6 @@ static int led_active;
 static int rx_queue_len;
 
 /* Duplex */
-static DEFINE_TIMER(duplex_timer, NULL);
 static int full_duplex;
 static enum duplex current_duplex;
 
@@ -200,9 +206,7 @@ static void update_rx_stats(struct net_device_stats *);
 static void update_tx_stats(struct net_device_stats *);
 static int e100_probe_transceiver(struct net_device* dev);
 
-static void e100_check_speed(unsigned long priv);
 static void e100_set_speed(struct net_device* dev, unsigned long speed);
-static void e100_check_duplex(unsigned long priv);
 static void e100_set_duplex(struct net_device* dev, enum duplex);
 static void e100_negotiate(struct net_device* dev);
 
@@ -214,7 +218,6 @@ static void e100_send_mdio_bit(unsigned char bit);
 static unsigned char e100_receive_mdio_bit(void);
 static void e100_reset_transceiver(struct net_device* net);
 
-static void e100_clear_network_leds(unsigned long dummy);
 static void e100_set_network_leds(int active);
 
 static const struct ethtool_ops e100_ethtool_ops;
@@ -381,17 +384,12 @@ etrax_ethernet_init(void)
        current_speed = 10;
        current_speed_selection = 0; /* Auto */
        speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
-       speed_timer.data = (unsigned long)dev;
-       speed_timer.function = e100_check_speed;
-
-       clear_led_timer.function = e100_clear_network_leds;
-       clear_led_timer.data = (unsigned long)dev;
 
        full_duplex = 0;
        current_duplex = autoneg;
        duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
-        duplex_timer.data = (unsigned long)dev;
-       duplex_timer.function = e100_check_duplex;
+
+       timer_dev = dev;
 
         /* Initialize mii interface */
        np->mii_if.phy_id_mask = 0x1f;
@@ -680,9 +678,9 @@ intel_check_speed(struct net_device* dev)
 }
 #endif
 static void
-e100_check_speed(unsigned long priv)
+e100_check_speed(struct timer_list *unused)
 {
-       struct net_device* dev = (struct net_device*)priv;
+       struct net_device* dev = timer_dev;
        struct net_local *np = netdev_priv(dev);
        static int led_initiated = 0;
        unsigned long data;
@@ -799,9 +797,9 @@ e100_set_speed(struct net_device* dev, unsigned long speed)
 }
 
 static void
-e100_check_duplex(unsigned long priv)
+e100_check_duplex(struct timer_list *unused)
 {
-       struct net_device *dev = (struct net_device *)priv;
+       struct net_device *dev = timer_dev;
        struct net_local *np = netdev_priv(dev);
        int old_duplex;
 
@@ -1669,9 +1667,9 @@ e100_hardware_send_packet(struct net_local *np, char *buf, int length)
 }
 
 static void
-e100_clear_network_leds(unsigned long dummy)
+e100_clear_network_leds(struct timer_list *unused)
 {
-       struct net_device *dev = (struct net_device *)dummy;
+       struct net_device *dev = timer_dev;
        struct net_local *np = netdev_priv(dev);
 
        spin_lock(&np->led_lock);
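
The cris e100 hunks above use the post-conversion DEFINE_TIMER(), which now takes only a name and a callback; per-timer context has to live elsewhere, here a file-scope pointer filled in at init time. An illustrative sketch of that statically defined form (names invented for the example):

static void example_tick(struct timer_list *unused);
static DEFINE_TIMER(example_timer, example_tick);
static struct net_device *example_timer_dev;	/* replaces the old .data field */

static void example_tick(struct timer_list *unused)
{
	struct net_device *dev = example_timer_dev;

	/* ... periodic work on dev ... */
	mod_timer(&example_timer, jiffies + HZ);
}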
index ea01f24f15e77f4b9765d3bed76ce71527e39dad..b62d47210db8d1e1af522b4ddf1ed073e0977991 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
-#include <linux/of.h>
 #include <linux/phy.h>
 #include <linux/phy_fixed.h>
 #include <linux/mii.h>
index b721a2009b5030f440bed9eab8ed4f7003ae25a4..23b45da784cb601a7abf84b212717aee7dc64403 100644 (file)
@@ -625,7 +625,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
        bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
                                slice_num, false);
        bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
-                               slice_num, true);
+                               SLICE_NUM_MASK, true);
 
        /* Insert into TCAM now because we need to insert a second rule */
        bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -699,7 +699,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
        /* Insert into Action and policer RAMs now, set chain ID to
         * the one we are chained to
         */
-       ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
+       ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
                                      queue_num, true);
        if (ret)
                goto out_err;
index 8171055fde7a0238fb2fbc691a482c211d4d8d5b..66d33e97cbc5426b71395f878bc4e648dc3f5182 100644 (file)
@@ -339,7 +339,7 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
        u16 mask;
 
        mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask);
-       mask |= GENMASK(chip->g1_irq.nirqs, 0);
+       mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
        mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
 
        free_irq(chip->irq, chip);
@@ -395,7 +395,7 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
        return 0;
 
 out_disable:
-       mask |= GENMASK(chip->g1_irq.nirqs, 0);
+       mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
        mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
 
 out_mapping:
@@ -2177,6 +2177,19 @@ static const struct of_device_id mv88e6xxx_mdio_external_match[] = {
        { },
 };
 
+static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
+
+{
+       struct mv88e6xxx_mdio_bus *mdio_bus;
+       struct mii_bus *bus;
+
+       list_for_each_entry(mdio_bus, &chip->mdios, list) {
+               bus = mdio_bus->bus;
+
+               mdiobus_unregister(bus);
+       }
+}
+
 static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
                                    struct device_node *np)
 {
@@ -2201,27 +2214,16 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
                match = of_match_node(mv88e6xxx_mdio_external_match, child);
                if (match) {
                        err = mv88e6xxx_mdio_register(chip, child, true);
-                       if (err)
+                       if (err) {
+                               mv88e6xxx_mdios_unregister(chip);
                                return err;
+                       }
                }
        }
 
        return 0;
 }
 
-static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
-
-{
-       struct mv88e6xxx_mdio_bus *mdio_bus;
-       struct mii_bus *bus;
-
-       list_for_each_entry(mdio_bus, &chip->mdios, list) {
-               bus = mdio_bus->bus;
-
-               mdiobus_unregister(bus);
-       }
-}
-
 static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
 {
        struct mv88e6xxx_chip *chip = ds->priv;
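
The mv88e6xxx fix above corrects a mask-polarity bug: disabling a block of interrupt sources means clearing their enable bits, so the code must AND with the complement of GENMASK() rather than OR the mask in. A hedged sketch of the corrected operation (the register accessors are hypothetical):

static void example_disable_irqs(struct example_chip *chip, int nirqs)
{
	u16 mask;

	example_read_ctl1(chip, &mask);		/* hypothetical register read */
	mask &= ~GENMASK(nirqs, 0);		/* clear bits nirqs..0 to disable them */
	example_write_ctl1(chip, mask);		/* hypothetical register write */
}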
index 436668bd50dc84dc72f587402348224fe2d9021e..46af8052e535361e7d73ce62f4e6a017c1352bbc 100644 (file)
@@ -149,9 +149,9 @@ static void mv88e6xxx_phy_ppu_reenable_work(struct work_struct *ugly)
        mutex_unlock(&chip->reg_lock);
 }
 
-static void mv88e6xxx_phy_ppu_reenable_timer(unsigned long _ps)
+static void mv88e6xxx_phy_ppu_reenable_timer(struct timer_list *t)
 {
-       struct mv88e6xxx_chip *chip = (void *)_ps;
+       struct mv88e6xxx_chip *chip = from_timer(chip, t, ppu_timer);
 
        schedule_work(&chip->ppu_work);
 }
@@ -193,8 +193,7 @@ static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip)
 {
        mutex_init(&chip->ppu_mutex);
        INIT_WORK(&chip->ppu_work, mv88e6xxx_phy_ppu_reenable_work);
-       setup_timer(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer,
-                   (unsigned long)chip);
+       timer_setup(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer, 0);
 }
 
 static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip)
index a7801f6668a5d0e394fd01078bb10c50998cdf56..6315774d72b3304d5ccd1729cb150c183bbac19e 100644 (file)
@@ -338,6 +338,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX;
                break;
        case PHY_INTERFACE_MODE_XGMII:
+       case PHY_INTERFACE_MODE_XAUI:
                cmode = MV88E6XXX_PORT_STS_CMODE_XAUI;
                break;
        case PHY_INTERFACE_MODE_RXAUI:
index fccce4b477782ae7d41b927e9a51bf7d79a74774..74263f8efe1a622d86af9a9b79f8d965a5870b17 100644 (file)
@@ -139,9 +139,9 @@ static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);
 
 static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
 
-static void eql_timer(unsigned long param)
+static void eql_timer(struct timer_list *t)
 {
-       equalizer_t *eql = (equalizer_t *) param;
+       equalizer_t *eql = from_timer(eql, t, timer);
        struct list_head *this, *tmp, *head;
 
        spin_lock(&eql->queue.lock);
@@ -178,7 +178,7 @@ static void __init eql_setup(struct net_device *dev)
 {
        equalizer_t *eql = netdev_priv(dev);
 
-       setup_timer(&eql->timer, eql_timer, (unsigned long)eql);
+       timer_setup(&eql->timer, eql_timer, 0);
        eql->timer.expires      = jiffies + EQL_DEFAULT_RESCHED_IVAL;
 
        spin_lock_init(&eql->queue.lock);
index 0658cde1586a3fe50a7f0cb22439bd9a5a659d40..7120f2b9c6efa486040ceebf89ed4adeda6fa28e 100644 (file)
@@ -1092,9 +1092,11 @@ static void tx_reclaim_skb(struct bfin_mac_local *lp)
        return;
 }
 
-static void tx_reclaim_skb_timeout(unsigned long lp)
+static void tx_reclaim_skb_timeout(struct timer_list *t)
 {
-       tx_reclaim_skb((struct bfin_mac_local *)lp);
+       struct bfin_mac_local *lp = from_timer(lp, t, tx_reclaim_timer);
+
+       tx_reclaim_skb(lp);
 }
 
 static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
@@ -1650,8 +1652,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
        ndev->netdev_ops = &bfin_mac_netdev_ops;
        ndev->ethtool_ops = &bfin_mac_ethtool_ops;
 
-       setup_timer(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout,
-                   (unsigned long)lp);
+       timer_setup(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout, 0);
 
        lp->flags = 0;
        netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM);
index 658e92f79d36b1adb7cc192f5d9fe129ae7ec8cd..48220b6c600d38cd0d84b68d73ffb75b8ad10b01 100644 (file)
@@ -3080,9 +3080,9 @@ err_out:
  * The routine called when the error timer expires, to track the number of
  * recurring errors.
  */
-static void et131x_error_timer_handler(unsigned long data)
+static void et131x_error_timer_handler(struct timer_list *t)
 {
-       struct et131x_adapter *adapter = (struct et131x_adapter *)data;
+       struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
        struct phy_device *phydev = adapter->netdev->phydev;
 
        if (et1310_in_phy_coma(adapter)) {
@@ -3624,8 +3624,7 @@ static int et131x_open(struct net_device *netdev)
        int result;
 
        /* Start the timer to track NIC errors */
-       setup_timer(&adapter->error_timer, et131x_error_timer_handler,
-                   (unsigned long)adapter);
+       timer_setup(&adapter->error_timer, et131x_error_timer_handler, 0);
        adapter->error_timer.expires = jiffies +
                msecs_to_jiffies(TX_ERROR_PERIOD);
        add_timer(&adapter->error_timer);
index 1c1ddd891ca3ef1acc3e93d9ba5ffefe7e64add7..97c5a89a9cf7a4f1a65dbef70d889efd7d661c20 100644 (file)
@@ -2859,9 +2859,9 @@ static void ena_update_host_info(struct ena_admin_host_info *host_info,
                (netdev->features & GENMASK_ULL(63, 32)) >> 32;
 }
 
-static void ena_timer_service(unsigned long data)
+static void ena_timer_service(struct timer_list *t)
 {
-       struct ena_adapter *adapter = (struct ena_adapter *)data;
+       struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
        u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
        struct ena_admin_host_info *host_info =
                adapter->ena_dev->host_attr.host_info;
@@ -3278,8 +3278,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        ena_update_hints(adapter, &get_feat_ctx.hw_hints);
 
-       setup_timer(&adapter->timer_service, ena_timer_service,
-                   (unsigned long)adapter);
+       timer_setup(&adapter->timer_service, ena_timer_service, 0);
        mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
 
        dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
index 57e796870595bb9a305a7579154b0dbd1cbeec60..105fdb958cefb1d28e2f57a46da522e32536c3c7 100644 (file)
@@ -50,7 +50,7 @@
 #define AQ_CFG_PCI_FUNC_MSIX_IRQS   9U
 #define AQ_CFG_PCI_FUNC_PORTS       2U
 
-#define AQ_CFG_SERVICE_TIMER_INTERVAL    (2 * HZ)
+#define AQ_CFG_SERVICE_TIMER_INTERVAL    (1 * HZ)
 #define AQ_CFG_POLLING_TIMER_INTERVAL   ((unsigned int)(2 * HZ))
 
 #define AQ_CFG_SKB_FRAGS_MAX   32U
@@ -80,6 +80,7 @@
 #define AQ_CFG_DRV_VERSION     __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
                                __stringify(NIC_MINOR_DRIVER_VERSION)"."\
                                __stringify(NIC_BUILD_DRIVER_VERSION)"."\
-                               __stringify(NIC_REVISION_DRIVER_VERSION)
+                               __stringify(NIC_REVISION_DRIVER_VERSION) \
+                               AQ_CFG_DRV_VERSION_SUFFIX
 
 #endif /* AQ_CFG_H */
index 70efb7467bf3a1c6f298e9d5697b38307e91dab7..f2d8063a2cefd8f7581f0e2182b81b1ce773a92a 100644 (file)
@@ -66,14 +66,14 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
        "OutUCast",
        "OutMCast",
        "OutBCast",
-       "InUCastOctects",
-       "OutUCastOctects",
-       "InMCastOctects",
-       "OutMCastOctects",
-       "InBCastOctects",
-       "OutBCastOctects",
-       "InOctects",
-       "OutOctects",
+       "InUCastOctets",
+       "OutUCastOctets",
+       "InMCastOctets",
+       "OutMCastOctets",
+       "InBCastOctets",
+       "OutBCastOctets",
+       "InOctets",
+       "OutOctets",
        "InPacketsDma",
        "OutPacketsDma",
        "InOctetsDma",
index 0207927dc8a6ab4ac76c46fb17669b7e50e7ae1e..b3825de6cdfb03b7f176e4b5a4cee00a20306982 100644 (file)
@@ -46,6 +46,28 @@ struct aq_hw_link_status_s {
        unsigned int mbps;
 };
 
+struct aq_stats_s {
+       u64 uprc;
+       u64 mprc;
+       u64 bprc;
+       u64 erpt;
+       u64 uptc;
+       u64 mptc;
+       u64 bptc;
+       u64 erpr;
+       u64 mbtc;
+       u64 bbtc;
+       u64 mbrc;
+       u64 bbrc;
+       u64 ubrc;
+       u64 ubtc;
+       u64 dpc;
+       u64 dma_pkt_rc;
+       u64 dma_pkt_tc;
+       u64 dma_oct_rc;
+       u64 dma_oct_tc;
+};
+
 #define AQ_HW_IRQ_INVALID 0U
 #define AQ_HW_IRQ_LEGACY  1U
 #define AQ_HW_IRQ_MSI     2U
@@ -85,7 +107,9 @@ struct aq_hw_ops {
        void (*destroy)(struct aq_hw_s *self);
 
        int (*get_hw_caps)(struct aq_hw_s *self,
-                          struct aq_hw_caps_s *aq_hw_caps);
+                          struct aq_hw_caps_s *aq_hw_caps,
+                          unsigned short device,
+                          unsigned short subsystem_device);
 
        int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
                               unsigned int frags);
@@ -164,8 +188,7 @@ struct aq_hw_ops {
 
        int (*hw_update_stats)(struct aq_hw_s *self);
 
-       int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
-                              unsigned int *p_count);
+       struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
 
        int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
 
index 483e97691eeae2de4604e49cdb8fd8d60fb0dda4..75a894a9251c2114e7d30d40ab795f89ded413fe 100644 (file)
@@ -37,6 +37,8 @@ static unsigned int aq_itr_rx;
 module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
 MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
 
+static void aq_nic_update_ndev_stats(struct aq_nic_s *self);
+
 static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
 {
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -163,14 +165,11 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
        return 0;
 }
 
-static void aq_nic_service_timer_cb(unsigned long param)
+static void aq_nic_service_timer_cb(struct timer_list *t)
 {
-       struct aq_nic_s *self = (struct aq_nic_s *)param;
-       struct net_device *ndev = aq_nic_get_ndev(self);
+       struct aq_nic_s *self = from_timer(self, t, service_timer);
+       int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
        int err = 0;
-       unsigned int i = 0U;
-       struct aq_ring_stats_rx_s stats_rx;
-       struct aq_ring_stats_tx_s stats_tx;
 
        if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
                goto err_exit;
@@ -182,28 +181,19 @@ static void aq_nic_service_timer_cb(unsigned long param)
        if (self->aq_hw_ops.hw_update_stats)
                self->aq_hw_ops.hw_update_stats(self->aq_hw);
 
-       memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
-       memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
-       for (i = AQ_DIMOF(self->aq_vec); i--;) {
-               if (self->aq_vec[i])
-                       aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
-       }
+       aq_nic_update_ndev_stats(self);
 
-       ndev->stats.rx_packets = stats_rx.packets;
-       ndev->stats.rx_bytes = stats_rx.bytes;
-       ndev->stats.rx_errors = stats_rx.errors;
-       ndev->stats.tx_packets = stats_tx.packets;
-       ndev->stats.tx_bytes = stats_tx.bytes;
-       ndev->stats.tx_errors = stats_tx.errors;
+       /* If no link - use faster timer rate to detect link up asap */
+       if (!netif_carrier_ok(self->ndev))
+               ctimer = max(ctimer / 2, 1);
 
 err_exit:
-       mod_timer(&self->service_timer,
-                 jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
+       mod_timer(&self->service_timer, jiffies + ctimer);
 }
 
-static void aq_nic_polling_timer_cb(unsigned long param)
+static void aq_nic_polling_timer_cb(struct timer_list *t)
 {
-       struct aq_nic_s *self = (struct aq_nic_s *)param;
+       struct aq_nic_s *self = from_timer(self, t, polling_timer);
        struct aq_vec_s *aq_vec = NULL;
        unsigned int i = 0U;
 
@@ -222,7 +212,7 @@ static struct net_device *aq_nic_ndev_alloc(void)
 
 struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
                                   const struct ethtool_ops *et_ops,
-                                  struct device *dev,
+                                  struct pci_dev *pdev,
                                   struct aq_pci_func_s *aq_pci_func,
                                   unsigned int port,
                                   const struct aq_hw_ops *aq_hw_ops)
@@ -242,7 +232,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
        ndev->netdev_ops = ndev_ops;
        ndev->ethtool_ops = et_ops;
 
-       SET_NETDEV_DEV(ndev, dev);
+       SET_NETDEV_DEV(ndev, &pdev->dev);
 
        ndev->if_port = port;
        self->ndev = ndev;
@@ -254,7 +244,8 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
 
        self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
                                                &self->aq_hw_ops);
-       err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps);
+       err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps,
+                                         pdev->device, pdev->subsystem_device);
        if (err < 0)
                goto err_exit;
 
@@ -440,14 +431,12 @@ int aq_nic_start(struct aq_nic_s *self)
        err = aq_nic_update_interrupt_moderation_settings(self);
        if (err)
                goto err_exit;
-       setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
-                   (unsigned long)self);
+       timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
        mod_timer(&self->service_timer, jiffies +
                        AQ_CFG_SERVICE_TIMER_INTERVAL);
 
        if (self->aq_nic_cfg.is_polling) {
-               setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb,
-                           (unsigned long)self);
+               timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
                mod_timer(&self->polling_timer, jiffies +
                          AQ_CFG_POLLING_TIMER_INTERVAL);
        } else {
@@ -751,16 +740,40 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
 
 void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
 {
-       struct aq_vec_s *aq_vec = NULL;
        unsigned int i = 0U;
        unsigned int count = 0U;
-       int err = 0;
+       struct aq_vec_s *aq_vec = NULL;
+       struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
 
-       err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count);
-       if (err < 0)
+       if (!stats)
                goto err_exit;
 
-       data += count;
+       data[i] = stats->uprc + stats->mprc + stats->bprc;
+       data[++i] = stats->uprc;
+       data[++i] = stats->mprc;
+       data[++i] = stats->bprc;
+       data[++i] = stats->erpt;
+       data[++i] = stats->uptc + stats->mptc + stats->bptc;
+       data[++i] = stats->uptc;
+       data[++i] = stats->mptc;
+       data[++i] = stats->bptc;
+       data[++i] = stats->ubrc;
+       data[++i] = stats->ubtc;
+       data[++i] = stats->mbrc;
+       data[++i] = stats->mbtc;
+       data[++i] = stats->bbrc;
+       data[++i] = stats->bbtc;
+       data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+       data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+       data[++i] = stats->dma_pkt_rc;
+       data[++i] = stats->dma_pkt_tc;
+       data[++i] = stats->dma_oct_rc;
+       data[++i] = stats->dma_oct_tc;
+       data[++i] = stats->dpc;
+
+       i++;
+
+       data += i;
        count = 0U;
 
        for (i = 0U, aq_vec = self->aq_vec[0];
@@ -770,7 +783,20 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
        }
 
 err_exit:;
-       (void)err;
+}
+
+static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
+{
+       struct net_device *ndev = self->ndev;
+       struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
+
+       ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
+       ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
+       ndev->stats.rx_errors = stats->erpr;
+       ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc;
+       ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc;
+       ndev->stats.tx_errors = stats->erpt;
+       ndev->stats.multicast = stats->mprc;
 }
 
 void aq_nic_get_link_ksettings(struct aq_nic_s *self,
index 4309983acdd6f7502fa05869f79336fd459dc2fe..3c9f8db03d5f2a576c83064b18e930d5af6b7e1c 100644 (file)
@@ -71,7 +71,7 @@ struct aq_nic_cfg_s {
 
 struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
                                   const struct ethtool_ops *et_ops,
-                                  struct device *dev,
+                                  struct pci_dev *pdev,
                                   struct aq_pci_func_s *aq_pci_func,
                                   unsigned int port,
                                   const struct aq_hw_ops *aq_hw_ops);
index cadaa646c89f4b741382b4beee72c6ec3e3bfc18..58c29d04b186e634686ca667bc5afe9bd86e63a3 100644 (file)
@@ -51,7 +51,8 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
        pci_set_drvdata(pdev, self);
        self->pdev = pdev;
 
-       err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps);
+       err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device,
+                                    pdev->subsystem_device);
        if (err < 0)
                goto err_exit;
 
@@ -59,7 +60,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
 
        for (port = 0; port < self->ports; ++port) {
                struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
-                                                           &pdev->dev, self,
+                                                           pdev, self,
                                                            port, aq_hw_ops);
 
                if (!aq_nic) {
index 07b3c49a16a4266b4fb312bb79198f9ba0c60f04..f18dce14c93cfa89f5c091db5f5b06c6e882aa68 100644 (file)
 #include "hw_atl_a0_internal.h"
 
 static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
-                                struct aq_hw_caps_s *aq_hw_caps)
+                                struct aq_hw_caps_s *aq_hw_caps,
+                                unsigned short device,
+                                unsigned short subsystem_device)
 {
        memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
+
+       if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
+               aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
+
+       if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
+               aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
+               aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G;
+       }
+
        return 0;
 }
 
@@ -333,6 +344,10 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
        hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
        hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
 
+       /* Reset link status and read out initial hardware counters */
+       self->aq_link_status.mbps = 0;
+       hw_atl_utils_update_stats(self);
+
        err = aq_hw_err_from_flags(self);
        if (err < 0)
                goto err_exit;
index ec68c20efcbdb6079b9dba4b8200ad8f1f450233..e4a22ce7bf09d50f21ffed6425783d7be7c88da5 100644 (file)
 #include "hw_atl_utils.h"
 #include "hw_atl_llh.h"
 #include "hw_atl_b0_internal.h"
+#include "hw_atl_llh_internal.h"
 
 static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
-                                struct aq_hw_caps_s *aq_hw_caps)
+                                struct aq_hw_caps_s *aq_hw_caps,
+                                unsigned short device,
+                                unsigned short subsystem_device)
 {
        memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
+
+       if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
+               aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
+
+       if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
+               aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
+               aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G;
+       }
+
        return 0;
 }
 
@@ -357,6 +369,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
        };
 
        int err = 0;
+       u32 val;
 
        self->aq_nic_cfg = aq_nic_cfg;
 
@@ -374,6 +387,20 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
        hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
        hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
 
+       /* Force limit MRRS on RDM/TDM to 2K */
+       val = aq_hw_read_reg(self, pci_reg_control6_adr);
+       aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404);
+
+       /* TX DMA total request limit. B0 hardware is not capable of
+        * handling more than (8K-MRRS) of incoming DMA data.
+        * Value 24 is in 256-byte units.
+        */
+       aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24);
+
+       /* Reset link status and read out initial hardware counters */
+       self->aq_link_status.mbps = 0;
+       hw_atl_utils_update_stats(self);
+
        err = aq_hw_err_from_flags(self);
        if (err < 0)
                goto err_exit;
index 5527fc0e5942d6a8f4e14071a9e40a12e02ecf27..93450ec930e89f71c83253ac094e9928206df75f 100644 (file)
 #define tx_dma_desc_base_addrmsw_adr(descriptor) \
                        (0x00007c04u + (descriptor) * 0x40)
 
+/* tx dma total request limit */
+#define tx_dma_total_req_limit_adr 0x00007b20u
+
 /* tx interrupt moderation control register definitions
  * Preprocessor definitions for TX Interrupt Moderation Control Register
  * Base Address: 0x00008980
 /* default value of bitfield reg_res_dsbl */
 #define pci_reg_res_dsbl_default 0x1
 
+/* PCI core control register */
+#define pci_reg_control6_adr 0x1014u
+
 /* global microprocessor scratch pad definitions */
 #define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
 
index 1fe016fc4bc704361ca68ee39f3e443715505e8c..f2ce12ed4218ee8d39a462b6ffc1fada96358506 100644 (file)
@@ -503,73 +503,43 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
        struct hw_atl_s *hw_self = PHAL_ATLANTIC;
        struct hw_aq_atl_utils_mbox mbox;
 
-       if (!self->aq_link_status.mbps)
-               return 0;
-
        hw_atl_utils_mpi_read_stats(self, &mbox);
 
 #define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
                        mbox.stats._N_ - hw_self->last_stats._N_)
-
-       AQ_SDELTA(uprc);
-       AQ_SDELTA(mprc);
-       AQ_SDELTA(bprc);
-       AQ_SDELTA(erpt);
-
-       AQ_SDELTA(uptc);
-       AQ_SDELTA(mptc);
-       AQ_SDELTA(bptc);
-       AQ_SDELTA(erpr);
-
-       AQ_SDELTA(ubrc);
-       AQ_SDELTA(ubtc);
-       AQ_SDELTA(mbrc);
-       AQ_SDELTA(mbtc);
-       AQ_SDELTA(bbrc);
-       AQ_SDELTA(bbtc);
-       AQ_SDELTA(dpc);
-
+       if (self->aq_link_status.mbps) {
+               AQ_SDELTA(uprc);
+               AQ_SDELTA(mprc);
+               AQ_SDELTA(bprc);
+               AQ_SDELTA(erpt);
+
+               AQ_SDELTA(uptc);
+               AQ_SDELTA(mptc);
+               AQ_SDELTA(bptc);
+               AQ_SDELTA(erpr);
+
+               AQ_SDELTA(ubrc);
+               AQ_SDELTA(ubtc);
+               AQ_SDELTA(mbrc);
+               AQ_SDELTA(mbtc);
+               AQ_SDELTA(bbrc);
+               AQ_SDELTA(bbtc);
+               AQ_SDELTA(dpc);
+       }
 #undef AQ_SDELTA
+       hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self);
+       hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self);
+       hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self);
+       hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);
 
        memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
 
        return 0;
 }
 
-int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
-                             u64 *data, unsigned int *p_count)
+struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
 {
-       struct hw_atl_s *hw_self = PHAL_ATLANTIC;
-       struct hw_atl_stats_s *stats = &hw_self->curr_stats;
-       int i = 0;
-
-       data[i] = stats->uprc + stats->mprc + stats->bprc;
-       data[++i] = stats->uprc;
-       data[++i] = stats->mprc;
-       data[++i] = stats->bprc;
-       data[++i] = stats->erpt;
-       data[++i] = stats->uptc + stats->mptc + stats->bptc;
-       data[++i] = stats->uptc;
-       data[++i] = stats->mptc;
-       data[++i] = stats->bptc;
-       data[++i] = stats->ubrc;
-       data[++i] = stats->ubtc;
-       data[++i] = stats->mbrc;
-       data[++i] = stats->mbtc;
-       data[++i] = stats->bbrc;
-       data[++i] = stats->bbtc;
-       data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
-       data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
-       data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self);
-       data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self);
-       data[++i] = stats_rx_dma_good_octet_counterlsw_get(self);
-       data[++i] = stats_tx_dma_good_octet_counterlsw_get(self);
-       data[++i] = stats->dpc;
-
-       if (p_count)
-               *p_count = ++i;
-
-       return 0;
+       return &PHAL_ATLANTIC->curr_stats;
 }
 
 static const u32 hw_atl_utils_hw_mac_regs[] = {
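
The AQ_SDELTA() macro retained above accumulates firmware counters as deltas against the previous mailbox snapshot, which keeps the driver-side totals monotonic even if the firmware counters reset, and the hunk now only folds in deltas while the link is up. A simplified, illustrative sketch of the same delta-accumulation idea without the macro (struct names invented for the example):

struct example_stats {
	u64 uprc;	/* unicast packets received */
};

static void example_accumulate(struct example_stats *curr,
			       struct example_stats *last,
			       const struct example_stats *mbox)
{
	/* fold in only what the firmware counted since the last snapshot */
	curr->uprc += mbox->uprc - last->uprc;

	/* remember this snapshot for the next delta */
	*last = *mbox;
}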
index c99cc690e425bb72907df675e04a196819cfec02..21aeca6908d3b6dac5ec5ced1285c89cdba5acf2 100644 (file)
@@ -129,7 +129,7 @@ struct __packed hw_aq_atl_utils_mbox {
 struct __packed hw_atl_s {
        struct aq_hw_s base;
        struct hw_atl_stats_s last_stats;
-       struct hw_atl_stats_s curr_stats;
+       struct aq_stats_s curr_stats;
        u64 speed;
        unsigned int chip_features;
        u32 fw_ver_actual;
@@ -207,8 +207,6 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
 
 int hw_atl_utils_update_stats(struct aq_hw_s *self);
 
-int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
-                             u64 *data,
-                             unsigned int *p_count);
+struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
 
 #endif /* HW_ATL_UTILS_H */
index 0de858d215c22d036d6c120bafe996e12ada1287..9009f2651e706b66e50b998b95a12283cb3b0e9b 100644 (file)
 #define VER_H
 
 #define NIC_MAJOR_DRIVER_VERSION           1
-#define NIC_MINOR_DRIVER_VERSION           5
-#define NIC_BUILD_DRIVER_VERSION           345
+#define NIC_MINOR_DRIVER_VERSION           6
+#define NIC_BUILD_DRIVER_VERSION           13
 #define NIC_REVISION_DRIVER_VERSION        0
 
+#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
+
 #endif /* VER_H */
index 3c63b16d485f4bb3a7587e9e6d36dd0e121668d2..d9efbc8d783b84b128379e0ce58a43b005a8ab58 100644 (file)
@@ -159,6 +159,8 @@ struct arc_emac_priv {
        unsigned int link;
        unsigned int duplex;
        unsigned int speed;
+
+       unsigned int rx_missed_errors;
 };
 
 /**
index 3241af1ce7182824c09ee3ad774f122565f6c940..bd277b0dc615118a58b81dfba5b040e26fa667ba 100644 (file)
@@ -26,6 +26,8 @@
 
 #include "emac.h"
 
+static void arc_emac_restart(struct net_device *ndev);
+
 /**
  * arc_emac_tx_avail - Return the number of available slots in the tx ring.
  * @priv: Pointer to ARC EMAC private data structure.
@@ -210,39 +212,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
                        continue;
                }
 
-               pktlen = info & LEN_MASK;
-               stats->rx_packets++;
-               stats->rx_bytes += pktlen;
-               skb = rx_buff->skb;
-               skb_put(skb, pktlen);
-               skb->dev = ndev;
-               skb->protocol = eth_type_trans(skb, ndev);
-
-               dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
-                                dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
-
-               /* Prepare the BD for next cycle */
-               rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
-                                                        EMAC_BUFFER_SIZE);
-               if (unlikely(!rx_buff->skb)) {
+               /* Prepare the BD for the next cycle. Call netif_receive_skb()
+                * only if a new skb was allocated and mapped, to avoid holes
+                * in the RX FIFO.
+                */
+               skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
+               if (unlikely(!skb)) {
+                       if (net_ratelimit())
+                               netdev_err(ndev, "cannot allocate skb\n");
+                       /* Return ownership to EMAC */
+                       rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
                        stats->rx_errors++;
-                       /* Because receive_skb is below, increment rx_dropped */
                        stats->rx_dropped++;
                        continue;
                }
 
-               /* receive_skb only if new skb was allocated to avoid holes */
-               netif_receive_skb(skb);
-
-               addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+               addr = dma_map_single(&ndev->dev, (void *)skb->data,
                                      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
                if (dma_mapping_error(&ndev->dev, addr)) {
                        if (net_ratelimit())
-                               netdev_err(ndev, "cannot dma map\n");
-                       dev_kfree_skb(rx_buff->skb);
+                               netdev_err(ndev, "cannot map dma buffer\n");
+                       dev_kfree_skb(skb);
+                       /* Return ownership to EMAC */
+                       rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
                        stats->rx_errors++;
+                       stats->rx_dropped++;
                        continue;
                }
+
+               /* unmap previously mapped skb */
+               dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+                                dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+               pktlen = info & LEN_MASK;
+               stats->rx_packets++;
+               stats->rx_bytes += pktlen;
+               skb_put(rx_buff->skb, pktlen);
+               rx_buff->skb->dev = ndev;
+               rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
+
+               netif_receive_skb(rx_buff->skb);
+
+               rx_buff->skb = skb;
                dma_unmap_addr_set(rx_buff, addr, addr);
                dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
 
@@ -258,6 +269,53 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
        return work_done;
 }
 
+/**
+ * arc_emac_rx_miss_handle - handle R_MISS register
+ * @ndev:      Pointer to the net_device structure.
+ */
+static void arc_emac_rx_miss_handle(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       unsigned int miss;
+
+       miss = arc_reg_get(priv, R_MISS);
+       if (miss) {
+               stats->rx_errors += miss;
+               stats->rx_missed_errors += miss;
+               priv->rx_missed_errors += miss;
+       }
+}
+
+/**
+ * arc_emac_rx_stall_check - check RX stall
+ * @ndev:      Pointer to the net_device structure.
+ * @budget:    How many BDs requested to process on 1 call.
+ * @work_done: How many BDs processed
+ *
+ * Under certain conditions the EMAC stops reception of incoming packets and
+ * continuously increments the R_MISS register instead of saving data into
+ * the provided buffer. This function detects that condition and restarts
+ * the EMAC.
+ */
+static void arc_emac_rx_stall_check(struct net_device *ndev,
+                                   int budget, unsigned int work_done)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct arc_emac_bd *rxbd;
+
+       if (work_done)
+               priv->rx_missed_errors = 0;
+
+       if (priv->rx_missed_errors && budget) {
+               rxbd = &priv->rxbd[priv->last_rx_bd];
+               if (le32_to_cpu(rxbd->info) & FOR_EMAC) {
+                       arc_emac_restart(ndev);
+                       priv->rx_missed_errors = 0;
+               }
+       }
+}
+
 /**
  * arc_emac_poll - NAPI poll handler.
  * @napi:      Pointer to napi_struct structure.
@@ -272,6 +330,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
        unsigned int work_done;
 
        arc_emac_tx_clean(ndev);
+       arc_emac_rx_miss_handle(ndev);
 
        work_done = arc_emac_rx(ndev, budget);
        if (work_done < budget) {
@@ -279,6 +338,8 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
                arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
        }
 
+       arc_emac_rx_stall_check(ndev, budget, work_done);
+
        return work_done;
 }
 
@@ -320,6 +381,8 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
                if (status & MSER_MASK) {
                        stats->rx_missed_errors += 0x100;
                        stats->rx_errors += 0x100;
+                       priv->rx_missed_errors += 0x100;
+                       napi_schedule(&priv->napi);
                }
 
                if (status & RXCR_MASK) {
@@ -732,6 +795,63 @@ static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 }
 
 
+/**
+ * arc_emac_restart - Restart EMAC
+ * @ndev:      Pointer to net_device structure.
+ *
+ * This function does a hardware reset of the EMAC in order to restore
+ * reception of network packets.
+ */
+static void arc_emac_restart(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       int i;
+
+       if (net_ratelimit())
+               netdev_warn(ndev, "restarting stalled EMAC\n");
+
+       netif_stop_queue(ndev);
+
+       /* Disable interrupts */
+       arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
+
+       /* Disable EMAC */
+       arc_reg_clr(priv, R_CTRL, EN_MASK);
+
+       /* Return the sk_buff to system */
+       arc_free_tx_queue(ndev);
+
+       /* Clean Tx BD's */
+       priv->txbd_curr = 0;
+       priv->txbd_dirty = 0;
+       memset(priv->txbd, 0, TX_RING_SZ);
+
+       for (i = 0; i < RX_BD_NUM; i++) {
+               struct arc_emac_bd *rxbd = &priv->rxbd[i];
+               unsigned int info = le32_to_cpu(rxbd->info);
+
+               if (!(info & FOR_EMAC)) {
+                       stats->rx_errors++;
+                       stats->rx_dropped++;
+               }
+               /* Return ownership to EMAC */
+               rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
+       }
+       priv->last_rx_bd = 0;
+
+       /* Make sure info is visible to EMAC before enable */
+       wmb();
+
+       /* Enable interrupts */
+       arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
+
+       /* Enable EMAC */
+       arc_reg_or(priv, R_CTRL, EN_MASK);
+
+       netif_start_queue(ndev);
+}
+
 static const struct net_device_ops arc_emac_netdev_ops = {
        .ndo_open               = arc_emac_open,
        .ndo_stop               = arc_emac_stop,
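
The arc_emac RX rework above changes the refill order: a replacement skb is allocated and DMA-mapped before the received one is unmapped and passed up, so an allocation or mapping failure simply returns the descriptor to the MAC instead of leaving a hole in the ring. A condensed, illustrative sketch of that ordering inside an RX loop (the buffer-descriptor helpers and EXAMPLE_BUF_SIZE are hypothetical):

	new_skb = netdev_alloc_skb_ip_align(ndev, EXAMPLE_BUF_SIZE);
	if (!new_skb) {
		example_return_bd_to_mac(rxbd);		/* hypothetical: re-arm with the old buffer */
		stats->rx_dropped++;
		continue;
	}

	new_addr = dma_map_single(&ndev->dev, new_skb->data,
				  EXAMPLE_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(&ndev->dev, new_addr)) {
		dev_kfree_skb(new_skb);
		example_return_bd_to_mac(rxbd);
		stats->rx_dropped++;
		continue;
	}

	/* only now is it safe to hand the filled skb to the stack */
	dma_unmap_single(&ndev->dev, old_addr, EXAMPLE_BUF_SIZE, DMA_FROM_DEVICE);
	netif_receive_skb(old_skb);
	example_install_new_buffer(rxbd, new_skb, new_addr);	/* hypothetical */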
index e278e3d96ee010235ec4d21696a996b68cb3ef18..16f9bee992fedfab2069a2324c38fd4a5f142c93 100644 (file)
@@ -199,9 +199,11 @@ static int emac_rockchip_probe(struct platform_device *pdev)
 
        /* RMII interface needs always a rate of 50MHz */
        err = clk_set_rate(priv->refclk, 50000000);
-       if (err)
+       if (err) {
                dev_err(dev,
                        "failed to change reference clock rate (%d)\n", err);
+               goto out_regulator_disable;
+       }
 
        if (priv->soc_data->need_div_macclk) {
                priv->macclk = devm_clk_get(dev, "macclk");
@@ -220,19 +222,24 @@ static int emac_rockchip_probe(struct platform_device *pdev)
 
                /* RMII TX/RX needs always a rate of 25MHz */
                err = clk_set_rate(priv->macclk, 25000000);
-               if (err)
+               if (err) {
                        dev_err(dev,
                                "failed to change mac clock rate (%d)\n", err);
+                       goto out_clk_disable_macclk;
+               }
        }
 
        err = arc_emac_probe(ndev, interface);
        if (err) {
                dev_err(dev, "failed to probe arc emac (%d)\n", err);
-               goto out_regulator_disable;
+               goto out_clk_disable_macclk;
        }
 
        return 0;
 
+out_clk_disable_macclk:
+       if (priv->soc_data->need_div_macclk)
+               clk_disable_unprepare(priv->macclk);
 out_regulator_disable:
        if (priv->regulator)
                regulator_disable(priv->regulator);
index 8c9986f3fc0186701bd9ae81f27cbb1519e9f25f..94270f654b3b534b88ed3296f7556de0186de123 100644 (file)
@@ -222,9 +222,10 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
  * atl1c_phy_config - Timer Call-back
  * @data: pointer to netdev cast into an unsigned long
  */
-static void atl1c_phy_config(unsigned long data)
+static void atl1c_phy_config(struct timer_list *t)
 {
-       struct atl1c_adapter *adapter = (struct atl1c_adapter *) data;
+       struct atl1c_adapter *adapter = from_timer(adapter, t,
+                                                  phy_config_timer);
        struct atl1c_hw *hw = &adapter->hw;
        unsigned long flags;
 
@@ -2613,8 +2614,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter->mii.phy_id_mask = 0x1f;
        adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
        netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
-       setup_timer(&adapter->phy_config_timer, atl1c_phy_config,
-                       (unsigned long)adapter);
+       timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
        /* setup the private structure */
        err = atl1c_sw_init(adapter);
        if (err) {
index 4f7e195af0bc6dff79687547b9979375b35a17d6..9dc6da039a6d90ac4137a70e94b2c3213c2a4741 100644 (file)
@@ -130,9 +130,10 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
  * atl1e_phy_config - Timer Call-back
  * @data: pointer to netdev cast into an unsigned long
  */
-static void atl1e_phy_config(unsigned long data)
+static void atl1e_phy_config(struct timer_list *t)
 {
-       struct atl1e_adapter *adapter = (struct atl1e_adapter *) data;
+       struct atl1e_adapter *adapter = from_timer(adapter, t,
+                                                  phy_config_timer);
        struct atl1e_hw *hw = &adapter->hw;
        unsigned long flags;
 
@@ -2361,8 +2362,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
 
-       setup_timer(&adapter->phy_config_timer, atl1e_phy_config,
-                   (unsigned long)adapter);
+       timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0);
 
        /* get user settings */
        atl1e_check_options(adapter);
index 83d2db2abb45535c43f5921ab75791f9869ed4c0..b81fbf119bce314a9e2f282672dccd9712139eaa 100644 (file)
@@ -2575,9 +2575,10 @@ static irqreturn_t atl1_intr(int irq, void *data)
  * atl1_phy_config - Timer Call-back
  * @data: pointer to netdev cast into an unsigned long
  */
-static void atl1_phy_config(unsigned long data)
+static void atl1_phy_config(struct timer_list *t)
 {
-       struct atl1_adapter *adapter = (struct atl1_adapter *)data;
+       struct atl1_adapter *adapter = from_timer(adapter, t,
+                                                 phy_config_timer);
        struct atl1_hw *hw = &adapter->hw;
        unsigned long flags;
 
@@ -3071,8 +3072,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* assume we have no link for now */
        netif_carrier_off(netdev);
 
-       setup_timer(&adapter->phy_config_timer, atl1_phy_config,
-                   (unsigned long)adapter);
+       timer_setup(&adapter->phy_config_timer, atl1_phy_config, 0);
        adapter->phy_timer_pending = false;
 
        INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
index 77a1c03255defa77f2c662650d41a1ffc68eb7bb..db4bcc51023adf546ed0e8a9c8d075beec048faf 100644 (file)
@@ -1028,9 +1028,9 @@ static void atl2_tx_timeout(struct net_device *netdev)
  * atl2_watchdog - Timer Call-back
  * @data: pointer to netdev cast into an unsigned long
  */
-static void atl2_watchdog(unsigned long data)
+static void atl2_watchdog(struct timer_list *t)
 {
-       struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+       struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer);
 
        if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
                u32 drop_rxd, drop_rxs;
@@ -1053,9 +1053,10 @@ static void atl2_watchdog(unsigned long data)
  * atl2_phy_config - Timer Call-back
  * @data: pointer to netdev cast into an unsigned long
  */
-static void atl2_phy_config(unsigned long data)
+static void atl2_phy_config(struct timer_list *t)
 {
-       struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+       struct atl2_adapter *adapter = from_timer(adapter, t,
+                                                 phy_config_timer);
        struct atl2_hw *hw = &adapter->hw;
        unsigned long flags;
 
@@ -1434,11 +1435,9 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        atl2_check_options(adapter);
 
-       setup_timer(&adapter->watchdog_timer, atl2_watchdog,
-                   (unsigned long)adapter);
+       timer_setup(&adapter->watchdog_timer, atl2_watchdog, 0);
 
-       setup_timer(&adapter->phy_config_timer, atl2_phy_config,
-                   (unsigned long)adapter);
+       timer_setup(&adapter->phy_config_timer, atl2_phy_config, 0);
 
        INIT_WORK(&adapter->reset_task, atl2_reset_task);
        INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
index 42e44fc03a181b7564f1ff16ce05a545e135208d..e445ab724827f8d3c7c3770748902c08b76f8e13 100644 (file)
@@ -599,9 +599,9 @@ static void b44_check_phy(struct b44 *bp)
        }
 }
 
-static void b44_timer(unsigned long __opaque)
+static void b44_timer(struct timer_list *t)
 {
-       struct b44 *bp = (struct b44 *) __opaque;
+       struct b44 *bp = from_timer(bp, t, timer);
 
        spin_lock_irq(&bp->lock);
 
@@ -1474,7 +1474,7 @@ static int b44_open(struct net_device *dev)
                goto out;
        }
 
-       setup_timer(&bp->timer, b44_timer, (unsigned long)bp);
+       timer_setup(&bp->timer, b44_timer, 0);
        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);
 
index b3055a76dfbf5275a2b4dc6b864e5aa9704f913d..7919f6112ecf95786e9f9a180fa248ef258428a5 100644 (file)
@@ -6183,9 +6183,9 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
 }
 
 static void
-bnx2_timer(unsigned long data)
+bnx2_timer(struct timer_list *t)
 {
-       struct bnx2 *bp = (struct bnx2 *) data;
+       struct bnx2 *bp = from_timer(bp, t, timer);
 
        if (!netif_running(bp->dev))
                return;
@@ -8462,7 +8462,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        bnx2_set_default_link(bp);
        bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
 
-       setup_timer(&bp->timer, bnx2_timer, (unsigned long)bp);
+       timer_setup(&bp->timer, bnx2_timer, 0);
        bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
 
 #ifdef BCM_CNIC
index be9fd7d184d0a00acfcc3a2e5b4d06a52d2a761d..91e2a7560b48d572d26e8566c9a3b0667083d45d 100644 (file)
@@ -5761,9 +5761,9 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
                 bp->fw_drv_pulse_wr_seq);
 }
 
-static void bnx2x_timer(unsigned long data)
+static void bnx2x_timer(struct timer_list *t)
 {
-       struct bnx2x *bp = (struct bnx2x *) data;
+       struct bnx2x *bp = from_timer(bp, t, timer);
 
        if (!netif_running(bp->dev))
                return;
@@ -12421,7 +12421,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
        bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
 
-       setup_timer(&bp->timer, bnx2x_timer, (unsigned long)bp);
+       timer_setup(&bp->timer, bnx2x_timer, 0);
        bp->timer.expires = jiffies + bp->current_interval;
 
        if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
index 33c49ad697e47360a83abcfeb1a7c1dae008da5f..61ca4eb7c6fa983165fc0308b22f2e29fbf30ef6 100644 (file)
@@ -1883,7 +1883,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                         * here forever if we consistently cannot allocate
                         * buffers.
                         */
-                       else if (rc == -ENOMEM)
+                       else if (rc == -ENOMEM && budget)
                                rx_pkts++;
                        else if (rc == -EBUSY)  /* partial completion */
                                break;
@@ -1969,7 +1969,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
                                cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
 
                        rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
-                       if (likely(rc == -EIO))
+                       if (likely(rc == -EIO) && budget)
                                rx_pkts++;
                        else if (rc == -EBUSY)  /* partial completion */
                                break;
@@ -3368,6 +3368,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
        u16 cp_ring_id, len = 0;
        struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
        u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
+       struct hwrm_short_input short_input = {0};
 
        req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
        memset(resp, 0, PAGE_SIZE);
@@ -3376,7 +3377,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 
        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
-               struct hwrm_short_input short_input = {0};
 
                memcpy(short_cmd_req, req, msg_len);
                memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
@@ -6962,9 +6962,9 @@ static void bnxt_poll_controller(struct net_device *dev)
 }
 #endif
 
-static void bnxt_timer(unsigned long data)
+static void bnxt_timer(struct timer_list *t)
 {
-       struct bnxt *bp = (struct bnxt *)data;
+       struct bnxt *bp = from_timer(bp, t, timer);
        struct net_device *dev = bp->dev;
 
        if (!netif_running(dev))
@@ -7236,7 +7236,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
 
        bnxt_init_dflt_coal(bp);
 
-       setup_timer(&bp->timer, bnxt_timer, (unsigned long)bp);
+       timer_setup(&bp->timer, bnxt_timer, 0);
        bp->current_interval = BNXT_TIMER_INTERVAL;
 
        clear_bit(BNXT_STATE_OPEN, &bp->state);
@@ -8263,8 +8263,9 @@ static void bnxt_shutdown(struct pci_dev *pdev)
        if (netif_running(dev))
                dev_close(dev);
 
+       bnxt_ulp_shutdown(bp);
+
        if (system_state == SYSTEM_POWER_OFF) {
-               bnxt_ulp_shutdown(bp);
                bnxt_clear_int_mode(bp);
                pci_wake_from_d3(pdev, bp->wol);
                pci_set_power_state(pdev, PCI_D3hot);
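Note: the two "&& budget" checks above guard the case where the poll function runs with a zero budget (netpoll only wants TX completions); counting RX work there could make the handler report more work than it was given. A sketch of the budget convention in a NAPI poll handler, with demo_* placeholders rather than driver functions:

#include <linux/netdevice.h>

static void demo_clean_tx(struct napi_struct *napi) { }
static bool demo_rx_one(struct napi_struct *napi) { return false; }
static void demo_enable_irq(struct napi_struct *napi) { }

static int demo_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	demo_clean_tx(napi);		/* TX completions are always reaped */

	while (work_done < budget) {	/* with budget == 0, RX is skipped */
		if (!demo_rx_one(napi))
			break;
		work_done++;
	}

	if (work_done < budget && napi_complete_done(napi, work_done))
		demo_enable_irq(napi);

	return work_done;		/* never more than budget */
}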
index d5031f436f8341ac98d7b1074f22bec9e107245d..3d201d7324bdc7b2c50377e5da5b3ab3acb8a423 100644 (file)
@@ -56,7 +56,6 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
 {
        int ifindex = tcf_mirred_ifindex(tc_act);
        struct net_device *dev;
-       u16 dst_fid;
 
        dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
        if (!dev) {
@@ -64,15 +63,7 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
                return -EINVAL;
        }
 
-       /* find the FID from dev */
-       dst_fid = bnxt_flow_get_dst_fid(bp, dev);
-       if (dst_fid == BNXT_FID_INVALID) {
-               netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex);
-               return -EINVAL;
-       }
-
        actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
-       actions->dst_fid = dst_fid;
        actions->dst_dev = dev;
        return 0;
 }
@@ -160,13 +151,17 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
        if (rc)
                return rc;
 
-       /* Tunnel encap/decap action must be accompanied by a redirect action */
-       if ((actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP ||
-            actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) &&
-           !(actions->flags & BNXT_TC_ACTION_FLAG_FWD)) {
-               netdev_info(bp->dev,
-                           "error: no redir action along with encap/decap");
-               return -EINVAL;
+       if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
+               if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
+                       /* dst_fid is PF's fid */
+                       actions->dst_fid = bp->pf.fw_fid;
+               } else {
+                       /* find the FID from dst_dev */
+                       actions->dst_fid =
+                               bnxt_flow_get_dst_fid(bp, actions->dst_dev);
+                       if (actions->dst_fid == BNXT_FID_INVALID)
+                               return -EINVAL;
+               }
        }
 
        return rc;
@@ -532,10 +527,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
        }
 
        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
-               enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR |
-                          CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR;
+               enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
                ether_addr_copy(req.dst_macaddr, l2_info->dmac);
-               ether_addr_copy(req.src_macaddr, l2_info->smac);
        }
        if (l2_info->num_vlans) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
@@ -901,10 +894,10 @@ static void bnxt_tc_put_decap_handle(struct bnxt *bp,
 
 static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
                                       struct ip_tunnel_key *tun_key,
-                                      struct bnxt_tc_l2_key *l2_info,
-                                      struct net_device *real_dst_dev)
+                                      struct bnxt_tc_l2_key *l2_info)
 {
 #ifdef CONFIG_INET
+       struct net_device *real_dst_dev = bp->dev;
        struct flowi4 flow = { {0} };
        struct net_device *dst_dev;
        struct neighbour *nbr;
@@ -1008,14 +1001,13 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
         */
        tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
        tun_key.tp_dst = flow->tun_key.tp_dst;
-       rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info, bp->dev);
+       rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
        if (rc)
                goto put_decap;
 
-       decap_key->ttl = tun_key.ttl;
        decap_l2_info = &decap_node->l2_info;
+       /* decap smac is wildcarded */
        ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
-       ether_addr_copy(decap_l2_info->smac, l2_info.dmac);
        if (l2_info.num_vlans) {
                decap_l2_info->num_vlans = l2_info.num_vlans;
                decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
@@ -1095,8 +1087,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
        if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
                goto done;
 
-       rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info,
-                                        flow->actions.dst_dev);
+       rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
        if (rc)
                goto put_encap;
 
@@ -1169,6 +1160,15 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
        return 0;
 }
 
+static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
+                               u16 src_fid)
+{
+       if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
+               flow->src_fid = bp->pf.fw_fid;
+       else
+               flow->src_fid = src_fid;
+}
+
 /* Add a new flow or replace an existing flow.
  * Notes on locking:
  * There are essentially two critical sections here.
@@ -1204,7 +1204,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
        rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
        if (rc)
                goto free_node;
-       flow->src_fid = src_fid;
+
+       bnxt_tc_set_src_fid(bp, flow, src_fid);
 
        if (!bnxt_tc_can_offload(bp, flow)) {
                rc = -ENOSPC;
index d8d5f207c759fb51b970359cb52fba32dcbda638..d09c5a9c53b502788224102a6ae789cc42cc9b25 100644 (file)
@@ -10931,9 +10931,9 @@ static void tg3_chk_missed_msi(struct tg3 *tp)
        }
 }
 
-static void tg3_timer(unsigned long __opaque)
+static void tg3_timer(struct timer_list *t)
 {
-       struct tg3 *tp = (struct tg3 *) __opaque;
+       struct tg3 *tp = from_timer(tp, t, timer);
 
        spin_lock(&tp->lock);
 
@@ -11087,7 +11087,7 @@ static void tg3_timer_init(struct tg3 *tp)
        tp->asf_multiplier = (HZ / tp->timer_offset) *
                             TG3_FW_UPDATE_FREQ_SEC;
 
-       setup_timer(&tp->timer, tg3_timer, (unsigned long)tp);
+       timer_setup(&tp->timer, tg3_timer, 0);
 }
 
 static void tg3_timer_start(struct tg3 *tp)
@@ -14225,7 +14225,9 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
        /* Reset PHY, otherwise the read DMA engine will be in a mode that
         * breaks all requests to 256 bytes.
         */
-       if (tg3_asic_rev(tp) == ASIC_REV_57766)
+       if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
+           tg3_asic_rev(tp) == ASIC_REV_5717 ||
+           tg3_asic_rev(tp) == ASIC_REV_5719)
                reset_phy = true;
 
        err = tg3_restart_hw(tp, reset_phy);
index 6aa0eee88ea529963850828fc1ab46eb36d75095..a5eecd895a8253d753bea0fb273da0bf49005d13 100644 (file)
@@ -1113,7 +1113,7 @@ static int liquidio_watchdog(void *param)
                                dev_err(&oct->pci_dev->dev,
                                        "ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
                                        core);
-                                       err_msg_was_printed[core] = true;
+                               err_msg_was_printed[core] = true;
                        }
                }
 
index 8b2c31e2a2b0281d6ca8c70bbf3a520bdd15eb31..a3d12dbde95b6d71634c8502c6eb3509be057049 100644 (file)
@@ -1355,6 +1355,8 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 
        /* Offload checksum calculation to HW */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               if (ip.v4->version == 4)
+                       hdr->csum_l3 = 1; /* Enable IP csum calculation */
                hdr->l3_offset = skb_network_offset(skb);
                hdr->l4_offset = skb_transport_offset(skb);
 
index 8dc21c9f97168e1a0a12bcf2c0d6b9ea224fa42f..973c1fb70d09929f92fc47db0e3d60e3146eaff0 100644 (file)
@@ -123,9 +123,9 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
 }
 
 #ifdef CONFIG_RFS_ACCEL
-void enic_flow_may_expire(unsigned long data)
+void enic_flow_may_expire(struct timer_list *t)
 {
-       struct enic *enic = (struct enic *)data;
+       struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
        bool res;
        int j;
 
index 0ae83e091a629d6f6aae135560fc07dcee129633..8c4ce50da6e1f43d7faf5ab5cbed27b472561572 100644 (file)
@@ -16,12 +16,11 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id);
 #ifdef CONFIG_RFS_ACCEL
 int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                       u16 rxq_index, u32 flow_id);
-void enic_flow_may_expire(unsigned long data);
+void enic_flow_may_expire(struct timer_list *t);
 
 static inline void enic_rfs_timer_start(struct enic *enic)
 {
-       setup_timer(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire,
-                   (unsigned long)enic);
+       timer_setup(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire, 0);
        mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
 }
 
index 4a11baffe02d931fc54a9b9a0bc8d56e20dd800a..e130fb757e7bbeffeb78ac8139d84f1f1c688440 100644 (file)
@@ -1676,9 +1676,9 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
        return work_done;
 }
 
-static void enic_notify_timer(unsigned long data)
+static void enic_notify_timer(struct timer_list *t)
 {
-       struct enic *enic = (struct enic *)data;
+       struct enic *enic = from_timer(enic, t, notify_timer);
 
        enic_notify_check(enic);
 
@@ -2846,8 +2846,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Setup notification timer, HW reset task, and wq locks
         */
 
-       setup_timer(&enic->notify_timer, enic_notify_timer,
-                   (unsigned long)enic);
+       timer_setup(&enic->notify_timer, enic_notify_timer, 0);
 
        enic_set_rx_coal_setting(enic);
        INIT_WORK(&enic->reset, enic_reset);
index 6105738552134809fc0d0abf8caadfa76f5e1f8a..8184d2fca9be017b4d374e1ba5b3fab6e7d77e9e 100644 (file)
@@ -818,6 +818,12 @@ static void fec_enet_bd_init(struct net_device *dev)
                for (i = 0; i < txq->bd.ring_size; i++) {
                        /* Initialize the BD for every fragment in the page. */
                        bdp->cbd_sc = cpu_to_fec16(0);
+                       if (bdp->cbd_bufaddr &&
+                           !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+                               dma_unmap_single(&fep->pdev->dev,
+                                                fec32_to_cpu(bdp->cbd_bufaddr),
+                                                fec16_to_cpu(bdp->cbd_datlen),
+                                                DMA_TO_DEVICE);
                        if (txq->tx_skbuff[i]) {
                                dev_kfree_skb_any(txq->tx_skbuff[i]);
                                txq->tx_skbuff[i] = NULL;
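Note: the added dma_unmap_single() releases mappings left over from a previous run before the BD ring is reused; otherwise each restart leaks streaming DMA mappings. A short sketch of the idea with an illustrative descriptor type:

#include <linux/dma-mapping.h>

struct demo_bd {
	dma_addr_t addr;	/* 0 when the slot holds no mapping */
	unsigned int len;
};

static void demo_clean_slot(struct device *dev, struct demo_bd *bd)
{
	if (bd->addr) {
		dma_unmap_single(dev, bd->addr, bd->len, DMA_TO_DEVICE);
		bd->addr = 0;	/* mark the slot as unmapped */
	}
}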
index 5be52d89b182ec4d1c46cc692a0e446d6925c618..7f837006bb6adf04a844d08a2a88d0957f704443 100644 (file)
@@ -1378,9 +1378,11 @@ static int gfar_probe(struct platform_device *ofdev)
 
        gfar_init_addr_hash_table(priv);
 
-       /* Insert receive time stamps into padding alignment bytes */
+       /* Insert receive time stamps into padding alignment bytes, and
+        * plus 2 bytes padding to ensure the cpu alignment.
+        */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
-               priv->padding = 8;
+               priv->padding = 8 + DEFAULT_PADDING;
 
        if (dev->features & NETIF_F_IP_CSUM ||
            priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
@@ -1790,6 +1792,7 @@ static int init_phy(struct net_device *dev)
                GFAR_SUPPORTED_GBIT : 0;
        phy_interface_t interface;
        struct phy_device *phydev;
+       struct ethtool_eee edata;
 
        priv->oldlink = 0;
        priv->oldspeed = 0;
@@ -1814,6 +1817,10 @@ static int init_phy(struct net_device *dev)
        /* Add support for flow control, but don't advertise it by default */
        phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
 
+       /* disable EEE autoneg, EEE not supported by eTSEC */
+       memset(&edata, 0, sizeof(struct ethtool_eee));
+       phy_ethtool_set_eee(phydev, &edata);
+
        return 0;
 }
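Note: the init_phy() change stops the PHY from advertising EEE because the eTSEC MAC cannot support it; passing an all-zero ethtool_eee to phy_ethtool_set_eee() clears the advertisement. A minimal sketch of that call:

#include <linux/phy.h>
#include <linux/ethtool.h>
#include <linux/string.h>

/* Disable EEE advertisement on a PHY whose MAC cannot support it. */
static int demo_disable_eee(struct phy_device *phydev)
{
	struct ethtool_eee edata;

	memset(&edata, 0, sizeof(edata));	/* advertise nothing */
	return phy_ethtool_set_eee(phydev, &edata);
}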
 
index 81c1fac00d330e1fc2e16cb9f7756f954339af62..62f204f3231693807231c269183a49961020c7c8 100644 (file)
@@ -1346,9 +1346,9 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
        spin_unlock_bh(&mp->mib_counters_lock);
 }
 
-static void mib_counters_timer_wrapper(unsigned long _mp)
+static void mib_counters_timer_wrapper(struct timer_list *t)
 {
-       struct mv643xx_eth_private *mp = (void *)_mp;
+       struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
        mib_counters_update(mp);
        mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
@@ -2321,9 +2321,9 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
-static inline void oom_timer_wrapper(unsigned long data)
+static inline void oom_timer_wrapper(struct timer_list *t)
 {
-       struct mv643xx_eth_private *mp = (void *)data;
+       struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
 
        napi_schedule(&mp->napi);
 }
@@ -3178,8 +3178,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
        mib_counters_clear(mp);
 
-       setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper,
-                   (unsigned long)mp);
+       timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0);
        mp->mib_counters_timer.expires = jiffies + 30 * HZ;
 
        spin_lock_init(&mp->mib_counters_lock);
@@ -3188,7 +3187,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
        netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
 
-       setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);
+       timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
 
 
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
index c9798210fa0f6cef39aec36ed5c64fe6e805d7dc..0495487f7b42e7e80d416a2212fad2d8ca786f71 100644 (file)
@@ -344,7 +344,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
                        dev->regs + MVMDIO_ERR_INT_MASK);
 
        } else if (dev->err_interrupt == -EPROBE_DEFER) {
-               return -EPROBE_DEFER;
+               ret = -EPROBE_DEFER;
+               goto out_mdio;
        }
 
        if (pdev->dev.of_node)
index bc93b69cfd1edcf62d11cd24d41a9ca74b8f0dcc..a539263cd79ce4be8fcc0cbfe6bfdd196336cd38 100644 (file)
@@ -1214,6 +1214,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)
        val &= ~MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 
+       pp->link = 0;
+       pp->duplex = -1;
+       pp->speed = 0;
+
        udelay(200);
 }
 
@@ -1958,9 +1962,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 
                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+                       mvneta_rx_error(pp, rx_desc);
 err_drop_frame:
                        dev->stats.rx_errors++;
-                       mvneta_rx_error(pp, rx_desc);
                        /* leave the descriptor untouched */
                        continue;
                }
@@ -3011,7 +3015,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
        int queue;
 
-       for (queue = 0; queue < txq_number; queue++)
+       for (queue = 0; queue < rxq_number; queue++)
                mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
 }
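Note: the one-character fix above corrects a copy/paste loop bound: RX queue teardown iterated the TX queue count, so whenever the two counts differ it deinitializes the wrong number of RX queues. A trivial sketch of keeping setup and teardown bounds symmetric, with invented demo_* types:

struct demo_rxq { int id; };

struct demo_port {
	int rxq_count;
	struct demo_rxq *rxqs;
};

static void demo_rxq_deinit(struct demo_port *pp, struct demo_rxq *rxq) { }

static void demo_cleanup_rxqs(struct demo_port *pp)
{
	int queue;

	for (queue = 0; queue < pp->rxq_count; queue++)	/* not the TX count */
		demo_rxq_deinit(pp, &pp->rxqs[queue]);
}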
 
index d83a78be98a2cb90f5cea6b07eb257e11ad9ebed..634b2f41cc9e43ef66d2cf2c33e393221b92af23 100644 (file)
@@ -85,7 +85,7 @@
 
 /* RSS Registers */
 #define MVPP22_RSS_INDEX                       0x1500
-#define     MVPP22_RSS_INDEX_TABLE_ENTRY(idx)  ((idx) << 8)
+#define     MVPP22_RSS_INDEX_TABLE_ENTRY(idx)  (idx)
 #define     MVPP22_RSS_INDEX_TABLE(idx)                ((idx) << 8)
 #define     MVPP22_RSS_INDEX_QUEUE(idx)                ((idx) << 16)
 #define MVPP22_RSS_TABLE_ENTRY                 0x1508
@@ -5598,7 +5598,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
        u32 txq_dma;
 
        /* Allocate memory for TX descriptors */
-       aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+       aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
                                MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
                                &aggr_txq->descs_dma, GFP_KERNEL);
        if (!aggr_txq->descs)
index 91b1c154fd29e68157fea64b30c25186d903a047..7bbd86f08e5ff369f43af9b22818add8745cfe80 100644 (file)
@@ -362,9 +362,9 @@ static void rxq_refill(struct net_device *dev)
        }
 }
 
-static inline void rxq_refill_timer_wrapper(unsigned long data)
+static inline void rxq_refill_timer_wrapper(struct timer_list *t)
 {
-       struct pxa168_eth_private *pep = (void *)data;
+       struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
        napi_schedule(&pep->napi);
 }
 
@@ -1496,8 +1496,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
        netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
 
        memset(&pep->timeout, 0, sizeof(struct timer_list));
-       setup_timer(&pep->timeout, rxq_refill_timer_wrapper,
-                   (unsigned long)pep);
+       timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);
 
        pep->smi_bus = mdiobus_alloc();
        if (!pep->smi_bus) {
index eef35bf3e8490f3832e62270d129d12bf098fae3..31efc47c847eaf555d3c54b7a754a83c5e4f8d9b 100644 (file)
@@ -1495,9 +1495,9 @@ static int xm_check_link(struct net_device *dev)
  * get an interrupt when carrier is detected, need to poll for
  * link coming up.
  */
-static void xm_link_timer(unsigned long arg)
+static void xm_link_timer(struct timer_list *t)
 {
-       struct skge_port *skge = (struct skge_port *) arg;
+       struct skge_port *skge = from_timer(skge, t, link_timer);
        struct net_device *dev = skge->netdev;
        struct skge_hw *hw = skge->hw;
        int port = skge->port;
@@ -3897,7 +3897,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 
        /* Only used for Genesis XMAC */
        if (is_genesis(hw))
-           setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
+           timer_setup(&skge->link_timer, xm_link_timer, 0);
        else {
                dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
                                   NETIF_F_RXCSUM;
@@ -4081,7 +4081,6 @@ static void skge_remove(struct pci_dev *pdev)
        if (hw->ports > 1) {
                skge_write32(hw, B0_IMSK, 0);
                skge_read32(hw, B0_IMSK);
-               free_irq(pdev->irq, hw);
        }
        spin_unlock_irq(&hw->hw_lock);
 
index 1145cde2274a4cb778ba816716c3c55a5850d71e..9efe1771423cdde9b7832042b61f3ac60b347123 100644 (file)
@@ -2974,9 +2974,9 @@ static int sky2_rx_hung(struct net_device *dev)
        }
 }
 
-static void sky2_watchdog(unsigned long arg)
+static void sky2_watchdog(struct timer_list *t)
 {
-       struct sky2_hw *hw = (struct sky2_hw *) arg;
+       struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);
 
        /* Check for lost IRQ once a second */
        if (sky2_read32(hw, B0_ISRC)) {
@@ -5083,7 +5083,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                sky2_show_addr(dev1);
        }
 
-       setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
+       timer_setup(&hw->watchdog_timer, sky2_watchdog, 0);
        INIT_WORK(&hw->restart_work, sky2_restart);
 
        pci_set_drvdata(pdev, hw);
index 54adfd96785846f9e60a2ded11ab96bc0c196c7e..fc67e35b253e4e59c12227c3e24da9c0f5bae311 100644 (file)
@@ -1961,11 +1961,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
        /* set GE2 TUNE */
        regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
 
-       /* GE1, Force 1000M/FD, FC ON */
-       mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
-
-       /* GE2, Force 1000M/FD, FC ON */
-       mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
+       /* Set linkdown as the default for each GMAC. Its own MCR would be set
+        * up with the more appropriate value when mtk_phy_link_adjust call is
+        * being invoked.
+        */
+       for (i = 0; i < MTK_MAC_COUNT; i++)
+               mtk_w32(eth, 0, MTK_MAC_MCR(i));
 
        /* Indicates CDM to parse the MTK special tag from CPU
         * which also is working out for untag packets.
index e0eb695318e64ebcaf58d6edb5f9a57be6f9ddf6..1fa4849a6f560f2c3e15dddc13c03bb59031a5b7 100644 (file)
@@ -188,7 +188,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        struct net_device *dev = mdev->pndev[port];
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
-       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_cmd_mailbox *mailbox, *mailbox_priority;
        u64 in_mod = reset << 8 | port;
        int err;
        int i, counter_index;
@@ -198,6 +198,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
+
+       mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev);
+       if (IS_ERR(mailbox_priority)) {
+               mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+               return PTR_ERR(mailbox_priority);
+       }
+
        err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
                           MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
                           MLX4_CMD_NATIVE);
@@ -206,6 +213,28 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 
        mlx4_en_stats = mailbox->buf;
 
+       memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
+       counter_index = mlx4_get_default_counter_index(mdev->dev, port);
+       err = mlx4_get_counter_stats(mdev->dev, counter_index,
+                                    &tmp_counter_stats, reset);
+
+       /* 0xffs indicates invalid value */
+       memset(mailbox_priority->buf, 0xff,
+              sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
+
+       if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
+               memset(mailbox_priority->buf, 0,
+                      sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
+               err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma,
+                                  in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
+                                  0, MLX4_CMD_DUMP_ETH_STATS,
+                                  MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+               if (err)
+                       goto out;
+       }
+
+       flowstats = mailbox_priority->buf;
+
        spin_lock_bh(&priv->stats_lock);
 
        mlx4_en_fold_software_stats(dev);
@@ -345,31 +374,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
        priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);
 
-       spin_unlock_bh(&priv->stats_lock);
-
-       memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
-       counter_index = mlx4_get_default_counter_index(mdev->dev, port);
-       err = mlx4_get_counter_stats(mdev->dev, counter_index,
-                                    &tmp_counter_stats, reset);
-
-       /* 0xffs indicates invalid value */
-       memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
-
-       if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
-               memset(mailbox->buf, 0,
-                      sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
-               err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
-                                  in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
-                                  0, MLX4_CMD_DUMP_ETH_STATS,
-                                  MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
-               if (err)
-                       goto out;
-       }
-
-       flowstats = mailbox->buf;
-
-       spin_lock_bh(&priv->stats_lock);
-
        if (tmp_counter_stats.counter_mode == 0) {
                priv->pf_stats.rx_bytes   = be64_to_cpu(tmp_counter_stats.rx_bytes);
                priv->pf_stats.tx_bytes   = be64_to_cpu(tmp_counter_stats.tx_bytes);
@@ -410,6 +414,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 
 out:
        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+       mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority);
        return err;
 }
 
index 88699b18194618b0f6fd71af120bf2848560787a..946d9db7c8c2028c61b03b7f3d2f5d7243280ab5 100644 (file)
@@ -185,7 +185,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
                if (priv->mdev->dev->caps.flags &
                                        MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
                        buf[3] = mlx4_en_test_registers(priv);
-                       if (priv->port_up)
+                       if (priv->port_up && dev->mtu >= MLX4_SELFTEST_LB_MIN_MTU)
                                buf[4] = mlx4_en_test_loopback(priv);
                }
 
index 1856e279a7e0a40b9365da2016a1b52e9156479b..2b72677eccd48f5a45aa6f0e44cc45258cf24762 100644 (file)
 #define SMALL_PACKET_SIZE      (256 - NET_IP_ALIGN)
 #define HEADER_COPY_SIZE       (128 - NET_IP_ALIGN)
 #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
+#define PREAMBLE_LEN           8
+#define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
+                                 ETH_HLEN + PREAMBLE_LEN)
 
 #define MLX4_EN_MIN_MTU                46
 /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
index 04304dd894c6c3119eb24302fe599766dadd3708..606a0e0beeae6961ae4e8c7a357d737834be614e 100644 (file)
@@ -611,7 +611,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
                                                MLX4_MAX_PORTS;
                                else
                                        res_alloc->guaranteed[t] = 0;
-                               res_alloc->res_free -= res_alloc->guaranteed[t];
                                break;
                        default:
                                break;
index 1fffdebbc9e8994c70a19f4982f26d1de98be5f2..e9a1fbcc4adfa6e692902b551d0c535bfe019a9a 100644 (file)
@@ -362,7 +362,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
        case MLX5_CMD_OP_ALLOC_Q_COUNTER:
        case MLX5_CMD_OP_QUERY_Q_COUNTER:
-       case MLX5_CMD_OP_SET_RATE_LIMIT:
+       case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
        case MLX5_CMD_OP_QUERY_RATE_LIMIT:
        case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
@@ -505,7 +505,7 @@ const char *mlx5_command_str(int command)
        MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
-       MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+       MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
index c0872b3284cb405583642d71a0e2e540d4804b6f..543060c305a073c0457cc31ae7318f425a0e7c49 100644 (file)
@@ -82,6 +82,9 @@
        max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
 #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
 #define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
+#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
+       (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
+       MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
 
 #define MLX5_MPWRQ_LOG_WQE_SZ                  18
 #define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
@@ -590,6 +593,7 @@ struct mlx5e_channel {
        struct mlx5_core_dev      *mdev;
        struct hwtstamp_config    *tstamp;
        int                        ix;
+       int                        cpu;
 };
 
 struct mlx5e_channels {
@@ -935,8 +939,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
-void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
-                             struct mlx5e_params *params, u8 rq_type);
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+                              struct mlx5e_params *params,
+                              u8 rq_type);
 
 static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
 {
index c6d90b6dd80efa9a1ee82958adf5ef4dd3b4522d..9bcf38f4123b504637c080413078c23304d9e49e 100644 (file)
@@ -274,6 +274,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
                                    struct ieee_ets *ets)
 {
+       bool have_ets_tc = false;
        int bw_sum = 0;
        int i;
 
@@ -288,11 +289,14 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
        }
 
        /* Validate Bandwidth Sum */
-       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+                       have_ets_tc = true;
                        bw_sum += ets->tc_tx_bw[i];
+               }
+       }
 
-       if (bw_sum != 0 && bw_sum != 100) {
+       if (have_ets_tc && bw_sum != 100) {
                netdev_err(netdev,
                           "Failed to validate ETS: BW sum is illegal\n");
                return -EINVAL;
index 23425f02840581f6be591bc48cf8cccc8cc26443..8f05efa5c829bccb67ddd8b24dc2997adfe4a6c8 100644 (file)
@@ -1523,8 +1523,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
        new_channels.params = priv->channels.params;
        MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
 
-       mlx5e_set_rq_type_params(priv->mdev, &new_channels.params,
-                                new_channels.params.rq_wq_type);
+       new_channels.params.mpwqe_log_stride_sz =
+               MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val);
+       new_channels.params.mpwqe_log_num_strides =
+               MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz;
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                priv->channels.params = new_channels.params;
@@ -1536,6 +1538,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
                return err;
 
        mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+       mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n",
+                 MLX5E_GET_PFLAG(&priv->channels.params,
+                                 MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
+
        return 0;
 }
 
index d2b057a3e512c1144d741ccffd5bf47b5f138a01..d9d8227f195f0e151ba948e0622ea90a411817c4 100644 (file)
@@ -71,11 +71,6 @@ struct mlx5e_channel_param {
        struct mlx5e_cq_param      icosq_cq;
 };
 
-static int mlx5e_get_node(struct mlx5e_priv *priv, int ix)
-{
-       return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix);
-}
-
 static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
        return MLX5_CAP_GEN(mdev, striding_rq) &&
@@ -83,8 +78,8 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
                MLX5_CAP_ETH(mdev, reg_umr_sq);
 }
 
-void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
-                             struct mlx5e_params *params, u8 rq_type)
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+                              struct mlx5e_params *params, u8 rq_type)
 {
        params->rq_wq_type = rq_type;
        params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@ -93,10 +88,8 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
                params->log_rq_size = is_kdump_kernel() ?
                        MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
                        MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
-               params->mpwqe_log_stride_sz =
-                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
-                       MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
-                       MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
+               params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
+                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
                params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
                        params->mpwqe_log_stride_sz;
                break;
@@ -120,13 +113,14 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
 }
 
-static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
+                               struct mlx5e_params *params)
 {
        u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
                    !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
                    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
                    MLX5_WQ_TYPE_LINKED_LIST;
-       mlx5e_set_rq_type_params(mdev, params, rq_type);
+       mlx5e_init_rq_type_params(mdev, params, rq_type);
 }
 
 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -444,17 +438,16 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
        int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        int mtt_sz = mlx5e_get_wqe_mtt_sz();
        int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
-       int node = mlx5e_get_node(c->priv, c->ix);
        int i;
 
        rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
-                                       GFP_KERNEL, node);
+                                     GFP_KERNEL, cpu_to_node(c->cpu));
        if (!rq->mpwqe.info)
                goto err_out;
 
        /* We allocate more than mtt_sz as we will align the pointer */
-       rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz,
-                                       GFP_KERNEL, node);
+       rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+                                       cpu_to_node(c->cpu));
        if (unlikely(!rq->mpwqe.mtt_no_align))
                goto err_free_wqe_info;
 
@@ -562,7 +555,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
        int err;
        int i;
 
-       rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       rqp->wq.db_numa_node = cpu_to_node(c->cpu);
 
        err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
@@ -629,8 +622,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                rq->wqe.frag_info =
                        kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
-                                    GFP_KERNEL,
-                                    mlx5e_get_node(c->priv, c->ix));
+                                    GFP_KERNEL, cpu_to_node(c->cpu));
                if (!rq->wqe.frag_info) {
                        err = -ENOMEM;
                        goto err_rq_wq_destroy;
@@ -1000,13 +992,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;
 
-       param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
 
-       err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+       err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
@@ -1053,13 +1045,13 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 
-       param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
 
-       err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix));
+       err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
@@ -1126,13 +1118,13 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        if (MLX5_IPSEC_DEV(c->priv->mdev))
                set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
 
-       param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db    = &sq->wq.db[MLX5_SND_DBR];
 
-       err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+       err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
@@ -1504,8 +1496,8 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
        struct mlx5_core_dev *mdev = c->priv->mdev;
        int err;
 
-       param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix);
-       param->wq.db_numa_node  = mlx5e_get_node(c->priv, c->ix);
+       param->wq.buf_numa_node = cpu_to_node(c->cpu);
+       param->wq.db_numa_node  = cpu_to_node(c->cpu);
        param->eq_ix   = c->ix;
 
        err = mlx5e_alloc_cq_common(mdev, param, cq);
@@ -1604,6 +1596,11 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
        mlx5e_free_cq(cq);
 }
 
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+       return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_params *params,
                             struct mlx5e_channel_param *cparam)
@@ -1752,12 +1749,13 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 {
        struct mlx5e_cq_moder icocq_moder = {0, 0};
        struct net_device *netdev = priv->netdev;
+       int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
        unsigned int irq;
        int err;
        int eqn;
 
-       c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix));
+       c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;
 
@@ -1765,6 +1763,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        c->mdev     = priv->mdev;
        c->tstamp   = &priv->tstamp;
        c->ix       = ix;
+       c->cpu      = cpu;
        c->pdev     = &priv->mdev->pdev->dev;
        c->netdev   = priv->netdev;
        c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
@@ -1853,8 +1852,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_activate_txqsq(&c->sq[tc]);
        mlx5e_activate_rq(&c->rq);
-       netif_set_xps_queue(c->netdev,
-               mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix);
+       netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
 }
 
 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -3679,6 +3677,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
                                                     struct sk_buff *skb,
                                                     netdev_features_t features)
 {
+       unsigned int offset = 0;
        struct udphdr *udph;
        u8 proto;
        u16 port;
@@ -3688,7 +3687,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
                proto = ip_hdr(skb)->protocol;
                break;
        case htons(ETH_P_IPV6):
-               proto = ipv6_hdr(skb)->nexthdr;
+               proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
                break;
        default:
                goto out;
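Note: reading ipv6_hdr(skb)->nexthdr returns the first extension header type when one is present, so the tunnel feature check could misidentify the transport protocol; ipv6_find_hdr() with a negative target walks the extension-header chain and reports the upper-layer protocol instead. A small sketch of that lookup (the helper name is invented):

#include <linux/skbuff.h>
#include <net/ipv6.h>

static u8 demo_ipv6_l4_proto(struct sk_buff *skb)
{
	unsigned int offset = 0;
	int proto;

	/* target < 0: skip extension headers, return the upper-layer proto */
	proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);

	return proto < 0 ? 0 : (u8)proto;
}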
index 60771865c99c9bf4402d042a760887c4497e0036..e7e7cef2bde402be23b191873a5790ed23fd7843 100644 (file)
@@ -466,7 +466,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
                        break;
                case MLX5_EVENT_TYPE_CQ_ERROR:
                        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
-                       mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
+                       mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                                       cqn, eqe->data.cq_err.syndrome);
                        mlx5_cq_event(dev, cqn, eqe->type);
                        break;
@@ -775,7 +775,7 @@ err1:
        return err;
 }
 
-int mlx5_stop_eqs(struct mlx5_core_dev *dev)
+void mlx5_stop_eqs(struct mlx5_core_dev *dev)
 {
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;
@@ -784,22 +784,26 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
        if (MLX5_CAP_GEN(dev, pg)) {
                err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
                if (err)
-                       return err;
+                       mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
+                                     err);
        }
 #endif
 
        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
        if (err)
-               return err;
+               mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
+                             err);
 
-       mlx5_destroy_unmap_eq(dev, &table->async_eq);
+       err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
+       if (err)
+               mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
+                             err);
        mlx5_cmd_use_polling(dev);
 
        err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        if (err)
-               mlx5_cmd_use_events(dev);
-
-       return err;
+               mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
+                             err);
 }
 
 int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
index 3c11d6e2160abeef5a893b7b81274a7ce315368c..14962969c5ba8c4462662eeb30ef10cbe1c27fa6 100644 (file)
@@ -66,6 +66,9 @@ static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size,
        u8 actual_size;
        int err;
 
+       if (!size)
+               return -EINVAL;
+
        if (!fdev->mdev)
                return -ENOTCONN;
 
@@ -95,6 +98,9 @@ static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size,
        u8 actual_size;
        int err;
 
+       if (!size)
+               return -EINVAL;
+
        if (!fdev->mdev)
                return -ENOTCONN;
 
index c70fd663a63301e7e89ef9ee00d37c7075fe1a0b..dfaad9ecb2b8f155c5cdf30451c572b2d10f1d37 100644 (file)
@@ -174,6 +174,8 @@ static void del_hw_fte(struct fs_node *node);
 static void del_sw_flow_table(struct fs_node *node);
 static void del_sw_flow_group(struct fs_node *node);
 static void del_sw_fte(struct fs_node *node);
+static void del_sw_prio(struct fs_node *node);
+static void del_sw_ns(struct fs_node *node);
 /* Delete rule (destination) is special case that 
  * requires to lock the FTE for all the deletion process.
  */
@@ -408,6 +410,16 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
        return NULL;
 }
 
+static void del_sw_ns(struct fs_node *node)
+{
+       kfree(node);
+}
+
+static void del_sw_prio(struct fs_node *node)
+{
+       kfree(node);
+}
+
 static void del_hw_flow_table(struct fs_node *node)
 {
        struct mlx5_flow_table *ft;
@@ -2064,7 +2076,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
                return ERR_PTR(-ENOMEM);
 
        fs_prio->node.type = FS_TYPE_PRIO;
-       tree_init_node(&fs_prio->node, NULL, NULL);
+       tree_init_node(&fs_prio->node, NULL, del_sw_prio);
        tree_add_node(&fs_prio->node, &ns->node);
        fs_prio->num_levels = num_levels;
        fs_prio->prio = prio;
@@ -2090,7 +2102,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
                return ERR_PTR(-ENOMEM);
 
        fs_init_namespace(ns);
-       tree_init_node(&ns->node, NULL, NULL);
+       tree_init_node(&ns->node, NULL, del_sw_ns);
        tree_add_node(&ns->node, &prio->node);
        list_add_tail(&ns->node.list, &prio->node.children);
 
index 1a0e797ad001ad672c954c228350ff9bcdea125b..21d29f7936f6c5d1e26c6e0d3f10644fd0f096c8 100644 (file)
@@ -241,7 +241,7 @@ static void print_health_info(struct mlx5_core_dev *dev)
        u32 fw;
        int i;
 
-       /* If the syndrom is 0, the device is OK and no need to print buffer */
+       /* If the syndrome is 0, the device is OK and no need to print buffer */
        if (!ioread8(&h->synd))
                return;
 
index d2a66dc4adc6d2933cfbc60c28cd49c67716a010..8812d7208e8f3522500b3f3e971b4a7341b22c8f 100644 (file)
@@ -57,7 +57,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params)
 {
        /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
-       mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
+       mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
 
        /* RQ size in ipoib by default is 512 */
        params->log_rq_size = is_kdump_kernel() ?
index 5f323442cc5ac009d5006438d93183e96b85d0d9..8a89c7e8cd631f2e14cb7cbac99a8983964b7eda 100644 (file)
@@ -317,9 +317,6 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
 {
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_eq_table *table = &priv->eq_table;
-       struct irq_affinity irqdesc = {
-               .pre_vectors = MLX5_EQ_VEC_COMP_BASE,
-       };
        int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
        int nvec;
 
@@ -333,10 +330,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
        if (!priv->irq_info)
                goto err_free_msix;
 
-       nvec = pci_alloc_irq_vectors_affinity(dev->pdev,
+       nvec = pci_alloc_irq_vectors(dev->pdev,
                        MLX5_EQ_VEC_COMP_BASE + 1, nvec,
-                       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
-                       &irqdesc);
+                       PCI_IRQ_MSIX);
        if (nvec < 0)
                return nvec;
 
@@ -622,6 +618,63 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
        return (u64)timer_l | (u64)timer_h1 << 32;
 }
 
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+       struct mlx5_priv *priv  = &mdev->priv;
+       int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+
+       if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+               mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+               return -ENOMEM;
+       }
+
+       cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
+                       priv->irq_info[i].mask);
+
+       if (IS_ENABLED(CONFIG_SMP) &&
+           irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+               mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
+
+       return 0;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+       struct mlx5_priv *priv  = &mdev->priv;
+       int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+
+       irq_set_affinity_hint(irq, NULL);
+       free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+               err = mlx5_irq_set_affinity_hint(mdev, i);
+               if (err)
+                       goto err_out;
+       }
+
+       return 0;
+
+err_out:
+       for (i--; i >= 0; i--)
+               mlx5_irq_clear_affinity_hint(mdev, i);
+
+       return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+       int i;
+
+       for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+               mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn)
 {
@@ -1097,6 +1150,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
                goto err_stop_eqs;
        }
 
+       err = mlx5_irq_set_affinity_hints(dev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+               goto err_affinity_hints;
+       }
+
        err = mlx5_init_fs(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to init flow steering\n");
@@ -1154,6 +1213,9 @@ err_sriov:
        mlx5_cleanup_fs(dev);
 
 err_fs:
+       mlx5_irq_clear_affinity_hints(dev);
+
+err_affinity_hints:
        free_comp_eqs(dev);
 
 err_stop_eqs:
@@ -1222,6 +1284,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 
        mlx5_sriov_detach(dev);
        mlx5_cleanup_fs(dev);
+       mlx5_irq_clear_affinity_hints(dev);
        free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
        mlx5_put_uars_page(dev, priv->uar);
index db9e665ab10474f934131b5c2a8fa2173fa5feb6..889130edb71525ecd1f46e88a11b2d3fa0ef843f 100644 (file)
@@ -213,8 +213,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 err_cmd:
        memset(din, 0, sizeof(din));
        memset(dout, 0, sizeof(dout));
-       MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
-       MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+       MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+       MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
        mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
        return err;
 }
index e651e4c02867740d35c07bfcf485860f26ad6409..d3c33e9eea7292412974802c4c38ded8898ed55c 100644 (file)
@@ -125,16 +125,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
        return ret_entry;
 }
 
-static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
+static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
                                   u32 rate, u16 index)
 {
-       u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]   = {0};
-       u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
+       u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)]   = {0};
+       u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
 
-       MLX5_SET(set_rate_limit_in, in, opcode,
-                MLX5_CMD_OP_SET_RATE_LIMIT);
-       MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
-       MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
+       MLX5_SET(set_pp_rate_limit_in, in, opcode,
+                MLX5_CMD_OP_SET_PP_RATE_LIMIT);
+       MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
+       MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
@@ -173,7 +173,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
                entry->refcount++;
        } else {
                /* new rate limit */
-               err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
+               err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);
                if (err) {
                        mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
                                      rate, err);
@@ -209,7 +209,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
        entry->refcount--;
        if (!entry->refcount) {
                /* need to remove rate */
-               mlx5_set_rate_limit_cmd(dev, 0, entry->index);
+               mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);
                entry->rate = 0;
        }
 
@@ -262,8 +262,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
        /* Clear all configured rates */
        for (i = 0; i < table->max_size; i++)
                if (table->rl_entry[i].rate)
-                       mlx5_set_rate_limit_cmd(dev, 0,
-                                               table->rl_entry[i].index);
+                       mlx5_set_pp_rate_limit_cmd(dev, 0,
+                                                  table->rl_entry[i].index);
 
        kfree(dev->priv.rl_table.rl_entry);
 }
index 07a9ba6cfc70a11f7b4c05c73c1b32a704b7e6ba..2f74953e4561511e23d8fe3219db89104e3dd9e3 100644 (file)
@@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
        struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
        struct mlx5e_vxlan *vxlan;
 
-       spin_lock(&vxlan_db->lock);
+       spin_lock_bh(&vxlan_db->lock);
        vxlan = radix_tree_lookup(&vxlan_db->tree, port);
-       spin_unlock(&vxlan_db->lock);
+       spin_unlock_bh(&vxlan_db->lock);
 
        return vxlan;
 }
@@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
        struct mlx5e_vxlan *vxlan;
        int err;
 
-       if (mlx5e_vxlan_lookup_port(priv, port))
+       mutex_lock(&priv->state_lock);
+       vxlan = mlx5e_vxlan_lookup_port(priv, port);
+       if (vxlan) {
+               atomic_inc(&vxlan->refcount);
                goto free_work;
+       }
 
        if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
                goto free_work;
@@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
                goto err_delete_port;
 
        vxlan->udp_port = port;
+       atomic_set(&vxlan->refcount, 1);
 
-       spin_lock_irq(&vxlan_db->lock);
+       spin_lock_bh(&vxlan_db->lock);
        err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
-       spin_unlock_irq(&vxlan_db->lock);
+       spin_unlock_bh(&vxlan_db->lock);
        if (err)
                goto err_free;
 
@@ -113,35 +118,39 @@ err_free:
 err_delete_port:
        mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
 free_work:
+       mutex_unlock(&priv->state_lock);
        kfree(vxlan_work);
 }
 
-static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
 {
+       struct mlx5e_vxlan_work *vxlan_work =
+               container_of(work, struct mlx5e_vxlan_work, work);
+       struct mlx5e_priv *priv         = vxlan_work->priv;
        struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+       u16 port = vxlan_work->port;
        struct mlx5e_vxlan *vxlan;
+       bool remove = false;
 
-       spin_lock_irq(&vxlan_db->lock);
-       vxlan = radix_tree_delete(&vxlan_db->tree, port);
-       spin_unlock_irq(&vxlan_db->lock);
-
+       mutex_lock(&priv->state_lock);
+       spin_lock_bh(&vxlan_db->lock);
+       vxlan = radix_tree_lookup(&vxlan_db->tree, port);
        if (!vxlan)
-               return;
-
-       mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
-
-       kfree(vxlan);
-}
+               goto out_unlock;
 
-static void mlx5e_vxlan_del_port(struct work_struct *work)
-{
-       struct mlx5e_vxlan_work *vxlan_work =
-               container_of(work, struct mlx5e_vxlan_work, work);
-       struct mlx5e_priv *priv = vxlan_work->priv;
-       u16 port = vxlan_work->port;
+       if (atomic_dec_and_test(&vxlan->refcount)) {
+               radix_tree_delete(&vxlan_db->tree, port);
+               remove = true;
+       }
 
-       __mlx5e_vxlan_core_del_port(priv, port);
+out_unlock:
+       spin_unlock_bh(&vxlan_db->lock);
 
+       if (remove) {
+               mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+               kfree(vxlan);
+       }
+       mutex_unlock(&priv->state_lock);
        kfree(vxlan_work);
 }
 
@@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
        struct mlx5e_vxlan *vxlan;
        unsigned int port = 0;
 
-       spin_lock_irq(&vxlan_db->lock);
+       /* Lockless: we are the only radix-tree consumer and the wq is disabled */
        while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
                port = vxlan->udp_port;
-               spin_unlock_irq(&vxlan_db->lock);
-               __mlx5e_vxlan_core_del_port(priv, (u16)port);
-               spin_lock_irq(&vxlan_db->lock);
+               radix_tree_delete(&vxlan_db->tree, port);
+               mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+               kfree(vxlan);
        }
-       spin_unlock_irq(&vxlan_db->lock);
 }
index 5def12c048e38992e7edd9f870233e369ef4580e..5ef6ae7d568abcd1410bc403b526628a634799cb 100644 (file)
@@ -36,6 +36,7 @@
 #include "en.h"
 
 struct mlx5e_vxlan {
+       atomic_t refcount;
        u16 udp_port;
 };
 
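
The two vxlan.c hunks above replace the "delete on first notification" behaviour with a per-port reference count, matched by the refcount field added to struct mlx5e_vxlan here. Below is a minimal, hypothetical sketch of the same pattern; the names (port_entry, port_db) are illustrative, the tree is assumed to be initialized with INIT_RADIX_TREE(&db->tree, GFP_ATOMIC) so insertion is legal under the BH spinlock, and the real driver additionally serializes add/del under priv->state_lock and issues the firmware add/del port commands.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct port_entry {
        atomic_t refcount;
        u16 udp_port;
};

struct port_db {
        spinlock_t lock;                /* protects tree */
        struct radix_tree_root tree;
};

/* Take a reference on a port, allocating the entry on first use. */
static int port_db_add(struct port_db *db, u16 port)
{
        struct port_entry *e;
        int err;

        spin_lock_bh(&db->lock);
        e = radix_tree_lookup(&db->tree, port);
        if (e)
                atomic_inc(&e->refcount);
        spin_unlock_bh(&db->lock);
        if (e)
                return 0;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;
        e->udp_port = port;
        atomic_set(&e->refcount, 1);

        spin_lock_bh(&db->lock);
        err = radix_tree_insert(&db->tree, port, e);
        spin_unlock_bh(&db->lock);
        if (err)
                kfree(e);
        return err;
}

/* Drop a reference; free the entry only when the last user is gone. */
static void port_db_del(struct port_db *db, u16 port)
{
        struct port_entry *e;
        bool remove = false;

        spin_lock_bh(&db->lock);
        e = radix_tree_lookup(&db->tree, port);
        if (e && atomic_dec_and_test(&e->refcount)) {
                radix_tree_delete(&db->tree, port);
                remove = true;
        }
        spin_unlock_bh(&db->lock);

        if (remove)
                kfree(e);
}
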
index 2d0897b7d86035286666e38ad4e41ab63fb4746b..9bd8d28de1522906b92021a8cf2c476b79c35a56 100644 (file)
@@ -4300,6 +4300,7 @@ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
 {
+       u16 vid = 1;
        int err;
 
        err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
@@ -4312,8 +4313,19 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
                                     true, false);
        if (err)
                goto err_port_vlan_set;
+
+       for (; vid <= VLAN_N_VID - 1; vid++) {
+               err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
+                                                    vid, false);
+               if (err)
+                       goto err_vid_learning_set;
+       }
+
        return 0;
 
+err_vid_learning_set:
+       for (vid--; vid >= 1; vid--)
+               mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
 err_port_vlan_set:
        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
 err_port_stp_set:
@@ -4323,6 +4335,12 @@ err_port_stp_set:
 
 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
 {
+       u16 vid;
+
+       for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
+               mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
+                                              vid, true);
+
        mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
                               false, false);
        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
index 72ef4f8025f00ff8810c2955b25b7f3baec49be1..be657b8533f04922a61a2f3a4b1aeddf3137cdf5 100644 (file)
@@ -2436,25 +2436,16 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
        rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
 }
 
-static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
-                                   const struct mlxsw_sp_rif *rif)
-{
-       char rauht_pl[MLXSW_REG_RAUHT_LEN];
-
-       mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
-                            rif->rif_index, rif->addr);
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
-}
-
 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_rif *rif)
 {
        struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
 
-       mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
        list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
-                                rif_list_node)
+                                rif_list_node) {
+               mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
                mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+       }
 }
 
 enum mlxsw_sp_nexthop_type {
index b171ed2015fe479b6d7d099f14e188a3dd8cda00..2521c8c40015de32abbb3f30dbf8f9d527126b6f 100644 (file)
@@ -3501,7 +3501,7 @@ static void myri10ge_watchdog(struct work_struct *work)
  * cannot detect a NIC with a parity error in a timely fashion if the
  * NIC is lightly loaded.
  */
-static void myri10ge_watchdog_timer(unsigned long arg)
+static void myri10ge_watchdog_timer(struct timer_list *t)
 {
        struct myri10ge_priv *mgp;
        struct myri10ge_slice_state *ss;
@@ -3509,7 +3509,7 @@ static void myri10ge_watchdog_timer(unsigned long arg)
        u32 rx_pause_cnt;
        u16 cmd;
 
-       mgp = (struct myri10ge_priv *)arg;
+       mgp = from_timer(mgp, t, watchdog_timer);
 
        rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
        busy_slice_cnt = 0;
@@ -3930,8 +3930,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_save_state(pdev);
 
        /* Setup the watchdog timer */
-       setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
-                   (unsigned long)mgp);
+       timer_setup(&mgp->watchdog_timer, myri10ge_watchdog_timer, 0);
 
        netdev->ethtool_ops = &myri10ge_ethtool_ops;
        INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
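
This hunk, like the pch_gbe, qla3xxx, pasemi, rocker, stmmac, xlgmac, cpsw, netcp-ethss and spider_net hunks further down, is part of the tree-wide timer API conversion: the callback now takes the struct timer_list pointer and recovers its container with from_timer() instead of casting an unsigned long argument, and setup_timer() becomes timer_setup(). The tlan and scc hunks drop their (TIMER_FUNC_TYPE) casts for the same reason. A minimal sketch of the pattern, with hypothetical names (my_adapter, my_watchdog):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_adapter {
        struct timer_list watchdog_timer;
        /* ... other driver state ... */
};

/* New-style callback: the timer itself is the argument. */
static void my_watchdog(struct timer_list *t)
{
        struct my_adapter *adapter = from_timer(adapter, t, watchdog_timer);

        /* ... periodic work on "adapter" ... */
        mod_timer(&adapter->watchdog_timer, jiffies + HZ);
}

static void my_adapter_start(struct my_adapter *adapter)
{
        /* Replaces setup_timer(&timer, callback, (unsigned long)adapter). */
        timer_setup(&adapter->watchdog_timer, my_watchdog, 0);
        mod_timer(&adapter->watchdog_timer, jiffies + HZ);
}
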
index e379b78e86efa7c02dca2bc95afc1f79afc7800a..13190aa09faf748c16e1f00f7aee9097442aef85 100644 (file)
@@ -82,10 +82,33 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
        return nfp_net_ebpf_capable(nn) ? "BPF" : "";
 }
 
+static int
+nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
+{
+       int err;
+
+       nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL);
+       if (!nn->app_priv)
+               return -ENOMEM;
+
+       err = nfp_app_nic_vnic_alloc(app, nn, id);
+       if (err)
+               goto err_free_priv;
+
+       return 0;
+err_free_priv:
+       kfree(nn->app_priv);
+       return err;
+}
+
 static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
 {
+       struct nfp_bpf_vnic *bv = nn->app_priv;
+
        if (nn->dp.bpf_offload_xdp)
                nfp_bpf_xdp_offload(app, nn, NULL);
+       WARN_ON(bv->tc_prog);
+       kfree(bv);
 }
 
 static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
@@ -93,6 +116,9 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
 {
        struct tc_cls_bpf_offload *cls_bpf = type_data;
        struct nfp_net *nn = cb_priv;
+       struct bpf_prog *oldprog;
+       struct nfp_bpf_vnic *bv;
+       int err;
 
        if (type != TC_SETUP_CLSBPF ||
            !tc_can_offload(nn->dp.netdev) ||
@@ -100,8 +126,6 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
            cls_bpf->common.protocol != htons(ETH_P_ALL) ||
            cls_bpf->common.chain_index)
                return -EOPNOTSUPP;
-       if (nn->dp.bpf_offload_xdp)
-               return -EBUSY;
 
        /* Only support TC direct action */
        if (!cls_bpf->exts_integrated ||
@@ -110,16 +134,25 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
                return -EOPNOTSUPP;
        }
 
-       switch (cls_bpf->command) {
-       case TC_CLSBPF_REPLACE:
-               return nfp_net_bpf_offload(nn, cls_bpf->prog, true);
-       case TC_CLSBPF_ADD:
-               return nfp_net_bpf_offload(nn, cls_bpf->prog, false);
-       case TC_CLSBPF_DESTROY:
-               return nfp_net_bpf_offload(nn, NULL, true);
-       default:
+       if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
                return -EOPNOTSUPP;
+
+       bv = nn->app_priv;
+       oldprog = cls_bpf->oldprog;
+
+       /* Don't remove if oldprog doesn't match driver's state */
+       if (bv->tc_prog != oldprog) {
+               oldprog = NULL;
+               if (!cls_bpf->prog)
+                       return 0;
        }
+
+       err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog);
+       if (err)
+               return err;
+
+       bv->tc_prog = cls_bpf->prog;
+       return 0;
 }
 
 static int nfp_bpf_setup_tc_block(struct net_device *netdev,
@@ -167,7 +200,7 @@ const struct nfp_app_type app_bpf = {
 
        .extra_cap      = nfp_bpf_extra_cap,
 
-       .vnic_alloc     = nfp_app_nic_vnic_alloc,
+       .vnic_alloc     = nfp_bpf_vnic_alloc,
        .vnic_free      = nfp_bpf_vnic_free,
 
        .setup_tc       = nfp_bpf_setup_tc,
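
The setup_tc block callback above now receives a single TC_CLSBPF_OFFLOAD command and decides between add, replace and destroy by comparing the program it has cached per vNIC against cls_bpf->oldprog. A hedged sketch of that decision, with illustrative names (my_vnic, my_hw_offload standing in for nfp_net_bpf_offload()):

struct bpf_prog;

struct my_vnic {
        struct bpf_prog *tc_prog;       /* program currently offloaded */
};

/* Stand-in for the hardware load/replace/clear call. */
static int my_hw_offload(struct my_vnic *v, struct bpf_prog *prog,
                         struct bpf_prog *oldprog)
{
        return 0;
}

static int my_clsbpf_offload(struct my_vnic *v, struct bpf_prog *prog,
                             struct bpf_prog *oldprog)
{
        int err;

        /* The stack's notion of the old program does not match ours:
         * ignore a stale destroy, and treat a replace as a fresh add.
         */
        if (v->tc_prog != oldprog) {
                oldprog = NULL;
                if (!prog)
                        return 0;
        }

        err = my_hw_offload(v, prog, oldprog);
        if (err)
                return err;

        v->tc_prog = prog;
        return 0;
}
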
index 082a15f6dfb5b9ba806316bd4f272006c93749fb..57b6043177a3891c49096ab85906ee539b3d5ecb 100644 (file)
@@ -172,6 +172,14 @@ struct nfp_prog {
        struct list_head insns;
 };
 
+/**
+ * struct nfp_bpf_vnic - per-vNIC BPF priv structure
+ * @tc_prog:   currently loaded cls_bpf program
+ */
+struct nfp_bpf_vnic {
+       struct bpf_prog *tc_prog;
+};
+
 int nfp_bpf_jit(struct nfp_prog *prog);
 
 extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
index 924a05e05da027523e7845728c6ddc4d41accc0b..78b36c67c232f661d5c2633034c5ada015bf1aeb 100644 (file)
@@ -84,16 +84,13 @@ nfp_repr_phy_port_get_stats64(struct nfp_port *port,
 {
        u8 __iomem *mem = port->eth_stats;
 
-       /* TX and RX stats are flipped as we are returning the stats as seen
-        * at the switch port corresponding to the phys port.
-        */
-       stats->tx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
-       stats->tx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
-       stats->tx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
+       stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
+       stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
+       stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
 
-       stats->rx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
-       stats->rx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
-       stats->rx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
+       stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
+       stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
+       stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
 }
 
 static void
index 457ee80307ea6612a8a085c981d5897f1dbd9901..40e52ffb732f4fc9f9a10b9444e8c4b2d2722e87 100644 (file)
@@ -1089,9 +1089,10 @@ static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
  * pch_gbe_watchdog - Watchdog process
  * @data:  Board private structure
  */
-static void pch_gbe_watchdog(unsigned long data)
+static void pch_gbe_watchdog(struct timer_list *t)
 {
-       struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
+       struct pch_gbe_adapter *adapter = from_timer(adapter, t,
+                                                    watchdog_timer);
        struct net_device *netdev = adapter->netdev;
        struct pch_gbe_hw *hw = &adapter->hw;
 
@@ -2644,8 +2645,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
                dev_err(&pdev->dev, "Invalid MAC address, "
                                    "interface disabled.\n");
        }
-       setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
-                   (unsigned long)adapter);
+       timer_setup(&adapter->watchdog_timer, pch_gbe_watchdog, 0);
 
        INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
 
index 49591d9c2e1b9f4bde7217e750396d99f032f302..c9a55b774935cd8900435e308c441d2283e5cb39 100644 (file)
@@ -943,9 +943,9 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
 
 #define TX_CLEAN_INTERVAL HZ
 
-static void pasemi_mac_tx_timer(unsigned long data)
+static void pasemi_mac_tx_timer(struct timer_list *t)
 {
-       struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data;
+       struct pasemi_mac_txring *txring = from_timer(txring, t, clean_timer);
        struct pasemi_mac *mac = txring->mac;
 
        pasemi_mac_clean_tx(txring);
@@ -1199,8 +1199,7 @@ static int pasemi_mac_open(struct net_device *dev)
        if (dev->phydev)
                phy_start(dev->phydev);
 
-       setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
-                   (unsigned long)mac->tx);
+       timer_setup(&mac->tx->clean_timer, pasemi_mac_tx_timer, 0);
        mod_timer(&mac->tx->clean_timer, jiffies + HZ);
 
        return 0;
index 05479d4354696d0a34a573e4cbbe72a41bbfd7c1..9e5264d8773b09a3cdda77a8cec066a44d3359a3 100644 (file)
@@ -3749,9 +3749,9 @@ static void ql_get_board_info(struct ql3_adapter *qdev)
        qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
 }
 
-static void ql3xxx_timer(unsigned long ptr)
+static void ql3xxx_timer(struct timer_list *t)
 {
-       struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
+       struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
        queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
 }
 
@@ -3891,7 +3891,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
        INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
        INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
 
-       setup_timer(&qdev->adapter_timer, ql3xxx_timer, (unsigned long)qdev);
+       timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0);
        qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
 
        if (!cards_found) {
index 18461fcb981501efd7015634999cb787041c01a7..53dbf1e163a85ea5bdfc571788c18ce0d6d0f7b3 100644 (file)
@@ -47,6 +47,7 @@
 #define MDIO_CLK_25_28                                               7
 
 #define MDIO_WAIT_TIMES                                           1000
+#define MDIO_STATUS_DELAY_TIME                                       1
 
 static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
 {
@@ -65,7 +66,7 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
 
        if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
                               !(reg & (MDIO_START | MDIO_BUSY)),
-                              100, MDIO_WAIT_TIMES * 100))
+                              MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
                return -EIO;
 
        return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
@@ -88,8 +89,8 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
        writel(reg, adpt->base + EMAC_MDIO_CTRL);
 
        if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
-                              !(reg & (MDIO_START | MDIO_BUSY)), 100,
-                              MDIO_WAIT_TIMES * 100))
+                              !(reg & (MDIO_START | MDIO_BUSY)),
+                              MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
                return -EIO;
 
        return 0;
index 70c92b649b299a1a16195e144e3da77cff25855c..38c924bdd32e46f3586eac77d5a63c361993fd09 100644 (file)
@@ -253,18 +253,18 @@ static int emac_open(struct net_device *netdev)
                return ret;
        }
 
-       ret = emac_mac_up(adpt);
+       ret = adpt->phy.open(adpt);
        if (ret) {
                emac_mac_rx_tx_rings_free_all(adpt);
                free_irq(irq->irq, irq);
                return ret;
        }
 
-       ret = adpt->phy.open(adpt);
+       ret = emac_mac_up(adpt);
        if (ret) {
-               emac_mac_down(adpt);
                emac_mac_rx_tx_rings_free_all(adpt);
                free_irq(irq->irq, irq);
+               adpt->phy.close(adpt);
                return ret;
        }
 
index 71bee1af71effaea4359f765df82c4654ca96567..df21e900f874036bfaf349fa334acf6c339acf3c 100644 (file)
@@ -195,6 +195,7 @@ err2:
 err1:
        rmnet_unregister_real_device(real_dev, port);
 err0:
+       kfree(ep);
        return err;
 }
 
index 29842ccc91a9d35ff49a0e5ac1cb61ec61fb3b49..08e4afc0ab39b42ff8585a466daf64d9f25fb703 100644 (file)
@@ -126,12 +126,12 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 
        if (skb_headroom(skb) < required_headroom) {
                if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
-                       return RMNET_MAP_CONSUMED;
+                       goto fail;
        }
 
        map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
        if (!map_header)
-               return RMNET_MAP_CONSUMED;
+               goto fail;
 
        if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
                if (mux_id == 0xff)
@@ -143,6 +143,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
        skb->protocol = htons(ETH_P_MAP);
 
        return RMNET_MAP_SUCCESS;
+
+fail:
+       kfree_skb(skb);
+       return RMNET_MAP_CONSUMED;
 }
 
 static void
index 2b962d349f5f415b0b38148a5d575a5720850fbe..009780df664b4a09ea3e88855b0e707ff0b24b10 100644 (file)
@@ -2308,32 +2308,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
        struct ravb_private *priv = netdev_priv(ndev);
        int ret = 0;
 
-       if (priv->wol_enabled) {
-               /* Reduce the usecount of the clock to zero and then
-                * restore it to its original value. This is done to force
-                * the clock to be re-enabled which is a workaround
-                * for renesas-cpg-mssr driver which do not enable clocks
-                * when resuming from PSCI suspend/resume.
-                *
-                * Without this workaround the driver fails to communicate
-                * with the hardware if WoL was enabled when the system
-                * entered PSCI suspend. This is due to that if WoL is enabled
-                * we explicitly keep the clock from being turned off when
-                * suspending, but in PSCI sleep power is cut so the clock
-                * is disabled anyhow, the clock driver is not aware of this
-                * so the clock is not turned back on when resuming.
-                *
-                * TODO: once the renesas-cpg-mssr suspend/resume is working
-                *       this clock dance should be removed.
-                */
-               clk_disable(priv->clk);
-               clk_disable(priv->clk);
-               clk_enable(priv->clk);
-               clk_enable(priv->clk);
-
-               /* Set reset mode to rearm the WoL logic */
+       /* If WoL is enabled set reset mode to rearm the WoL logic */
+       if (priv->wol_enabled)
                ravb_write(ndev, CCC_OPC_RESET, CCC);
-       }
 
        /* All register have been reset to default values.
         * Restore all registers which where setup at probe time and
index 7e060aa9fbed4057c2a00f389f4e411923acb6f0..75323000c3646bc781c12287367f8b455ada5a6a 100644 (file)
@@ -1149,7 +1149,8 @@ static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
                           entry, le32_to_cpu(txdesc->status));
                /* Free the original skb. */
                if (mdp->tx_skbuff[entry]) {
-                       dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+                       dma_unmap_single(&mdp->pdev->dev,
+                                        le32_to_cpu(txdesc->addr),
                                         le32_to_cpu(txdesc->len) >> 16,
                                         DMA_TO_DEVICE);
                        dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
@@ -1179,14 +1180,14 @@ static void sh_eth_ring_free(struct net_device *ndev)
                        if (mdp->rx_skbuff[i]) {
                                struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
 
-                               dma_unmap_single(&ndev->dev,
+                               dma_unmap_single(&mdp->pdev->dev,
                                                 le32_to_cpu(rxdesc->addr),
                                                 ALIGN(mdp->rx_buf_sz, 32),
                                                 DMA_FROM_DEVICE);
                        }
                }
                ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-               dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+               dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
                                  mdp->rx_desc_dma);
                mdp->rx_ring = NULL;
        }
@@ -1203,7 +1204,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
                sh_eth_tx_free(ndev, false);
 
                ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
-               dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+               dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
                                  mdp->tx_desc_dma);
                mdp->tx_ring = NULL;
        }
@@ -1245,9 +1246,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
 
                /* The size of the buffer is a multiple of 32 bytes. */
                buf_len = ALIGN(mdp->rx_buf_sz, 32);
-               dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
+               dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
                                          DMA_FROM_DEVICE);
-               if (dma_mapping_error(&ndev->dev, dma_addr)) {
+               if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
                        kfree_skb(skb);
                        break;
                }
@@ -1323,8 +1324,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
 
        /* Allocate all Rx descriptors. */
        rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-       mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
-                                         GFP_KERNEL);
+       mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
+                                         &mdp->rx_desc_dma, GFP_KERNEL);
        if (!mdp->rx_ring)
                goto ring_free;
 
@@ -1332,8 +1333,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
 
        /* Allocate all Tx descriptors. */
        tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
-       mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
-                                         GFP_KERNEL);
+       mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
+                                         &mdp->tx_desc_dma, GFP_KERNEL);
        if (!mdp->tx_ring)
                goto ring_free;
        return 0;
@@ -1527,7 +1528,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        mdp->rx_skbuff[entry] = NULL;
                        if (mdp->cd->rpadir)
                                skb_reserve(skb, NET_IP_ALIGN);
-                       dma_unmap_single(&ndev->dev, dma_addr,
+                       dma_unmap_single(&mdp->pdev->dev, dma_addr,
                                         ALIGN(mdp->rx_buf_sz, 32),
                                         DMA_FROM_DEVICE);
                        skb_put(skb, pkt_len);
@@ -1555,9 +1556,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        sh_eth_set_receive_align(skb);
-                       dma_addr = dma_map_single(&ndev->dev, skb->data,
+                       dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
                                                  buf_len, DMA_FROM_DEVICE);
-                       if (dma_mapping_error(&ndev->dev, dma_addr)) {
+                       if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
                                kfree_skb(skb);
                                break;
                        }
@@ -1891,6 +1892,16 @@ static int sh_eth_phy_init(struct net_device *ndev)
                return PTR_ERR(phydev);
        }
 
+       /* mask with MAC supported features */
+       if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
+               int err = phy_set_max_speed(phydev, SPEED_100);
+               if (err) {
+                       netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
+                       phy_disconnect(phydev);
+                       return err;
+               }
+       }
+
        phy_attached_info(phydev);
 
        return 0;
@@ -2441,9 +2452,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        /* soft swap. */
        if (!mdp->cd->hw_swap)
                sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
-       dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
+       dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
                                  DMA_TO_DEVICE);
-       if (dma_mapping_error(&ndev->dev, dma_addr)) {
+       if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
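
The recurring &ndev->dev to &mdp->pdev->dev substitution in the sh_eth hunks above matters because the DMA API must be given the device that actually owns the DMA mask and dma_ops (the platform device), rather than the net_device's embedded struct device; the old dma_alloc_coherent(NULL, ...) and dma_free_coherent(NULL, ...) calls had the same problem. A minimal sketch of the corrected mapping, with hypothetical names (my_priv, my_map_rx_buf):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

struct my_priv {
        struct platform_device *pdev;   /* the device that performs DMA */
};

static int my_map_rx_buf(struct net_device *ndev, struct sk_buff *skb,
                         size_t len, dma_addr_t *addr)
{
        struct my_priv *priv = netdev_priv(ndev);

        /* &priv->pdev->dev carries the DMA mask and ops; &ndev->dev does not */
        *addr = dma_map_single(&priv->pdev->dev, skb->data, len,
                               DMA_FROM_DEVICE);
        if (dma_mapping_error(&priv->pdev->dev, *addr))
                return -ENOMEM;

        return 0;
}
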
index 0653b70723a34f973d825d991b83009e7ccab78b..6d6fb8cf3e7c84a28b2de68fd25bc5b575df7659 100644 (file)
@@ -1983,9 +1983,9 @@ err_out:
        return err;
 }
 
-static void ofdpa_fdb_cleanup(unsigned long data)
+static void ofdpa_fdb_cleanup(struct timer_list *t)
 {
-       struct ofdpa *ofdpa = (struct ofdpa *)data;
+       struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
        struct ofdpa_port *ofdpa_port;
        struct ofdpa_fdb_tbl_entry *entry;
        struct hlist_node *tmp;
@@ -2368,8 +2368,7 @@ static int ofdpa_init(struct rocker *rocker)
        hash_init(ofdpa->neigh_tbl);
        spin_lock_init(&ofdpa->neigh_tbl_lock);
 
-       setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
-                   (unsigned long) ofdpa);
+       timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
        mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
 
        ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
index 0ea7e16f2e6e2c6d8106308e73327390e62074ce..9937a2450e573f8028046233cf82de2f51f16704 100644 (file)
@@ -77,6 +77,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
        }
 
        if (buffer->flags & EFX_TX_BUF_SKB) {
+               EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
                (*pkts_compl)++;
                (*bytes_compl) += buffer->skb->len;
                dev_consume_skb_any((struct sk_buff *)buffer->skb);
@@ -426,12 +427,14 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 {
        struct efx_tx_buffer *buffer;
+       unsigned int bytes_compl = 0;
+       unsigned int pkts_compl = 0;
 
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
-               efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
+               efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
        }
 }
 
index e1e5ac0537606f2192d553c85795428b18fd615d..ce2ea2d491acac195eefe3f01f8ec9df08d3e77c 100644 (file)
@@ -409,7 +409,7 @@ struct stmmac_desc_ops {
        /* get timestamp value */
         u64(*get_timestamp) (void *desc, u32 ats);
        /* get rx timestamp status */
-       int (*get_rx_timestamp_status) (void *desc, u32 ats);
+       int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
        /* Display ring */
        void (*display_ring)(void *head, unsigned int size, bool rx);
        /* set MSS via context descriptor */
index 61cb24810d101194c5285cf27edd963aa488473f..9e6db16af663b5a43c3c1eff26824fab3cb2df6c 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * dwmac-stm32.c - DWMAC Specific Glue layer for STM32 MCU
  *
- * Copyright (C) Alexandre Torgue 2015
- * Author:  Alexandre Torgue <alexandre.torgue@gmail.com>
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author:  Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
  * License terms:  GNU General Public License (GPL), version 2
  *
  */
index e5ff734d4f9b2ff9b56799cc803fdafe4f80ea1c..9eb7f65d8000d28190da780587dba562aed72152 100644 (file)
@@ -808,8 +808,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
                         val, reg);
 
        if (gmac->variant->soc_has_internal_phy) {
-               if (of_property_read_bool(priv->plat->phy_node,
-                                         "allwinner,leds-active-low"))
+               if (of_property_read_bool(node, "allwinner,leds-active-low"))
                        reg |= H3_EPHY_LED_POL;
                else
                        reg &= ~H3_EPHY_LED_POL;
index 4b286e27c4ca5cdbbb7c457e31bef1b2e9e7bd94..7e089bf906b4f316034403f9a44fbfd191ee09eb 100644 (file)
@@ -258,7 +258,8 @@ static int dwmac4_rx_check_timestamp(void *desc)
        return ret;
 }
 
-static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
+static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
+                                                u32 ats)
 {
        struct dma_desc *p = (struct dma_desc *)desc;
        int ret = -EINVAL;
@@ -270,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
 
                        /* Check if timestamp is OK from context descriptor */
                        do {
-                               ret = dwmac4_rx_check_timestamp(desc);
+                               ret = dwmac4_rx_check_timestamp(next_desc);
                                if (ret < 0)
                                        goto exit;
                                i++;
index 7546b3664113a3d776fe19094df71b2adfb99e98..2a828a31281423082995bc332ec51a3f20989804 100644 (file)
@@ -400,7 +400,8 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats)
        return ns;
 }
 
-static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
+static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
+                                           u32 ats)
 {
        if (ats) {
                struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
index f817f8f365696d3388e73f85710d30dde43a7d41..db4cee57bb2465eb98fe38cb947624e779da4673 100644 (file)
@@ -265,7 +265,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats)
        return ns;
 }
 
-static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
+static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
 {
        struct dma_desc *p = (struct dma_desc *)desc;
 
index 721b616552611aa74ea077e744ec9a0c4836a48f..08c19ebd530674972ceb9ebcb41cd7af4b3fb58d 100644 (file)
@@ -34,6 +34,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
 {
        u32 value = readl(ioaddr + PTP_TCR);
        unsigned long data;
+       u32 reg_value;
 
        /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
         *      formula = (1/ptp_clock) * 1000000000
@@ -50,10 +51,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
 
        data &= PTP_SSIR_SSINC_MASK;
 
+       reg_value = data;
        if (gmac4)
-               data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
+               reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT;
 
-       writel(data, ioaddr + PTP_SSIR);
+       writel(reg_value, ioaddr + PTP_SSIR);
 
        return data;
 }
index ff4fb5eae1af3f0c7f3dcb61492a530a12f19bce..337d53d12e94b3acfe745e48422b44d1939ad2c0 100644 (file)
@@ -345,9 +345,9 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
  *  if there is no data transfer and if we are not in LPI state,
  *  then MAC Transmitter can be moved to LPI state.
  */
-static void stmmac_eee_ctrl_timer(unsigned long arg)
+static void stmmac_eee_ctrl_timer(struct timer_list *t)
 {
-       struct stmmac_priv *priv = (struct stmmac_priv *)arg;
+       struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
 
        stmmac_enable_eee_mode(priv);
        mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
@@ -401,9 +401,8 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                spin_lock_irqsave(&priv->lock, flags);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
-                       setup_timer(&priv->eee_ctrl_timer,
-                                   stmmac_eee_ctrl_timer,
-                                   (unsigned long)priv);
+                       timer_setup(&priv->eee_ctrl_timer,
+                                   stmmac_eee_ctrl_timer, 0);
                        mod_timer(&priv->eee_ctrl_timer,
                                  STMMAC_LPI_T(eee_timer));
 
@@ -483,7 +482,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                desc = np;
 
        /* Check if timestamp is available */
-       if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
+       if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
                ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
@@ -2221,9 +2220,9 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
  * Description:
  * This is the timer handler to directly invoke the stmmac_tx_clean.
  */
-static void stmmac_tx_timer(unsigned long data)
+static void stmmac_tx_timer(struct timer_list *t)
 {
-       struct stmmac_priv *priv = (struct stmmac_priv *)data;
+       struct stmmac_priv *priv = from_timer(priv, t, txtimer);
        u32 tx_queues_count = priv->plat->tx_queues_to_use;
        u32 queue;
 
@@ -2244,7 +2243,7 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
 {
        priv->tx_coal_frames = STMMAC_TX_FRAMES;
        priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
-       setup_timer(&priv->txtimer, stmmac_tx_timer, (unsigned long)priv);
+       timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
        priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
        add_timer(&priv->txtimer);
 }
@@ -2589,6 +2588,7 @@ static int stmmac_open(struct net_device *dev)
 
        priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
        priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+       priv->mss = 0;
 
        ret = alloc_dma_desc_resources(priv);
        if (ret < 0) {
index e1b55b8fb8e0917d6fa8537ff029e52d29371ca1..1f8e9601592a679025cc1451b9c686c38ac3176f 100644 (file)
@@ -358,9 +358,9 @@ static irqreturn_t xlgmac_dma_isr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static void xlgmac_tx_timer(unsigned long data)
+static void xlgmac_tx_timer(struct timer_list *t)
 {
-       struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
+       struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
        struct xlgmac_pdata *pdata = channel->pdata;
        struct napi_struct *napi;
 
@@ -391,8 +391,7 @@ static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
                if (!channel->tx_ring)
                        break;
 
-               setup_timer(&channel->tx_timer, xlgmac_tx_timer,
-                           (unsigned long)channel);
+               timer_setup(&channel->tx_timer, xlgmac_tx_timer, 0);
        }
 }
 
index cd1185e6613387e60b74bdd0a75f8f21b230dcf8..b432a75fb874cec3acc680827e4310b495e65111 100644 (file)
@@ -765,9 +765,9 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
 }
 EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
 
-static void cpsw_ale_timer(unsigned long arg)
+static void cpsw_ale_timer(struct timer_list *t)
 {
-       struct cpsw_ale *ale = (struct cpsw_ale *)arg;
+       struct cpsw_ale *ale = from_timer(ale, t, timer);
 
        cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
 
@@ -859,7 +859,7 @@ void cpsw_ale_start(struct cpsw_ale *ale)
        cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
        cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
 
-       setup_timer(&ale->timer, cpsw_ale_timer, (unsigned long)ale);
+       timer_setup(&ale->timer, cpsw_ale_timer, 0);
        if (ale->ageout) {
                ale->timer.expires = jiffies + ale->ageout;
                add_timer(&ale->timer);
index 4ad821655e51cdd467fd614530ea8fe8dd4dfb78..e831c49713eecca836746c076c8bb30b70ca381c 100644 (file)
@@ -2745,9 +2745,9 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
        return -EOPNOTSUPP;
 }
 
-static void netcp_ethss_timer(unsigned long arg)
+static void netcp_ethss_timer(struct timer_list *t)
 {
-       struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
+       struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
        struct gbe_intf *gbe_intf;
        struct gbe_slave *slave;
 
@@ -3616,8 +3616,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
        }
        spin_unlock_bh(&gbe_dev->hw_stats_lock);
 
-       setup_timer(&gbe_dev->timer, netcp_ethss_timer,
-                   (unsigned long)gbe_dev);
+       timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
        gbe_dev->timer.expires   = jiffies + GBE_TIMER_INTERVAL;
        add_timer(&gbe_dev->timer);
        *inst_priv = gbe_dev;
index 8f53d762fbc405cc68325ca8cb5cb8d1ee8ae9c3..5a4e78fde530ad4ac0984975b950eafa1cb0fde5 100644 (file)
@@ -254,7 +254,7 @@ tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
                        spin_unlock_irqrestore(&priv->lock, flags);
                return;
        }
-       priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+       priv->timer.function = tlan_timer;
        if (!in_irq())
                spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -1425,7 +1425,7 @@ static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
                tlan_dio_write8(dev->base_addr,
                                TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
                if (priv->timer.function == NULL) {
-                       priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+                       priv->timer.function = tlan_timer;
                        priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
                        priv->timer_set_at = jiffies;
                        priv->timer_type = TLAN_TIMER_ACTIVITY;
@@ -1576,7 +1576,7 @@ drop_and_reuse:
                tlan_dio_write8(dev->base_addr,
                                TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
                if (priv->timer.function == NULL)  {
-                       priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+                       priv->timer.function = tlan_timer;
                        priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
                        priv->timer_set_at = jiffies;
                        priv->timer_type = TLAN_TIMER_ACTIVITY;
index a913538d32131d81dc7f92b253ae0184e31b474a..d925b8203996691f1c380d70ff28de9f9c0b0a49 100644 (file)
@@ -912,8 +912,9 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
  * packets, including updating the queue tail pointer.
  */
 static void
-spider_net_cleanup_tx_ring(struct spider_net_card *card)
+spider_net_cleanup_tx_ring(struct timer_list *t)
 {
+       struct spider_net_card *card = from_timer(card, t, tx_timer);
        if ((spider_net_release_tx_chain(card, 0) != 0) &&
            (card->netdev->flags & IFF_UP)) {
                spider_net_kick_tx_dma(card);
@@ -1265,7 +1266,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
        spider_net_refill_rx_chain(card);
        spider_net_enable_rxdmac(card);
 
-       spider_net_cleanup_tx_ring(card);
+       spider_net_cleanup_tx_ring(&card->tx_timer);
 
        /* if all packets are in the stack, enable interrupts and return 0 */
        /* if not, return 1 */
@@ -1977,9 +1978,9 @@ init_firmware_failed:
  * @data: used for pointer to card structure
  *
  */
-static void spider_net_link_phy(unsigned long data)
+static void spider_net_link_phy(struct timer_list *t)
 {
-       struct spider_net_card *card = (struct spider_net_card *)data;
+       struct spider_net_card *card = from_timer(card, t, aneg_timer);
        struct mii_phy *phy = &card->phy;
 
        /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
@@ -2256,14 +2257,11 @@ spider_net_setup_netdev(struct spider_net_card *card)
 
        pci_set_drvdata(card->pdev, netdev);
 
-       setup_timer(&card->tx_timer,
-                   (void(*)(unsigned long))spider_net_cleanup_tx_ring,
-                   (unsigned long)card);
+       timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
        netdev->irq = card->pdev->irq;
 
        card->aneg_count = 0;
-       setup_timer(&card->aneg_timer, spider_net_link_phy,
-                   (unsigned long)card);
+       timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
 
        netif_napi_add(netdev, &card->napi,
                       spider_net_poll, SPIDER_NET_NAPI_WEIGHT);
index 83e6f76eb9654ee2c0ed3ab9a97b91e3cba7ffb5..33949248c829e1d7ab16c5d11a6ca4208efdf43c 100644 (file)
@@ -995,8 +995,8 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
        else
                name = "Rhine III";
 
-       netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
-                   name, (long)ioaddr, dev->dev_addr, rp->irq);
+       netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
+                   name, ioaddr, dev->dev_addr, rp->irq);
 
        dev_set_drvdata(hwdev, dev);
 
index 6d68c8a8f4f2ac7f732ff6cfe862c71ae2460a27..da4ec575ccf9ba4aede92c0fd3a4842ed4257449 100644 (file)
@@ -34,6 +34,7 @@ config XILINX_AXI_EMAC
 config XILINX_LL_TEMAC
        tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
        depends on (PPC || MICROBLAZE)
+       depends on !64BIT || BROKEN
        select PHYLIB
        ---help---
          This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
index c9f7215c5dc234599951e2f20a418a7f4c68ff55..3de2729590905328f13ca9c22bb6e8178cd4ac29 100644 (file)
@@ -1005,7 +1005,7 @@ static void __scc_start_tx_timer(struct scc_channel *scc,
        } else 
        if (when != TIMER_OFF)
        {
-               scc->tx_t.function = (TIMER_FUNC_TYPE)handler;
+               scc->tx_t.function = handler;
                scc->tx_t.expires = jiffies + (when*HZ)/100;
                add_timer(&scc->tx_t);
        }
@@ -1031,7 +1031,7 @@ static void scc_start_defer(struct scc_channel *scc)
        
        if (scc->kiss.maxdefer != 0 && scc->kiss.maxdefer != TIMER_OFF)
        {
-               scc->tx_wdog.function = (TIMER_FUNC_TYPE)t_busy;
+               scc->tx_wdog.function = t_busy;
                scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxdefer;
                add_timer(&scc->tx_wdog);
        }
@@ -1047,7 +1047,7 @@ static void scc_start_maxkeyup(struct scc_channel *scc)
        
        if (scc->kiss.maxkeyup != 0 && scc->kiss.maxkeyup != TIMER_OFF)
        {
-               scc->tx_wdog.function = (TIMER_FUNC_TYPE)t_maxkeyup;
+               scc->tx_wdog.function = t_maxkeyup;
                scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxkeyup;
                add_timer(&scc->tx_wdog);
        }
@@ -1428,7 +1428,7 @@ scc_start_calibrate(struct scc_channel *scc, int duration, unsigned char pattern
 
        del_timer(&scc->tx_wdog);
 
-       scc->tx_wdog.function = (TIMER_FUNC_TYPE)scc_stop_calibrate;
+       scc->tx_wdog.function = scc_stop_calibrate;
        scc->tx_wdog.expires = jiffies + HZ*duration;
        add_timer(&scc->tx_wdog);
 
index 8483f03d5a4103d6c4da7d5d586a99e013f06107..1ab97d99b9bae9f9dde6227dacff606256e66c72 100644 (file)
@@ -1379,8 +1379,8 @@ static int rr_close(struct net_device *dev)
                            rrpriv->info_dma);
        rrpriv->info = NULL;
 
-       free_irq(pdev->irq, dev);
        spin_unlock_irqrestore(&rrpriv->lock, flags);
+       free_irq(pdev->irq, dev);
 
        return 0;
 }
index 11c1e7950fe58002b1b2b52e6af395dbfc7b6863..77cc4fbaeace4836419b2232913f8d78351e0148 100644 (file)
@@ -393,6 +393,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
                .flowi4_oif = dev->ifindex,
                .flowi4_tos = RT_TOS(ip4h->tos),
                .flowi4_flags = FLOWI_FLAG_ANYSRC,
+               .flowi4_mark = skb->mark,
                .daddr = ip4h->daddr,
                .saddr = ip4h->saddr,
        };
index 5f93e6add56394f28f68da72ddb8fc724db14200..e911e4990b20e181aaf3711d1069f086c2a7a4d3 100644 (file)
@@ -239,14 +239,10 @@ static int at803x_resume(struct phy_device *phydev)
 {
        int value;
 
-       mutex_lock(&phydev->lock);
-
        value = phy_read(phydev, MII_BMCR);
        value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
        phy_write(phydev, MII_BMCR, value);
 
-       mutex_unlock(&phydev->lock);
-
        return 0;
 }
 
index 4d02b27df0445e7c1ad6f0774e44a5b7013bb05e..82104edca393b9b6662a18ef8ea0bdd8d3bb057d 100644 (file)
@@ -637,6 +637,10 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
        if (err < 0)
                goto error;
 
+       /* Do not touch the fiber page if we're in copper->sgmii mode */
+       if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
+               return 0;
+
        /* Then the fiber link */
        err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
        if (err < 0)
@@ -875,6 +879,8 @@ static int m88e1510_config_init(struct phy_device *phydev)
 
        /* SGMII-to-Copper mode initialization */
        if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+               u32 pause;
+
                /* Select page 18 */
                err = marvell_set_page(phydev, 18);
                if (err < 0)
@@ -898,6 +904,16 @@ static int m88e1510_config_init(struct phy_device *phydev)
                err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
                if (err < 0)
                        return err;
+
+               /* There appears to be a bug in the 88e1512 when used in
+                * SGMII to copper mode, where the AN advertisement register
+                * clears the pause bits each time a negotiation occurs.
+                * This means we can never be truly sure what was advertised,
+                * so disable Pause support.
+                */
+               pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+               phydev->supported &= ~pause;
+               phydev->advertising &= ~pause;
        }
 
        return m88e1121_config_init(phydev);
@@ -2069,7 +2085,7 @@ static struct phy_driver marvell_drivers[] = {
                .flags = PHY_HAS_INTERRUPT,
                .probe = marvell_probe,
                .config_init = &m88e1145_config_init,
-               .config_aneg = &marvell_config_aneg,
+               .config_aneg = &m88e1101_config_aneg,
                .read_status = &genphy_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
                .config_intr = &marvell_config_intr,
index bfd3090fb055bac4c40924205036119da5b6ce61..07c6048200c6164ac77a649468063e2fedd404c6 100644 (file)
@@ -194,8 +194,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)
        }
 
        ret = xgene_enet_ecc_init(pdata);
-       if (ret)
+       if (ret) {
+               if (pdata->dev->of_node)
+                       clk_disable_unprepare(pdata->clk);
                return ret;
+       }
        xgene_gmac_reset(pdata);
 
        return 0;
@@ -388,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev)
                return ret;
 
        mdio_bus = mdiobus_alloc();
-       if (!mdio_bus)
-               return -ENOMEM;
+       if (!mdio_bus) {
+               ret = -ENOMEM;
+               goto out_clk;
+       }
 
        mdio_bus->name = "APM X-Gene MDIO bus";
 
@@ -418,7 +423,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)
                mdio_bus->phy_mask = ~0;
                ret = mdiobus_register(mdio_bus);
                if (ret)
-                       goto out;
+                       goto out_mdiobus;
 
                acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1,
                                    acpi_register_phy, NULL, mdio_bus, NULL);
@@ -426,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev)
        }
 
        if (ret)
-               goto out;
+               goto out_mdiobus;
 
        pdata->mdio_bus = mdio_bus;
        xgene_mdio_status = true;
 
        return 0;
 
-out:
+out_mdiobus:
        mdiobus_free(mdio_bus);
 
+out_clk:
+       if (dev->of_node)
+               clk_disable_unprepare(pdata->clk);
+
        return ret;
 }
 
index 2df7b62c1a36811e97087ae641a89d06641cef4e..54d00a1d2bef094877c80bccad220de7f4d97eba 100644 (file)
@@ -270,6 +270,7 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
 
                if (addr == mdiodev->addr) {
                        dev->of_node = child;
+                       dev->fwnode = of_fwnode_handle(child);
                        return;
                }
        }
index 1ea69b7585d9bcb8098ecddfc33d0a3a46704843..842eb871a6e38df0c22cc43b7ee02f0f137f3a05 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/ethtool.h>
 #include <linux/phy.h>
 #include <linux/netdevice.h>
+#include <linux/bitfield.h>
 
 static int meson_gxl_config_init(struct phy_device *phydev)
 {
@@ -50,6 +51,77 @@ static int meson_gxl_config_init(struct phy_device *phydev)
        return 0;
 }
 
+/* This function is provided to cope with the possible failures of this PHY
+ * during the aneg process. When aneg fails, the PHY reports that aneg is
+ * done but the value found in MII_LPA is wrong:
+ *  - Early failures: MII_LPA is just 0x0001. If MII_EXPANSION reports that
+ *    the link partner (LP) supports aneg but the LP never acked our base
+ *    code word, it is likely that we never sent it to begin with.
+ *  - Late failures: MII_LPA is filled with a value which seems to make sense
+ *    but it actually is not what the LP is advertising. It seems that we
+ *    can detect this using a magic bit in the WOL bank (reg 12 - bit 12).
+ *    If this particular bit is not set when aneg is reported as done,
+ *    it means MII_LPA is likely to be wrong.
+ *
+ * In both cases, forcing a restart of the aneg process solves the problem.
+ * When this failure happens, the first retry is usually successful but,
+ * in some cases, it may take up to 6 retries to get a decent result.
+ */
+static int meson_gxl_read_status(struct phy_device *phydev)
+{
+       int ret, wol, lpa, exp;
+
+       if (phydev->autoneg == AUTONEG_ENABLE) {
+               ret = genphy_aneg_done(phydev);
+               if (ret < 0)
+                       return ret;
+               else if (!ret)
+                       goto read_status_continue;
+
+               /* Need to access WOL bank, make sure the access is open */
+               ret = phy_write(phydev, 0x14, 0x0000);
+               if (ret)
+                       return ret;
+               ret = phy_write(phydev, 0x14, 0x0400);
+               if (ret)
+                       return ret;
+               ret = phy_write(phydev, 0x14, 0x0000);
+               if (ret)
+                       return ret;
+               ret = phy_write(phydev, 0x14, 0x0400);
+               if (ret)
+                       return ret;
+
+               /* Request LPI_STATUS WOL register */
+               ret = phy_write(phydev, 0x14, 0x8D80);
+               if (ret)
+                       return ret;
+
+               /* Read LPI_STATUS value */
+               wol = phy_read(phydev, 0x15);
+               if (wol < 0)
+                       return wol;
+
+               lpa = phy_read(phydev, MII_LPA);
+               if (lpa < 0)
+                       return lpa;
+
+               exp = phy_read(phydev, MII_EXPANSION);
+               if (exp < 0)
+                       return exp;
+
+               if (!(wol & BIT(12)) ||
+                   ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) {
+                       /* Looks like aneg failed after all */
+                       phydev_dbg(phydev, "LPA corruption - aneg restart\n");
+                       return genphy_restart_aneg(phydev);
+               }
+       }
+
+read_status_continue:
+       return genphy_read_status(phydev);
+}
+
 static struct phy_driver meson_gxl_phy[] = {
        {
                .phy_id         = 0x01814400,
@@ -60,7 +132,7 @@ static struct phy_driver meson_gxl_phy[] = {
                .config_init    = meson_gxl_config_init,
                .config_aneg    = genphy_config_aneg,
                .aneg_done      = genphy_aneg_done,
-               .read_status    = genphy_read_status,
+               .read_status    = meson_gxl_read_status,
                .suspend        = genphy_suspend,
                .resume         = genphy_resume,
        },
index fdb43dd9b5cd424f4dde02f1257070ffe4b50fb1..422ff6333c52da8c4a212123bdaf443945613e02 100644 (file)
@@ -496,16 +496,18 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
        return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
 }
 
+/* Center KSZ9031RNX FLP timing at 16ms. */
 static int ksz9031_center_flp_timing(struct phy_device *phydev)
 {
        int result;
 
-       /* Center KSZ9031RNX FLP timing at 16ms. */
        result = ksz9031_extended_write(phydev, OP_DATA, 0,
                                        MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
+       if (result)
+               return result;
+
        result = ksz9031_extended_write(phydev, OP_DATA, 0,
                                        MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
-
        if (result)
                return result;
 
@@ -622,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
                phydev->link = 0;
                if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
                        phydev->drv->config_intr(phydev);
+               return genphy_config_aneg(phydev);
        }
 
        return 0;
index 2b1e67bc1e736ceb33f7afa8462f5a4858b522df..ed10d1fc8f59188b95e090f085ced18421ad23c2 100644 (file)
@@ -828,7 +828,6 @@ EXPORT_SYMBOL(phy_stop);
  */
 void phy_start(struct phy_device *phydev)
 {
-       bool do_resume = false;
        int err = 0;
 
        mutex_lock(&phydev->lock);
@@ -841,6 +840,9 @@ void phy_start(struct phy_device *phydev)
                phydev->state = PHY_UP;
                break;
        case PHY_HALTED:
+               /* if phy was suspended, bring the physical link up again */
+               phy_resume(phydev);
+
                /* make sure interrupts are re-enabled for the PHY */
                if (phydev->irq != PHY_POLL) {
                        err = phy_enable_interrupts(phydev);
@@ -849,17 +851,12 @@ void phy_start(struct phy_device *phydev)
                }
 
                phydev->state = PHY_RESUMING;
-               do_resume = true;
                break;
        default:
                break;
        }
        mutex_unlock(&phydev->lock);
 
-       /* if phy was suspended, bring the physical link up again */
-       if (do_resume)
-               phy_resume(phydev);
-
        phy_trigger_machine(phydev, true);
 }
 EXPORT_SYMBOL(phy_start);
index 67f25ac29025c53903cc724fac62efdd94828510..b15b31ca26182719cca6f764e19335483e11930e 100644 (file)
@@ -135,7 +135,9 @@ static int mdio_bus_phy_resume(struct device *dev)
        if (!mdio_bus_phy_may_suspend(phydev))
                goto no_resume;
 
+       mutex_lock(&phydev->lock);
        ret = phy_resume(phydev);
+       mutex_unlock(&phydev->lock);
        if (ret < 0)
                return ret;
 
@@ -1026,7 +1028,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
        if (err)
                goto error;
 
+       mutex_lock(&phydev->lock);
        phy_resume(phydev);
+       mutex_unlock(&phydev->lock);
        phy_led_triggers_register(phydev);
 
        return err;
@@ -1157,6 +1161,8 @@ int phy_resume(struct phy_device *phydev)
        struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
        int ret = 0;
 
+       WARN_ON(!mutex_is_locked(&phydev->lock));
+
        if (phydev->drv && phydrv->resume)
                ret = phydrv->resume(phydev);
 
@@ -1639,13 +1645,9 @@ int genphy_resume(struct phy_device *phydev)
 {
        int value;
 
-       mutex_lock(&phydev->lock);
-
        value = phy_read(phydev, MII_BMCR);
        phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
 
-       mutex_unlock(&phydev->lock);
-
        return 0;
 }
 EXPORT_SYMBOL(genphy_resume);
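Editorial note: taken together, the phy.c and phy_device.c hunks above move ownership of phydev->lock out of genphy_resume() and onto the callers of phy_resume(), which now only warns if the lock is missing. A minimal caller-side sketch of the resulting contract; example_driver_resume() is hypothetical and not part of this pull request:

#include <linux/phy.h>

/* Hypothetical driver resume path showing the new locking rule. */
static int example_driver_resume(struct phy_device *phydev)
{
	int ret;

	/* Callers, not genphy_resume(), now take phydev->lock. */
	mutex_lock(&phydev->lock);
	ret = phy_resume(phydev);	/* WARNs if the lock is not held */
	mutex_unlock(&phydev->lock);

	return ret < 0 ? ret : 0;
}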
index e3bbc70372d3ba73517514c1eda6a861d35d9524..827f3f92560e711a17514055cb96adfbc05708f7 100644 (file)
@@ -526,6 +526,7 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
        pl->link_config.pause = MLO_PAUSE_AN;
        pl->link_config.speed = SPEED_UNKNOWN;
        pl->link_config.duplex = DUPLEX_UNKNOWN;
+       pl->link_config.an_enabled = true;
        pl->ops = ops;
        __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
 
@@ -773,6 +774,7 @@ void phylink_stop(struct phylink *pl)
                sfp_upstream_stop(pl->sfp_bus);
 
        set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
+       queue_work(system_power_efficient_wq, &pl->resolve);
        flush_work(&pl->resolve);
 }
 EXPORT_SYMBOL_GPL(phylink_stop);
@@ -950,6 +952,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
        mutex_lock(&pl->state_mutex);
        /* Configure the MAC to match the new settings */
        linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising);
+       pl->link_config.interface = config.interface;
        pl->link_config.speed = our_kset.base.speed;
        pl->link_config.duplex = our_kset.base.duplex;
        pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
index e381811e5f1143f35432e6624e80c00b13f0b56e..9dfc1c4c954f3230c7f6419ac2c59ad85b26c1c4 100644 (file)
@@ -351,12 +351,13 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
 {
        unsigned int los = sfp->state & SFP_F_LOS;
 
-       /* FIXME: what if neither SFP_OPTIONS_LOS_INVERTED nor
-        * SFP_OPTIONS_LOS_NORMAL are set?  For now, we assume
-        * the same as SFP_OPTIONS_LOS_NORMAL set.
+       /* If neither SFP_OPTIONS_LOS_INVERTED nor SFP_OPTIONS_LOS_NORMAL
+        * are set, we assume that no LOS signal is available.
         */
-       if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED)
+       if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))
                los ^= SFP_F_LOS;
+       else if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL)))
+               los = 0;
 
        if (los)
                sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@@ -364,6 +365,22 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
                sfp_sm_link_up(sfp);
 }
 
+static bool sfp_los_event_active(struct sfp *sfp, unsigned int event)
+{
+       return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
+               event == SFP_E_LOS_LOW) ||
+              (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
+               event == SFP_E_LOS_HIGH);
+}
+
+static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
+{
+       return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
+               event == SFP_E_LOS_HIGH) ||
+              (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
+               event == SFP_E_LOS_LOW);
+}
+
 static void sfp_sm_fault(struct sfp *sfp, bool warn)
 {
        if (sfp->sm_retries && !--sfp->sm_retries) {
@@ -470,6 +487,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
                return -EINVAL;
        }
 
+       /* If the module requires address swap mode, warn about it */
+       if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
+               dev_warn(sfp->dev,
+                        "module address swap to access page 0xA2 is not supported.\n");
+
        return sfp_module_insert(sfp->sfp_bus, &sfp->id);
 }
 
@@ -581,9 +603,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
        case SFP_S_WAIT_LOS:
                if (event == SFP_E_TX_FAULT)
                        sfp_sm_fault(sfp, true);
-               else if (event ==
-                        (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
-                         SFP_E_LOS_HIGH : SFP_E_LOS_LOW))
+               else if (sfp_los_event_inactive(sfp, event))
                        sfp_sm_link_up(sfp);
                break;
 
@@ -591,9 +611,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
                if (event == SFP_E_TX_FAULT) {
                        sfp_sm_link_down(sfp);
                        sfp_sm_fault(sfp, true);
-               } else if (event ==
-                          (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
-                           SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) {
+               } else if (sfp_los_event_active(sfp, event)) {
                        sfp_sm_link_down(sfp);
                        sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
                }
@@ -639,7 +657,8 @@ static int sfp_module_info(struct sfp *sfp, struct ethtool_modinfo *modinfo)
 {
        /* locking... and check module is present */
 
-       if (sfp->id.ext.sff8472_compliance) {
+       if (sfp->id.ext.sff8472_compliance &&
+           !(sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)) {
                modinfo->type = ETH_MODULE_SFF_8472;
                modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
        } else {
index eb8a18991d8c7c78c45ba828b79479263acbf1a9..cc63102ca96e0056ac8f1755c5b43b7bd878ccf0 100644 (file)
@@ -106,8 +106,8 @@ static int slip_esc6(unsigned char *p, unsigned char *d, int len);
 static void slip_unesc6(struct slip *sl, unsigned char c);
 #endif
 #ifdef CONFIG_SLIP_SMART
-static void sl_keepalive(unsigned long sls);
-static void sl_outfill(unsigned long sls);
+static void sl_keepalive(struct timer_list *t);
+static void sl_outfill(struct timer_list *t);
 static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 #endif
 
@@ -763,8 +763,8 @@ static struct slip *sl_alloc(dev_t line)
        sl->mode        = SL_MODE_DEFAULT;
 #ifdef CONFIG_SLIP_SMART
        /* initialize timer_list struct */
-       setup_timer(&sl->keepalive_timer, sl_keepalive, (unsigned long)sl);
-       setup_timer(&sl->outfill_timer, sl_outfill, (unsigned long)sl);
+       timer_setup(&sl->keepalive_timer, sl_keepalive, 0);
+       timer_setup(&sl->outfill_timer, sl_outfill, 0);
 #endif
        slip_devs[i] = dev;
        return sl;
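Editorial note: the sl_alloc() change above, together with the from_timer() hunks that follow (and the similar conversions in tun, hdlc_ppp, at76c50x, brcmfmac, iwlwifi, hostap, orinoco and wl12xx later in this diff), applies the timer API migration: callbacks receive a struct timer_list pointer and recover their container with from_timer() instead of an unsigned long cast. A minimal sketch of the pattern, using a hypothetical struct my_dev that appears in none of these drivers:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical structure with an embedded timer, for illustration only. */
struct my_dev {
	struct timer_list poll_timer;
	unsigned long polls;
};

static void my_dev_poll(struct timer_list *t)
{
	/* from_timer() recovers the enclosing object from the timer_list */
	struct my_dev *md = from_timer(md, t, poll_timer);

	md->polls++;
	mod_timer(&md->poll_timer, jiffies + HZ);
}

static void my_dev_start(struct my_dev *md)
{
	/* timer_setup() replaces setup_timer() and its unsigned long cast */
	timer_setup(&md->poll_timer, my_dev_poll, 0);
	mod_timer(&md->poll_timer, jiffies + HZ);
}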
@@ -1388,9 +1388,9 @@ module_exit(slip_exit);
  * added by Stanislav Voronyi. All changes before marked VSV
  */
 
-static void sl_outfill(unsigned long sls)
+static void sl_outfill(struct timer_list *t)
 {
-       struct slip *sl = (struct slip *)sls;
+       struct slip *sl = from_timer(sl, t, outfill_timer);
 
        spin_lock(&sl->lock);
 
@@ -1419,9 +1419,9 @@ out:
        spin_unlock(&sl->lock);
 }
 
-static void sl_keepalive(unsigned long sls)
+static void sl_keepalive(struct timer_list *t)
 {
-       struct slip *sl = (struct slip *)sls;
+       struct slip *sl = from_timer(sl, t, keepalive_timer);
 
        spin_lock(&sl->lock);
 
index e9489b88407ce1677385fe480592958b57d02c8d..0a886fda01291efb5a6beb0a2b5eb2123c1f05ab 100644 (file)
@@ -829,8 +829,11 @@ static ssize_t tap_do_read(struct tap_queue *q,
        DEFINE_WAIT(wait);
        ssize_t ret = 0;
 
-       if (!iov_iter_count(to))
+       if (!iov_iter_count(to)) {
+               if (skb)
+                       kfree_skb(skb);
                return 0;
+       }
 
        if (skb)
                goto put;
@@ -1154,11 +1157,14 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m,
                       size_t total_len, int flags)
 {
        struct tap_queue *q = container_of(sock, struct tap_queue, sock);
+       struct sk_buff *skb = m->msg_control;
        int ret;
-       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
+       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
+               if (skb)
+                       kfree_skb(skb);
                return -EINVAL;
-       ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT,
-                         m->msg_control);
+       }
+       ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
index 6a7bde9bc4b292e349dd92830de494185f7fdc39..4f4a842a1c9cb8ac3397b329854a0fc7bd2f6aa3 100644 (file)
@@ -444,9 +444,9 @@ static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
        spin_unlock_bh(&tun->lock);
 }
 
-static void tun_flow_cleanup(unsigned long data)
+static void tun_flow_cleanup(struct timer_list *t)
 {
-       struct tun_struct *tun = (struct tun_struct *)data;
+       struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
        unsigned long delay = tun->ageing_time;
        unsigned long next_timer = jiffies + delay;
        unsigned long count = 0;
@@ -1196,7 +1196,9 @@ static void tun_flow_init(struct tun_struct *tun)
                INIT_HLIST_HEAD(&tun->flows[i]);
 
        tun->ageing_time = TUN_FLOW_EXPIRE;
-       setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
+       timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
+       mod_timer(&tun->flow_gc_timer,
+                 round_jiffies_up(jiffies + tun->ageing_time));
 }
 
 static void tun_flow_uninit(struct tun_struct *tun)
@@ -1950,8 +1952,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 
        tun_debug(KERN_INFO, tun, "tun_do_read\n");
 
-       if (!iov_iter_count(to))
+       if (!iov_iter_count(to)) {
+               if (skb)
+                       kfree_skb(skb);
                return 0;
+       }
 
        if (!skb) {
                /* Read frames from ring */
@@ -2067,22 +2072,24 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 {
        struct tun_file *tfile = container_of(sock, struct tun_file, socket);
        struct tun_struct *tun = tun_get(tfile);
+       struct sk_buff *skb = m->msg_control;
        int ret;
 
-       if (!tun)
-               return -EBADFD;
+       if (!tun) {
+               ret = -EBADFD;
+               goto out_free_skb;
+       }
 
        if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
                ret = -EINVAL;
-               goto out;
+               goto out_put_tun;
        }
        if (flags & MSG_ERRQUEUE) {
                ret = sock_recv_errqueue(sock->sk, m, total_len,
                                         SOL_PACKET, TUN_TX_TIMESTAMP);
                goto out;
        }
-       ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT,
-                         m->msg_control);
+       ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
        if (ret > (ssize_t)total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2090,6 +2097,13 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 out:
        tun_put(tun);
        return ret;
+
+out_put_tun:
+       tun_put(tun);
+out_free_skb:
+       if (skb)
+               kfree_skb(skb);
+       return ret;
 }
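Editorial note: the tap and tun recvmsg changes above enforce the same rule: an skb handed in via m->msg_control must be consumed on every exit path, including early error returns. A hedged sketch of the shared pattern; example_recvmsg_check() is a hypothetical helper, not code from either driver:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/socket.h>

/* Hypothetical helper: validate recvmsg flags without leaking a
 * caller-provided skb carried in msg_control.
 */
static int example_recvmsg_check(struct msghdr *m, int flags)
{
	struct sk_buff *skb = m->msg_control;

	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC)) {
		if (skb)
			kfree_skb(skb);	/* error path must still free the skb */
		return -EINVAL;
	}

	return 0;
}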
 
 static int tun_peek_len(struct socket *sock)
index c750cf7c042b004ecfbbce64aefb3d0f1d512c82..3000ddd1c7e2e481bb961deb86099b5c2ea11371 100644 (file)
@@ -261,9 +261,11 @@ static void qmi_wwan_netdev_setup(struct net_device *net)
                net->hard_header_len = 0;
                net->addr_len        = 0;
                net->flags           = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+               set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
                netdev_dbg(net, "mode: raw IP\n");
        } else if (!net->header_ops) { /* don't bother if already set */
                ether_setup(net);
+               clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);
                netdev_dbg(net, "mode: Ethernet\n");
        }
 
@@ -1202,12 +1204,14 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x9079, 10)},   /* Sierra Wireless EM74xx */
        {QMI_FIXED_INTF(0x1199, 0x907b, 8)},    /* Sierra Wireless EM74xx */
        {QMI_FIXED_INTF(0x1199, 0x907b, 10)},   /* Sierra Wireless EM74xx */
+       {QMI_FIXED_INTF(0x1199, 0x9091, 8)},    /* Sierra Wireless EM7565 */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
        {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},    /* Telit ME910 */
+       {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},    /* Telit ME910 dual modem */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
        {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},    /* Telewell TW-3G HSPA+ */
index 80348b6a864668d0b7535906084bf64f967a1448..d56fe32bf48dea8c617c011d5bd6ddc8d9d5270f 100644 (file)
@@ -484,7 +484,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
                return -ENOLINK;
        }
 
-       skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
+       if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
+               skb = __netdev_alloc_skb(dev->net, size, flags);
+       else
+               skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
        if (!skb) {
                netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
                usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
index 19a985ef9104ba129086d53f7661ba880d2cb5d3..559b215c016967f2b6525e7f3bc4fca2a2ee9e90 100644 (file)
@@ -756,7 +756,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                int num_skb_frags;
 
                buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
-               if (unlikely(!ctx)) {
+               if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers out of %d missing\n",
                                 dev->name, num_buf,
                                 virtio16_to_cpu(vi->vdev,
index 19b9cc51079e75346af766c91786d66eaa92c3f2..31f4b7911ef84c85789011332e37c5314099d82c 100644 (file)
@@ -2155,6 +2155,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                }
 
                ndst = &rt->dst;
+               if (skb_dst(skb)) {
+                       int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
+
+                       skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
+                                                      skb, mtu);
+               }
+
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
                err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
@@ -2190,6 +2197,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                                goto out_unlock;
                }
 
+               if (skb_dst(skb)) {
+                       int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
+
+                       skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
+                                                      skb, mtu);
+               }
+
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
                skb_scrub_packet(skb, xnet);
@@ -3103,6 +3117,11 @@ static void vxlan_config_apply(struct net_device *dev,
 
                max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
                                           VXLAN_HEADROOM);
+               if (max_mtu < ETH_MIN_MTU)
+                       max_mtu = ETH_MIN_MTU;
+
+               if (!changelink && !conf->mtu)
+                       dev->mtu = max_mtu;
        }
 
        if (dev->mtu > max_mtu)
index c7721c729541ea2d5d9d439bb4176b0950e3c16c..afeca6bcdade60a45fb6588cb69e69e32014cc7f 100644 (file)
@@ -558,9 +558,9 @@ out:
        return NET_RX_DROP;
 }
 
-static void ppp_timer(unsigned long arg)
+static void ppp_timer(struct timer_list *t)
 {
-       struct proto *proto = (struct proto *)arg;
+       struct proto *proto = from_timer(proto, t, timer);
        struct ppp *ppp = get_ppp(proto->dev);
        unsigned long flags;
 
@@ -610,7 +610,7 @@ static void ppp_start(struct net_device *dev)
        for (i = 0; i < IDX_COUNT; i++) {
                struct proto *proto = &ppp->protos[i];
                proto->dev = dev;
-               setup_timer(&proto->timer, ppp_timer, (unsigned long)proto);
+               timer_setup(&proto->timer, ppp_timer, 0);
                proto->state = CLOSED;
        }
        ppp->protos[IDX_LCP].pid = PID_LCP;
index ede89d4ffc8824cbf8e43dc0e78e227f02bb1539..e99e766a302851e36b34f338cd819b6eaf616637 100644 (file)
@@ -518,11 +518,11 @@ exit:
 
 /* LED trigger */
 static int tx_activity;
-static void at76_ledtrig_tx_timerfunc(unsigned long data);
+static void at76_ledtrig_tx_timerfunc(struct timer_list *unused);
 static DEFINE_TIMER(ledtrig_tx_timer, at76_ledtrig_tx_timerfunc);
 DEFINE_LED_TRIGGER(ledtrig_tx);
 
-static void at76_ledtrig_tx_timerfunc(unsigned long data)
+static void at76_ledtrig_tx_timerfunc(struct timer_list *unused)
 {
        static int tx_lastactivity;
 
index 3559fb5b8fb03a964461be8815f12a4dad3cba3a..03aae6bc18388a529f3a64cd5501ac8d5ed38deb 100644 (file)
@@ -280,9 +280,9 @@ static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info *btci)
 /**
  * brcmf_btcoex_timerfunc() - BT coex timer callback
  */
-static void brcmf_btcoex_timerfunc(ulong data)
+static void brcmf_btcoex_timerfunc(struct timer_list *t)
 {
-       struct brcmf_btcoex_info *bt_local = (struct brcmf_btcoex_info *)data;
+       struct brcmf_btcoex_info *bt_local = from_timer(bt_local, t, timer);
        brcmf_dbg(TRACE, "enter\n");
 
        bt_local->timer_on = false;
@@ -380,7 +380,7 @@ int brcmf_btcoex_attach(struct brcmf_cfg80211_info *cfg)
        /* Set up timer for BT  */
        btci->timer_on = false;
        btci->timeout = BRCMF_BTCOEX_OPPR_WIN_TIME;
-       setup_timer(&btci->timer, brcmf_btcoex_timerfunc, (ulong)btci);
+       timer_setup(&btci->timer, brcmf_btcoex_timerfunc, 0);
        btci->cfg = cfg;
        btci->saved_regs_part1 = false;
        btci->saved_regs_part2 = false;
index 6e70df97815944cb3868c7bed7b68689f695cbc8..15fa00d79fc66bb7eb7d7c770c6980ee45333355 100644 (file)
@@ -2983,10 +2983,10 @@ static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
        brcmf_notify_escan_complete(cfg, cfg->escan_info.ifp, true, true);
 }
 
-static void brcmf_escan_timeout(unsigned long data)
+static void brcmf_escan_timeout(struct timer_list *t)
 {
        struct brcmf_cfg80211_info *cfg =
-                       (struct brcmf_cfg80211_info *)data;
+                       from_timer(cfg, t, escan_timeout);
 
        if (cfg->int_escan_map || cfg->scan_request) {
                brcmf_err("timer expired\n");
@@ -3150,8 +3150,7 @@ static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
                            brcmf_cfg80211_escan_handler);
        cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
        /* Init scan_timeout timer */
-       setup_timer(&cfg->escan_timeout, brcmf_escan_timeout,
-                   (unsigned long)cfg);
+       timer_setup(&cfg->escan_timeout, brcmf_escan_timeout, 0);
        INIT_WORK(&cfg->escan_timeout_work,
                  brcmf_cfg80211_escan_timeout_worker);
 }
index e3495ea95553fb2d2056421d5ff0845b8f26e83a..cdf9e41615925c6978dc4620e032d65b055ee28d 100644 (file)
@@ -2070,7 +2070,7 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
        return head_pad;
 }
 
-/**
+/*
  * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
  * bus layer usage.
  */
@@ -3972,9 +3972,9 @@ brcmf_sdio_watchdog_thread(void *data)
 }
 
 static void
-brcmf_sdio_watchdog(unsigned long data)
+brcmf_sdio_watchdog(struct timer_list *t)
 {
-       struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
+       struct brcmf_sdio *bus = from_timer(bus, t, timer);
 
        if (bus->watchdog_tsk) {
                complete(&bus->watchdog_wait);
@@ -4121,8 +4121,8 @@ release:
        sdio_release_host(sdiodev->func[1]);
 fail:
        brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
-       device_release_driver(dev);
        device_release_driver(&sdiodev->func[2]->dev);
+       device_release_driver(dev);
 }
 
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
@@ -4169,8 +4169,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
        init_waitqueue_head(&bus->dcmd_resp_wait);
 
        /* Set up the watchdog timer */
-       setup_timer(&bus->timer, brcmf_sdio_watchdog,
-                   (unsigned long)bus);
+       timer_setup(&bus->timer, brcmf_sdio_watchdog, 0);
        /* Initialize watchdog thread */
        init_completion(&bus->watchdog_wait);
        bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
index 2acd94da9efeb48a65304a43032d3be8fd684660..d11d72615de220f2e776f0b0762c8cc7806212d0 100644 (file)
@@ -399,9 +399,9 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
  * was received.  We need to ensure we receive the statistics in order
  * to update the temperature used for calibrating the TXPOWER.
  */
-static void iwl_bg_statistics_periodic(unsigned long data)
+static void iwl_bg_statistics_periodic(struct timer_list *t)
 {
-       struct iwl_priv *priv = (struct iwl_priv *)data;
+       struct iwl_priv *priv = from_timer(priv, t, statistics_periodic);
 
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
@@ -556,9 +556,9 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
  * this function is to perform continuous uCode event logging operation
  * if enabled
  */
-static void iwl_bg_ucode_trace(unsigned long data)
+static void iwl_bg_ucode_trace(struct timer_list *t)
 {
-       struct iwl_priv *priv = (struct iwl_priv *)data;
+       struct iwl_priv *priv = from_timer(priv, t, ucode_trace);
 
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
@@ -1085,11 +1085,9 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
        if (priv->lib->bt_params)
                iwlagn_bt_setup_deferred_work(priv);
 
-       setup_timer(&priv->statistics_periodic, iwl_bg_statistics_periodic,
-                   (unsigned long)priv);
+       timer_setup(&priv->statistics_periodic, iwl_bg_statistics_periodic, 0);
 
-       setup_timer(&priv->ucode_trace, iwl_bg_ucode_trace,
-                   (unsigned long)priv);
+       timer_setup(&priv->ucode_trace, iwl_bg_ucode_trace, 0);
 }
 
 void iwl_cancel_deferred_work(struct iwl_priv *priv)
index 5b73492e7ff71d9041ed12c613069f5925558821..6524533d723c5a48d8bf68bcb3fabf9daa4c9ab9 100644 (file)
@@ -164,9 +164,10 @@ enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
  * without doing anything, driver should continue the 5 seconds timer
  * to wake up uCode for temperature check until temperature drop below CT
  */
-static void iwl_tt_check_exit_ct_kill(unsigned long data)
+static void iwl_tt_check_exit_ct_kill(struct timer_list *t)
 {
-       struct iwl_priv *priv = (struct iwl_priv *)data;
+       struct iwl_priv *priv = from_timer(priv, t,
+                                          thermal_throttle.ct_kill_exit_tm);
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        unsigned long flags;
 
@@ -214,9 +215,10 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
        }
 }
 
-static void iwl_tt_ready_for_ct_kill(unsigned long data)
+static void iwl_tt_ready_for_ct_kill(struct timer_list *t)
 {
-       struct iwl_priv *priv = (struct iwl_priv *)data;
+       struct iwl_priv *priv = from_timer(priv, t,
+                                          thermal_throttle.ct_kill_waiting_tm);
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
 
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -612,10 +614,10 @@ void iwl_tt_initialize(struct iwl_priv *priv)
        memset(tt, 0, sizeof(struct iwl_tt_mgmt));
 
        tt->state = IWL_TI_0;
-       setup_timer(&priv->thermal_throttle.ct_kill_exit_tm,
-                   iwl_tt_check_exit_ct_kill, (unsigned long)priv);
-       setup_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
-                   iwl_tt_ready_for_ct_kill, (unsigned long)priv);
+       timer_setup(&priv->thermal_throttle.ct_kill_exit_tm,
+                   iwl_tt_check_exit_ct_kill, 0);
+       timer_setup(&priv->thermal_throttle.ct_kill_waiting_tm,
+                   iwl_tt_ready_for_ct_kill, 0);
        /* setup deferred ct kill work */
        INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
        INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
index 87b4434224a1f0a121455ef0a2d0877c4e14e42b..dfa111bb411e5b1421ae2060f37d600432d70bd9 100644 (file)
@@ -68,6 +68,9 @@
  * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
  * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames
  * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
+ * @IWL_MVM_DQA_INJECT_MONITOR_QUEUE: a queue reserved for injection using
+ *     monitor mode. Note this queue is the same as the queue for P2P device
+ *     but we can't have active monitor mode along with P2P device anyway.
  * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
  * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
  *     that we are never left without the possibility to connect to an AP.
@@ -87,6 +90,7 @@ enum iwl_mvm_dqa_txq {
        IWL_MVM_DQA_CMD_QUEUE = 0,
        IWL_MVM_DQA_AUX_QUEUE = 1,
        IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
+       IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2,
        IWL_MVM_DQA_GCAST_QUEUE = 3,
        IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
        IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
index 9c889a32fe2424941d9bceb89b8cd1f593e4f3ab..223fb77a3aa9d64456244dd4c5156b8885cec6fd 100644 (file)
@@ -209,8 +209,6 @@ static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
 
 static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
 {
-       iwl_fw_dbg_stop_recording(fwrt);
-
        fwrt->dump.conf = FW_DBG_INVALID;
 }
 
index ca0b5536a8a68e1a4dcc8b2d2de61e5dfb7af255..921cab9e2d737bf7fac47028339852dd60c201c6 100644 (file)
 #define FH_RSCSR_FRAME_INVALID         0x55550000
 #define FH_RSCSR_FRAME_ALIGN           0x40
 #define FH_RSCSR_RPA_EN                        BIT(25)
+#define FH_RSCSR_RADA_EN               BIT(26)
 #define FH_RSCSR_RXQ_POS               16
 #define FH_RSCSR_RXQ_MASK              0x3F0000
 
@@ -128,7 +129,8 @@ struct iwl_rx_packet {
         * 31:    flag flush RB request
         * 30:    flag ignore TC (terminal counter) request
         * 29:    flag fast IRQ request
-        * 28-26: Reserved
+        * 28-27: Reserved
+        * 26:    RADA enabled
         * 25:    Offload enabled
         * 24:    RPF enabled
         * 23:    RSS enabled
index a2bf530eeae49e38430d9f05ab33d865ed9fd560..2f22e14e00fe881bc9868a22c25ba41286a9ea51 100644 (file)
@@ -787,7 +787,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
                                         u32 action)
 {
        struct iwl_mac_ctx_cmd cmd = {};
-       u32 tfd_queue_msk = 0;
+       u32 tfd_queue_msk = BIT(mvm->snif_queue);
        int ret;
 
        WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
index 4575595ab022600ff7d33da2d789c54f84a0e951..55ab5349dd40d8b886373df6cb0715e977650309 100644 (file)
@@ -972,6 +972,7 @@ struct iwl_mvm {
 
        /* Tx queues */
        u16 aux_queue;
+       u16 snif_queue;
        u16 probe_queue;
        u16 p2p_dev_queue;
 
@@ -1060,6 +1061,7 @@ struct iwl_mvm {
  * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
  * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done
  * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
+ * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
  */
 enum iwl_mvm_status {
        IWL_MVM_STATUS_HW_RFKILL,
@@ -1071,6 +1073,7 @@ enum iwl_mvm_status {
        IWL_MVM_STATUS_ROC_AUX_RUNNING,
        IWL_MVM_STATUS_D3_RECONFIG,
        IWL_MVM_STATUS_FIRMWARE_RUNNING,
+       IWL_MVM_STATUS_NEED_FLUSH_P2P,
 };
 
 /* Keep track of completed init configuration */
index 7078b7e458be84d59e691e88bf791bce9ca9154d..45470b6b351a9a31734b3a77f04fcbd99a8526eb 100644 (file)
@@ -624,6 +624,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
 
        mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
+       mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
        mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
        mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
 
index 76dc58381e1c9e443847d2f890b19eceb6b0140e..3b8d44361380de8d2a0a9635a915fe8151388ecf 100644 (file)
@@ -213,6 +213,7 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
                                        struct ieee80211_rx_status *rx_status)
 {
        int energy_a, energy_b, max_energy;
+       u32 rate_flags = le32_to_cpu(desc->rate_n_flags);
 
        energy_a = desc->energy_a;
        energy_a = energy_a ? -energy_a : S8_MIN;
@@ -224,7 +225,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
                        energy_a, energy_b, max_energy);
 
        rx_status->signal = max_energy;
-       rx_status->chains = 0; /* TODO: phy info */
+       rx_status->chains =
+               (rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
        rx_status->chain_signal[0] = energy_a;
        rx_status->chain_signal[1] = energy_b;
        rx_status->chain_signal[2] = S8_MIN;
@@ -232,8 +234,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
 
 static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                             struct ieee80211_rx_status *stats,
-                            struct iwl_rx_mpdu_desc *desc, int queue,
-                            u8 *crypt_len)
+                            struct iwl_rx_mpdu_desc *desc, u32 pkt_flags,
+                            int queue, u8 *crypt_len)
 {
        u16 status = le16_to_cpu(desc->status);
 
@@ -253,6 +255,8 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                        return -1;
 
                stats->flag |= RX_FLAG_DECRYPTED;
+               if (pkt_flags & FH_RSCSR_RADA_EN)
+                       stats->flag |= RX_FLAG_MIC_STRIPPED;
                *crypt_len = IEEE80211_CCMP_HDR_LEN;
                return 0;
        case IWL_RX_MPDU_STATUS_SEC_TKIP:
@@ -270,6 +274,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
                                IWL_RX_MPDU_STATUS_SEC_WEP)
                        *crypt_len = IEEE80211_WEP_IV_LEN;
+
+               if (pkt_flags & FH_RSCSR_RADA_EN)
+                       stats->flag |= RX_FLAG_ICV_STRIPPED;
+
                return 0;
        case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
                if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
@@ -848,7 +856,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 
        rx_status = IEEE80211_SKB_RXCB(skb);
 
-       if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) {
+       if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc,
+                             le32_to_cpu(pkt->len_n_flags), queue,
+                             &crypt_len)) {
                kfree_skb(skb);
                return;
        }
index c19f98489d4e9565e0ee33c0dfd22a757a435db4..1add5615fc3ad9d0a9801920448e79f71e64eda5 100644 (file)
@@ -1709,29 +1709,29 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
        sta->sta_id = IWL_MVM_INVALID_STA;
 }
 
-static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
+static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
+                                         u8 sta_id, u8 fifo)
 {
        unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
                                        mvm->cfg->base_params->wd_timeout :
                                        IWL_WATCHDOG_DISABLED;
 
        if (iwl_mvm_has_new_tx_api(mvm)) {
-               int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
-                                                   mvm->aux_sta.sta_id,
-                                                   IWL_MAX_TID_COUNT,
-                                                   wdg_timeout);
-               mvm->aux_queue = queue;
+               int tvqm_queue =
+                       iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
+                                               IWL_MAX_TID_COUNT,
+                                               wdg_timeout);
+               *queue = tvqm_queue;
        } else {
                struct iwl_trans_txq_scd_cfg cfg = {
-                       .fifo = IWL_MVM_TX_FIFO_MCAST,
-                       .sta_id = mvm->aux_sta.sta_id,
+                       .fifo = fifo,
+                       .sta_id = sta_id,
                        .tid = IWL_MAX_TID_COUNT,
                        .aggregate = false,
                        .frame_limit = IWL_FRAME_LIMIT,
                };
 
-               iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
-                                  wdg_timeout);
+               iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
        }
 }
 
@@ -1750,7 +1750,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
 
        /* Map Aux queue to fifo - needs to happen before adding Aux station */
        if (!iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_queue(mvm);
+               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
+                                             mvm->aux_sta.sta_id,
+                                             IWL_MVM_TX_FIFO_MCAST);
 
        ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
                                         MAC_INDEX_AUX, 0);
@@ -1764,7 +1766,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
         * to firmware so enable queue here - after the station was added
         */
        if (iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_queue(mvm);
+               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
+                                             mvm->aux_sta.sta_id,
+                                             IWL_MVM_TX_FIFO_MCAST);
 
        return 0;
 }
@@ -1772,10 +1776,31 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int ret;
 
        lockdep_assert_held(&mvm->mutex);
-       return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
+
+       /* Map snif queue to fifo - must happen before adding snif station */
+       if (!iwl_mvm_has_new_tx_api(mvm))
+               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
+                                             mvm->snif_sta.sta_id,
+                                             IWL_MVM_TX_FIFO_BE);
+
+       ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
                                         mvmvif->id, 0);
+       if (ret)
+               return ret;
+
+       /*
+        * For 22000 firmware and on we cannot add queue to a station unknown
+        * to firmware so enable queue here - after the station was added
+        */
+       if (iwl_mvm_has_new_tx_api(mvm))
+               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
+                                             mvm->snif_sta.sta_id,
+                                             IWL_MVM_TX_FIFO_BE);
+
+       return 0;
 }
 
 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -1784,6 +1809,8 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        lockdep_assert_held(&mvm->mutex);
 
+       iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
+                           IWL_MAX_TID_COUNT, 0);
        ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
        if (ret)
                IWL_WARN(mvm, "Failed sending remove station\n");
index 4d0314912e94794b7728af17707618cd72ad2908..e25cda9fbf6c34d951b7441b40574bfb9c0a67b7 100644 (file)
@@ -132,6 +132,24 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
         * executed, and a new time event means a new command.
         */
        iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
+
+       /* Do the same for the P2P device queue (STA) */
+       if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
+               struct iwl_mvm_vif *mvmvif;
+
+               /*
+                * NB: access to this pointer would be racy, but the flush bit
+                * can only be set when we had a P2P-Device VIF, and we have a
+                * flush of this work in iwl_mvm_prepare_mac_removal() so it's
+                * not really racy.
+                */
+
+               if (!WARN_ON(!mvm->p2p_device_vif)) {
+                       mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
+                       iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
+                                         CMD_ASYNC);
+               }
+       }
 }
 
 static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
@@ -855,10 +873,12 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
 
        mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
 
-       if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
+       if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
-       else
+               set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+       } else {
                iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
+       }
 
        iwl_mvm_roc_finished(mvm);
 }
index 593b7f97b29c103f8faf28dd905dd36aed34e763..333bcb75b8afcd17eecd76c1a581c683edef4457 100644 (file)
@@ -657,7 +657,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                        if (ap_sta_id != IWL_MVM_INVALID_STA)
                                sta_id = ap_sta_id;
                } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
-                       queue = mvm->aux_queue;
+                       queue = mvm->snif_queue;
+                       sta_id = mvm->snif_sta.sta_id;
                }
        }
 
index d46115e2d69e1d6de1fcc3126e3f1af8a59da99c..03ffd84786ca4f1b832fe78177ac0ef090d4dddd 100644 (file)
@@ -1134,9 +1134,18 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
        unsigned int default_timeout =
                cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;
 
-       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS))
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
+               /*
+                * We can't know when the station is asleep or awake, so we
+                * must disable the queue hang detection.
+                */
+               if (fw_has_capa(&mvm->fw->ucode_capa,
+                               IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
+                   vif && vif->type == NL80211_IFTYPE_AP)
+                       return IWL_WATCHDOG_DISABLED;
                return iwlmvm_mod_params.tfd_q_hang_detect ?
                        default_timeout : IWL_WATCHDOG_DISABLED;
+       }
 
        trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
        txq_timer = (void *)trigger->data;
@@ -1163,6 +1172,8 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
                return le32_to_cpu(txq_timer->p2p_go);
        case NL80211_IFTYPE_P2P_DEVICE:
                return le32_to_cpu(txq_timer->p2p_device);
+       case NL80211_IFTYPE_MONITOR:
+               return default_timeout;
        default:
                WARN_ON(1);
                return mvm->cfg->base_params->wd_timeout;
index f21fe59faccff835efe0db590c437f419a89ccb4..ccd7c33c4c2823e3374934a7e25da8cac82996a1 100644 (file)
@@ -553,6 +553,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
@@ -664,6 +665,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
        {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
        {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)},
 
 #endif /* CONFIG_IWLMVM */
 
index c59f4581e97271cc35d07bed98876d300384fa9c..ac05fd1e74c4c80308caf5b630ffd29a6968f888 100644 (file)
@@ -49,6 +49,7 @@
  *
  *****************************************************************************/
 #include "iwl-trans.h"
+#include "iwl-prph.h"
 #include "iwl-context-info.h"
 #include "internal.h"
 
@@ -156,6 +157,11 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
 
        trans_pcie->is_down = true;
 
+       /* Stop dbgc before stopping device */
+       iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
+       udelay(100);
+       iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
+
        /* tell the device to stop sending interrupts */
        iwl_disable_interrupts(trans);
 
index b7a51603465b20752616639cd3f663a6844dcdd8..4541c86881d604e16093eef51cb7ba48afb3685b 100644 (file)
@@ -166,6 +166,7 @@ static void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
                print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
                               4, buf, i, 0);
        }
+       goto out;
 
 err_read:
        print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
@@ -1226,6 +1227,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 
        trans_pcie->is_down = true;
 
+       /* Stop dbgc before stopping device */
+       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
+       } else {
+               iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
+               udelay(100);
+               iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
+       }
+
        /* tell the device to stop sending interrupts */
        iwl_disable_interrupts(trans);
 
index b5c459cd70cecab890111b302f09b5e5a55c9e29..fed6d842a5e1dc444fe58ad203cb8fb5e3ab1c6f 100644 (file)
@@ -147,9 +147,9 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
        memset(ptr, 0, sizeof(*ptr));
 }
 
-static void iwl_pcie_txq_stuck_timer(unsigned long data)
+static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
 {
-       struct iwl_txq *txq = (void *)data;
+       struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
        struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
        struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
 
@@ -495,8 +495,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
        if (WARN_ON(txq->entries || txq->tfds))
                return -EINVAL;
 
-       setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
-                   (unsigned long)txq);
+       timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
        txq->trans_pcie = trans_pcie;
 
        txq->n_window = slots_num;
index 1a8d8db80b05405de0a2f1a7160c56cb2fd0fdc3..b4dfe1893d18445373e7b812931d376ebc17c65f 100644 (file)
@@ -185,9 +185,9 @@ static void hostap_event_expired_sta(struct net_device *dev,
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
 
-static void ap_handle_timer(unsigned long data)
+static void ap_handle_timer(struct timer_list *t)
 {
-       struct sta_info *sta = (struct sta_info *) data;
+       struct sta_info *sta = from_timer(sta, t, timer);
        local_info_t *local;
        struct ap_data *ap;
        unsigned long next_time = 0;
@@ -1189,10 +1189,8 @@ static struct sta_info * ap_add_sta(struct ap_data *ap, u8 *addr)
        }
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-       init_timer(&sta->timer);
+       timer_setup(&sta->timer, ap_handle_timer, 0);
        sta->timer.expires = jiffies + ap->max_inactivity;
-       sta->timer.data = (unsigned long) sta;
-       sta->timer.function = ap_handle_timer;
        if (!ap->local->hostapd)
                add_timer(&sta->timer);
 #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
index 72b46eaf3de21ce1e6b6b43064f4488a61e2c4f5..5c4a17a18968bf0b0f0235b42fa577de6f9ed9d4 100644 (file)
@@ -2794,9 +2794,9 @@ static void prism2_check_sta_fw_version(local_info_t *local)
 }
 
 
-static void hostap_passive_scan(unsigned long data)
+static void hostap_passive_scan(struct timer_list *t)
 {
-       local_info_t *local = (local_info_t *) data;
+       local_info_t *local = from_timer(local, t, passive_scan_timer);
        struct net_device *dev = local->dev;
        u16 chan;
 
@@ -2869,10 +2869,10 @@ static void handle_comms_qual_update(struct work_struct *work)
  * used to monitor that local->last_tick_timer is being updated. If not,
  * interrupt busy-loop is assumed and driver tries to recover by masking out
  * some events. */
-static void hostap_tick_timer(unsigned long data)
+static void hostap_tick_timer(struct timer_list *t)
 {
        static unsigned long last_inquire = 0;
-       local_info_t *local = (local_info_t *) data;
+       local_info_t *local = from_timer(local, t, tick_timer);
        local->last_tick_timer = jiffies;
 
        /* Inquire CommTallies every 10 seconds to keep the statistics updated
@@ -3225,13 +3225,8 @@ while (0)
 
        lib80211_crypt_info_init(&local->crypt_info, dev->name, &local->lock);
 
-       init_timer(&local->passive_scan_timer);
-       local->passive_scan_timer.data = (unsigned long) local;
-       local->passive_scan_timer.function = hostap_passive_scan;
-
-       init_timer(&local->tick_timer);
-       local->tick_timer.data = (unsigned long) local;
-       local->tick_timer.function = hostap_tick_timer;
+       timer_setup(&local->passive_scan_timer, hostap_passive_scan, 0);
+       timer_setup(&local->tick_timer, hostap_tick_timer, 0);
        local->tick_timer.expires = jiffies + 2 * HZ;
        add_timer(&local->tick_timer);
 
index 501180584b4b197b84aa486a85cb1e8b993d0e80..94ad6fe29e69bdf199157e78023e15595e22daa9 100644 (file)
@@ -319,9 +319,9 @@ static inline void ezusb_mod_timer(struct ezusb_priv *upriv,
        mod_timer(timer, expire);
 }
 
-static void ezusb_request_timerfn(u_long _ctx)
+static void ezusb_request_timerfn(struct timer_list *t)
 {
-       struct request_context *ctx = (void *) _ctx;
+       struct request_context *ctx = from_timer(ctx, t, timer);
 
        ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
        if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) {
@@ -365,7 +365,7 @@ static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
        refcount_set(&ctx->refcount, 1);
        init_completion(&ctx->done);
 
-       setup_timer(&ctx->timer, ezusb_request_timerfn, (u_long)ctx);
+       timer_setup(&ctx->timer, ezusb_request_timerfn, 0);
        return ctx;
 }
 
index 10b075a46b266218c53d1e5674c1789e1e0f3d80..e8189c07b41f6b450f135ef703ec4e01568e311d 100644 (file)
@@ -684,6 +684,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
        hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN);
        hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
                                         IEEE80211_STYPE_NULLFUNC |
+                                        IEEE80211_FCTL_TODS |
                                         (ps ? IEEE80211_FCTL_PM : 0));
        hdr->duration_id = cpu_to_le16(0);
        memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
@@ -3215,7 +3216,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
                if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
                        continue;
 
-               skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+               skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
                if (!skb) {
                        res = -ENOMEM;
                        goto out_err;
index 7d6dc76c930ad32265f00b00076cabe6a75bf3b6..6711e7fb69269c325a18328138c9e310d4a19cf4 100644 (file)
@@ -554,7 +554,7 @@ qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
                return -EFAULT;
        }
 
-       mac->scan_timeout.function = (TIMER_FUNC_TYPE)qtnf_scan_timeout;
+       mac->scan_timeout.function = qtnf_scan_timeout;
        mod_timer(&mac->scan_timeout,
                  jiffies + QTNF_SCAN_TIMEOUT_SEC * HZ);
 
index 2d2c1ea65cb26440dc81efb2ae0e2c6851d8d6d6..3423dc51198b574cd863c72685c9d220a997f258 100644 (file)
@@ -288,7 +288,7 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
                mac->iflist[i].vifid = i;
                qtnf_sta_list_init(&mac->iflist[i].sta_list);
                mutex_init(&mac->mac_lock);
-               setup_timer(&mac->scan_timeout, NULL, 0);
+               timer_setup(&mac->scan_timeout, NULL, 0);
        }
 
        qtnf_mac_init_primary_intf(mac);
index d8afcdfca1ed6206dca11b8e86e8ba091b705659..0133fcd4601b2410b0d9b3b666f216b1860e3e37 100644 (file)
@@ -569,7 +569,7 @@ static int dl_startup_params(struct net_device *dev)
        local->card_status = CARD_DL_PARAM;
        /* Start kernel timer to wait for dl startup to complete. */
        local->timer.expires = jiffies + HZ / 2;
-       local->timer.function = (TIMER_FUNC_TYPE)verify_dl_startup;
+       local->timer.function = verify_dl_startup;
        add_timer(&local->timer);
        dev_dbg(&link->dev,
              "ray_cs dl_startup_params started timer for verify_dl_startup\n");
@@ -1947,12 +1947,12 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
                                        dev_dbg(&link->dev,
                                              "ray_cs interrupt network \"%s\" start failed\n",
                                              memtmp);
-                                       local->timer.function = (TIMER_FUNC_TYPE)start_net;
+                                       local->timer.function = start_net;
                                } else {
                                        dev_dbg(&link->dev,
                                              "ray_cs interrupt network \"%s\" join failed\n",
                                              memtmp);
-                                       local->timer.function = (TIMER_FUNC_TYPE)join_net;
+                                       local->timer.function = join_net;
                                }
                                add_timer(&local->timer);
                        }
@@ -2417,9 +2417,9 @@ static void authenticate(ray_dev_t *local)
 
        del_timer(&local->timer);
        if (build_auth_frame(local, local->bss_id, OPEN_AUTH_REQUEST)) {
-               local->timer.function = (TIMER_FUNC_TYPE)join_net;
+               local->timer.function = join_net;
        } else {
-               local->timer.function = (TIMER_FUNC_TYPE)authenticate_timeout;
+               local->timer.function = authenticate_timeout;
        }
        local->timer.expires = jiffies + HZ * 2;
        add_timer(&local->timer);
@@ -2502,7 +2502,7 @@ static void associate(ray_dev_t *local)
 
                del_timer(&local->timer);
                local->timer.expires = jiffies + HZ * 2;
-               local->timer.function = (TIMER_FUNC_TYPE)join_net;
+               local->timer.function = join_net;
                add_timer(&local->timer);
                local->card_status = CARD_ASSOC_FAILED;
                return;
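
Most of the timer hunks above and below follow one mechanical pattern: the callback now takes a struct timer_list * and recovers its containing structure with from_timer(), setup collapses into a single timer_setup() call, and once the callback has the new prototype the (TIMER_FUNC_TYPE) casts on .function assignments become unnecessary. A minimal sketch of that conversion, using an illustrative structure rather than any of the drivers touched here:

    struct my_dev {                         /* illustrative container */
            struct timer_list timer;
            unsigned long last_tick;
    };

    static void my_timeout(struct timer_list *t)
    {
            /* from_timer() is container_of() keyed on the timer field */
            struct my_dev *dev = from_timer(dev, t, timer);

            dev->last_tick = jiffies;
            mod_timer(&dev->timer, jiffies + HZ);
    }

    static void my_start(struct my_dev *dev)
    {
            /* replaces init_timer() plus the .data/.function assignments */
            timer_setup(&dev->timer, my_timeout, 0);
            mod_timer(&dev->timer, jiffies + HZ);
    }
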
index c346c021b99939f715c80250580d6f1775eb8953..d47921a845098815652a46f7fc16cdf8bccf3488 100644 (file)
@@ -196,9 +196,9 @@ out:
        mutex_unlock(&wl->mutex);
 }
 
-static void wl1271_rx_streaming_timer(unsigned long data)
+static void wl1271_rx_streaming_timer(struct timer_list *t)
 {
-       struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
+       struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
        struct wl1271 *wl = wlvif->wl;
        ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
 }
@@ -2279,8 +2279,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
                          wlcore_pending_auth_complete_work);
        INIT_LIST_HEAD(&wlvif->list);
 
-       setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
-                   (unsigned long) wlvif);
+       timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
        return 0;
 }
 
index d6dff347f8962e04208306c0ffea90956470cddd..78ebe494fef02b8d31505262f8c551e27aee7dc5 100644 (file)
@@ -186,7 +186,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
-               pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
+               pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
index 391432e2725db1c87c3bb79792fa7e149202e71f..c5a34671abdaf78a1257b869af817ecd148a6e0c 100644 (file)
@@ -230,9 +230,9 @@ static bool xennet_can_sg(struct net_device *dev)
 }
 
 
-static void rx_refill_timeout(unsigned long data)
+static void rx_refill_timeout(struct timer_list *t)
 {
-       struct netfront_queue *queue = (struct netfront_queue *)data;
+       struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
        napi_schedule(&queue->napi);
 }
 
@@ -1607,8 +1607,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
        spin_lock_init(&queue->tx_lock);
        spin_lock_init(&queue->rx_lock);
 
-       setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
-                   (unsigned long)queue);
+       timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
 
        snprintf(queue->name, sizeof(queue->name), "%s-q%u",
                 queue->info->netdev->name, queue->id);
index 7f8960a46aab0a6a494458f2c0a89cca1716e26d..52c8ae504e328ad6f44f05db2d041b5775a27d04 100644 (file)
@@ -130,9 +130,9 @@ static void fw_dnld_over(struct nfcmrvl_private *priv, u32 error)
        nfc_fw_download_done(priv->ndev->nfc_dev, priv->fw_dnld.name, error);
 }
 
-static void fw_dnld_timeout(unsigned long arg)
+static void fw_dnld_timeout(struct timer_list *t)
 {
-       struct nfcmrvl_private *priv = (struct nfcmrvl_private *) arg;
+       struct nfcmrvl_private *priv = from_timer(priv, t, fw_dnld.timer);
 
        nfc_err(priv->dev, "FW loading timeout");
        priv->fw_dnld.state = STATE_RESET;
@@ -538,8 +538,7 @@ int nfcmrvl_fw_dnld_start(struct nci_dev *ndev, const char *firmware_name)
        }
 
        /* Configure a timer for timeout */
-       setup_timer(&priv->fw_dnld.timer, fw_dnld_timeout,
-                   (unsigned long) priv);
+       timer_setup(&priv->fw_dnld.timer, fw_dnld_timeout, 0);
        mod_timer(&priv->fw_dnld.timer,
                  jiffies + msecs_to_jiffies(FW_DNLD_TIMEOUT));
 
index c05cb637ba9234371b24d4f7ede54ff7b4a6f0c3..a0cc1cc452927b54e84d749d053fe99e63463ca2 100644 (file)
@@ -1232,9 +1232,9 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
        return 0;
 }
 
-static void pn533_listen_mode_timer(unsigned long data)
+static void pn533_listen_mode_timer(struct timer_list *t)
 {
-       struct pn533 *dev = (struct pn533 *)data;
+       struct pn533 *dev = from_timer(dev, t, listen_timer);
 
        dev_dbg(dev->dev, "Listen mode timeout\n");
 
@@ -2632,9 +2632,7 @@ struct pn533 *pn533_register_device(u32 device_type,
        if (priv->wq == NULL)
                goto error;
 
-       init_timer(&priv->listen_timer);
-       priv->listen_timer.data = (unsigned long) priv;
-       priv->listen_timer.function = pn533_listen_mode_timer;
+       timer_setup(&priv->listen_timer, pn533_listen_mode_timer, 0);
 
        skb_queue_head_init(&priv->resp_q);
        skb_queue_head_init(&priv->fragment_skb);
index 9477994cf97534fe38b6b1e70b603c932081de6e..f26d938d240f03dbb27889fc101d552511ea9d92 100644 (file)
@@ -246,18 +246,18 @@ void ndlc_recv(struct llt_ndlc *ndlc, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(ndlc_recv);
 
-static void ndlc_t1_timeout(unsigned long data)
+static void ndlc_t1_timeout(struct timer_list *t)
 {
-       struct llt_ndlc *ndlc = (struct llt_ndlc *)data;
+       struct llt_ndlc *ndlc = from_timer(ndlc, t, t1_timer);
 
        pr_debug("\n");
 
        schedule_work(&ndlc->sm_work);
 }
 
-static void ndlc_t2_timeout(unsigned long data)
+static void ndlc_t2_timeout(struct timer_list *t)
 {
-       struct llt_ndlc *ndlc = (struct llt_ndlc *)data;
+       struct llt_ndlc *ndlc = from_timer(ndlc, t, t2_timer);
 
        pr_debug("\n");
 
@@ -282,13 +282,8 @@ int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
        *ndlc_id = ndlc;
 
        /* initialize timers */
-       init_timer(&ndlc->t1_timer);
-       ndlc->t1_timer.data = (unsigned long)ndlc;
-       ndlc->t1_timer.function = ndlc_t1_timeout;
-
-       init_timer(&ndlc->t2_timer);
-       ndlc->t2_timer.data = (unsigned long)ndlc;
-       ndlc->t2_timer.function = ndlc_t2_timeout;
+       timer_setup(&ndlc->t1_timer, ndlc_t1_timeout, 0);
+       timer_setup(&ndlc->t2_timer, ndlc_t2_timeout, 0);
 
        skb_queue_head_init(&ndlc->rcv_q);
        skb_queue_head_init(&ndlc->send_q);
index 56f2112e0cd840d789ec23b8db8ba1f2a52103ae..f55d082ace71558c8bf23d1813d70da18c9c5a0d 100644 (file)
@@ -677,7 +677,7 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
 }
 EXPORT_SYMBOL(st_nci_se_io);
 
-static void st_nci_se_wt_timeout(unsigned long data)
+static void st_nci_se_wt_timeout(struct timer_list *t)
 {
        /*
         * No answer from the secure element
@@ -690,7 +690,7 @@ static void st_nci_se_wt_timeout(unsigned long data)
         */
        /* hardware reset managed through VCC_UICC_OUT power supply */
        u8 param = 0x01;
-       struct st_nci_info *info = (struct st_nci_info *) data;
+       struct st_nci_info *info = from_timer(info, t, se_info.bwi_timer);
 
        pr_debug("\n");
 
@@ -708,9 +708,10 @@ static void st_nci_se_wt_timeout(unsigned long data)
        info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
 }
 
-static void st_nci_se_activation_timeout(unsigned long data)
+static void st_nci_se_activation_timeout(struct timer_list *t)
 {
-       struct st_nci_info *info = (struct st_nci_info *) data;
+       struct st_nci_info *info = from_timer(info, t,
+                                             se_info.se_active_timer);
 
        pr_debug("\n");
 
@@ -725,15 +726,11 @@ int st_nci_se_init(struct nci_dev *ndev, struct st_nci_se_status *se_status)
 
        init_completion(&info->se_info.req_completion);
        /* initialize timers */
-       init_timer(&info->se_info.bwi_timer);
-       info->se_info.bwi_timer.data = (unsigned long)info;
-       info->se_info.bwi_timer.function = st_nci_se_wt_timeout;
+       timer_setup(&info->se_info.bwi_timer, st_nci_se_wt_timeout, 0);
        info->se_info.bwi_active = false;
 
-       init_timer(&info->se_info.se_active_timer);
-       info->se_info.se_active_timer.data = (unsigned long)info;
-       info->se_info.se_active_timer.function =
-                       st_nci_se_activation_timeout;
+       timer_setup(&info->se_info.se_active_timer,
+                   st_nci_se_activation_timeout, 0);
        info->se_info.se_active = false;
 
        info->se_info.xch_error = false;
index 3a98563d4a121ddc99223ca367ee44a51f8fd92b..4bed9e842db38126859d74d4d585dee66ea80d33 100644 (file)
@@ -252,7 +252,7 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
 }
 EXPORT_SYMBOL(st21nfca_hci_se_io);
 
-static void st21nfca_se_wt_timeout(unsigned long data)
+static void st21nfca_se_wt_timeout(struct timer_list *t)
 {
        /*
         * No answer from the secure element
@@ -265,7 +265,8 @@ static void st21nfca_se_wt_timeout(unsigned long data)
         */
        /* hardware reset managed through VCC_UICC_OUT power supply */
        u8 param = 0x01;
-       struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+       struct st21nfca_hci_info *info = from_timer(info, t,
+                                                   se_info.bwi_timer);
 
        pr_debug("\n");
 
@@ -283,9 +284,10 @@ static void st21nfca_se_wt_timeout(unsigned long data)
        info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
 }
 
-static void st21nfca_se_activation_timeout(unsigned long data)
+static void st21nfca_se_activation_timeout(struct timer_list *t)
 {
-       struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+       struct st21nfca_hci_info *info = from_timer(info, t,
+                                                   se_info.se_active_timer);
 
        pr_debug("\n");
 
@@ -392,14 +394,11 @@ void st21nfca_se_init(struct nfc_hci_dev *hdev)
 
        init_completion(&info->se_info.req_completion);
        /* initialize timers */
-       init_timer(&info->se_info.bwi_timer);
-       info->se_info.bwi_timer.data = (unsigned long)info;
-       info->se_info.bwi_timer.function = st21nfca_se_wt_timeout;
+       timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0);
        info->se_info.bwi_active = false;
 
-       init_timer(&info->se_info.se_active_timer);
-       info->se_info.se_active_timer.data = (unsigned long)info;
-       info->se_info.se_active_timer.function = st21nfca_se_activation_timeout;
+       timer_setup(&info->se_info.se_active_timer,
+                   st21nfca_se_activation_timeout, 0);
        info->se_info.se_active = false;
 
        info->se_info.count_pipes = 0;
index 938a18bcfc3f85b5b1acd0d66b54f20dbaf96c78..3f5a92bae6f8e4ecea11a865cbee43b5473ac052 100644 (file)
@@ -107,9 +107,9 @@ struct pp_ctx {
 
 static struct dentry *pp_debugfs_dir;
 
-static void pp_ping(unsigned long ctx)
+static void pp_ping(struct timer_list *t)
 {
-       struct pp_ctx *pp = (void *)ctx;
+       struct pp_ctx *pp = from_timer(pp, t, db_timer);
        unsigned long irqflags;
        u64 db_bits, db_mask;
        u32 spad_rd, spad_wr;
@@ -153,7 +153,7 @@ static void pp_link_event(void *ctx)
 
        if (ntb_link_is_up(pp->ntb, NULL, NULL) == 1) {
                dev_dbg(&pp->ntb->dev, "link is up\n");
-               pp_ping((unsigned long)pp);
+               pp_ping(&pp->db_timer);
        } else {
                dev_dbg(&pp->ntb->dev, "link is down\n");
                del_timer(&pp->db_timer);
@@ -252,7 +252,7 @@ static int pp_probe(struct ntb_client *client,
        pp->db_bits = 0;
        atomic_set(&pp->count, 0);
        spin_lock_init(&pp->db_lock);
-       setup_timer(&pp->db_timer, pp_ping, (unsigned long)pp);
+       timer_setup(&pp->db_timer, pp_ping, 0);
        pp->db_delay = msecs_to_jiffies(delay_ms);
 
        rc = ntb_set_ctx(ntb, pp, &pp_ops);
index 25da74d310d1bbd5e7c62f9a35de94f6279fbc25..1e46e60b8f1080e339ebe81c1710dabb23afef75 100644 (file)
@@ -1287,7 +1287,7 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl,
        BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
                        NVME_DSM_MAX_RANGES);
 
-       queue->limits.discard_alignment = size;
+       queue->limits.discard_alignment = 0;
        queue->limits.discard_granularity = size;
 
        blk_queue_max_discard_sectors(queue, UINT_MAX);
@@ -1449,19 +1449,19 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
        int srcu_idx, ret;
        u8 data[16] = { 0, };
 
+       ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
+       if (unlikely(!ns))
+               return -EWOULDBLOCK;
+
        put_unaligned_le64(key, &data[0]);
        put_unaligned_le64(sa_key, &data[8]);
 
        memset(&c, 0, sizeof(c));
        c.common.opcode = op;
-       c.common.nsid = cpu_to_le32(head->ns_id);
+       c.common.nsid = cpu_to_le32(ns->head->ns_id);
        c.common.cdw10[0] = cpu_to_le32(cdw10);
 
-       ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
-       if (unlikely(!ns))
-               ret = -EWOULDBLOCK;
-       else
-               ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+       ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
        nvme_put_ns_from_disk(head, srcu_idx);
        return ret;
 }
@@ -1705,7 +1705,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
-       if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+       if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+           is_power_of_2(ctrl->max_hw_sectors))
                blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
        blk_queue_virt_boundary(q, ctrl->page_size - 1);
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
@@ -2869,7 +2870,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
        blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
        nvme_set_queue_limits(ctrl, ns->queue);
-       nvme_setup_streams_ns(ctrl, ns);
 
        id = nvme_identify_ns(ctrl, nsid);
        if (!id)
@@ -2880,6 +2880,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
        if (nvme_init_ns_head(ns, nsid, id, &new))
                goto out_free_id;
+       nvme_setup_streams_ns(ctrl, ns);
        
 #ifdef CONFIG_NVME_MULTIPATH
        /*
@@ -2961,14 +2962,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
-       struct nvme_ns_head *head = ns->head;
-
        if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
                return;
 
        if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
-               if (blk_get_integrity(ns->disk))
-                       blk_integrity_unregister(ns->disk);
                nvme_mpath_remove_disk_links(ns);
                sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvme_ns_id_attr_group);
@@ -2976,19 +2973,20 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                        nvme_nvm_unregister_sysfs(ns);
                del_gendisk(ns->disk);
                blk_cleanup_queue(ns->queue);
+               if (blk_get_integrity(ns->disk))
+                       blk_integrity_unregister(ns->disk);
        }
 
        mutex_lock(&ns->ctrl->subsys->lock);
        nvme_mpath_clear_current_path(ns);
-       if (head)
-               list_del_rcu(&ns->siblings);
+       list_del_rcu(&ns->siblings);
        mutex_unlock(&ns->ctrl->subsys->lock);
 
        mutex_lock(&ns->ctrl->namespaces_mutex);
        list_del_init(&ns->list);
        mutex_unlock(&ns->ctrl->namespaces_mutex);
 
-       synchronize_srcu(&head->srcu);
+       synchronize_srcu(&ns->head->srcu);
        nvme_put_ns(ns);
 }
 
index 42232e731f19f71d31e5808af8513ed73bbf7ee8..9ba614953607eba072000fb10c2854ea1ee677d6 100644 (file)
@@ -156,4 +156,34 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 
+static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
+               struct request *rq)
+{
+       struct nvme_command *cmd = nvme_req(rq)->cmd;
+
+       /*
+        * We cannot accept any other command until the connect command has
+        * completed, so only allow connect to pass.
+        */
+       if (!blk_rq_is_passthrough(rq) ||
+           cmd->common.opcode != nvme_fabrics_command ||
+           cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+               /*
+                * Reconnecting state means transport disruption, which can take
+                * a long time and even might fail permanently, fail fast to
+                * give upper layers a chance to failover.
+                * Deleting state means that the ctrl will never accept commands
+                * again, fail it permanently.
+                */
+               if (ctrl->state == NVME_CTRL_RECONNECTING ||
+                   ctrl->state == NVME_CTRL_DELETING) {
+                       nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+                       return BLK_STS_IOERR;
+               }
+               return BLK_STS_RESOURCE; /* try again later */
+       }
+
+       return BLK_STS_OK;
+}
+
 #endif /* _NVME_FABRICS_H */
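
The new nvmf_check_init_req() helper centralizes the "only the Connect command may pass until the queue is live" rule; the FC, RDMA and loop hunks that follow each pair it with a per-queue LIVE bit that is set after a successful connect and cleared on teardown. A sketch of the intended call shape in a transport's ->queue_rq() path, with illustrative type and flag names (my_ctrl, my_queue and MY_Q_LIVE are not a real transport):

    struct my_ctrl {
            struct nvme_ctrl ctrl;          /* embedded core controller */
    };

    enum { MY_Q_LIVE = 0 };                 /* illustrative queue flag */

    struct my_queue {
            unsigned long   flags;
            struct my_ctrl  *ctrl;
    };

    static inline blk_status_t my_is_ready(struct my_queue *queue,
                    struct request *rq)
    {
            if (unlikely(!test_bit(MY_Q_LIVE, &queue->flags)))
                    return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
            return BLK_STS_OK;
    }

While the queue is not yet live, anything other than a fabrics Connect is either requeued (BLK_STS_RESOURCE) or failed outright when the controller is reconnecting or being deleted.
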
index 7ab0be55c7d063b31f1a9525a2961308b0d8a274..794e66e4aa20115f4dc3a6b5fc12f706b2040bf4 100644 (file)
@@ -31,7 +31,8 @@
 
 
 enum nvme_fc_queue_flags {
-       NVME_FC_Q_CONNECTED = (1 << 0),
+       NVME_FC_Q_CONNECTED = 0,
+       NVME_FC_Q_LIVE,
 };
 
 #define NVMEFC_QUEUE_DELAY     3               /* ms units */
@@ -1927,6 +1928,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
        if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
                return;
 
+       clear_bit(NVME_FC_Q_LIVE, &queue->flags);
        /*
         * Current implementation never disconnects a single queue.
         * It always terminates a whole association. So there is never
@@ -1934,7 +1936,6 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
         */
 
        queue->connection_id = 0;
-       clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
 }
 
 static void
@@ -2013,6 +2014,8 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        break;
+
+               set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
        }
 
        return ret;
@@ -2320,6 +2323,14 @@ busy:
        return BLK_STS_RESOURCE;
 }
 
+static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
+               struct request *rq)
+{
+       if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
+               return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+       return BLK_STS_OK;
+}
+
 static blk_status_t
 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
                        const struct blk_mq_queue_data *bd)
@@ -2335,6 +2346,10 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
        u32 data_len;
        blk_status_t ret;
 
+       ret = nvme_fc_is_ready(queue, rq);
+       if (unlikely(ret))
+               return ret;
+
        ret = nvme_setup_cmd(ns, rq, sqe);
        if (ret)
                return ret;
@@ -2727,6 +2742,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        if (ret)
                goto out_disconnect_admin_queue;
 
+       set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
        /*
         * Check controller capabilities
         *
@@ -3204,7 +3221,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
                /* initiate nvme ctrl ref counting teardown */
                nvme_uninit_ctrl(&ctrl->ctrl);
-               nvme_put_ctrl(&ctrl->ctrl);
 
                /* Remove core ctrl ref. */
                nvme_put_ctrl(&ctrl->ctrl);
index 78d92151a9042b6190514e074575821bda684e7f..1218a9fca8466b874e45f97d47b9c70234916ec7 100644 (file)
@@ -131,7 +131,7 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
                bio->bi_opf |= REQ_NVME_MPATH;
                ret = direct_make_request(bio);
        } else if (!list_empty_careful(&head->list)) {
-               dev_warn_ratelimited(dev, "no path available - requeing I/O\n");
+               dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
 
                spin_lock_irq(&head->requeue_lock);
                bio_list_add(&head->requeue_list, bio);
index c0873a68872fb188228f52ac44355761a82ab721..ea1aa5283e8ed9215537594a33a243d47b363e25 100644 (file)
@@ -114,7 +114,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
  * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
  * found empirically.
  */
-#define NVME_QUIRK_DELAY_AMOUNT                2000
+#define NVME_QUIRK_DELAY_AMOUNT                2300
 
 enum nvme_ctrl_state {
        NVME_CTRL_NEW,
index a11cfd470089226cffd01c9c6104afdc876c341a..f5800c3c9082a6f038c129327bccd22f5eb861fe 100644 (file)
@@ -1759,6 +1759,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
                        dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
                        dev->host_mem_descs, dev->host_mem_descs_dma);
        dev->host_mem_descs = NULL;
+       dev->nr_host_mem_descs = 0;
 }
 
 static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
@@ -1787,7 +1788,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
        if (!bufs)
                goto out_free_descs;
 
-       for (size = 0; size < preferred; size += len) {
+       for (size = 0; size < preferred && i < max_entries; size += len) {
                dma_addr_t dma_addr;
 
                len = min_t(u64, chunk_size, preferred - size);
@@ -2428,7 +2429,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
        return -ENODEV;
 }
 
-static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
 {
        if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
                /*
@@ -2443,6 +2444,14 @@ static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
                    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
                     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
                        return NVME_QUIRK_NO_DEEPEST_PS;
+       } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
+               /*
+                * Samsung SSD 960 EVO drops off the PCIe bus after system
+                * suspend on a Ryzen board, ASUS PRIME B350M-A.
+                */
+               if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
+                   dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
+                       return NVME_QUIRK_NO_APST;
        }
 
        return 0;
@@ -2482,7 +2491,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto unmap;
 
-       quirks |= check_dell_samsung_bug(pdev);
+       quirks |= check_vendor_combination_bug(pdev);
 
        result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
                        quirks);
@@ -2665,6 +2674,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
        { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+       { PCI_DEVICE(0x1c58, 0x0023),   /* WDC SN200 adapter */
+               .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE(0x1c5f, 0x0540),   /* Memblaze Pblaze4 adapter */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
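
Two small but easy-to-miss points in this file: the host-memory-buffer fill loop gains an i < max_entries bound because the descriptor and buffer arrays were sized for max_entries elements, and nr_host_mem_descs is reset to 0 on free so a later retry starts from a clean state. A skeletal sketch of the bounded loop (names mirror the hunk, but this is not the driver code itself):

    static void fill_chunks(u64 preferred, u64 chunk_size, u32 max_entries)
    {
            u64 size, len;
            u32 i;

            /* stop at max_entries even if "preferred" is not yet reached */
            for (size = 0, i = 0; size < preferred && i < max_entries;
                 size += len, i++) {
                    len = min_t(u64, chunk_size, preferred - size);
                    /* allocate and record chunk i of length len here */
            }
    }
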
index 4f9bf2f815c399f3f7f39d5b6d485dbe75a2466f..37af56596be6ce8a0339ce2a3151f8dd98f8f854 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <rdma/mr_pool.h>
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/atomic.h>
@@ -59,6 +60,9 @@ struct nvme_rdma_request {
        struct nvme_request     req;
        struct ib_mr            *mr;
        struct nvme_rdma_qe     sqe;
+       union nvme_result       result;
+       __le16                  status;
+       refcount_t              ref;
        struct ib_sge           sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
        u32                     num_sge;
        int                     nents;
@@ -73,11 +77,11 @@ struct nvme_rdma_request {
 enum nvme_rdma_queue_flags {
        NVME_RDMA_Q_ALLOCATED           = 0,
        NVME_RDMA_Q_LIVE                = 1,
+       NVME_RDMA_Q_TR_READY            = 2,
 };
 
 struct nvme_rdma_queue {
        struct nvme_rdma_qe     *rsp_ring;
-       atomic_t                sig_count;
        int                     queue_size;
        size_t                  cmnd_capsule_len;
        struct nvme_rdma_ctrl   *ctrl;
@@ -258,32 +262,6 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
        return ret;
 }
 
-static int nvme_rdma_reinit_request(void *data, struct request *rq)
-{
-       struct nvme_rdma_ctrl *ctrl = data;
-       struct nvme_rdma_device *dev = ctrl->device;
-       struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-       int ret = 0;
-
-       if (WARN_ON_ONCE(!req->mr))
-               return 0;
-
-       ib_dereg_mr(req->mr);
-
-       req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
-                       ctrl->max_fr_pages);
-       if (IS_ERR(req->mr)) {
-               ret = PTR_ERR(req->mr);
-               req->mr = NULL;
-               goto out;
-       }
-
-       req->mr->need_inval = false;
-
-out:
-       return ret;
-}
-
 static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
                struct request *rq, unsigned int hctx_idx)
 {
@@ -293,9 +271,6 @@ static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
        struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
        struct nvme_rdma_device *dev = queue->device;
 
-       if (req->mr)
-               ib_dereg_mr(req->mr);
-
        nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
                        DMA_TO_DEVICE);
 }
@@ -317,21 +292,9 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
        if (ret)
                return ret;
 
-       req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
-                       ctrl->max_fr_pages);
-       if (IS_ERR(req->mr)) {
-               ret = PTR_ERR(req->mr);
-               goto out_free_qe;
-       }
-
        req->queue = queue;
 
        return 0;
-
-out_free_qe:
-       nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
-                       DMA_TO_DEVICE);
-       return -ENOMEM;
 }
 
 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -428,10 +391,23 @@ out_err:
 
 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 {
-       struct nvme_rdma_device *dev = queue->device;
-       struct ib_device *ibdev = dev->dev;
+       struct nvme_rdma_device *dev;
+       struct ib_device *ibdev;
 
-       rdma_destroy_qp(queue->cm_id);
+       if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
+               return;
+
+       dev = queue->device;
+       ibdev = dev->dev;
+
+       ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
+
+       /*
+        * The cm_id object might have been destroyed during RDMA connection
+        * establishment error flow to avoid getting other cma events, thus
+        * the destruction of the QP shouldn't use rdma_cm API.
+        */
+       ib_destroy_qp(queue->qp);
        ib_free_cq(queue->ib_cq);
 
        nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
@@ -440,6 +416,12 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
        nvme_rdma_dev_put(dev);
 }
 
+static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
+{
+       return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+                    ibdev->attrs.max_fast_reg_page_list_len);
+}
+
 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 {
        struct ib_device *ibdev;
@@ -482,8 +464,24 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
                goto out_destroy_qp;
        }
 
+       ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
+                             queue->queue_size,
+                             IB_MR_TYPE_MEM_REG,
+                             nvme_rdma_get_max_fr_pages(ibdev));
+       if (ret) {
+               dev_err(queue->ctrl->ctrl.device,
+                       "failed to initialize MR pool sized %d for QID %d\n",
+                       queue->queue_size, idx);
+               goto out_destroy_ring;
+       }
+
+       set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
+
        return 0;
 
+out_destroy_ring:
+       nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
+                           sizeof(struct nvme_completion), DMA_FROM_DEVICE);
 out_destroy_qp:
        rdma_destroy_qp(queue->cm_id);
 out_destroy_ib_cq:
@@ -510,7 +508,6 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
                queue->cmnd_capsule_len = sizeof(struct nvme_command);
 
        queue->queue_size = queue_size;
-       atomic_set(&queue->sig_count, 0);
 
        queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
                        RDMA_PS_TCP, IB_QPT_RC);
@@ -546,6 +543,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
 
 out_destroy_cm_id:
        rdma_destroy_id(queue->cm_id);
+       nvme_rdma_destroy_queue_ib(queue);
        return ret;
 }
 
@@ -756,8 +754,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 
        ctrl->device = ctrl->queues[0].device;
 
-       ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
-               ctrl->device->dev->attrs.max_fast_reg_page_list_len);
+       ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
 
        if (new) {
                ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
@@ -771,10 +768,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
                        error = PTR_ERR(ctrl->ctrl.admin_q);
                        goto out_free_tagset;
                }
-       } else {
-               error = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
-               if (error)
-                       goto out_free_queue;
        }
 
        error = nvme_rdma_start_queue(ctrl, 0);
@@ -854,10 +847,6 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
                        goto out_free_tag_set;
                }
        } else {
-               ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
-               if (ret)
-                       goto out_free_io_queues;
-
                blk_mq_update_nr_hw_queues(&ctrl->tag_set,
                        ctrl->ctrl.queue_count - 1);
        }
@@ -1018,8 +1007,18 @@ static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
 
 static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-       if (unlikely(wc->status != IB_WC_SUCCESS))
+       struct nvme_rdma_request *req =
+               container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
+       struct request *rq = blk_mq_rq_from_pdu(req);
+
+       if (unlikely(wc->status != IB_WC_SUCCESS)) {
                nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
+               return;
+       }
+
+       if (refcount_dec_and_test(&req->ref))
+               nvme_end_request(rq, req->status, req->result);
+
 }
 
 static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
@@ -1030,7 +1029,7 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
                .opcode             = IB_WR_LOCAL_INV,
                .next               = NULL,
                .num_sge            = 0,
-               .send_flags         = 0,
+               .send_flags         = IB_SEND_SIGNALED,
                .ex.invalidate_rkey = req->mr->rkey,
        };
 
@@ -1044,22 +1043,15 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
                struct request *rq)
 {
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-       struct nvme_rdma_ctrl *ctrl = queue->ctrl;
        struct nvme_rdma_device *dev = queue->device;
        struct ib_device *ibdev = dev->dev;
-       int res;
 
        if (!blk_rq_bytes(rq))
                return;
 
-       if (req->mr->need_inval && test_bit(NVME_RDMA_Q_LIVE, &req->queue->flags)) {
-               res = nvme_rdma_inv_rkey(queue, req);
-               if (unlikely(res < 0)) {
-                       dev_err(ctrl->ctrl.device,
-                               "Queueing INV WR for rkey %#x failed (%d)\n",
-                               req->mr->rkey, res);
-                       nvme_rdma_error_recovery(queue->ctrl);
-               }
+       if (req->mr) {
+               ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+               req->mr = NULL;
        }
 
        ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
@@ -1118,12 +1110,18 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
        struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
        int nr;
 
+       req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
+       if (WARN_ON_ONCE(!req->mr))
+               return -EAGAIN;
+
        /*
         * Align the MR to a 4K page size to match the ctrl page size and
         * the block virtual boundary.
         */
        nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
        if (unlikely(nr < count)) {
+               ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+               req->mr = NULL;
                if (nr < 0)
                        return nr;
                return -EINVAL;
@@ -1142,8 +1140,6 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
                             IB_ACCESS_REMOTE_READ |
                             IB_ACCESS_REMOTE_WRITE;
 
-       req->mr->need_inval = true;
-
        sg->addr = cpu_to_le64(req->mr->iova);
        put_unaligned_le24(req->mr->length, sg->length);
        put_unaligned_le32(req->mr->rkey, sg->key);
@@ -1163,7 +1159,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
        req->num_sge = 1;
        req->inline_data = false;
-       req->mr->need_inval = false;
+       refcount_set(&req->ref, 2); /* send and recv completions */
 
        c->common.flags |= NVME_CMD_SGL_METABUF;
 
@@ -1200,25 +1196,24 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-       if (unlikely(wc->status != IB_WC_SUCCESS))
-               nvme_rdma_wr_error(cq, wc, "SEND");
-}
+       struct nvme_rdma_qe *qe =
+               container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
+       struct nvme_rdma_request *req =
+               container_of(qe, struct nvme_rdma_request, sqe);
+       struct request *rq = blk_mq_rq_from_pdu(req);
 
-/*
- * We want to signal completion at least every queue depth/2.  This returns the
- * largest power of two that is not above half of (queue size + 1) to optimize
- * (avoid divisions).
- */
-static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
-{
-       int limit = 1 << ilog2((queue->queue_size + 1) / 2);
+       if (unlikely(wc->status != IB_WC_SUCCESS)) {
+               nvme_rdma_wr_error(cq, wc, "SEND");
+               return;
+       }
 
-       return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0;
+       if (refcount_dec_and_test(&req->ref))
+               nvme_end_request(rq, req->status, req->result);
 }
 
 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
                struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
-               struct ib_send_wr *first, bool flush)
+               struct ib_send_wr *first)
 {
        struct ib_send_wr wr, *bad_wr;
        int ret;
@@ -1227,31 +1222,12 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
        sge->length = sizeof(struct nvme_command),
        sge->lkey   = queue->device->pd->local_dma_lkey;
 
-       qe->cqe.done = nvme_rdma_send_done;
-
        wr.next       = NULL;
        wr.wr_cqe     = &qe->cqe;
        wr.sg_list    = sge;
        wr.num_sge    = num_sge;
        wr.opcode     = IB_WR_SEND;
-       wr.send_flags = 0;
-
-       /*
-        * Unsignalled send completions are another giant desaster in the
-        * IB Verbs spec:  If we don't regularly post signalled sends
-        * the send queue will fill up and only a QP reset will rescue us.
-        * Would have been way to obvious to handle this in hardware or
-        * at least the RDMA stack..
-        *
-        * Always signal the flushes. The magic request used for the flush
-        * sequencer is not allocated in our driver's tagset and it's
-        * triggered to be freed by blk_cleanup_queue(). So we need to
-        * always mark it as signaled to ensure that the "wr_cqe", which is
-        * embedded in request's payload, is not freed when __ib_process_cq()
-        * calls wr_cqe->done().
-        */
-       if (nvme_rdma_queue_sig_limit(queue) || flush)
-               wr.send_flags |= IB_SEND_SIGNALED;
+       wr.send_flags = IB_SEND_SIGNALED;
 
        if (first)
                first->next = &wr;
@@ -1301,6 +1277,12 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
        return queue->ctrl->tag_set.tags[queue_idx - 1];
 }
 
+static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+       if (unlikely(wc->status != IB_WC_SUCCESS))
+               nvme_rdma_wr_error(cq, wc, "ASYNC");
+}
+
 static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
 {
        struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
@@ -1319,10 +1301,12 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
        cmd->common.flags |= NVME_CMD_SGL_METABUF;
        nvme_rdma_set_sg_null(cmd);
 
+       sqe->cqe.done = nvme_rdma_async_done;
+
        ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
                        DMA_TO_DEVICE);
 
-       ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
+       ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
        WARN_ON_ONCE(ret);
 }
 
@@ -1343,14 +1327,34 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
        }
        req = blk_mq_rq_to_pdu(rq);
 
-       if (rq->tag == tag)
-               ret = 1;
+       req->status = cqe->status;
+       req->result = cqe->result;
 
-       if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
-           wc->ex.invalidate_rkey == req->mr->rkey)
-               req->mr->need_inval = false;
+       if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
+               if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
+                       dev_err(queue->ctrl->ctrl.device,
+                               "Bogus remote invalidation for rkey %#x\n",
+                               req->mr->rkey);
+                       nvme_rdma_error_recovery(queue->ctrl);
+               }
+       } else if (req->mr) {
+               ret = nvme_rdma_inv_rkey(queue, req);
+               if (unlikely(ret < 0)) {
+                       dev_err(queue->ctrl->ctrl.device,
+                               "Queueing INV WR for rkey %#x failed (%d)\n",
+                               req->mr->rkey, ret);
+                       nvme_rdma_error_recovery(queue->ctrl);
+               }
+               /* the local invalidation completion will end the request */
+               return 0;
+       }
+
+       if (refcount_dec_and_test(&req->ref)) {
+               if (rq->tag == tag)
+                       ret = 1;
+               nvme_end_request(rq, req->status, req->result);
+       }
 
-       nvme_end_request(rq, cqe->status, cqe->result);
        return ret;
 }
 
@@ -1591,31 +1595,11 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
  * We cannot accept any other command until the Connect command has completed.
  */
 static inline blk_status_t
-nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
-{
-       if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
-               struct nvme_command *cmd = nvme_req(rq)->cmd;
-
-               if (!blk_rq_is_passthrough(rq) ||
-                   cmd->common.opcode != nvme_fabrics_command ||
-                   cmd->fabrics.fctype != nvme_fabrics_type_connect) {
-                       /*
-                        * reconnecting state means transport disruption, which
-                        * can take a long time and even might fail permanently,
-                        * fail fast to give upper layers a chance to failover.
-                        * deleting state means that the ctrl will never accept
-                        * commands again, fail it permanently.
-                        */
-                       if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
-                           queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
-                               nvme_req(rq)->status = NVME_SC_ABORT_REQ;
-                               return BLK_STS_IOERR;
-                       }
-                       return BLK_STS_RESOURCE; /* try again later */
-               }
-       }
-
-       return 0;
+nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
+{
+       if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
+               return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+       return BLK_STS_OK;
 }
 
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1627,14 +1611,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_rdma_qe *sqe = &req->sqe;
        struct nvme_command *c = sqe->data;
-       bool flush = false;
        struct ib_device *dev;
        blk_status_t ret;
        int err;
 
        WARN_ON_ONCE(rq->tag < 0);
 
-       ret = nvme_rdma_queue_is_ready(queue, rq);
+       ret = nvme_rdma_is_ready(queue, rq);
        if (unlikely(ret))
                return ret;
 
@@ -1656,13 +1639,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
                goto err;
        }
 
+       sqe->cqe.done = nvme_rdma_send_done;
+
        ib_dma_sync_single_for_device(dev, sqe->dma,
                        sizeof(struct nvme_command), DMA_TO_DEVICE);
 
-       if (req_op(rq) == REQ_OP_FLUSH)
-               flush = true;
        err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-                       req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
+                       req->mr ? &req->reg_wr.wr : NULL);
        if (unlikely(err)) {
                nvme_rdma_unmap_data(queue, rq);
                goto err;
@@ -1810,7 +1793,6 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
        .submit_async_event     = nvme_rdma_submit_async_event,
        .delete_ctrl            = nvme_rdma_delete_ctrl,
        .get_address            = nvmf_get_address,
-       .reinit_request         = nvme_rdma_reinit_request,
 };
 
 static inline bool
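
The bulk of the RDMA rework replaces the old unsignalled-send heuristic with a per-request reference count: every send is posted IB_SEND_SIGNALED, the SEND completion and the response (or the LOCAL_INV completion when the rkey must be invalidated locally) each drop one reference, and whichever completion runs last ends the request. Alongside this, per-request MRs move into an ib_mr_pool attached to the QP (ib_mr_pool_init/get/put) instead of being allocated per request. A stripped-down sketch of the two-completion pattern only, with illustrative names and the real completion call stubbed out:

    struct my_req {
            refcount_t      ref;
            u16             status;
    };

    static void my_complete(struct my_req *req)
    {
            /* stand-in for nvme_end_request(rq, req->status, req->result) */
            pr_debug("request done, status %u\n", req->status);
    }

    static void my_start(struct my_req *req)
    {
            refcount_set(&req->ref, 2);     /* send + response completions */
    }

    static void my_send_done(struct my_req *req)
    {
            if (refcount_dec_and_test(&req->ref))
                    my_complete(req);
    }

    static void my_response_done(struct my_req *req, u16 status)
    {
            req->status = status;
            if (refcount_dec_and_test(&req->ref))
                    my_complete(req);
    }
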
index 664d3013f68f3484980da8cf1c9a93f0b49f4265..5fd86039e35362b01e6f1615c4fb835c2e019728 100644 (file)
@@ -533,15 +533,15 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 
        tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
 
+       /* release the queue lookup reference on the completed IO */
+       nvmet_fc_tgt_q_put(queue);
+
        spin_lock_irqsave(&queue->qlock, flags);
        deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
                                struct nvmet_fc_defer_fcp_req, req_list);
        if (!deferfcp) {
                list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
                spin_unlock_irqrestore(&queue->qlock, flags);
-
-               /* Release reference taken at queue lookup and fod allocation */
-               nvmet_fc_tgt_q_put(queue);
                return;
        }
 
@@ -760,6 +760,9 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
                tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
                                deferfcp->fcp_req);
 
+               /* release the queue lookup reference */
+               nvmet_fc_tgt_q_put(queue);
+
                kfree(deferfcp);
 
                spin_lock_irqsave(&queue->qlock, flags);
index 96d390416789b41e2a6259a52851c3267cc1d4d9..1e21b286f299834298fb9d23f06f9f20e0797157 100644 (file)
@@ -52,10 +52,15 @@ static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
        return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
 }
 
+enum nvme_loop_queue_flags {
+       NVME_LOOP_Q_LIVE        = 0,
+};
+
 struct nvme_loop_queue {
        struct nvmet_cq         nvme_cq;
        struct nvmet_sq         nvme_sq;
        struct nvme_loop_ctrl   *ctrl;
+       unsigned long           flags;
 };
 
 static struct nvmet_port *nvmet_loop_port;
@@ -144,6 +149,14 @@ nvme_loop_timeout(struct request *rq, bool reserved)
        return BLK_EH_HANDLED;
 }
 
+static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
+               struct request *rq)
+{
+       if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
+               return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+       return BLK_STS_OK;
+}
+
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
@@ -153,6 +166,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
        blk_status_t ret;
 
+       ret = nvme_loop_is_ready(queue, req);
+       if (unlikely(ret))
+               return ret;
+
        ret = nvme_setup_cmd(ns, req, &iod->cmd);
        if (ret)
                return ret;
@@ -267,6 +284,7 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
+       clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
@@ -297,8 +315,10 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
 {
        int i;
 
-       for (i = 1; i < ctrl->ctrl.queue_count; i++)
+       for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+               clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+       }
 }
 
 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -338,6 +358,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        return ret;
+               set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
        }
 
        return 0;
@@ -380,6 +401,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        if (error)
                goto out_cleanup_queue;
 
+       set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+
        error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
        if (error) {
                dev_err(ctrl->ctrl.device,
index c454941b34ec8aa7d09e83e2f72328110728349a..ab988d88704da0d2d583a9280dba31d671a5fa14 100644 (file)
@@ -695,7 +695,7 @@ int __of_changeset_apply_entries(struct of_changeset *ocs, int *ret_revert)
 /*
  * Returns 0 on success, a negative error value in case of an error.
  *
- * If multiple changset entry notification errors occur then only the
+ * If multiple changeset entry notification errors occur then only the
  * final notification error is reported.
  */
 int __of_changeset_apply_notify(struct of_changeset *ocs)
@@ -795,7 +795,7 @@ int __of_changeset_revert_entries(struct of_changeset *ocs, int *ret_apply)
 }
 
 /*
- * If multiple changset entry notification errors occur then only the
+ * If multiple changeset entry notification errors occur then only the
  * final notification error is reported.
  */
 int __of_changeset_revert_notify(struct of_changeset *ocs)
index 98258583abb0b40529056767c91401296e0013d4..3481e69738b5f94d18cc393f77934fac7cfc7fdc 100644 (file)
@@ -81,6 +81,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
         * can be looked up later */
        of_node_get(child);
        phy->mdio.dev.of_node = child;
+       phy->mdio.dev.fwnode = of_fwnode_handle(child);
 
        /* All data is now stored in the phy struct;
         * register it */
@@ -111,6 +112,7 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
         */
        of_node_get(child);
        mdiodev->dev.of_node = child;
+       mdiodev->dev.fwnode = of_fwnode_handle(child);
 
        /* All data is now stored in the mdiodev struct; register it. */
        rc = mdio_device_register(mdiodev);
@@ -206,6 +208,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
        mdio->phy_mask = ~0;
 
        mdio->dev.of_node = np;
+       mdio->dev.fwnode = of_fwnode_handle(np);
 
        /* Get bus level PHY reset GPIO details */
        mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY;
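
These of_mdio hunks pair every dev.of_node assignment with dev.fwnode = of_fwnode_handle(...), so the MDIO bus and its devices are also visible through the generic fwnode/device-property interface. A minimal sketch of the effect (the helper and the "my,example-delay" property are made up for illustration):

    static void my_attach_node(struct device *dev, struct device_node *np)
    {
            u32 delay;

            dev->of_node = np;
            dev->fwnode = of_fwnode_handle(np);     /* what these hunks add */

            /* generic property lookups now resolve against the DT node */
            if (device_property_read_u32(dev, "my,example-delay", &delay))
                    delay = 0;
            pr_debug("delay = %u\n", delay);
    }
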
index c150abb9049d776d48c4ef5d38b821dcea09cef7..3981b7da4fa90e6d9d0b62c8d170ceb9589b7e3c 100644 (file)
@@ -522,7 +522,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
        struct device_node *node, *overlay_node;
        struct fragment *fragment;
        struct fragment *fragments;
-       int cnt, ret;
+       int cnt, id, ret;
 
        /*
         * Warn for some issues.  Can not return -EINVAL for these until
@@ -543,9 +543,9 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
 
        of_changeset_init(&ovcs->cset);
 
-       ovcs->id = idr_alloc(&ovcs_idr, ovcs, 1, 0, GFP_KERNEL);
-       if (ovcs->id <= 0)
-               return ovcs->id;
+       id = idr_alloc(&ovcs_idr, ovcs, 1, 0, GFP_KERNEL);
+       if (id <= 0)
+               return id;
 
        cnt = 0;
 
@@ -572,18 +572,20 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
 
        cnt = 0;
        for_each_child_of_node(tree, node) {
+               overlay_node = of_get_child_by_name(node, "__overlay__");
+               if (!overlay_node)
+                       continue;
+
                fragment = &fragments[cnt];
-               fragment->overlay = of_get_child_by_name(node, "__overlay__");
-               if (fragment->overlay) {
-                       fragment->target = find_target_node(node);
-                       if (!fragment->target) {
-                               of_node_put(fragment->overlay);
-                               ret = -EINVAL;
-                               goto err_free_fragments;
-                       } else {
-                               cnt++;
-                       }
+               fragment->overlay = overlay_node;
+               fragment->target = find_target_node(node);
+               if (!fragment->target) {
+                       of_node_put(fragment->overlay);
+                       ret = -EINVAL;
+                       goto err_free_fragments;
                }
+
+               cnt++;
        }
 
        /*
@@ -611,6 +613,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
                goto err_free_fragments;
        }
 
+       ovcs->id = id;
        ovcs->count = cnt;
        ovcs->fragments = fragments;
 
@@ -619,7 +622,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
 err_free_fragments:
        kfree(fragments);
 err_free_idr:
-       idr_remove(&ovcs_idr, ovcs->id);
+       idr_remove(&ovcs_idr, id);
 
        pr_err("%s() failed, ret = %d\n", __func__, ret);
 
@@ -630,9 +633,8 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
 {
        int i;
 
-       if (!ovcs->cset.entries.next)
-               return;
-       of_changeset_destroy(&ovcs->cset);
+       if (ovcs->cset.entries.next)
+               of_changeset_destroy(&ovcs->cset);
 
        if (ovcs->id)
                idr_remove(&ovcs_idr, ovcs->id);
@@ -660,14 +662,14 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
  * A non-zero return value will not have created the changeset if error is from:
  *   - parameter checks
  *   - building the changeset
- *   - overlay changset pre-apply notifier
+ *   - overlay changeset pre-apply notifier
  *
  * If an error is returned by an overlay changeset pre-apply notifier
  * then no further overlay changeset pre-apply notifier will be called.
  *
  * A non-zero return value will have created the changeset if error is from:
  *   - overlay changeset entry notifier
- *   - overlay changset post-apply notifier
+ *   - overlay changeset post-apply notifier
  *
  * If an error is returned by an overlay changeset post-apply notifier
  * then no further overlay changeset post-apply notifier will be called.
@@ -706,12 +708,11 @@ int of_overlay_apply(struct device_node *tree, int *ovcs_id)
        }
 
        of_overlay_mutex_lock();
+       mutex_lock(&of_mutex);
 
        ret = of_resolve_phandles(tree);
        if (ret)
-               goto err_overlay_unlock;
-
-       mutex_lock(&of_mutex);
+               goto err_free_overlay_changeset;
 
        ret = init_overlay_changeset(ovcs, tree);
        if (ret)
@@ -736,14 +737,13 @@ int of_overlay_apply(struct device_node *tree, int *ovcs_id)
                        devicetree_state_flags |= DTSF_APPLY_FAIL;
                }
                goto err_free_overlay_changeset;
-       } else {
-               ret = __of_changeset_apply_notify(&ovcs->cset);
-               if (ret)
-                       pr_err("overlay changeset entry notify error %d\n",
-                              ret);
-               /* fall through */
        }
 
+       ret = __of_changeset_apply_notify(&ovcs->cset);
+       if (ret)
+               pr_err("overlay changeset entry notify error %d\n", ret);
+       /* notify failure is not fatal, continue */
+
        list_add_tail(&ovcs->ovcs_list, &ovcs_list);
        *ovcs_id = ovcs->id;
 
@@ -755,18 +755,14 @@ int of_overlay_apply(struct device_node *tree, int *ovcs_id)
                        ret = ret_tmp;
        }
 
-       mutex_unlock(&of_mutex);
-       of_overlay_mutex_unlock();
-
-       goto out;
-
-err_overlay_unlock:
-       of_overlay_mutex_unlock();
+       goto out_unlock;
 
 err_free_overlay_changeset:
        free_overlay_changeset(ovcs);
 
+out_unlock:
        mutex_unlock(&of_mutex);
+       of_overlay_mutex_unlock();
 
 out:
        pr_debug("%s() err=%d\n", __func__, ret);
@@ -871,7 +867,7 @@ static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs)
  *
  * A non-zero return value will not revert the changeset if error is from:
  *   - parameter checks
- *   - overlay changset pre-remove notifier
+ *   - overlay changeset pre-remove notifier
  *   - overlay changeset entry revert
  *
  * If an error is returned by an overlay changeset pre-remove notifier
@@ -882,7 +878,7 @@ static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs)
  *
  * A non-zero return value will revert the changeset if error is from:
  *   - overlay changeset entry notifier
- *   - overlay changset post-remove notifier
+ *   - overlay changeset post-remove notifier
  *
  * If an error is returned by an overlay changeset post-remove notifier
  * then no further overlay changeset post-remove notifier will be called.
@@ -931,15 +927,13 @@ int of_overlay_remove(int *ovcs_id)
                if (ret_apply)
                        devicetree_state_flags |= DTSF_REVERT_FAIL;
                goto out_unlock;
-       } else {
-               ret = __of_changeset_revert_notify(&ovcs->cset);
-               if (ret) {
-                       pr_err("overlay changeset entry notify error %d\n",
-                              ret);
-                       /* fall through - changeset was reverted */
-               }
        }
 
+       ret = __of_changeset_revert_notify(&ovcs->cset);
+       if (ret)
+               pr_err("overlay changeset entry notify error %d\n", ret);
+       /* notify failure is not fatal, continue */
+
        *ovcs_id = 0;
 
        ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_REMOVE);
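
The overlay.c hunks above converge of_overlay_apply() on a single-exit error path: both mutexes are taken before the first fallible step, every failure once the changeset structure exists goes through one cleanup label, and the shared unlock label also serves the success path. This is also why free_overlay_changeset() is made safe to call on a partially initialized changeset, and why the unittest hunk just below can drop its extra of_overlay_mutex_unlock(). The following is a minimal, hypothetical sketch of that shape, not the kernel function itself:

#include <linux/mutex.h>

static DEFINE_MUTEX(lock_a);            /* stands in for of_overlay_mutex */
static DEFINE_MUTEX(lock_b);            /* stands in for of_mutex */

/* Hypothetical helper illustrating the single-exit lock/cleanup pattern. */
static int apply_single_exit(int (*build)(void), int (*commit)(void),
                             void (*destroy)(void))
{
        int ret;

        mutex_lock(&lock_a);
        mutex_lock(&lock_b);

        ret = build();                  /* e.g. resolving phandles + init */
        if (ret)
                goto err_destroy;       /* destroy() tolerates partial init */

        ret = commit();                 /* e.g. applying the changeset */
        if (ret)
                goto err_destroy;

        goto out_unlock;

err_destroy:
        destroy();                      /* e.g. free_overlay_changeset() */
out_unlock:
        mutex_unlock(&lock_b);
        mutex_unlock(&lock_a);
        return ret;
}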
index e568b1e82501b832f4f92cb30810749d914855da..0f8052f1355c0c09ac99a43335ab67927cdac52b 100644 (file)
@@ -2165,7 +2165,6 @@ static int __init overlay_data_add(int onum)
        ret = of_overlay_apply(info->np_overlay, &info->overlay_id);
        if (ret < 0) {
                pr_err("of_overlay_apply() (ret=%d), %d\n", ret, onum);
-               of_overlay_mutex_unlock();
                goto out_free_np_overlay;
        }
 
index a25fed52f7e94de4bd3dd5cb8b0922e1df8e81bf..41b740aed3a346e4bbc610959281649447f83bd4 100644 (file)
@@ -1692,3 +1692,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
        iounmap(base_addr);
 }
 
+
+/*
+ * The design of the Diva management card in rp34x0 machines (rp3410, rp3440)
+ * seems rushed, so that many built-in components simply don't work.
+ * The following quirks disable the serial AUX port and the built-in ATI RV100
+ * Radeon 7000 graphics card which both don't have any external connectors and
+ * thus are useless, and even worse, e.g. the AUX port occupies ttyS0 and as
+ * such makes those machines the only PARISC machines on which we can't use
+ * ttyS0 as boot console.
+ */
+static void quirk_diva_ati_card(struct pci_dev *dev)
+{
+       if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+           dev->subsystem_device != 0x1292)
+               return;
+
+       dev_info(&dev->dev, "Hiding Diva built-in ATI card");
+       dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
+       quirk_diva_ati_card);
+
+static void quirk_diva_aux_disable(struct pci_dev *dev)
+{
+       if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+           dev->subsystem_device != 0x1291)
+               return;
+
+       dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
+       dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
+       quirk_diva_aux_disable);
index 90944667cceada37ea3e9176c9ac37f5413bc649..bda151788f3f723198be6c3401393f3876f5babf 100644 (file)
@@ -80,15 +80,6 @@ config XEN_PCIDEV_FRONTEND
           The PCI device frontend driver allows the kernel to import arbitrary
           PCI devices from a PCI backend to support PCI driver domains.
 
-config HT_IRQ
-       bool "Interrupts on hypertransport devices"
-       default y
-       depends on PCI && X86_LOCAL_APIC
-       help
-          This allows native hypertransport devices to use interrupts.
-
-          If unsure say Y.
-
 config PCI_ATS
        bool
 
index 3d5e047f0a3284e1859fcd14dfd7936c2a6424b7..c7819b973df7e4bbd07cd49e3bf29eea92eafa07 100644 (file)
@@ -21,9 +21,6 @@ obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
 # Build the PCI MSI interrupt support
 obj-$(CONFIG_PCI_MSI) += msi.o
 
-# Build the Hypertransport interrupt support
-obj-$(CONFIG_HT_IRQ) += htirq.o
-
 obj-$(CONFIG_PCI_ATS) += ats.o
 obj-$(CONFIG_PCI_IOV) += iov.o
 
index 12796eccb2befd9170b1ba8119dc600ef356c7e8..52ab3cb0a0bfe065d8209201cbf03a7ef15cdfa4 100644 (file)
@@ -1128,12 +1128,12 @@ static int rcar_pcie_probe(struct platform_device *pdev)
        err = rcar_pcie_get_resources(pcie);
        if (err < 0) {
                dev_err(dev, "failed to request resources: %d\n", err);
-               goto err_free_bridge;
+               goto err_free_resource_list;
        }
 
        err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
        if (err)
-               goto err_free_bridge;
+               goto err_free_resource_list;
 
        pm_runtime_enable(dev);
        err = pm_runtime_get_sync(dev);
@@ -1176,9 +1176,9 @@ err_pm_put:
 err_pm_disable:
        pm_runtime_disable(dev);
 
-err_free_bridge:
-       pci_free_host_bridge(bridge);
+err_free_resource_list:
        pci_free_resource_list(&pcie->resources);
+       pci_free_host_bridge(bridge);
 
        return err;
 }
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
deleted file mode 100644 (file)
index bb88c26..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * File:       htirq.c
- * Purpose:    Hypertransport Interrupt Capability
- *
- * Copyright (C) 2006 Linux Networx
- * Copyright (C) Eric Biederman <ebiederman@lnxi.com>
- */
-
-#include <linux/irq.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/htirq.h>
-
-/* Global ht irq lock.
- *
- * This is needed to serialize access to the data port in hypertransport
- * irq capability.
- *
- * With multiple simultaneous hypertransport irq devices it might pay
- * to make this more fine grained.  But start with simple, stupid, and correct.
- */
-static DEFINE_SPINLOCK(ht_irq_lock);
-
-void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
-{
-       struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
-       unsigned long flags;
-
-       spin_lock_irqsave(&ht_irq_lock, flags);
-       if (cfg->msg.address_lo != msg->address_lo) {
-               pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
-               pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_lo);
-       }
-       if (cfg->msg.address_hi != msg->address_hi) {
-               pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1);
-               pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_hi);
-       }
-       if (cfg->update)
-               cfg->update(cfg->dev, irq, msg);
-       spin_unlock_irqrestore(&ht_irq_lock, flags);
-       cfg->msg = *msg;
-}
-
-void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
-{
-       struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
-
-       *msg = cfg->msg;
-}
-
-void mask_ht_irq(struct irq_data *data)
-{
-       struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
-       struct ht_irq_msg msg = cfg->msg;
-
-       msg.address_lo |= 1;
-       write_ht_irq_msg(data->irq, &msg);
-}
-
-void unmask_ht_irq(struct irq_data *data)
-{
-       struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
-       struct ht_irq_msg msg = cfg->msg;
-
-       msg.address_lo &= ~1;
-       write_ht_irq_msg(data->irq, &msg);
-}
-
-/**
- * __ht_create_irq - create an irq and attach it to a device.
- * @dev: The hypertransport device to find the irq capability on.
- * @idx: Which of the possible irqs to attach to.
- * @update: Function to be called when changing the htirq message
- *
- * The irq number of the new irq or a negative error value is returned.
- */
-int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
-{
-       int max_irq, pos, irq;
-       unsigned long flags;
-       u32 data;
-
-       pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
-       if (!pos)
-               return -EINVAL;
-
-       /* Verify the idx I want to use is in range */
-       spin_lock_irqsave(&ht_irq_lock, flags);
-       pci_write_config_byte(dev, pos + 2, 1);
-       pci_read_config_dword(dev, pos + 4, &data);
-       spin_unlock_irqrestore(&ht_irq_lock, flags);
-
-       max_irq = (data >> 16) & 0xff;
-       if (idx > max_irq)
-               return -EINVAL;
-
-       irq = arch_setup_ht_irq(idx, pos, dev, update);
-       if (irq > 0)
-               dev_dbg(&dev->dev, "irq %d for HT\n", irq);
-
-       return irq;
-}
-EXPORT_SYMBOL(__ht_create_irq);
-
-/**
- * ht_create_irq - create an irq and attach it to a device.
- * @dev: The hypertransport device to find the irq capability on.
- * @idx: Which of the possible irqs to attach to.
- *
- * ht_create_irq needs to be called for all hypertransport devices
- * that generate irqs.
- *
- * The irq number of the new irq or a negative error value is returned.
- */
-int ht_create_irq(struct pci_dev *dev, int idx)
-{
-       return __ht_create_irq(dev, idx, NULL);
-}
-EXPORT_SYMBOL(ht_create_irq);
-
-/**
- * ht_destroy_irq - destroy an irq created with ht_create_irq
- * @irq: irq to be destroyed
- *
- * This reverses ht_create_irq removing the specified irq from
- * existence.  The irq should be free before this happens.
- */
-void ht_destroy_irq(unsigned int irq)
-{
-       arch_teardown_ht_irq(irq);
-}
-EXPORT_SYMBOL(ht_destroy_irq);
index 7f47bb72bf301cd62cdcaa48e2e560e2df06e3a3..14fd865a512096393149fd63a3707305648f276f 100644 (file)
@@ -999,7 +999,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
         * the subsequent "thaw" callbacks for the device.
         */
        if (dev_pm_smart_suspend_and_suspended(dev)) {
-               dev->power.direct_complete = true;
+               dev_pm_skip_next_resume_phases(dev);
                return 0;
        }
 
@@ -1012,7 +1012,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
        if (pci_has_legacy_pm_support(pci_dev))
                return pci_legacy_resume_early(dev);
 
-       pci_update_current_state(pci_dev, PCI_D0);
+       /*
+        * pci_restore_state() requires the device to be in D0 (because of MSI
+        * restoration among other things), so force it into D0 in case the
+        * driver's "freeze" callbacks put it into a low-power state directly.
+        */
+       pci_set_power_state(pci_dev, PCI_D0);
        pci_restore_state(pci_dev);
 
        if (drv && drv->pm && drv->pm->thaw_noirq)
index 4500880240f25745f410538fb1a53d42a5210cce..6572550cfe784104179f1fb946ee41e2a312b2ca 100644 (file)
@@ -207,7 +207,7 @@ static const unsigned int dnv_uart0_pins[] = { 60, 61, 64, 65 };
 static const unsigned int dnv_uart0_modes[] = { 2, 3, 1, 1 };
 static const unsigned int dnv_uart1_pins[] = { 94, 95, 96, 97 };
 static const unsigned int dnv_uart2_pins[] = { 60, 61, 62, 63 };
-static const unsigned int dnv_uart2_modes[] = { 1, 1, 2, 2 };
+static const unsigned int dnv_uart2_modes[] = { 1, 2, 2, 2 };
 static const unsigned int dnv_emmc_pins[] = {
        142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
 };
index d45af31b86b41ef6c85591a040fe2dd46c60de3e..bdb8d174efefb7ee897c60bab250a126a53af729 100644 (file)
@@ -408,12 +408,21 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
 {
        struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
        unsigned int reg = OUTPUT_EN;
-       unsigned int mask;
+       unsigned int mask, val, ret;
 
        armada_37xx_update_reg(&reg, offset);
        mask = BIT(offset);
 
-       return regmap_update_bits(info->regmap, reg, mask, mask);
+       ret = regmap_update_bits(info->regmap, reg, mask, mask);
+
+       if (ret)
+               return ret;
+
+       reg = OUTPUT_VAL;
+       val = value ? mask : 0;
+       regmap_update_bits(info->regmap, reg, mask, val);
+
+       return 0;
 }
 
 static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
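
The armada-37xx hunk above makes direction_output() also drive the requested level: after enabling the output direction it writes the level into the value register using the same per-pin mask. A minimal sketch of that two-step regmap pattern, with hypothetical register offsets rather than the driver's actual layout:

#include <linux/bits.h>
#include <linux/regmap.h>

#define MY_OUTPUT_EN    0x00            /* hypothetical register offsets */
#define MY_OUTPUT_VAL   0x04

static int my_gpio_direction_output(struct regmap *map, unsigned int offset,
                                    int value)
{
        unsigned int mask = BIT(offset);
        int ret;

        /* switch the pin to output mode */
        ret = regmap_update_bits(map, MY_OUTPUT_EN, mask, mask);
        if (ret)
                return ret;

        /* then latch the requested level under the same mask */
        return regmap_update_bits(map, MY_OUTPUT_VAL, mask,
                                  value ? mask : 0);
}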
index e9b83e291edf43e02e757625ffb4f8d251f6227e..c11b8f14d841e91268eb404b22bb017b83e05e8a 100644 (file)
@@ -2322,7 +2322,7 @@ static const struct gemini_pin_conf *gemini_get_pin_conf(struct gemini_pmx *pmx,
        int i;
 
        for (i = 0; i < pmx->nconfs; i++) {
-               retconf = &gemini_confs_3516[i];
+               retconf = &pmx->confs[i];
                if (retconf->pin == pin)
                        return retconf;
        }
index 4f2a726bbaeb234c1caf7454db0058da903305b9..f5f77432ce6f830677d09887a16b34e0af01b2a4 100644 (file)
@@ -428,7 +428,7 @@ static const struct sunxi_desc_pin a64_pins[] = {
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
                  SUNXI_FUNCTION(0x2, "mmc0"),          /* D3 */
-                 SUNXI_FUNCTION(0x4, "uart0")),        /* RX */
+                 SUNXI_FUNCTION(0x3, "uart0")),        /* RX */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
index 97b48336f84a3fdd6a30e98a19e954d625d3cfee..a78d7b922ef47529ce0708201ccd7b60a876959f 100644 (file)
@@ -535,14 +535,16 @@ static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data_broken = {
        .pins = sun50i_h5_pins,
        .npins = ARRAY_SIZE(sun50i_h5_pins),
        .irq_banks = 2,
-       .irq_read_needs_mux = true
+       .irq_read_needs_mux = true,
+       .disable_strict_mode = true,
 };
 
 static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data = {
        .pins = sun50i_h5_pins,
        .npins = ARRAY_SIZE(sun50i_h5_pins),
        .irq_banks = 3,
-       .irq_read_needs_mux = true
+       .irq_read_needs_mux = true,
+       .disable_strict_mode = true,
 };
 
 static int sun50i_h5_pinctrl_probe(struct platform_device *pdev)
index 472ef0d91b9929c4628084a020374b68d3de3c86..5553c0eb0f41c420e422686bdacb6ea3c1c2c14a 100644 (file)
@@ -145,19 +145,19 @@ static const struct sunxi_desc_pin sun9i_a80_pins[] = {
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
                  SUNXI_FUNCTION(0x3, "mcsi"),          /* MCLK */
-                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PB_EINT14 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 14)), /* PB_EINT14 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
                  SUNXI_FUNCTION(0x3, "mcsi"),          /* SCK */
                  SUNXI_FUNCTION(0x4, "i2c4"),          /* SCK */
-                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PB_EINT15 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 15)), /* PB_EINT15 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
                  SUNXI_FUNCTION(0x3, "mcsi"),          /* SDA */
                  SUNXI_FUNCTION(0x4, "i2c4"),          /* SDA */
-                 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PB_EINT16 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 16)), /* PB_EINT16 */
 
        /* Hole */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
index f3796164329efb9ea00aed180a8c1116b813c979..d4aeac3477f55086b69aafa0098ad2f3d617d508 100644 (file)
@@ -118,6 +118,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event)
                return;
        }
        input_report_key(data->idev, KEY_RFKILL, 1);
+       input_sync(data->idev);
        input_report_key(data->idev, KEY_RFKILL, 0);
        input_sync(data->idev);
 }
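
The asus-wireless fix above inserts an input_sync() between the press and the release report: two state changes of the same key inside one event frame is not valid evdev protocol, so the pair needs a sync in between for userspace to see a keystroke. A minimal sketch of the intended sequence (hypothetical helper, device setup omitted):

#include <linux/input.h>

/* Hypothetical helper: emit a full press + release of one key. */
static void report_keystroke(struct input_dev *idev, unsigned int code)
{
        input_report_key(idev, code, 1);
        input_sync(idev);               /* close the "press" frame */
        input_report_key(idev, code, 0);
        input_sync(idev);               /* close the "release" frame */
}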
index bf897b1832b188c24a92f33fdf74b33b25aeac43..cd4725e7e0b56d8981bc770ca80b562c874370f8 100644 (file)
@@ -37,6 +37,7 @@
 
 struct quirk_entry {
        u8 touchpad_led;
+       u8 kbd_led_levels_off_1;
 
        int needs_kbd_timeouts;
        /*
@@ -67,6 +68,10 @@ static struct quirk_entry quirk_dell_xps13_9333 = {
        .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
 };
 
+static struct quirk_entry quirk_dell_latitude_e6410 = {
+       .kbd_led_levels_off_1 = 1,
+};
+
 static struct platform_driver platform_driver = {
        .driver = {
                .name = "dell-laptop",
@@ -269,6 +274,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
                },
                .driver_data = &quirk_dell_xps13_9333,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "Dell Latitude E6410",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6410"),
+               },
+               .driver_data = &quirk_dell_latitude_e6410,
+       },
        { }
 };
 
@@ -1149,6 +1163,9 @@ static int kbd_get_info(struct kbd_info *info)
        units = (buffer->output[2] >> 8) & 0xFF;
        info->levels = (buffer->output[2] >> 16) & 0xFF;
 
+       if (quirks && quirks->kbd_led_levels_off_1 && info->levels)
+               info->levels--;
+
        if (units & BIT(0))
                info->seconds = (buffer->output[3] >> 0) & 0xFF;
        if (units & BIT(1))
index 39d2f451848332d8346201b9cc1cd08bb5e10427..fb25b20df316f39140a3af0f974e9f1fa30aabff 100644 (file)
@@ -639,6 +639,8 @@ static int dell_wmi_events_set_enabled(bool enable)
        int ret;
 
        buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
        buffer->cmd_class = CLASS_INFO;
        buffer->cmd_select = SELECT_APP_REGISTRATION;
        buffer->input[0] = 0x10000;
index 62aa2c37b8d2821450008fb820cbd7be9552f6a7..935121814c97711a4d8879c36c80361ab5462967 100644 (file)
@@ -363,7 +363,7 @@ static int sony_laptop_input_keycode_map[] = {
 };
 
 /* release buttons after a short delay if pressed */
-static void do_sony_laptop_release_key(unsigned long unused)
+static void do_sony_laptop_release_key(struct timer_list *unused)
 {
        struct sony_laptop_keypress kp;
        unsigned long flags;
@@ -470,7 +470,7 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
                goto err_dec_users;
        }
 
-       setup_timer(&sony_laptop_input.release_key_timer,
+       timer_setup(&sony_laptop_input.release_key_timer,
                    do_sony_laptop_release_key, 0);
 
        /* input keys */
index 436b4e4e71a149384c1246baa2a508e4dce21f72..04735649052ab3652d38c8ce775d033fb8cde7ac 100644 (file)
@@ -39,7 +39,7 @@ static struct timer_list ktimer;
  * The kernel timer
  */
 
-static void pps_ktimer_event(unsigned long ptr)
+static void pps_ktimer_event(struct timer_list *unused)
 {
        struct pps_event_time ts;
 
@@ -85,7 +85,7 @@ static int __init pps_ktimer_init(void)
                return -ENOMEM;
        }
 
-       setup_timer(&ktimer, pps_ktimer_event, 0);
+       timer_setup(&ktimer, pps_ktimer_event, 0);
        mod_timer(&ktimer, jiffies + HZ);
 
        dev_info(pps->dev, "ktimer PPS source registered\n");
index 00efe24a60633527aa1e9847234612ebbdb3edb4..215eac68ae2d72afda8c0e114755485ce2f10185 100644 (file)
@@ -71,9 +71,9 @@ static void rtc_uie_task(struct work_struct *work)
        if (num)
                rtc_handle_legacy_irq(rtc, num, RTC_UF);
 }
-static void rtc_uie_timer(unsigned long data)
+static void rtc_uie_timer(struct timer_list *t)
 {
-       struct rtc_device *rtc = (struct rtc_device *)data;
+       struct rtc_device *rtc = from_timer(rtc, t, uie_timer);
        unsigned long flags;
 
        spin_lock_irqsave(&rtc->irq_lock, flags);
@@ -460,7 +460,7 @@ void rtc_dev_prepare(struct rtc_device *rtc)
 
 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
        INIT_WORK(&rtc->uie_task, rtc_uie_task);
-       setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
+       timer_setup(&rtc->uie_timer, rtc_uie_timer, 0);
 #endif
 
        cdev_init(&rtc->char_dev, &rtc_dev_fops);
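
The sony-laptop, pps and rtc hunks above (and the dasd ones further down) all apply the same mechanical conversion: setup_timer()/init_timer() with an unsigned long cookie becomes timer_setup(), and the callback now receives the struct timer_list pointer and recovers its container with from_timer() instead of casting a data field. A minimal sketch with a hypothetical device structure:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct mydev {                          /* hypothetical device structure */
        struct timer_list timer;
        int pending;
};

static void mydev_timeout(struct timer_list *t)
{
        /* recover the owning structure from the timer_list pointer */
        struct mydev *dev = from_timer(dev, t, timer);

        dev->pending = 0;
}

static void mydev_start_timer(struct mydev *dev)
{
        /* replaces setup_timer(&dev->timer, fn, (unsigned long)dev) */
        timer_setup(&dev->timer, mydev_timeout, 0);
        mod_timer(&dev->timer, jiffies + HZ);
}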
index e5225ad9c5b12fdd9d723704d66eb9ec2ff5b734..2fdab400c1fe0f918f2d8f6517f36d32b9ff90e0 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the S/390 specific device drivers
 #
index 31f014b57bfc6600a86aefbdc3d512a6b3fa1b9b..bc27d716aa6b2d404863a22a4242c2ccbd9fc6e6 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 comment "S/390 block device drivers"
        depends on S390 && BLOCK
 
index 29f35e29d4801f83aa74b0a590bdb4412a9caa7c..d4e8dff673ccf321c6fefddcd9298eb51e189b3a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *                 Horst Hummel <Horst.Hummel@de.ibm.com>
@@ -70,8 +71,8 @@ static void do_restore_device(struct work_struct *);
 static void do_reload_device(struct work_struct *);
 static void do_requeue_requests(struct work_struct *);
 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
-static void dasd_device_timeout(unsigned long);
-static void dasd_block_timeout(unsigned long);
+static void dasd_device_timeout(struct timer_list *);
+static void dasd_block_timeout(struct timer_list *);
 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
 static void dasd_profile_init(struct dasd_profile *, struct dentry *);
 static void dasd_profile_exit(struct dasd_profile *);
@@ -119,9 +120,7 @@ struct dasd_device *dasd_alloc_device(void)
                     (void (*)(unsigned long)) dasd_device_tasklet,
                     (unsigned long) device);
        INIT_LIST_HEAD(&device->ccw_queue);
-       init_timer(&device->timer);
-       device->timer.function = dasd_device_timeout;
-       device->timer.data = (unsigned long) device;
+       timer_setup(&device->timer, dasd_device_timeout, 0);
        INIT_WORK(&device->kick_work, do_kick_device);
        INIT_WORK(&device->restore_device, do_restore_device);
        INIT_WORK(&device->reload_device, do_reload_device);
@@ -163,9 +162,7 @@ struct dasd_block *dasd_alloc_block(void)
                     (unsigned long) block);
        INIT_LIST_HEAD(&block->ccw_queue);
        spin_lock_init(&block->queue_lock);
-       init_timer(&block->timer);
-       block->timer.function = dasd_block_timeout;
-       block->timer.data = (unsigned long) block;
+       timer_setup(&block->timer, dasd_block_timeout, 0);
        spin_lock_init(&block->profile.lock);
 
        return block;
@@ -762,7 +759,7 @@ static void dasd_profile_end_add_data(struct dasd_profile_info *data,
        /* in case of an overflow, reset the whole profile */
        if (data->dasd_io_reqs == UINT_MAX) {
                        memset(data, 0, sizeof(*data));
-                       getnstimeofday(&data->starttod);
+                       ktime_get_real_ts64(&data->starttod);
        }
        data->dasd_io_reqs++;
        data->dasd_io_sects += sectors;
@@ -897,7 +894,7 @@ void dasd_profile_reset(struct dasd_profile *profile)
                return;
        }
        memset(data, 0, sizeof(*data));
-       getnstimeofday(&data->starttod);
+       ktime_get_real_ts64(&data->starttod);
        spin_unlock_bh(&profile->lock);
 }
 
@@ -914,7 +911,7 @@ int dasd_profile_on(struct dasd_profile *profile)
                kfree(data);
                return 0;
        }
-       getnstimeofday(&data->starttod);
+       ktime_get_real_ts64(&data->starttod);
        profile->data = data;
        spin_unlock_bh(&profile->lock);
        return 0;
@@ -998,8 +995,8 @@ static void dasd_stats_array(struct seq_file *m, unsigned int *array)
 static void dasd_stats_seq_print(struct seq_file *m,
                                 struct dasd_profile_info *data)
 {
-       seq_printf(m, "start_time %ld.%09ld\n",
-                  data->starttod.tv_sec, data->starttod.tv_nsec);
+       seq_printf(m, "start_time %lld.%09ld\n",
+                  (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
        seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
        seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
        seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
@@ -1560,12 +1557,12 @@ EXPORT_SYMBOL(dasd_start_IO);
  * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
  * DASD_CQR_QUEUED for 2) and 3).
  */
-static void dasd_device_timeout(unsigned long ptr)
+static void dasd_device_timeout(struct timer_list *t)
 {
        unsigned long flags;
        struct dasd_device *device;
 
-       device = (struct dasd_device *) ptr;
+       device = from_timer(device, t, timer);
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        /* re-activate request queue */
        dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
@@ -2628,12 +2625,12 @@ EXPORT_SYMBOL(dasd_cancel_req);
  * is waiting for something that may not come reliably, (e.g. a state
  * change interrupt)
  */
-static void dasd_block_timeout(unsigned long ptr)
+static void dasd_block_timeout(struct timer_list *t)
 {
        unsigned long flags;
        struct dasd_block *block;
 
-       block = (struct dasd_block *) ptr;
+       block = from_timer(block, t, timer);
        spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
        /* re-activate request queue */
        dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
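
The dasd profiling hunks above are a y2038 conversion: struct timespec becomes struct timespec64 (see the dasd_int.h hunk further down), getnstimeofday() becomes ktime_get_real_ts64(), and tv_sec is printed through an s64 cast because it is wider than long on 32-bit builds. A minimal sketch with a hypothetical stats structure:

#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/timekeeping.h>

struct sample_stats {                   /* hypothetical profile data */
        struct timespec64 starttod;
        unsigned int reqs;
};

static void sample_stats_reset(struct sample_stats *s)
{
        memset(s, 0, sizeof(*s));
        ktime_get_real_ts64(&s->starttod);      /* 64-bit wall-clock start */
}

static void sample_stats_show(struct seq_file *m, struct sample_stats *s)
{
        /* tv_sec is s64 on 32-bit, so cast explicitly for %lld */
        seq_printf(m, "start_time %lld.%09ld\n",
                   (s64)s->starttod.tv_sec, s->starttod.tv_nsec);
}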
index c95a4784c1911ef1540450f9fc6c2ffee97fcee5..e7cd28ff1984460540f70fd203326b08f2d5c8f9 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *                 Horst Hummel <Horst.Hummel@de.ibm.com>
index 98fb28e49d2c0605c5ea3e2062d4ad790788d504..f035c2f25d35a8c5aa12cb0552aaf3484edb2798 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Based on.......: linux/drivers/s390/block/mdisk.c
index 8eafcd5fa0049ed9d3384aa6a8999fcec4b61ba2..a2edf2a7ace968f7e72beeed372a8a723e1c2d85 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *                 Horst Hummel <Horst.Hummel@de.ibm.com>
@@ -530,10 +531,12 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
        pfxdata->validity.define_extent = 1;
 
        /* private uid is kept up to date, conf_data may be outdated */
-       if (startpriv->uid.type != UA_BASE_DEVICE) {
+       if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
                pfxdata->validity.verify_base = 1;
-               if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
-                       pfxdata->validity.hyper_pav = 1;
+
+       if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
+               pfxdata->validity.verify_base = 1;
+               pfxdata->validity.hyper_pav = 1;
        }
 
        rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
@@ -3414,10 +3417,12 @@ static int prepare_itcw(struct itcw *itcw,
        pfxdata.validity.define_extent = 1;
 
        /* private uid is kept up to date, conf_data may be outdated */
-       if (startpriv->uid.type != UA_BASE_DEVICE) {
+       if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
+               pfxdata.validity.verify_base = 1;
+
+       if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
                pfxdata.validity.verify_base = 1;
-               if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
-                       pfxdata.validity.hyper_pav = 1;
+               pfxdata.validity.hyper_pav = 1;
        }
 
        switch (cmd) {
index 6168ccdb389c37bb686196f49d68f0780b8fd010..a6b132f7e869eb4eb804b3fa8407cd064c92b699 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
index b095a23bcc0c133c44f51b366aca53f790891461..96709b1a7bf8d8af0f4e0db7748cd5ac8e5a8650 100644 (file)
@@ -441,7 +441,7 @@ struct dasd_profile_info {
        unsigned int dasd_io_nr_req[32]; /* hist. of # of requests in chanq */
 
        /* new data */
-       struct timespec starttod;          /* time of start or last reset */
+       struct timespec64 starttod;        /* time of start or last reset */
        unsigned int dasd_io_alias;        /* requests using an alias */
        unsigned int dasd_io_tpm;          /* requests using transport mode */
        unsigned int dasd_read_reqs;       /* total number of read  requests */
index 7abb240847c07dd0b24f3f2e7f03d221a1416f5f..6aaefb78043696e658e36b6637b4e6dde59b5c83 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * dcssblk.c -- the S/390 block driver for dcss memory
  *
index eb51893c74a4ba4053fe8d15e064fbf42bed9845..b4130c7880d874862f14eeb381f36c472b231a0d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Block driver for s390 storage class memory.
  *
index 571a0709e1e5b98ba14708d13e9f944e5ad85a6a..2a6334ca750efdf68f818df0af4b08ac66b8df78 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Xpram.c -- the S/390 expanded memory RAM-disk
  *           
index 97c4c9fdd53da316934f14bf261bc1885bd98bb6..ab0b243a947d6f1a8725721ee3f12ae0da538dfd 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 comment "S/390 character device drivers"
        depends on S390
 
index 353b3f2688241d1077a29f2aef365113375cd810..f4c095612a025b016f86b44da52b66ba82907754 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 # Default keymap for 3270 (ebcdic codepage 037).
 keymaps 0-1,4-5
 
index c4518168fd02c98013b349e17fdba30c8d65eec5..61822480a2a0bdfa808e4f9d3e19965857e7700d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * IBM/3270 Driver - fullscreen driver.
  *
index 251a318a9b7541452c0142f0f0f8ce84167b8dc2..1447d08872253e3498914fb6da6c3504cd207f47 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    HMC Drive DVD Module
  *
index 027ac6ae5eea512c530a9afbb87bb31ad2bedd8e..bf4ab4efed7355dd88007c7bfc304f5251530e9a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Character device driver for reading z/VM *MONITOR service records.
  *
index 571a7e3527553ad905612007b860197ca4105b5a..76c158c41510374ac4b814aca55587193c8b8fe7 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Character device driver for writing z/VM *MONITOR service records.
  *
index 5d4f053d7c38c330d969586fa3eae0b40f6955ca..f8cd2935fbfd48c5aef1ad980457cc55433b6db4 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * IBM/3270 Driver - core functions.
  *
index 9b4c61c1e3097e8888e9e4b6fbacad76e30df2b8..e4e2df7a478e36aba8ee63775195b4c97886ead8 100644 (file)
@@ -158,7 +158,7 @@ static inline void
 __sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
 {
        del_timer(&sclp_request_timer);
-       sclp_request_timer.function = (TIMER_FUNC_TYPE)cb;
+       sclp_request_timer.function = cb;
        sclp_request_timer.expires = jiffies + time;
        add_timer(&sclp_request_timer);
 }
@@ -566,7 +566,7 @@ sclp_sync_wait(void)
                if (timer_pending(&sclp_request_timer) &&
                    get_tod_clock_fast() > timeout &&
                    del_timer(&sclp_request_timer))
-                       sclp_request_timer.function((TIMER_DATA_TYPE)&sclp_request_timer);
+                       sclp_request_timer.function(&sclp_request_timer);
                cpu_relax();
        }
        local_irq_disable();
index 19c25427f27fdd702864153fe64f71abb2a175b0..ee6f3b563728319ba5c3d4964f05843453e3ce99 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Enable Asynchronous Notification via SCLP.
  *
index de69f0ddc321dedbb7270ae9fcdf75afe1d148fc..6d73ee3f827a6ca401b0eaa5e4f66b2e6e1766e5 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    tape device discipline for 3480/3490 tapes.
  *
index e352047ed9f7a8d6d8ec0a70a688c1efadbfdf9d..37e65a05517f50606f73db539e0871e76452d142 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    tape device discipline for 3590 tapes.
  *
index e7d23048d3f00d0ea1d2a59bf1128d38f4cb6d1d..a07102472ce97eba06a526dcb56d5690b9be2fd4 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2004
  *
index 32503a60ee851698049c2fc1221ce01c581ebb6a..8d3370da2dfc294e1286caa337bd9d305fb624c5 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    basic function of the tape device driver
  *
index e417ccd9e299891560b2b2c1e67565f0eb46df47..1c98023cffd4165a8ad5117c907fa9258d918c4f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    IBM/3270 Driver - tty functions.
  *
index 62559dc0169f8c9f32a4677e946e0e88880ae17f..069b9ef08206b1bc7168bdbfd4dd3de2ba026e6c 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *     character device driver for reading z/VM system service records
  *
index fa90ef05afc00c32805238c4dccdb1a5c589390d..52aa894243187484c03bf301d274990cdbeacb32 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Linux driver for System z and s390 unit record devices
  * (z/VM virtual punch, reader, printer)
index aaed778f67c4ab84bf13dc9435260a5bdae361dc..4369662cfff5a7ad094d522590901bc845933872 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  * zcore module to export memory content and register sets for creating system
  * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
@@ -7,7 +8,6 @@
  *
  * Copyright IBM Corp. 2003, 2008
  * Author(s): Michael Holzheu
- * License: GPL
  */
 
 #define KMSG_COMPONENT "zdump"
index 95e25c1df9221a53bd2e77c594c968d63b1c295a..140e3e4ee2fd0ced47055eb0de2200dfd4e64069 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef S390_BLACKLIST_H
 #define S390_BLACKLIST_H
 
index e2f7b6e93efddf85dd45457d70ce3c3bc3602ba3..bfec1485ca2332ac5bfe8adf2e7c6c50307a3c97 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  bus driver for ccwgroup
  *
index f4166f80c4d4e4408c97e8742a7275b90464898d..5c94a3aec4dd293dfdce50e33219037564ebf118 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 1999, 2010
  *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
index 7b0b295b2313b8f3056378ed64c4249381a0213b..c08fc5a8df0c61935c02e282a6ec868d2d0e7630 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *   S/390 common I/O routines -- channel subsystem call
  *
index 8e7e19b9e92c028e1097fe9ff82386203594bc37..0015729d917d90e049a1def14cba883ec2c31cc3 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for s390 chsc subchannels
  *
index 89216174fcbba8f83d3a0a79a633f6e1cb91ad88..987bf9a8c9f7237d06c578e42807a6e168ef7dfd 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *   S/390 common I/O routines -- low level i/o calls
  *
index 7d59230e88bb3a2452e8e4eaf667f25659d3d13d..5e495c62cfa7749aef468cc137d0d5eab0c959c7 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Linux on zSeries Channel Measurement Facility support
  *
@@ -7,20 +8,6 @@
  *         Cornelia Huck <cornelia.huck@de.ibm.com>
  *
  * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "cio"
index d3e504c3c362655f4eec5903893d8bc8f8e5af2c..0f11dce6e2240c14151ab690fd28e1a39c0694a2 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * driver for channel subsystem
  *
@@ -5,8 +6,6 @@
  *
  * Author(s): Arnd Bergmann (arndb@de.ibm.com)
  *           Cornelia Huck (cornelia.huck@de.ibm.com)
- *
- * License: GPL
  */
 
 #define KMSG_COMPONENT "cio"
index 318d8269f5dee10c56114224b4a08b8b617a96da..75a245f38e2eb7558b9da624a1e2f190cd77b8c2 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  *  bus driver for ccw devices
  *
@@ -5,8 +6,6 @@
  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
  *              Cornelia Huck (cornelia.huck@de.ibm.com)
  *              Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * License: GPL
  */
 
 #define KMSG_COMPONENT "cio"
index dd7d79d30edc440662a02432a3ad3ce822503225..1319122e9d1231920ef0325a9e56a4c6de91ff80 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * finite state machine for device handling
  *
index cf8c4ac6323a6d1c91dfe93dfdb22e2d9d0432b3..1caf6a398760bb1f156f5c088759f12e6039e589 100644 (file)
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  * Copyright IBM Corp. 2002, 2009
  *
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  *           Cornelia Huck (cornelia.huck@de.ibm.com)
- *
- * License: GPL
  */
 #include <linux/export.h>
 #include <linux/init.h>
index ce16e4f45d440fd25538d9223db5e07566ad22f5..53468ae64b999fa17bc154bb7eeda57293aed2da 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for s390 eadm subchannels
  *
index c592087be0f1a6b0b8083dce448e278c2388c7c9..77fde9f5ea8baeb55ff403096358d20ff7974c5e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Functions for registration of I/O interruption subclasses on s390.
  *
index ed4852fab44b5737fa5edae05ddd640067486304..95b0efe28afb57650e8b15224f8997d04667d555 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Linux for s390 qdio support, buffer handling, qdio API and module support.
  *
@@ -430,8 +431,8 @@ static void process_buffer_error(struct qdio_q *q, int count)
        q->qdio_error = QDIO_ERROR_SLSB_STATE;
 
        /* special handling for no target buffer empty */
-       if ((!q->is_input_q &&
-           (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
+       if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
+           q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
                qperf_inc(q, target_full);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
                              q->first_to_check);
@@ -535,7 +536,8 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
        case SLSB_P_INPUT_ERROR:
                process_buffer_error(q, count);
                q->first_to_check = add_buf(q->first_to_check, count);
-               atomic_sub(count, &q->nr_buf_used);
+               if (atomic_sub_return(count, &q->nr_buf_used) == 0)
+                       qperf_inc(q, inbound_queue_full);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals_error(q, count);
                break;
index 9ae1380cbc31300f5e251f03e6027ad903b2d666..98f3cfdc0d027dd0c0e7bcd8d05be86c0fc3a09d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * qdio queue initialization
  *
index 1fa53ecdc2aaa2ec1a81b7bf65b5d0dcf32a16c2..6bca1d5455d4f6ce1997d39d09792a90e65511a0 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Recognize and maintain s390 storage class memory.
  *
index 82f05c4b8c526f73a52aed819eacb33cdfd18e3a..ea6a2d0b2894decac95c3421c544183ee89c3383 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * VFIO based Physical Subchannel device driver
  *
index faeba9db3d95999526fdf2ab0667751cd82ab1e0..48d55dc9e98648738b78f3dbc311ea3e141573cd 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Copyright IBM Corp. 2006, 2012
  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -7,20 +8,6 @@
  *           Holger Dengler <hd@linux.vnet.ibm.com>
  *
  * Adjunct processor bus.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "ap"
index 7e45c4d08cad40e9124913abd79b715312cbd990..e0827eaa42f1dda711ed08fbf8d61dc96b65630f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Copyright IBM Corp. 2006, 2012
  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -7,20 +8,6 @@
  *           Holger Dengler <hd@linux.vnet.ibm.com>
  *
  * Adjunct processor bus header file.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _AP_BUS_H_
index 8dda5bb34a2f2710c6d0f8fc40b291d3e848d99e..e7c2e4f9529ac6bab55a8df8f854c8ed64442cdc 100644 (file)
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  pkey device driver
  *
  *  Copyright IBM Corp. 2017
  *  Author(s): Harald Freudenberger
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  */
 
 #define KMSG_COMPONENT "pkey"
index b5f4006198b9e0d977b04c2c08d7626c48056569..ce15f101ee282701cdf55ac0ddba557001c737fc 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
@@ -218,8 +205,8 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
        weight += atomic_read(&zq->load);
        pref_weight += atomic_read(&pref_zq->load);
        if (weight == pref_weight)
-               return &zq->queue->total_request_count >
-                       &pref_zq->queue->total_request_count;
+               return zq->queue->total_request_count >
+                       pref_zq->queue->total_request_count;
        return weight > pref_weight;
 }
 
index 73541a798db7a4a1c41bd0e555f33bd1a1bbbbde..9fff8912f6e3b05bd7b2c76a529e5a99d6aec863 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_API_H_
index f85dacf1c28442a38dac33978a20d412ceaf8c42..233e1e695208b9b870edb4259127c41e2ee3357a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
 #include <linux/module.h>
index 12cff6262566b5f4c1960b5497e37da7025ad7ad..011d61d8a4ae5869e7d41d1654a218c5f53f96bc 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -7,20 +8,6 @@
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_CCA_KEY_H_
index b97c5d5ee5a4aba9e70f88f791b674046092a6f7..e701194d36115c06d4435df2b1dcc4dbc9cc103e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
index 0dce4b9af184114ecdc13bf3741ab167dfb402d8..c3c116777c937cd4fafbe974f765cd5725327f27 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -7,20 +8,6 @@
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_CEX2A_H_
index e2eebc775a37a0e80ced5520cb5446d85e33fb23..f305538334adb14f0dd296fd2466cb478a2ba3ba 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  Copyright IBM Corp. 2012
  *  Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
index 13df60209ed33a05604e34e381d40a50b1c40904..01598d83c60a0a1c478c1ade86ea603feb05eab0 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -7,20 +8,6 @@
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_ERROR_H_
index db5bde47dfb0d17b49a8dad9eeec8279c2a6e226..afe1b2bcd7ecf5e211712b567ca186f3a32e4a57 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "zcrypt"
index 5cc280318ee705e9e64fe035b9ccb32c8aa82a40..0a36545cfb8eeb09da1f2136ba31adde5b54775d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -8,20 +9,6 @@
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_MSGTYPE50_H_
index 785620d3050433e33a2af975595d6969e067e45e..f54bef4a928e90b34e7158d97f1d1db998617bf3 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "zcrypt"
index 7a0d5b57821f07868c9af78da9873c1eca997c2e..d314f4525518b63693d70df42e768a8dc9e8af33 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -8,20 +9,6 @@
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_MSGTYPE6_H_
index 600604782b65e972705d01984568134949e61039..159b0a0dd211b9a561cad4d326c5d481b1a8418d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
index eacafc8962f204377d2c95f8d9a5bfd032d16eb2..d678a3af83a7baa9dda7b1dfdcba632014b975a1 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -8,20 +9,6 @@
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_PCIXCC_H_
index 4742be0eec24f8ca87787105eada3974ab61e4ab..720434e18007e3a8e1c9e5228c4841ba0cb782a5 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
 #include <linux/module.h>
index b2837b1c70b750c229c9c42265dd232cedfa73e8..a782a207ad31c31f996833ed3d0ef3aa8ff04238 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 menu "S/390 network device drivers"
        depends on NETDEVICES && S390
 
index be9f172185310ac081d6a44f0e061868bc5885d3..7ce98b70cad38bf55be1fd4a15bdaa62761ff159 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2001, 2009
  * Author(s):
index 8c14c6c3ad3d65fa5f0524d49720c1d3321246f3..eb07862bd36a03f5d043ee1fb492461695a506ee 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /**
  * A generic FSM based on fsm used in isdn4linux
  *
@@ -129,8 +130,9 @@ fsm_getstate_str(fsm_instance *fi)
 }
 
 static void
-fsm_expire_timer(fsm_timer *this)
+fsm_expire_timer(struct timer_list *t)
 {
+       fsm_timer *this = from_timer(this, t, tl);
 #if FSM_TIMER_DEBUG
        printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
               this->fi->name, this);
@@ -142,13 +144,11 @@ void
 fsm_settimer(fsm_instance *fi, fsm_timer *this)
 {
        this->fi = fi;
-       this->tl.function = (void *)fsm_expire_timer;
-       this->tl.data = (long)this;
 #if FSM_TIMER_DEBUG
        printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name,
               this);
 #endif
-       init_timer(&this->tl);
+       timer_setup(&this->tl, fsm_expire_timer, 0);
 }
 
 void
@@ -170,7 +170,7 @@ fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg)
               this->fi->name, this, millisec);
 #endif
 
-       setup_timer(&this->tl, (void *)fsm_expire_timer, (long)this);
+       timer_setup(&this->tl, fsm_expire_timer, 0);
        this->expire_event = event;
        this->event_arg = arg;
        this->tl.expires = jiffies + (millisec * HZ) / 1000;
@@ -189,7 +189,7 @@ fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg)
 #endif
 
        del_timer(&this->tl);
-       setup_timer(&this->tl, (void *)fsm_expire_timer, (long)this);
+       timer_setup(&this->tl, fsm_expire_timer, 0);
        this->expire_event = event;
        this->event_arg = arg;
        this->tl.expires = jiffies + (millisec * HZ) / 1000;
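
The fsm.c hunks above follow the kernel-wide timer API conversion: the callback now takes a struct timer_list * and recovers its enclosing object with from_timer(), while timer_setup() replaces init_timer()/setup_timer() and the manual .function/.data assignments. A minimal sketch of the pattern, using a hypothetical my_obj structure that merely stands in for fsm_timer:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_obj {
	struct timer_list tl;	/* embedded timer */
	int expired;
};

/* New-style callback: receives the timer itself, not an unsigned long cookie. */
static void my_obj_timeout(struct timer_list *t)
{
	struct my_obj *obj = from_timer(obj, t, tl);

	obj->expired = 1;
}

static void my_obj_arm(struct my_obj *obj)
{
	/* Replaces init_timer()/setup_timer() plus .function/.data setup. */
	timer_setup(&obj->tl, my_obj_timeout, 0);
	mod_timer(&obj->tl, jiffies + HZ);
}

The same mechanical transformation shows up again in the arcmsr, fas216, bnx2fc, fnic, esas2r, fcoe and bfad hunks later in this pull.
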
index e131a03262ad7bcb3041a9e1e98da574930c727b..92ae84a927fcf391abebaccbd907d5d962ed9bed 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  Linux for S/390 Lan Channel Station Network Driver
  *
@@ -7,20 +8,6 @@
  *            Rewritten by
  *                     Frank Pavlic <fpavlic@de.ibm.com> and
  *                     Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.         See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT         "lcs"
index b9c7c1e61da296f743f7bbd6f5d30e43d5940117..5ce2424ca7290397e43b55c66581071424da99b9 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * IUCV network driver
  *
  *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  *    Martin Schwidefsky (schwidefsky@de.ibm.com)
  *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  */
 
 #define KMSG_COMPONENT "netiucv"
index 9cd569ef43ecfbaaf10680b3ac091267c1d1577f..badf42acbf95b8104d6167dfc811640b1ec311df 100644 (file)
@@ -565,9 +565,9 @@ enum qeth_cq {
 };
 
 struct qeth_ipato {
-       int enabled;
-       int invert4;
-       int invert6;
+       bool enabled;
+       bool invert4;
+       bool invert6;
        struct list_head entries;
 };
 
@@ -987,6 +987,9 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
 int qeth_set_features(struct net_device *, netdev_features_t);
 void qeth_recover_features(struct net_device *dev);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+                                     struct net_device *dev,
+                                     netdev_features_t features);
 int qeth_vm_request_mac(struct qeth_card *card);
 int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);
 
index 49b9efeba1bda1e2390289b8ba536fa7bad0542c..3614df68830f8f6a4abd756c52ca4e1e72e8e1d1 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 #include <linux/mii.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <linux/netdev_features.h>
+#include <linux/skbuff.h>
+
 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
 
@@ -1474,9 +1480,9 @@ static int qeth_setup_card(struct qeth_card *card)
        qeth_set_intial_options(card);
        /* IP address takeover */
        INIT_LIST_HEAD(&card->ipato.entries);
-       card->ipato.enabled = 0;
-       card->ipato.invert4 = 0;
-       card->ipato.invert6 = 0;
+       card->ipato.enabled = false;
+       card->ipato.invert4 = false;
+       card->ipato.invert6 = false;
        /* init QDIO stuff */
        qeth_init_qdio_info(card);
        INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
@@ -5380,6 +5386,13 @@ out:
 }
 EXPORT_SYMBOL_GPL(qeth_poll);
 
+static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
+{
+       if (!cmd->hdr.return_code)
+               cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+       return cmd->hdr.return_code;
+}
+
 int qeth_setassparms_cb(struct qeth_card *card,
                        struct qeth_reply *reply, unsigned long data)
 {
@@ -6236,7 +6249,7 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
                                (struct qeth_checksum_cmd *)reply->param;
 
        QETH_CARD_TEXT(card, 4, "chkdoccb");
-       if (cmd->hdr.return_code)
+       if (qeth_setassparms_inspect_rc(cmd))
                return 0;
 
        memset(chksum_cb, 0, sizeof(*chksum_cb));
@@ -6438,6 +6451,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(qeth_fix_features);
 
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+                                     struct net_device *dev,
+                                     netdev_features_t features)
+{
+       /* GSO segmentation builds skbs with
+        *      a (small) linear part for the headers, and
+        *      page frags for the data.
+        * Compared to a linear skb, the header-only part consumes an
+        * additional buffer element. This reduces buffer utilization, and
+        * hurts throughput. So compress small segments into one element.
+        */
+       if (netif_needs_gso(skb, features)) {
+               /* match skb_segment(): */
+               unsigned int doffset = skb->data - skb_mac_header(skb);
+               unsigned int hsize = skb_shinfo(skb)->gso_size;
+               unsigned int hroom = skb_headroom(skb);
+
+               /* linearize only if resulting skb allocations are order-0: */
+               if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
+                       features &= ~NETIF_F_SG;
+       }
+
+       return vlan_features_check(skb, features);
+}
+EXPORT_SYMBOL_GPL(qeth_features_check);
+
 static int __init qeth_core_init(void)
 {
        int rc;
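
The comment block in qeth_features_check() above carries the reasoning; the check itself is plain alignment arithmetic against an order-0 allocation limit. A small userspace sketch of that arithmetic, where PAGE_SIZE, the cache-line size, the skb_shared_info overhead and the header sizes are all assumed example values (the driver takes the real ones from the kernel headers):

#include <stdio.h>

#define PAGE_SIZE	4096u
#define SMP_CACHE_BYTES	64u			/* assumed cache-line size */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1u))
#define SHINFO_SIZE	320u			/* assumed sizeof(struct skb_shared_info) */
#define SKB_MAX_HEAD_0	(PAGE_SIZE - ALIGN_UP(SHINFO_SIZE, SMP_CACHE_BYTES))

int main(void)
{
	unsigned int hroom = 64, doffset = 66;	/* assumed headroom and header offset */
	unsigned int gso_sizes[] = { 1400, 8000 };

	for (unsigned int i = 0; i < 2; i++) {
		unsigned int need = ALIGN_UP(hroom + doffset + gso_sizes[i],
					     SMP_CACHE_BYTES);

		/* mirrors SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0) */
		printf("gso_size %u -> %s\n", gso_sizes[i],
		       need <= SKB_MAX_HEAD_0 ? "clear NETIF_F_SG (linearize)"
					      : "keep NETIF_F_SG");
	}
	return 0;
}

With these example numbers a 1400-byte segment fits an order-0 head and gets linearized, while an 8000-byte segment keeps scatter-gather.
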
index b22ed2a57acd94661c97b77966246f0785c77ae2..ae81534de91228910fd877fce0e1e262cc24fddf 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
index d2537c09126d676a77eee6d2bbacb54030c2f657..5863ea170ff26447630ed22acd5174db23bf0ee2 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
@@ -960,6 +961,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
        .ndo_stop               = qeth_l2_stop,
        .ndo_get_stats          = qeth_get_stats,
        .ndo_start_xmit         = qeth_l2_hard_start_xmit,
+       .ndo_features_check     = qeth_features_check,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l2_set_rx_mode,
        .ndo_do_ioctl           = qeth_do_ioctl,
@@ -1010,6 +1012,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
        if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
                card->dev->hw_features = NETIF_F_SG;
                card->dev->vlan_features = NETIF_F_SG;
+               card->dev->features |= NETIF_F_SG;
                /* OSA 3S and earlier has no RX/TX support */
                if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
                        card->dev->hw_features |= NETIF_F_IP_CSUM;
@@ -1028,8 +1031,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 
        card->info.broadcast_capable = 1;
        qeth_l2_request_initial_mac(card);
-       card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-                                 PAGE_SIZE;
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
        netif_carrier_off(card->dev);
index 194ae9b577ccaeda712fece16868115c13fc38bb..e5833837b799eceb069e708fe88394731eecdda4 100644 (file)
@@ -82,7 +82,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
                        const u8 *);
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
+void qeth_l3_update_ipato(struct qeth_card *card);
 struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
 int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
 int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
index aadd384316a375f15506cada437a818f4a621239..ef0961e186869dd6b8ab06c0ebd901524bc41ffc 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
@@ -163,8 +164,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
        }
 }
 
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
-                                               struct qeth_ipaddr *addr)
+static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+                                            struct qeth_ipaddr *addr)
 {
        struct qeth_ipato_entry *ipatoe;
        u8 addr_bits[128] = {0, };
@@ -173,6 +174,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
 
        if (!card->ipato.enabled)
                return 0;
+       if (addr->type != QETH_IP_TYPE_NORMAL)
+               return 0;
 
        qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
                                  (addr->proto == QETH_PROT_IPV4)? 4:16);
@@ -289,8 +292,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
                memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
                addr->ref_counter = 1;
 
-               if (addr->type == QETH_IP_TYPE_NORMAL  &&
-                               qeth_l3_is_addr_covered_by_ipato(card, addr)) {
+               if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
                        QETH_CARD_TEXT(card, 2, "tkovaddr");
                        addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
                }
@@ -604,6 +606,27 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
 /*
  * IP address takeover related functions
  */
+
+/**
+ * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
+ *
+ * Caller must hold ip_lock.
+ */
+void qeth_l3_update_ipato(struct qeth_card *card)
+{
+       struct qeth_ipaddr *addr;
+       unsigned int i;
+
+       hash_for_each(card->ip_htable, i, addr, hnode) {
+               if (addr->type != QETH_IP_TYPE_NORMAL)
+                       continue;
+               if (qeth_l3_is_addr_covered_by_ipato(card, addr))
+                       addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
+               else
+                       addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG;
+       }
+}
+
 static void qeth_l3_clear_ipato_list(struct qeth_card *card)
 {
        struct qeth_ipato_entry *ipatoe, *tmp;
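
The kernel-doc above states the locking rule; every caller added by this patch follows the same shape. A minimal sketch of that calling convention (example_set_invert4() is a hypothetical helper, mirroring the sysfs hunks further down):

static void example_set_invert4(struct qeth_card *card, bool invert)
{
	spin_lock_bh(&card->ip_lock);
	card->ipato.invert4 = invert;	/* any IPATO configuration change */
	qeth_l3_update_ipato(card);	/* re-evaluate takeover flags under the lock */
	spin_unlock_bh(&card->ip_lock);
}
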
@@ -615,6 +638,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
                kfree(ipatoe);
        }
 
+       qeth_l3_update_ipato(card);
        spin_unlock_bh(&card->ip_lock);
 }
 
@@ -639,8 +663,10 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
                }
        }
 
-       if (!rc)
+       if (!rc) {
                list_add_tail(&new->entry, &card->ipato.entries);
+               qeth_l3_update_ipato(card);
+       }
 
        spin_unlock_bh(&card->ip_lock);
 
@@ -663,6 +689,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
                            (proto == QETH_PROT_IPV4)? 4:16) &&
                    (ipatoe->mask_bits == mask_bits)) {
                        list_del(&ipatoe->entry);
+                       qeth_l3_update_ipato(card);
                        kfree(ipatoe);
                }
        }
@@ -1376,6 +1403,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 
                tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
                memcpy(tmp->mac, buf, sizeof(tmp->mac));
+               tmp->is_multicast = 1;
 
                ipm = qeth_l3_ip_from_hash(card, tmp);
                if (ipm) {
@@ -2917,6 +2945,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
        .ndo_stop               = qeth_l3_stop,
        .ndo_get_stats          = qeth_get_stats,
        .ndo_start_xmit         = qeth_l3_hard_start_xmit,
+       .ndo_features_check     = qeth_features_check,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l3_set_multicast_list,
        .ndo_do_ioctl           = qeth_do_ioctl,
@@ -2957,6 +2986,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                                card->dev->vlan_features = NETIF_F_SG |
                                        NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
                                        NETIF_F_TSO;
+                               card->dev->features |= NETIF_F_SG;
                        }
                }
        } else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -2984,8 +3014,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                                NETIF_F_HW_VLAN_CTAG_RX |
                                NETIF_F_HW_VLAN_CTAG_FILTER;
        netif_keep_dst(card->dev);
-       card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-                                 PAGE_SIZE;
+       netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
+                                         PAGE_SIZE);
 
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
index bd12fdf678bec32d7f40ff38e5c66a4c1e26f402..6ea2b528a64efbabee5782da7bf8c5d1bce3ff4e 100644 (file)
@@ -370,8 +370,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
-       struct qeth_ipaddr *addr;
-       int i, rc = 0;
+       bool enable;
+       int rc = 0;
 
        if (!card)
                return -EINVAL;
@@ -384,25 +384,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
        }
 
        if (sysfs_streq(buf, "toggle")) {
-               card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
-       } else if (sysfs_streq(buf, "1")) {
-               card->ipato.enabled = 1;
-               hash_for_each(card->ip_htable, i, addr, hnode) {
-                               if ((addr->type == QETH_IP_TYPE_NORMAL) &&
-                               qeth_l3_is_addr_covered_by_ipato(card, addr))
-                                       addr->set_flags |=
-                                       QETH_IPA_SETIP_TAKEOVER_FLAG;
-                       }
-       } else if (sysfs_streq(buf, "0")) {
-               card->ipato.enabled = 0;
-               hash_for_each(card->ip_htable, i, addr, hnode) {
-                       if (addr->set_flags &
-                       QETH_IPA_SETIP_TAKEOVER_FLAG)
-                               addr->set_flags &=
-                               ~QETH_IPA_SETIP_TAKEOVER_FLAG;
-                       }
-       } else
+               enable = !card->ipato.enabled;
+       } else if (kstrtobool(buf, &enable)) {
                rc = -EINVAL;
+               goto out;
+       }
+
+       if (card->ipato.enabled != enable) {
+               card->ipato.enabled = enable;
+               spin_lock_bh(&card->ip_lock);
+               qeth_l3_update_ipato(card);
+               spin_unlock_bh(&card->ip_lock);
+       }
 out:
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
@@ -428,20 +421,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
                                const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
+       bool invert;
        int rc = 0;
 
        if (!card)
                return -EINVAL;
 
        mutex_lock(&card->conf_mutex);
-       if (sysfs_streq(buf, "toggle"))
-               card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
-       else if (sysfs_streq(buf, "1"))
-               card->ipato.invert4 = 1;
-       else if (sysfs_streq(buf, "0"))
-               card->ipato.invert4 = 0;
-       else
+       if (sysfs_streq(buf, "toggle")) {
+               invert = !card->ipato.invert4;
+       } else if (kstrtobool(buf, &invert)) {
                rc = -EINVAL;
+               goto out;
+       }
+
+       if (card->ipato.invert4 != invert) {
+               card->ipato.invert4 = invert;
+               spin_lock_bh(&card->ip_lock);
+               qeth_l3_update_ipato(card);
+               spin_unlock_bh(&card->ip_lock);
+       }
+out:
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
 }
@@ -607,20 +607,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
+       bool invert;
        int rc = 0;
 
        if (!card)
                return -EINVAL;
 
        mutex_lock(&card->conf_mutex);
-       if (sysfs_streq(buf, "toggle"))
-               card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
-       else if (sysfs_streq(buf, "1"))
-               card->ipato.invert6 = 1;
-       else if (sysfs_streq(buf, "0"))
-               card->ipato.invert6 = 0;
-       else
+       if (sysfs_streq(buf, "toggle")) {
+               invert = !card->ipato.invert6;
+       } else if (kstrtobool(buf, &invert)) {
                rc = -EINVAL;
+               goto out;
+       }
+
+       if (card->ipato.invert6 != invert) {
+               card->ipato.invert6 = invert;
+               spin_lock_bh(&card->ip_lock);
+               qeth_l3_update_ipato(card);
+               spin_unlock_bh(&card->ip_lock);
+       }
+out:
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
 }
index a851d34c642b5d26866fafdde925eb48ddf61003..3b0c8b8a7634d18df62ece8f94936ed39666a2af 100644 (file)
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * IUCV special message driver
  *
  * Copyright IBM Corp. 2003, 2009
  *
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
index 32515a201bbc65c4a0c6e9d48282a188ff16cdda..0a263999f7ae44b181ac7dc786908698c42a9974 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Deliver z/VM CP special messages (SMSG) as uevents.
  *
index 9259039e886dfae36e53503cc722309ab7fcb244..9dda431ec8f3ff036a94e0bad7311721cf86e922 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the S/390 specific device drivers
 #
index 84752152d41fd682c5ae350ddb4bd3ac80d47cde..a3a8c8d9d7171a8d6994548212084ff3380ba493 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * zfcp device driver
  *
index 51b81c0a06520bfaa55446aa443b3d394f4c120e..b12cb81ad8a23a84beac4f3455f9234b3dd7e0a9 100644 (file)
@@ -34,7 +34,7 @@ static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
                                 unsigned long timeout)
 {
-       fsf_req->timer.function = (TIMER_FUNC_TYPE)zfcp_fsf_request_timeout_handler;
+       fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
        fsf_req->timer.expires = jiffies + timeout;
        add_timer(&fsf_req->timer);
 }
@@ -42,7 +42,7 @@ static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
 {
        BUG_ON(!fsf_req->erp_action);
-       fsf_req->timer.function = (TIMER_FUNC_TYPE)zfcp_erp_timeout_handler;
+       fsf_req->timer.function = zfcp_erp_timeout_handler;
        fsf_req->timer.expires = jiffies + 30 * HZ;
        add_timer(&fsf_req->timer);
 }
index f68af1f317f15460d489c9b8324ebc4d06142ca9..2dc4d9aab634592363138cb69a07f6885fb25438 100644 (file)
@@ -1,9 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
 # Makefile for kvm guest drivers on s390
 #
 # Copyright IBM Corp. 2008
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (version 2 only)
-# as published by the Free Software Foundation.
 
 obj-$(CONFIG_S390_GUEST) += virtio_ccw.o
index b18fe2014cf2195a193186c08c956dc8e5cfe7e3..ba2e0856d22cdfb5396457366276e01bc9ac7851 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * ccw based virtio transport
  *
  * Copyright IBM Corp. 2012, 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  */
 
index 403a639574e5ea10c5c8500141204ebc513bd7c1..d52265416da2af0da11cca770304f33ab203ad20 100644 (file)
@@ -1673,6 +1673,7 @@ struct aac_dev
        struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
        u8                      adapter_shutdown;
        u32                     handle_pci_error;
+       bool                    init_reset;
 };
 
 #define aac_adapter_interrupt(dev) \
@@ -1724,6 +1725,7 @@ struct aac_dev
 #define FIB_CONTEXT_FLAG_NATIVE_HBA            (0x00000010)
 #define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF        (0x00000020)
 #define FIB_CONTEXT_FLAG_SCSI_CMD      (0x00000040)
+#define FIB_CONTEXT_FLAG_EH_RESET      (0x00000080)
 
 /*
  *     Define the command values
index 525a652dab48e9923af67ce6e884154855df308c..80a8cb26cdea43c8252bc5afe68b2b72540ff045 100644 (file)
@@ -467,35 +467,6 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
        return 0;
 }
 
-#ifdef CONFIG_EEH
-static inline int aac_check_eeh_failure(struct aac_dev *dev)
-{
-       /* Check for an EEH failure for the given
-        * device node. Function eeh_dev_check_failure()
-        * returns 0 if there has not been an EEH error
-        * otherwise returns a non-zero value.
-        *
-        * Need to be called before any PCI operation,
-        * i.e.,before aac_adapter_check_health()
-        */
-       struct eeh_dev *edev = pci_dev_to_eeh_dev(dev->pdev);
-
-       if (eeh_dev_check_failure(edev)) {
-               /* The EEH mechanisms will handle this
-                * error and reset the device if
-                * necessary.
-                */
-               return 1;
-       }
-       return 0;
-}
-#else
-static inline int aac_check_eeh_failure(struct aac_dev *dev)
-{
-       return 0;
-}
-#endif
-
 /*
  *     Define the highest level of host to adapter communication routines.
  *     These routines will support host to adapter FS communication. These
@@ -701,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
                                        return -ETIMEDOUT;
                                }
 
-                               if (aac_check_eeh_failure(dev))
+                               if (unlikely(pci_channel_offline(dev->pdev)))
                                        return -EFAULT;
 
                                if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -801,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 
                spin_unlock_irqrestore(&fibptr->event_lock, flags);
 
-               if (aac_check_eeh_failure(dev))
+               if (unlikely(pci_channel_offline(dev->pdev)))
                        return -EFAULT;
 
                fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
@@ -1583,6 +1554,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
         * will ensure that i/o is quiesced and the card is flushed in that
         * case.
         */
+       aac_free_irq(aac);
        aac_fib_map_free(aac);
        dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
                          aac->comm_phys);
@@ -1590,7 +1562,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
        aac->comm_phys = 0;
        kfree(aac->queues);
        aac->queues = NULL;
-       aac_free_irq(aac);
        kfree(aac->fsa_dev);
        aac->fsa_dev = NULL;
 
@@ -2511,8 +2482,8 @@ int aac_command_thread(void *data)
                        /* Synchronize our watches */
                        if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
                         && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
-                               difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ)
-                                 + NSEC_PER_SEC / 2) / NSEC_PER_SEC;
+                               difference = HZ + HZ / 2 -
+                                            now.tv_nsec / (NSEC_PER_SEC / HZ);
                        else {
                                if (now.tv_nsec > NSEC_PER_SEC / 2)
                                        ++now.tv_sec;
@@ -2536,6 +2507,10 @@ int aac_command_thread(void *data)
                if (kthread_should_stop())
                        break;
 
+               /*
+                * we probably want usleep_range() here instead of the
+                * jiffies computation
+                */
                schedule_timeout(difference);
 
                if (kthread_should_stop())
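
The comment added above only names the alternative API. A hedged sketch of what it could look like; aac_periodic_sleep() is a hypothetical helper, not part of this patch, and whether a range-based sleep suits a delay this long is exactly the open question the comment records:

#include <linux/delay.h>
#include <linux/jiffies.h>

/* Possible replacement for schedule_timeout(difference): sleep for roughly
 * 'difference' jiffies with a millisecond of slack. */
static void aac_periodic_sleep(long difference)
{
	unsigned long us = jiffies_to_usecs(difference);

	usleep_range(us, us + 1000);
}
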
index c9252b138c1fe0e21d217b0fb305cc45afc1545a..d55332de08f91ad8e54e1296867569a8fa109a34 100644 (file)
@@ -1037,7 +1037,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
                        info = &aac->hba_map[bus][cid];
                        if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
                            info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
-                               fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+                               fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
                                cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
                        }
                }
@@ -1680,6 +1680,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        aac->cardtype = index;
        INIT_LIST_HEAD(&aac->entry);
 
+       if (aac_reset_devices || reset_devices)
+               aac->init_reset = true;
+
        aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
        if (!aac->fibs)
                goto out_free_host;
index 93ef7c37e568e0e2ca4a38d8a08dd1b987482626..6201666941717042e3c7a2a1cd5f459b41b3360c 100644 (file)
@@ -561,11 +561,16 @@ int _aac_rx_init(struct aac_dev *dev)
        dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
        dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
        dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
-       if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
-         !aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
-               /* Make sure the Hardware FIFO is empty */
-               while ((++restart < 512) &&
-                 (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
+
+       if (((status & 0x0c) != 0x0c) || dev->init_reset) {
+               dev->init_reset = false;
+               if (!aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) {
+                       /* Make sure the Hardware FIFO is empty */
+                       while ((++restart < 512) &&
+                              (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
+               }
+       }
+
        /*
         *      Check to see if the board panic'd while booting.
         */
index 0c9361c87ec8de8b853f6ccaa6132663a4b982bd..fde6b6aa86e38a1af487d94b1117f3ffc340c5a5 100644 (file)
@@ -868,9 +868,13 @@ int aac_src_init(struct aac_dev *dev)
        /* Failure to reset here is an option ... */
        dev->a_ops.adapter_sync_cmd = src_sync_cmd;
        dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
-       if ((aac_reset_devices || reset_devices) &&
-               !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
-               ++restart;
+
+       if (dev->init_reset) {
+               dev->init_reset = false;
+               if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
+                       ++restart;
+       }
+
        /*
         *      Check to see if the board panic'd while booting.
         */
@@ -1014,9 +1018,13 @@ int aac_srcv_init(struct aac_dev *dev)
        /* Failure to reset here is an option ... */
        dev->a_ops.adapter_sync_cmd = src_sync_cmd;
        dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
-       if ((aac_reset_devices || reset_devices) &&
-               !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
-               ++restart;
+
+       if (dev->init_reset) {
+               dev->init_reset = false;
+               if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
+                       ++restart;
+       }
+
        /*
         *      Check to see if flash update is running.
         *      Wait for the adapter to be up and running. Wait up to 5 minutes
index 5402b85b0bdc397361e4d9ba965f995675b81eeb..2dbc8330d7d34b4e9904e88f6ae79778c50cd07c 100644 (file)
@@ -1175,7 +1175,7 @@ static void asd_start_scb_timers(struct list_head *list)
        struct asd_ascb *ascb;
        list_for_each_entry(ascb, list, list) {
                if (!ascb->uldd_timer) {
-                       ascb->timer.function = (TIMER_FUNC_TYPE)asd_ascb_timedout;
+                       ascb->timer.function = asd_ascb_timedout;
                        ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
                        add_timer(&ascb->timer);
                }
index 4637119c09d8aad052c566cf6e9fb2d16c7e87f3..2a01702d5ba77ffcdee48e56d47c71646fd2f8fe 100644 (file)
@@ -42,7 +42,7 @@ static int asd_enqueue_internal(struct asd_ascb *ascb,
        ascb->tasklet_complete = tasklet_complete;
        ascb->uldd_timer = 1;
 
-       ascb->timer.function = (TIMER_FUNC_TYPE)timed_out;
+       ascb->timer.function = timed_out;
        ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
 
        add_timer(&ascb->timer);
index af032c46ec0e1950deecbc69a33c24035bb4ecae..21f6421536a05f5c20c72277733bc9aecd6b5b4e 100644 (file)
@@ -101,7 +101,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
-static void arcmsr_request_device_map(unsigned long pacb);
+static void arcmsr_request_device_map(struct timer_list *t);
 static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
 static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
 static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
@@ -837,10 +837,8 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        atomic_set(&acb->rq_map_token, 16);
        atomic_set(&acb->ante_token_value, 16);
        acb->fw_flag = FW_NORMAL;
-       init_timer(&acb->eternal_timer);
+       timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
        acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
-       acb->eternal_timer.data = (unsigned long) acb;
-       acb->eternal_timer.function = &arcmsr_request_device_map;
        add_timer(&acb->eternal_timer);
        if(arcmsr_alloc_sysfs_attr(acb))
                goto out_free_sysfs;
@@ -930,10 +928,8 @@ static int arcmsr_resume(struct pci_dev *pdev)
        atomic_set(&acb->rq_map_token, 16);
        atomic_set(&acb->ante_token_value, 16);
        acb->fw_flag = FW_NORMAL;
-       init_timer(&acb->eternal_timer);
+       timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
        acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
-       acb->eternal_timer.data = (unsigned long) acb;
-       acb->eternal_timer.function = &arcmsr_request_device_map;
        add_timer(&acb->eternal_timer);
        return 0;
 controller_stop:
@@ -3459,9 +3455,9 @@ static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
        }
 }
 
-static void arcmsr_request_device_map(unsigned long pacb)
+static void arcmsr_request_device_map(struct timer_list *t)
 {
-       struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
+       struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
        switch (acb->adapter_type) {
                case ACB_ADAPTER_TYPE_A: {
                        arcmsr_hbaA_request_device_map(acb);
index 24388795ee9a30ea933a3c11b60009fcff7b656f..f4775ca70babac8e1b9bec3dba86220ee59937fb 100644 (file)
@@ -2318,9 +2318,9 @@ DEF_SCSI_QCMD(fas216_noqueue_command)
  * Error handler timeout function.  Indicate that we timed out,
  * and wake up any error handler process so it can continue.
  */
-static void fas216_eh_timer(unsigned long data)
+static void fas216_eh_timer(struct timer_list *t)
 {
-       FAS216_Info *info = (FAS216_Info *)data;
+       FAS216_Info *info = from_timer(info, t, eh_timer);
 
        fas216_log(info, LOG_ERROR, "error handling timed out\n");
 
@@ -2849,9 +2849,7 @@ int fas216_init(struct Scsi_Host *host)
        info->rst_dev_status = -1;
        info->rst_bus_status = -1;
        init_waitqueue_head(&info->eh_wait);
-       init_timer(&info->eh_timer);
-       info->eh_timer.data  = (unsigned long)info;
-       info->eh_timer.function = fas216_eh_timer;
+       timer_setup(&info->eh_timer, fas216_eh_timer, 0);
        
        spin_lock_init(&info->host_lock);
 
index be96aa1e507722da1665f967027b0fb5a16e48e4..b3cfdd5f4d1c3812baf40f34c936b53f3b34acdf 100644 (file)
@@ -5279,7 +5279,7 @@ static void beiscsi_hw_health_check(struct timer_list *t)
                if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
                        return;
                /* modify this timer to check TPE */
-               phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_tpe_check;
+               phba->hw_check.function = beiscsi_hw_tpe_check;
        }
 
        mod_timer(&phba->hw_check,
@@ -5367,7 +5367,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
         * Timer function gets modified for TPE detection.
         * Always reinit to do health check first.
         */
-       phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_health_check;
+       phba->hw_check.function = beiscsi_hw_health_check;
        mod_timer(&phba->hw_check,
                  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
        return 0;
index 5caf5f3ff642282ee13776e9df9ca9a18f494536..cf04666868045d6e632dfda9a6fff39c4e8d88b1 100644 (file)
@@ -692,9 +692,9 @@ ext:
 }
 
 void
-bfad_bfa_tmo(unsigned long data)
+bfad_bfa_tmo(struct timer_list *t)
 {
-       struct bfad_s         *bfad = (struct bfad_s *) data;
+       struct bfad_s         *bfad = from_timer(bfad, t, hal_tmo);
        unsigned long   flags;
        struct list_head               doneq;
 
@@ -719,9 +719,7 @@ bfad_bfa_tmo(unsigned long data)
 void
 bfad_init_timer(struct bfad_s *bfad)
 {
-       init_timer(&bfad->hal_tmo);
-       bfad->hal_tmo.function = bfad_bfa_tmo;
-       bfad->hal_tmo.data = (unsigned long)bfad;
+       timer_setup(&bfad->hal_tmo, bfad_bfa_tmo, 0);
 
        mod_timer(&bfad->hal_tmo,
                  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
index 72ca2a2e08e259b70be45e8880354bf8291ef9c4..b2fa195adc7a3a6e405a6c23132060952916138f 100644 (file)
@@ -3135,7 +3135,8 @@ bfad_im_bsg_vendor_request(struct bsg_job *job)
        struct fc_bsg_request *bsg_request = job->request;
        struct fc_bsg_reply *bsg_reply = job->reply;
        uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
-       struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
+       struct Scsi_Host *shost = fc_bsg_to_shost(job);
+       struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
        struct bfad_s *bfad = im_port->bfad;
        void *payload_kbuf;
        int rc = -EINVAL;
@@ -3350,7 +3351,8 @@ int
 bfad_im_bsg_els_ct_request(struct bsg_job *job)
 {
        struct bfa_bsg_data *bsg_data;
-       struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
+       struct Scsi_Host *shost = fc_bsg_to_shost(job);
+       struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
        struct bfad_s *bfad = im_port->bfad;
        bfa_bsg_fcpt_t *bsg_fcpt;
        struct bfad_fcxp    *drv_fcxp;
index cfcfff48e8e16e3fb2b66c0d491c0d612a8cdd38..4fe980a6441f5bbed6bda0892237a8c342653460 100644 (file)
@@ -314,7 +314,7 @@ int         bfad_setup_intr(struct bfad_s *bfad);
 void           bfad_remove_intr(struct bfad_s *bfad);
 void           bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg);
 bfa_status_t   bfad_hal_mem_alloc(struct bfad_s *bfad);
-void           bfad_bfa_tmo(unsigned long data);
+void           bfad_bfa_tmo(struct timer_list *t);
 void           bfad_init_timer(struct bfad_s *bfad);
 int            bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
 void           bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
index 24e657a4ec80df3caf82eff0b284288acc875a28..c05d6e91e4bde9cde7e9146ad48bae69727d476f 100644 (file)
@@ -546,6 +546,7 @@ int
 bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
                        struct device *dev)
 {
+       struct bfad_im_port_pointer *im_portp;
        int error = 1;
 
        mutex_lock(&bfad_mutex);
@@ -564,7 +565,8 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
                goto out_free_idr;
        }
 
-       im_port->shost->hostdata[0] = (unsigned long)im_port;
+       im_portp = shost_priv(im_port->shost);
+       im_portp->p = im_port;
        im_port->shost->unique_id = im_port->idr_id;
        im_port->shost->this_id = -1;
        im_port->shost->max_id = MAX_FCP_TARGET;
@@ -748,7 +750,7 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
 
        sht->sg_tablesize = bfad->cfg_data.io_max_sge;
 
-       return scsi_host_alloc(sht, sizeof(unsigned long));
+       return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer));
 }
 
 void
index c81ec2a77ef5034d3fdd3c70f1466002e012be10..06ce4ba2b7bc9e6e562a20d98de2a6a99fff93d1 100644 (file)
@@ -69,6 +69,16 @@ struct bfad_im_port_s {
        struct fc_vport *fc_vport;
 };
 
+struct bfad_im_port_pointer {
+       struct bfad_im_port_s *p;
+};
+
+static inline struct bfad_im_port_s *bfad_get_im_port(struct Scsi_Host *host)
+{
+       struct bfad_im_port_pointer *im_portp = shost_priv(host);
+       return im_portp->p;
+}
+
 enum bfad_itnim_state {
        ITNIM_STATE_NONE,
        ITNIM_STATE_ONLINE,
index 59a2dfbcbc6991efb8ff2860b17cef4e198a7b46..a8ae1a019eea55eaef72823d9ed58eb376a11976 100644 (file)
@@ -14,8 +14,8 @@
  */
 
 #include "bnx2fc.h"
-static void bnx2fc_upld_timer(unsigned long data);
-static void bnx2fc_ofld_timer(unsigned long data);
+static void bnx2fc_upld_timer(struct timer_list *t);
+static void bnx2fc_ofld_timer(struct timer_list *t);
 static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
                           struct fcoe_port *port,
                           struct fc_rport_priv *rdata);
@@ -27,10 +27,10 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
                              struct bnx2fc_rport *tgt);
 static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
 
-static void bnx2fc_upld_timer(unsigned long data)
+static void bnx2fc_upld_timer(struct timer_list *t)
 {
 
-       struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+       struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer);
 
        BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
        /* fake upload completion */
@@ -40,10 +40,10 @@ static void bnx2fc_upld_timer(unsigned long data)
        wake_up_interruptible(&tgt->upld_wait);
 }
 
-static void bnx2fc_ofld_timer(unsigned long data)
+static void bnx2fc_ofld_timer(struct timer_list *t)
 {
 
-       struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+       struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer);
 
        BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
        /* NOTE: This function should never be called, as
@@ -65,7 +65,7 @@ static void bnx2fc_ofld_timer(unsigned long data)
 
 static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
 {
-       setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+       timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0);
        mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
 
        wait_event_interruptible(tgt->ofld_wait,
@@ -277,7 +277,7 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
 
 static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
 {
-       setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+       timer_setup(&tgt->upld_timer, bnx2fc_upld_timer, 0);
        mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
        wait_event_interruptible(tgt->upld_wait,
                                 (test_bit(
index babd79361a461097f93dedba4fb400d8caecf390..bf07735275a49d7720e98b4af5c4ef7c7693f72b 100644 (file)
@@ -586,8 +586,8 @@ static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);
        if (rpl->status == CPL_ERR_CONN_EXIST &&
-           csk->retry_timer.function != (TIMER_FUNC_TYPE)act_open_retry_timer) {
-               csk->retry_timer.function = (TIMER_FUNC_TYPE)act_open_retry_timer;
+           csk->retry_timer.function != act_open_retry_timer) {
+               csk->retry_timer.function = act_open_retry_timer;
                mod_timer(&csk->retry_timer, jiffies + HZ / 2);
        } else
                cxgbi_sock_fail_act_open(csk,
index 266eddf17a991b207f07f67d2fdc0877d63dfb1f..406e94312d4e9a49b0015e811f14b6a02b1177a8 100644 (file)
@@ -963,8 +963,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
        spin_lock_bh(&csk->lock);
 
        if (status == CPL_ERR_CONN_EXIST &&
-           csk->retry_timer.function != (TIMER_FUNC_TYPE)csk_act_open_retry_timer) {
-               csk->retry_timer.function = (TIMER_FUNC_TYPE)csk_act_open_retry_timer;
+           csk->retry_timer.function != csk_act_open_retry_timer) {
+               csk->retry_timer.function = csk_act_open_retry_timer;
                mod_timer(&csk->retry_timer, jiffies + HZ / 2);
        } else
                cxgbi_sock_fail_act_open(csk,
index 81f226be3e3b72067b87fe6d2553f91e7783b62f..4eb14301a497bdd7371aed487ccee2f210ac3f81 100644 (file)
@@ -1631,23 +1631,21 @@ void esas2r_adapter_tasklet(unsigned long context)
        }
 }
 
-static void esas2r_timer_callback(unsigned long context);
+static void esas2r_timer_callback(struct timer_list *t);
 
 void esas2r_kickoff_timer(struct esas2r_adapter *a)
 {
-       init_timer(&a->timer);
+       timer_setup(&a->timer, esas2r_timer_callback, 0);
 
-       a->timer.function = esas2r_timer_callback;
-       a->timer.data = (unsigned long)a;
        a->timer.expires = jiffies +
                           msecs_to_jiffies(100);
 
        add_timer(&a->timer);
 }
 
-static void esas2r_timer_callback(unsigned long context)
+static void esas2r_timer_callback(struct timer_list *t)
 {
-       struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+       struct esas2r_adapter *a = from_timer(a, t, timer);
 
        set_bit(AF2_TIMER_TICK, &a->flags2);
 
index fff6f1851dc1e56779acff511c5b6c3625a9f51c..097f37de6ce91231082353f327485d8f189d68ba 100644 (file)
@@ -49,7 +49,7 @@
 #define        FCOE_CTLR_MIN_FKA       500             /* min keep alive (mS) */
 #define        FCOE_CTLR_DEF_FKA       FIP_DEF_FKA     /* default keep alive (mS) */
 
-static void fcoe_ctlr_timeout(unsigned long);
+static void fcoe_ctlr_timeout(struct timer_list *);
 static void fcoe_ctlr_timer_work(struct work_struct *);
 static void fcoe_ctlr_recv_work(struct work_struct *);
 static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *);
@@ -156,7 +156,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
        mutex_init(&fip->ctlr_mutex);
        spin_lock_init(&fip->ctlr_lock);
        fip->flogi_oxid = FC_XID_UNKNOWN;
-       setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
+       timer_setup(&fip->timer, fcoe_ctlr_timeout, 0);
        INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
        INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work);
        skb_queue_head_init(&fip->fip_recv_list);
@@ -1786,9 +1786,9 @@ unlock:
  * fcoe_ctlr_timeout() - FIP timeout handler
  * @arg: The FCoE controller that timed out
  */
-static void fcoe_ctlr_timeout(unsigned long arg)
+static void fcoe_ctlr_timeout(struct timer_list *t)
 {
-       struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg;
+       struct fcoe_ctlr *fip = from_timer(fip, t, timer);
 
        schedule_work(&fip->timer_work);
 }
index aacadbf20b6954c28990d5dd592182a95cbaeba4..e52599f441707adc00df3bbb8d972f2e3a9ffe96 100644 (file)
@@ -407,18 +407,18 @@ static int fnic_notify_set(struct fnic *fnic)
        return err;
 }
 
-static void fnic_notify_timer(unsigned long data)
+static void fnic_notify_timer(struct timer_list *t)
 {
-       struct fnic *fnic = (struct fnic *)data;
+       struct fnic *fnic = from_timer(fnic, t, notify_timer);
 
        fnic_handle_link_event(fnic);
        mod_timer(&fnic->notify_timer,
                  round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
 }
 
-static void fnic_fip_notify_timer(unsigned long data)
+static void fnic_fip_notify_timer(struct timer_list *t)
 {
-       struct fnic *fnic = (struct fnic *)data;
+       struct fnic *fnic = from_timer(fnic, t, fip_timer);
 
        fnic_handle_fip_timer(fnic);
 }
@@ -777,8 +777,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
                fnic->set_vlan = fnic_set_vlan;
                fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
-               setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
-                                                       (unsigned long)fnic);
+               timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0);
                spin_lock_init(&fnic->vlans_lock);
                INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
                INIT_WORK(&fnic->event_work, fnic_handle_event);
@@ -809,8 +808,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* Setup notify timer when using MSI interrupts */
        if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
-               setup_timer(&fnic->notify_timer,
-                           fnic_notify_timer, (unsigned long)fnic);
+               timer_setup(&fnic->notify_timer, fnic_notify_timer, 0);
 
        /* allocate RQ buffers and post them to RQ*/
        for (i = 0; i < fnic->rq_count; i++) {
index 61a85ff8e459f429b7090cbaa77d4712a7e854c7..5f503cb095085d2baa7e2f24248419dbbb432e29 100644 (file)
@@ -839,7 +839,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
                }
                task->task_done = hisi_sas_task_done;
 
-               task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout;
+               task->slow_task->timer.function = hisi_sas_tmf_timedout;
                task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
                add_timer(&task->slow_task->timer);
 
@@ -1451,7 +1451,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
        task->dev = device;
        task->task_proto = device->tproto;
        task->task_done = hisi_sas_task_done;
-       task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout;
+       task->slow_task->timer.function = hisi_sas_tmf_timedout;
        task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
        add_timer(&task->slow_task->timer);
 
index d02c2a791981f9a68893efcfc9811666e6494c74..5d3467fd728d9f583491617e9e513266100a48b9 100644 (file)
@@ -1268,7 +1268,7 @@ static void link_timeout_enable_link(struct timer_list *t)
                }
        }
 
-       hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link;
+       hisi_hba->timer.function = link_timeout_disable_link;
        mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900));
 }
 
@@ -1289,13 +1289,13 @@ static void link_timeout_disable_link(struct timer_list *t)
                }
        }
 
-       hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_enable_link;
+       hisi_hba->timer.function = link_timeout_enable_link;
        mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100));
 }
 
 static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
 {
-       hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link;
+       hisi_hba->timer.function = link_timeout_disable_link;
        hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000);
        add_timer(&hisi_hba->timer);
 }
index d53429371127a4eb3ea76ce52dc2408ec05b9a11..cc0187965eee95fe242ab853469ed21f92546d66 100644 (file)
@@ -997,7 +997,7 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
        ipr_cmd->done = done;
 
        ipr_cmd->timer.expires = jiffies + timeout;
-       ipr_cmd->timer.function = (TIMER_FUNC_TYPE)timeout_func;
+       ipr_cmd->timer.function = timeout_func;
 
        add_timer(&ipr_cmd->timer);
 
@@ -8312,7 +8312,7 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
        ipr_cmd->done = ipr_reset_ioa_job;
 
        ipr_cmd->timer.expires = jiffies + timeout;
-       ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_reset_timer_done;
+       ipr_cmd->timer.function = ipr_reset_timer_done;
        add_timer(&ipr_cmd->timer);
 }
 
@@ -8397,7 +8397,7 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
        }
 
        ipr_cmd->timer.expires = jiffies + stage_time * HZ;
-       ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_oper_timeout;
+       ipr_cmd->timer.function = ipr_oper_timeout;
        ipr_cmd->done = ipr_reset_ioa_job;
        add_timer(&ipr_cmd->timer);
 
@@ -8468,7 +8468,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
        }
 
        ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
-       ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_oper_timeout;
+       ipr_cmd->timer.function = ipr_oper_timeout;
        ipr_cmd->done = ipr_reset_ioa_job;
        add_timer(&ipr_cmd->timer);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
index 1a4e701a844966e4a7db873a3c2dfe3a58768fc2..4fae253d4f3ded0a9453c01383f20a44450122ae 100644 (file)
@@ -1214,7 +1214,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
        fsp->seq_ptr = seq;
        fc_fcp_pkt_hold(fsp);   /* hold for fc_fcp_pkt_destroy */
 
-       fsp->timer.function = (TIMER_FUNC_TYPE)fc_fcp_timeout;
+       fsp->timer.function = fc_fcp_timeout;
        if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
                fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
 
@@ -1307,7 +1307,7 @@ static void fc_lun_reset_send(struct timer_list *t)
                        return;
                if (fc_fcp_lock_pkt(fsp))
                        return;
-               fsp->timer.function = (TIMER_FUNC_TYPE)fc_lun_reset_send;
+               fsp->timer.function = fc_lun_reset_send;
                fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
                fc_fcp_unlock_pkt(fsp);
        }
@@ -1445,7 +1445,7 @@ static void fc_fcp_timeout(struct timer_list *t)
        if (fsp->lp->qfull) {
                FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n",
                           fsp->timer_delay);
-               fsp->timer.function = (TIMER_FUNC_TYPE)fc_fcp_timeout;
+               fsp->timer.function = fc_fcp_timeout;
                fc_fcp_timer_set(fsp, fsp->timer_delay);
                goto unlock;
        }
index 5da46052e179c200a8e476f38551e470f6dd9173..21be672679fb5026120049b1609569bf8d25259a 100644 (file)
@@ -904,10 +904,14 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
                case ELS_FLOGI:
                        if (!lport->point_to_multipoint)
                                fc_lport_recv_flogi_req(lport, fp);
+                       else
+                               fc_rport_recv_req(lport, fp);
                        break;
                case ELS_LOGO:
                        if (fc_frame_sid(fp) == FC_FID_FLOGI)
                                fc_lport_recv_logo_req(lport, fp);
+                       else
+                               fc_rport_recv_req(lport, fp);
                        break;
                case ELS_RSCN:
                        lport->tt.disc_recv_req(lport, fp);
index 174e5eff615579d3c2822692de37e59a0b219b03..3183d63de4dab7f5b000f2b6da856eea70c6b900 100644 (file)
@@ -92,7 +92,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
 
                task->task_done = smp_task_done;
 
-               task->slow_task->timer.function = (TIMER_FUNC_TYPE)smp_task_timedout;
+               task->slow_task->timer.function = smp_task_timedout;
                task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
                add_timer(&task->slow_task->timer);
 
@@ -2145,7 +2145,7 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
                struct sas_rphy *rphy)
 {
        struct domain_device *dev;
-       unsigned int reslen = 0;
+       unsigned int rcvlen = 0;
        int ret = -EINVAL;
 
        /* no rphy means no smp target support (ie aic94xx host) */
@@ -2179,12 +2179,12 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
 
        ret = smp_execute_task_sg(dev, job->request_payload.sg_list,
                        job->reply_payload.sg_list);
-       if (ret > 0) {
-               /* positive number is the untransferred residual */
-               reslen = ret;
+       if (ret >= 0) {
+               /* bsg_job_done() requires the length received  */
+               rcvlen = job->reply_payload.payload_len - ret;
                ret = 0;
        }
 
 out:
-       bsg_job_done(job, ret, reslen);
+       bsg_job_done(job, ret, rcvlen);
 }
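The libsas change above alters what the third argument of bsg_job_done() means here: it used to be the untransferred residual, it is now the number of bytes actually received. As an illustrative example (the numbers are made up): with a 1024-byte reply payload and smp_execute_task_sg() returning a residual of 24, the handler now reports rcvlen = 1024 - 24 = 1000 rather than 24.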
index 91795eb56206603bb0a52baa62237e9787dd2bcf..58476b728c57e11cd20232f1b43215eb3b2c1e5b 100644 (file)
@@ -919,7 +919,7 @@ void sas_task_abort(struct sas_task *task)
                        return;
                if (!del_timer(&slow->timer))
                        return;
-               slow->timer.function((TIMER_DATA_TYPE)&slow->timer);
+               slow->timer.function(&slow->timer);
                return;
        }
 
index 56faeb049b4ac50ec04783349fcea9b79c20583f..87c08ff37dddff46fed7841247a41f585ebc8f1d 100644 (file)
@@ -753,12 +753,12 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
        drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
        rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
        if (rc < 0) {
-               (rqbp->rqb_free_buffer)(phba, rqb_entry);
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "6409 Cannot post to RQ %d: %x %x\n",
                                rqb_entry->hrq->queue_id,
                                rqb_entry->hrq->host_index,
                                rqb_entry->hrq->hba_index);
+               (rqbp->rqb_free_buffer)(phba, rqb_entry);
        } else {
                list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
                rqbp->buffer_count++;
index cff1c37b8d2e46374ffda812b97877851b1cea2c..cff43bd9f6751ae1197b76c4d9eb22bc92ef2784 100644 (file)
@@ -1310,7 +1310,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
                memcpy(&task->ssp_task, parameter, para_len);
                task->task_done = mvs_task_done;
 
-               task->slow_task->timer.function = (TIMER_FUNC_TYPE)mvs_tmf_timedout;
+               task->slow_task->timer.function = mvs_tmf_timedout;
                task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
                add_timer(&task->slow_task->timer);
 
@@ -2020,7 +2020,7 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
                MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
                                        tmp | PHYEV_SIG_FIS);
                if (phy->timer.function == NULL) {
-                       phy->timer.function = (TIMER_FUNC_TYPE)mvs_sig_time_out;
+                       phy->timer.function = mvs_sig_time_out;
                        phy->timer.expires = jiffies + 5*HZ;
                        add_timer(&phy->timer);
                }
index 5b93ed810f6ef099e265a5e45305ef26224ee4bd..dc4e801b2cefef35822ea70cace8b6cdf806f33d 100644 (file)
@@ -8093,9 +8093,9 @@ irqreturn_t ncr53c8xx_intr(int irq, void *dev_id)
      return IRQ_HANDLED;
 }
 
-static void ncr53c8xx_timeout(unsigned long npref)
+static void ncr53c8xx_timeout(struct timer_list *t)
 {
-       struct ncb *np = (struct ncb *) npref;
+       struct ncb *np = from_timer(np, t, timer);
        unsigned long flags;
        struct scsi_cmnd *done_list;
 
@@ -8357,9 +8357,7 @@ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt,
        if (!np->scripth0)
                goto attach_error;
 
-       init_timer(&np->timer);
-       np->timer.data     = (unsigned long) np;
-       np->timer.function = ncr53c8xx_timeout;
+       timer_setup(&np->timer, ncr53c8xx_timeout, 0);
 
        /* Try to map the controller chip to virtual and physical memory. */
 
index a4f28b7e4c65df81ef583eab878a3aa9fc45e0e4..e18877177f1b52d9c43ad3b991b858c80a6cc079 100644 (file)
@@ -1576,7 +1576,9 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
                return req;
 
        for_each_bio(bio) {
-               ret = blk_rq_append_bio(req, bio);
+               struct bio *bounce_bio = bio;
+
+               ret = blk_rq_append_bio(req, &bounce_bio);
                if (ret)
                        return ERR_PTR(ret);
        }
index 0e294e80c1690f20941fd26a506d3273fe341853..947d6017d004c83b3e758392d6278524613a8621 100644 (file)
@@ -695,7 +695,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
                task->task_proto = dev->tproto;
                memcpy(&task->ssp_task, parameter, para_len);
                task->task_done = pm8001_task_done;
-               task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout;
+               task->slow_task->timer.function = pm8001_tmf_timedout;
                task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
                add_timer(&task->slow_task->timer);
 
@@ -781,7 +781,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
                task->dev = dev;
                task->task_proto = dev->tproto;
                task->task_done = pm8001_task_done;
-               task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout;
+               task->slow_task->timer.function = pm8001_tmf_timedout;
                task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
                add_timer(&task->slow_task->timer);
 
index 4f9f115fb6a0c8c9a3d3e5753d5d236c52897e60..e58be98430b014a40e1cd5eeba5472a39fe7419f 100644 (file)
@@ -604,7 +604,7 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
 
        cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
        cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
-       cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_bist_done;
+       cmd->timer.function = pmcraid_bist_done;
        add_timer(&cmd->timer);
 }
 
@@ -636,7 +636,7 @@ static void pmcraid_reset_alert_done(struct timer_list *t)
                /* restart timer if some more time is available to wait */
                cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
                cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
-               cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_reset_alert_done;
+               cmd->timer.function = pmcraid_reset_alert_done;
                add_timer(&cmd->timer);
        }
 }
@@ -673,7 +673,7 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
                 */
                cmd->time_left = PMCRAID_RESET_TIMEOUT;
                cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
-               cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_reset_alert_done;
+               cmd->timer.function = pmcraid_reset_alert_done;
                add_timer(&cmd->timer);
 
                iowrite32(DOORBELL_IOA_RESET_ALERT,
@@ -923,7 +923,7 @@ static void pmcraid_send_cmd(
        if (timeout_func) {
                /* setup timeout handler */
                cmd->timer.expires = jiffies + timeout;
-               cmd->timer.function = (TIMER_FUNC_TYPE)timeout_func;
+               cmd->timer.function = timeout_func;
                add_timer(&cmd->timer);
        }
 
@@ -1951,7 +1951,7 @@ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
        cmd->cmd_done = pmcraid_ioa_reset;
        cmd->timer.expires = jiffies +
                             msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
-       cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_timeout_handler;
+       cmd->timer.function = pmcraid_timeout_handler;
 
        if (!timer_pending(&cmd->timer))
                add_timer(&cmd->timer);
index 01f08c03f2c185dc3f7e1d423c3459da95fb6cbb..c3765d29fd3ff8167a647563b43be57e885dd6a9 100644 (file)
@@ -8,9 +8,11 @@ void scsi_show_rq(struct seq_file *m, struct request *rq)
 {
        struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
        int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
-       char buf[80];
+       const u8 *const cdb = READ_ONCE(cmd->cmnd);
+       char buf[80] = "(?)";
 
-       __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
+       if (cdb)
+               __scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len);
        seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf,
                   cmd->retries, msecs / 1000, msecs % 1000);
 }
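This debugfs hunk pairs with the sd_uninit_command() reordering later in this series: cmd->cmnd may be cleared (and the buffer freed) concurrently, so the reader takes a single READ_ONCE() snapshot and only dereferences the local copy. A minimal sketch of the pattern, with placeholder names rather than the SCSI structures:

#include <linux/compiler.h>
#include <linux/printk.h>

struct obj {
        unsigned char *buf;     /* may be set to NULL by another thread */
};

static void obj_show(struct obj *o)
{
        /* Snapshot once; never re-read o->buf after the NULL check. */
        unsigned char *p = READ_ONCE(o->buf);

        if (p)
                pr_info("first byte: %02x\n", p[0]);
        else
                pr_info("(?)\n");
}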
index 78d4aa8df675a1671df5daf12dfc7d9000f3cbe5..dfb8da83fa504c979e9ba0639b32a4cae2c8969c 100644 (file)
@@ -34,7 +34,6 @@ struct scsi_dev_info_list_table {
 };
 
 
-static const char spaces[] = "                "; /* 16 of them */
 static blist_flags_t scsi_default_dev_flags;
 static LIST_HEAD(scsi_dev_info_list);
 static char scsi_dev_flags[256];
@@ -298,20 +297,13 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
        size_t from_length;
 
        from_length = strlen(from);
-       strncpy(to, from, min(to_length, from_length));
-       if (from_length < to_length) {
-               if (compatible) {
-                       /*
-                        * NUL terminate the string if it is short.
-                        */
-                       to[from_length] = '\0';
-               } else {
-                       /*
-                        * space pad the string if it is short.
-                        */
-                       strncpy(&to[from_length], spaces,
-                               to_length - from_length);
-               }
+       /* This zero-pads the destination */
+       strncpy(to, from, to_length);
+       if (from_length < to_length && !compatible) {
+               /*
+                * space pad the string if it is short.
+                */
+               memset(&to[from_length], ' ', to_length - from_length);
        }
        if (from_length > to_length)
                 printk(KERN_WARNING "%s: %s string '%s' is too long\n",
@@ -382,10 +374,8 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
                            model, compatible);
 
        if (strflags)
-               devinfo->flags = simple_strtoul(strflags, NULL, 0);
-       else
-               devinfo->flags = flags;
-
+               flags = (__force blist_flags_t)simple_strtoul(strflags, NULL, 0);
+       devinfo->flags = flags;
        devinfo->compatible = compatible;
 
        if (compatible)
@@ -458,7 +448,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
                        /*
                         * vendor strings must be an exact match
                         */
-                       if (vmax != strlen(devinfo->vendor) ||
+                       if (vmax != strnlen(devinfo->vendor,
+                                           sizeof(devinfo->vendor)) ||
                            memcmp(devinfo->vendor, vskip, vmax))
                                continue;
 
@@ -466,7 +457,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
                         * @model specifies the full string, and
                         * must be larger or equal to devinfo->model
                         */
-                       mlen = strlen(devinfo->model);
+                       mlen = strnlen(devinfo->model, sizeof(devinfo->model));
                        if (mmax < mlen || memcmp(devinfo->model, mskip, mlen))
                                continue;
                        return devinfo;
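The scsi_strcpy_devinfo() rewrite above leans on strncpy() zero-padding the tail of the destination and then, for non-compatible entries, overwriting that padding with spaces; the strnlen() bounds added to the lookup exist because those space-padded fields carry no NUL terminator. A small sketch with made-up values (an 8-byte vendor field receiving "ACME"):

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only: pad an 8-byte vendor field the way the new code does. */
static void pad_vendor(char to[8], const char *from, bool compatible)
{
        size_t len = strlen(from);

        strncpy(to, from, 8);                   /* zero-pads the tail */
        if (len < 8 && !compatible)
                memset(&to[len], ' ', 8 - len); /* space-pad, no NUL */
        /*
         * compatible:  "ACME\0\0\0\0"  (NUL terminated)
         * otherwise:   "ACME    "      (no terminator, hence strnlen())
         */
}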
index 1cbc497e00bd95ffff6ee2020f06531f8059846c..d9ca1dfab154c83f79ba1dab210f1a3521d1ea5d 100644 (file)
@@ -1967,6 +1967,8 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
 out_put_device:
        put_device(&sdev->sdev_gendev);
 out:
+       if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
+               blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
        return false;
 }
 
@@ -2148,11 +2150,13 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
                q->limits.cluster = 0;
 
        /*
-        * set a reasonable default alignment on word boundaries: the
-        * host and device may alter it using
-        * blk_queue_update_dma_alignment() later.
+        * Set a reasonable default alignment:  The larger of 32-bit (dword),
+        * which is a common minimum for HBAs, and the minimum DMA alignment,
+        * which is set by the platform.
+        *
+        * Devices that require a bigger alignment can increase it later.
         */
-       blk_queue_dma_alignment(q, 0x03);
+       blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
 }
 EXPORT_SYMBOL_GPL(__scsi_init_queue);
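For reference, blk_queue_dma_alignment() takes a mask, i.e. the alignment minus one. The old call always passed 0x03 (4-byte alignment); after this change, a platform whose dma_get_cache_alignment() returns, say, 128 ends up with a mask of 127, while a cache-coherent platform returning 1 keeps max(4, 1) - 1 = 3, the old dword default. These example values are illustrative; the actual return value is architecture-specific.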
 
index be5e919db0e8cd9e713727a91bc46923673ea556..0880d975eed3a56c58d27172bfd18c1a59da5d4b 100644 (file)
@@ -770,7 +770,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
  *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
  **/
 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
-               int *bflags, int async)
+               blist_flags_t *bflags, int async)
 {
        int ret;
 
@@ -1049,14 +1049,15 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
  *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
  **/
 static int scsi_probe_and_add_lun(struct scsi_target *starget,
-                                 u64 lun, int *bflagsp,
+                                 u64 lun, blist_flags_t *bflagsp,
                                  struct scsi_device **sdevp,
                                  enum scsi_scan_mode rescan,
                                  void *hostdata)
 {
        struct scsi_device *sdev;
        unsigned char *result;
-       int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
+       blist_flags_t bflags;
+       int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 
        /*
@@ -1201,7 +1202,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
  *     Modifies sdevscan->lun.
  **/
 static void scsi_sequential_lun_scan(struct scsi_target *starget,
-                                    int bflags, int scsi_level,
+                                    blist_flags_t bflags, int scsi_level,
                                     enum scsi_scan_mode rescan)
 {
        uint max_dev_lun;
@@ -1292,7 +1293,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
  *     0: scan completed (or no memory, so further scanning is futile)
  *     1: could not scan with REPORT LUN
  **/
-static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
+static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
                                enum scsi_scan_mode rescan)
 {
        unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -1538,7 +1539,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
                unsigned int id, u64 lun, enum scsi_scan_mode rescan)
 {
        struct Scsi_Host *shost = dev_to_shost(parent);
-       int bflags = 0;
+       blist_flags_t bflags = 0;
        int res;
        struct scsi_target *starget;
 
index 50e7d7e4a86179b9a47d18569bb759be0b674b88..a9996c16f4ae63fc820065c75b68fb51c5a01ab4 100644 (file)
@@ -967,7 +967,8 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
 
-#define BLIST_FLAG_NAME(name) [ilog2(BLIST_##name)] = #name
+#define BLIST_FLAG_NAME(name)                                  \
+       [ilog2((__force unsigned int)BLIST_##name)] = #name
 static const char *const sdev_bflags_name[] = {
 #include "scsi_devinfo_tbl.c"
 };
@@ -984,7 +985,7 @@ sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
        for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) {
                const char *name = NULL;
 
-               if (!(sdev->sdev_bflags & BIT(i)))
+               if (!(sdev->sdev_bflags & (__force blist_flags_t)BIT(i)))
                        continue;
                if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i])
                        name = sdev_bflags_name[i];
index d0219e36080c3b79109ac405eb0cd726545585fc..10ebb213ddb33e2920e2fe83e60cc712a50c3002 100644 (file)
 
 /* Our blacklist flags */
 enum {
-       SPI_BLIST_NOIUS = 0x1,
+       SPI_BLIST_NOIUS = (__force blist_flags_t)0x1,
 };
 
 /* blacklist table, modelled on scsi_devinfo.c */
 static struct {
        char *vendor;
        char *model;
-       unsigned flags;
+       blist_flags_t flags;
 } spi_static_device_list[] __initdata = {
        {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
        {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS },
@@ -221,9 +221,11 @@ static int spi_device_configure(struct transport_container *tc,
 {
        struct scsi_device *sdev = to_scsi_device(dev);
        struct scsi_target *starget = sdev->sdev_target;
-       unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
-                                                     &sdev->inquiry[16],
-                                                     SCSI_DEVINFO_SPI);
+       blist_flags_t bflags;
+
+       bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
+                                            &sdev->inquiry[16],
+                                            SCSI_DEVINFO_SPI);
 
        /* Populate the target capability fields with the values
         * gleaned from the device inquiry */
index 24fe685227169d1be705e9ddc1d8861cb962d2bf..a028ab3322a9a4ed3b37530c19268dc90d9098f4 100644 (file)
@@ -1312,6 +1312,7 @@ static int sd_init_command(struct scsi_cmnd *cmd)
 static void sd_uninit_command(struct scsi_cmnd *SCpnt)
 {
        struct request *rq = SCpnt->request;
+       u8 *cmnd;
 
        if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
                sd_zbc_write_unlock_zone(SCpnt);
@@ -1320,9 +1321,10 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
                __free_page(rq->special_vec.bv_page);
 
        if (SCpnt->cmnd != scsi_req(rq)->cmd) {
-               mempool_free(SCpnt->cmnd, sd_cdb_pool);
+               cmnd = SCpnt->cmnd;
                SCpnt->cmnd = NULL;
                SCpnt->cmd_len = 0;
+               mempool_free(cmnd, sd_cdb_pool);
        }
 }
 
index d32e3ba8863e86ef9e6ca55c5311697683050c16..791a2182de53592ddc70fe1661a2969a1de5713c 100644 (file)
@@ -565,9 +565,9 @@ static irqreturn_t sym53c8xx_intr(int irq, void *dev_id)
 /*
  *  Linux entry point of the timer handler
  */
-static void sym53c8xx_timer(unsigned long npref)
+static void sym53c8xx_timer(struct timer_list *t)
 {
-       struct sym_hcb *np = (struct sym_hcb *)npref;
+       struct sym_hcb *np = from_timer(np, t, s.timer);
        unsigned long flags;
 
        spin_lock_irqsave(np->s.host->host_lock, flags);
@@ -1351,9 +1351,7 @@ static struct Scsi_Host *sym_attach(struct scsi_host_template *tpnt, int unit,
        /*
         *  Start the timer daemon
         */
-       init_timer(&np->s.timer);
-       np->s.timer.data     = (unsigned long) np;
-       np->s.timer.function = sym53c8xx_timer;
+       timer_setup(&np->s.timer, sym53c8xx_timer, 0);
        np->s.lasttime=0;
        sym_timer (np);
 
index 011c3369082c6f19772e227989cd7eb6b335179a..a355d989b414f9059abe8e04bad9bbf6d2459c34 100644 (file)
@@ -6559,12 +6559,15 @@ static int ufshcd_config_vreg(struct device *dev,
                struct ufs_vreg *vreg, bool on)
 {
        int ret = 0;
-       struct regulator *reg = vreg->reg;
-       const char *name = vreg->name;
+       struct regulator *reg;
+       const char *name;
        int min_uV, uA_load;
 
        BUG_ON(!vreg);
 
+       reg = vreg->reg;
+       name = vreg->name;
+
        if (regulator_count_voltages(reg) > 0) {
                min_uV = on ? vreg->min_uV : 0;
                ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
index 89f4cf507be6819d468f84e438cf0c237ee8f257..f2d8c3c53ea44a1d3463f1d603d2ea17e7cb207d 100644 (file)
@@ -20,8 +20,8 @@
 #define AO_SEC_SOCINFO_OFFSET  AO_SEC_SD_CFG8
 
 #define SOCINFO_MAJOR  GENMASK(31, 24)
-#define SOCINFO_MINOR  GENMASK(23, 16)
-#define SOCINFO_PACK   GENMASK(15, 8)
+#define SOCINFO_PACK   GENMASK(23, 16)
+#define SOCINFO_MINOR  GENMASK(15, 8)
 #define SOCINFO_MISC   GENMASK(7, 0)
 
 static const struct meson_gx_soc_id {
index 77fe55ce790c61a8835c4e2338a36be43dafcbac..d65345312527ce450b539964aa0465e1e6787b44 100644 (file)
@@ -79,6 +79,7 @@
 #define A3700_SPI_BYTE_LEN             BIT(5)
 #define A3700_SPI_CLK_PRESCALE         BIT(0)
 #define A3700_SPI_CLK_PRESCALE_MASK    (0x1f)
+#define A3700_SPI_CLK_EVEN_OFFS                (0x10)
 
 #define A3700_SPI_WFIFO_THRS_BIT       28
 #define A3700_SPI_RFIFO_THRS_BIT       24
@@ -220,6 +221,13 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
 
        prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
 
+       /* For prescaler values over 15, we can only set it by steps of 2.
+        * Starting from A3700_SPI_CLK_EVEN_OFFS, we set values from 0 up to
+        * 30. We only use this range from 16 to 30.
+        */
+       if (prescale > 15)
+               prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
+
        val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
        val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
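To make the even-divider encoding above concrete (the clock rates are illustrative): with a 40 MHz source clock and a requested 2 MHz, DIV_ROUND_UP() yields a prescaler of 20; since that exceeds 15, the register field becomes A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(20, 2) = 0x10 + 10, which the controller interprets as an even divider of 20.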
 
index f95da364c2832b0142158e12c97648aab81b7165..66947097102370d0f54ba2559abddab1ac812a5d 100644 (file)
@@ -1661,12 +1661,12 @@ static int atmel_spi_remove(struct platform_device *pdev)
        pm_runtime_get_sync(&pdev->dev);
 
        /* reset the hardware and block queue progress */
-       spin_lock_irq(&as->lock);
        if (as->use_dma) {
                atmel_spi_stop_dma(master);
                atmel_spi_release_dma(master);
        }
 
+       spin_lock_irq(&as->lock);
        spi_writel(as, CR, SPI_BIT(SWRST));
        spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
        spi_readl(as, SR);
index 2ce875764ca646a2bdfb803cae33465ab8fa1786..0835a8d88fb8f85ab5ae44a4aa74d94121d19d87 100644 (file)
@@ -377,8 +377,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
        /* Sets SPCMD */
        rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 
-       /* Enables SPI function in master mode */
-       rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
+       /* Sets RSPI mode */
+       rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
 
        return 0;
 }
index c5cd635c28f388bec2cfd47b9a6c6c9dcec9e046..41410031f8e99e6a1d54b8f94990df0133356ced 100644 (file)
@@ -525,7 +525,7 @@ err_free_master:
 
 static int sun4i_spi_remove(struct platform_device *pdev)
 {
-       pm_runtime_disable(&pdev->dev);
+       pm_runtime_force_suspend(&pdev->dev);
 
        return 0;
 }
index bc7100b93dfcf0c24213f479f9a5fffc41666315..e0b9fe1d0e37d98a7243ca35a56b1d62e024e8b3 100644 (file)
@@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
        while (remaining_words) {
                int n_words, tx_words, rx_words;
                u32 sr;
+               int stalled;
 
                n_words = min(remaining_words, xspi->buffer_size);
 
@@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 
                /* Read out all the data from the Rx FIFO */
                rx_words = n_words;
+               stalled = 10;
                while (rx_words) {
+                       if (rx_words == n_words && !(stalled--) &&
+                           !(sr & XSPI_SR_TX_EMPTY_MASK) &&
+                           (sr & XSPI_SR_RX_EMPTY_MASK)) {
+                               dev_err(&spi->dev,
+                                       "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
+                               xspi_init_hw(xspi);
+                               return -EIO;
+                       }
+
                        if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
                                xilinx_spi_rx(xspi);
                                rx_words--;
index d79090ed7f9c71ee5bcbed31e27a05a5da47bb83..2035835b62dcd797e41209acdf079f0daf9f4f8f 100644 (file)
@@ -1769,7 +1769,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        struct ahash_req_ctx *state = ahash_request_ctx(req);
        u32 tmp;
-       int rc;
+       int rc = 0;
 
        memcpy(&tmp, in, sizeof(u32));
        if (tmp != CC_EXPORT_MAGIC) {
@@ -1778,9 +1778,12 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
        }
        in += sizeof(u32);
 
-       rc = ssi_hash_init(state, ctx);
-       if (rc)
-               goto out;
+       /* call init() to allocate bufs if the user hasn't */
+       if (!state->digest_buff) {
+               rc = ssi_hash_init(state, ctx);
+               if (rc)
+                       goto out;
+       }
 
        dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
                                ctx->inter_digestsize, DMA_BIDIRECTIONAL);
index 2d62a8c5733241738f68bd87a53247f2059dbdf8..ae6ed96d7874c4ce34d7ce3574b9eded74dbb625 100644 (file)
@@ -361,3 +361,8 @@ static struct comedi_driver ni_atmio_driver = {
        .detach         = ni_atmio_detach,
 };
 module_comedi_driver(ni_atmio_driver);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
+
index 609332b3e15b5415b589eb099620d4961f189dc0..c462b1c046cd4709bdf5b4b6d93081966c197490 100644 (file)
@@ -293,9 +293,9 @@ static void gb_operation_work(struct work_struct *work)
        gb_operation_put(operation);
 }
 
-static void gb_operation_timeout(unsigned long arg)
+static void gb_operation_timeout(struct timer_list *t)
 {
-       struct gb_operation *operation = (void *)arg;
+       struct gb_operation *operation = from_timer(operation, t, timer);
 
        if (gb_operation_result_set(operation, -ETIMEDOUT)) {
                /*
@@ -540,8 +540,7 @@ gb_operation_create_common(struct gb_connection *connection, u8 type,
                        goto err_request;
                }
 
-               setup_timer(&operation->timer, gb_operation_timeout,
-                           (unsigned long)operation);
+               timer_setup(&operation->timer, gb_operation_timeout, 0);
        }
 
        operation->flags = op_flags;
index a6635f0afae9269501ff7ceee11b8459a8adf61d..6dab15f5dae1b99b33559e9914400d015cef5baf 100644 (file)
@@ -75,7 +75,7 @@ struct lap_cb;
 static inline void irda_start_timer(struct timer_list *ptimer, int timeout,
                                    void (*callback)(struct timer_list *))
 {
-       ptimer->function = (TIMER_FUNC_TYPE) callback;
+       ptimer->function = callback;
 
        /* Set new value for timer (update or add timer).
         * We use mod_timer() because it's more efficient and also
index 539a26444f3181279df5803a26c1e2e59c165067..7d49d4865298a304bff9dc11d9a1684621aead10 100644 (file)
@@ -71,16 +71,12 @@ lnet_sock_ioctl(int cmd, unsigned long arg)
        }
 
        sock_filp = sock_alloc_file(sock, 0, NULL);
-       if (IS_ERR(sock_filp)) {
-               sock_release(sock);
-               rc = PTR_ERR(sock_filp);
-               goto out;
-       }
+       if (IS_ERR(sock_filp))
+               return PTR_ERR(sock_filp);
 
        rc = kernel_sock_unlocked_ioctl(sock_filp, cmd, arg);
 
        fput(sock_filp);
-out:
        return rc;
 }
 
index 3c83aa31e2c208426e3396ba9d02a2ac3f273f3f..5a5d1811ffbeac436dd02fd8fc5700569ae5ef25 100644 (file)
@@ -700,9 +700,9 @@ lnet_delay_rule_daemon(void *arg)
 }
 
 static void
-delay_timer_cb(unsigned long arg)
+delay_timer_cb(struct timer_list *t)
 {
-       struct lnet_delay_rule *rule = (struct lnet_delay_rule *)arg;
+       struct lnet_delay_rule *rule = from_timer(rule, t, dl_timer);
 
        spin_lock_bh(&delay_dd.dd_lock);
        if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
@@ -762,7 +762,7 @@ lnet_delay_rule_add(struct lnet_fault_attr *attr)
                wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
        }
 
-       setup_timer(&rule->dl_timer, delay_timer_cb, (unsigned long)rule);
+       timer_setup(&rule->dl_timer, delay_timer_cb, 0);
 
        spin_lock_init(&rule->dl_lock);
        INIT_LIST_HEAD(&rule->dl_msg_list);
index 2d6e64dea2660228855afd4ad0491c4a63e01b91..938b859b6650b2e4b48ebad34d75081a3595bd2e 100644 (file)
@@ -1016,7 +1016,7 @@ static bool file_is_noatime(const struct file *file)
        if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
                return true;
 
-       if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+       if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
                return true;
 
        return false;
index 65ac5128f0057468c8bada13282313005a566794..8666f1e81ade7ad24e2e60760ac03aa4c1d1dad0 100644 (file)
@@ -313,11 +313,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
        }
 
        if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
                sbi->ll_flags |= LL_SBI_ACL;
        } else {
                LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
-               sb->s_flags &= ~MS_POSIXACL;
+               sb->s_flags &= ~SB_POSIXACL;
                sbi->ll_flags &= ~LL_SBI_ACL;
        }
 
@@ -660,7 +660,7 @@ void ll_kill_super(struct super_block *sb)
        struct ll_sb_info *sbi;
 
        /* not init sb ?*/
-       if (!(sb->s_flags & MS_ACTIVE))
+       if (!(sb->s_flags & SB_ACTIVE))
                return;
 
        sbi = ll_s2sbi(sb);
@@ -2039,8 +2039,8 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data)
        int err;
        __u32 read_only;
 
-       if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
-               read_only = *flags & MS_RDONLY;
+       if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
+               read_only = *flags & SB_RDONLY;
                err = obd_set_info_async(NULL, sbi->ll_md_exp,
                                         sizeof(KEY_READ_ONLY),
                                         KEY_READ_ONLY, sizeof(read_only),
@@ -2053,9 +2053,9 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data)
                }
 
                if (read_only)
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                else
-                       sb->s_flags &= ~MS_RDONLY;
+                       sb->s_flags &= ~SB_RDONLY;
 
                if (sbi->ll_flags & LL_SBI_VERBOSE)
                        LCONSOLE_WARN("Remounted %s %s\n", profilenm,
index 23cdb7c4476c9480b37e30a1ea2549724b51d319..63be6e7273f3548343c4b30991172a152599e90d 100644 (file)
@@ -329,11 +329,11 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
        return -1;
 }
 
-static void ptlrpc_at_timer(unsigned long castmeharder)
+static void ptlrpc_at_timer(struct timer_list *t)
 {
        struct ptlrpc_service_part *svcpt;
 
-       svcpt = (struct ptlrpc_service_part *)castmeharder;
+       svcpt = from_timer(svcpt, t, scp_at_timer);
 
        svcpt->scp_at_check = 1;
        svcpt->scp_at_checktime = cfs_time_current();
@@ -506,8 +506,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
        if (!array->paa_reqs_count)
                goto free_reqs_array;
 
-       setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer,
-                   (unsigned long)svcpt);
+       timer_setup(&svcpt->scp_at_timer, ptlrpc_at_timer, 0);
 
        /* At SOW, service time should be quick; 10s seems generous. If client
         * timeout is less than this, we'll be sending an early reply.
@@ -926,7 +925,7 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
        next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
                       at_early_margin);
        if (next <= 0) {
-               ptlrpc_at_timer((unsigned long)svcpt);
+               ptlrpc_at_timer(&svcpt->scp_at_timer);
        } else {
                mod_timer(&svcpt->scp_at_timer, cfs_time_shift(next));
                CDEBUG(D_INFO, "armed %s at %+ds\n",
index b5533197226d4179f7d7c757167d60a4912fbcd3..15fa5679bae7d168d91a24e548a6873748c84720 100644 (file)
@@ -208,14 +208,14 @@ struct atomisp_dis_vector {
 };
 
 
-/** DVS 2.0 Coefficient types. This structure contains 4 pointers to
+/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
  *  arrays that contain the coefficients for each type.
  */
 struct atomisp_dvs2_coef_types {
-       short __user *odd_real; /**< real part of the odd coefficients*/
-       short __user *odd_imag; /**< imaginary part of the odd coefficients*/
-       short __user *even_real;/**< real part of the even coefficients*/
-       short __user *even_imag;/**< imaginary part of the even coefficients*/
+       short __user *odd_real; /** real part of the odd coefficients*/
+       short __user *odd_imag; /** imaginary part of the odd coefficients*/
+       short __user *even_real;/** real part of the even coefficients*/
+       short __user *even_imag;/** imaginary part of the even coefficients*/
 };
 
 /*
@@ -223,10 +223,10 @@ struct atomisp_dvs2_coef_types {
  * arrays that contain the statistics for each type.
  */
 struct atomisp_dvs2_stat_types {
-       int __user *odd_real; /**< real part of the odd statistics*/
-       int __user *odd_imag; /**< imaginary part of the odd statistics*/
-       int __user *even_real;/**< real part of the even statistics*/
-       int __user *even_imag;/**< imaginary part of the even statistics*/
+       int __user *odd_real; /** real part of the odd statistics*/
+       int __user *odd_imag; /** imaginary part of the odd statistics*/
+       int __user *even_real;/** real part of the even statistics*/
+       int __user *even_imag;/** imaginary part of the even statistics*/
 };
 
 struct atomisp_dis_coefficients {
@@ -390,16 +390,16 @@ struct atomisp_metadata_config {
  * Generic resolution structure.
  */
 struct atomisp_resolution {
-       uint32_t width;  /**< Width */
-       uint32_t height; /**< Height */
+       uint32_t width;  /** Width */
+       uint32_t height; /** Height */
 };
 
 /*
  * This specifies the coordinates (x,y)
  */
 struct atomisp_zoom_point {
-       int32_t x; /**< x coordinate */
-       int32_t y; /**< y coordinate */
+       int32_t x; /** x coordinate */
+       int32_t y; /** y coordinate */
 };
 
 /*
@@ -411,9 +411,9 @@ struct atomisp_zoom_region {
 };
 
 struct atomisp_dz_config {
-       uint32_t dx; /**< Horizontal zoom factor */
-       uint32_t dy; /**< Vertical zoom factor */
-       struct atomisp_zoom_region zoom_region; /**< region for zoom */
+       uint32_t dx; /** Horizontal zoom factor */
+       uint32_t dy; /** Vertical zoom factor */
+       struct atomisp_zoom_region zoom_region; /** region for zoom */
 };
 
 struct atomisp_parm {
@@ -758,7 +758,7 @@ enum atomisp_acc_arg_type {
        ATOMISP_ACC_ARG_FRAME        /* Frame argument */
 };
 
-/** ISP memories, isp2400 */
+/* ISP memories, isp2400 */
 enum atomisp_acc_memory {
        ATOMISP_ACC_MEMORY_PMEM0 = 0,
        ATOMISP_ACC_MEMORY_DMEM0,
index 8a18c528cad4c73f6d387ae04a2e850059a600a9..debf0e3853ffa6bc213d9e6b8d3e058d71336914 100644 (file)
@@ -5187,7 +5187,7 @@ int get_frame_info_nop(struct atomisp_sub_device *asd,
        return 0;
 }
 
-/**
+/*
  * Resets CSS parameters that depend on input resolution.
  *
  * Update params like CSS RAW binning, 2ppc mode and pp_input
index 6e87aa5aab4c62c184ac1bece52649e4cf1e0b73..b7f9da014641c14f91d24b84750c76cb4b843d01 100644 (file)
@@ -4051,7 +4051,7 @@ int atomisp_css_get_formats_config(struct atomisp_sub_device *asd,
 int atomisp_css_get_zoom_factor(struct atomisp_sub_device *asd,
                                        unsigned int *zoom)
 {
-       struct ia_css_dz_config dz_config;  /**< Digital Zoom */
+       struct ia_css_dz_config dz_config;  /** Digital Zoom */
        struct ia_css_isp_config isp_config;
        struct atomisp_device *isp = asd->isp;
 
index 685da0f48bab36fc2ca2fd52ae87d1831f5180d8..95669eedaad1121ddba9885e425cf9d3c65c781f 100644 (file)
@@ -28,17 +28,17 @@ struct atomisp_histogram32 {
 };
 
 struct atomisp_dvs2_stat_types32 {
-       compat_uptr_t odd_real; /**< real part of the odd statistics*/
-       compat_uptr_t odd_imag; /**< imaginary part of the odd statistics*/
-       compat_uptr_t even_real;/**< real part of the even statistics*/
-       compat_uptr_t even_imag;/**< imaginary part of the even statistics*/
+       compat_uptr_t odd_real; /** real part of the odd statistics*/
+       compat_uptr_t odd_imag; /** imaginary part of the odd statistics*/
+       compat_uptr_t even_real;/** real part of the even statistics*/
+       compat_uptr_t even_imag;/** imaginary part of the even statistics*/
 };
 
 struct atomisp_dvs2_coef_types32 {
-       compat_uptr_t odd_real; /**< real part of the odd coefficients*/
-       compat_uptr_t odd_imag; /**< imaginary part of the odd coefficients*/
-       compat_uptr_t even_real;/**< real part of the even coefficients*/
-       compat_uptr_t even_imag;/**< imaginary part of the even coefficients*/
+       compat_uptr_t odd_real; /** real part of the odd coefficients*/
+       compat_uptr_t odd_imag; /** imaginary part of the odd coefficients*/
+       compat_uptr_t even_real;/** real part of the even coefficients*/
+       compat_uptr_t even_imag;/** imaginary part of the even coefficients*/
 };
 
 struct atomisp_dvs2_statistics32 {
index f3d61827ae8c034b034f08f69c886d9e5d8231a2..c3eba675da065d8833495b310f22f5dcc6805ca9 100644 (file)
@@ -223,7 +223,7 @@ struct atomisp_subdev_params {
 
        bool dis_proj_data_valid;
 
-       struct ia_css_dz_config   dz_config;  /**< Digital Zoom */
+       struct ia_css_dz_config   dz_config;  /** Digital Zoom */
        struct ia_css_capture_config   capture_config;
 
        struct atomisp_css_isp_config config;
index 19bae1610fb66099dd56c323c8a9eaed794cb7f3..050d60f0894f7641e3307a6e73997ad824dfcf72 100644 (file)
@@ -21,7 +21,7 @@
  * Forward declarations.
  *
  **********************************************************************/
-/**
+/*
  * @brief Read the oldest element from the circular buffer.
  * Read the oldest element WITHOUT checking whether the
  * circular buffer is empty or not. The oldest element is
@@ -34,7 +34,7 @@
 static inline ia_css_circbuf_elem_t
 ia_css_circbuf_read(ia_css_circbuf_t *cb);
 
-/**
+/*
  * @brief Shift a chunk of elements in the circular buffer.
  * A chunk of elements (i.e. the ones from the "start" position
  * to the "chunk_src" position) are shifted in the circular buffer,
@@ -48,7 +48,7 @@ static inline void ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb,
                                                   uint32_t chunk_src,
                                                   uint32_t chunk_dest);
 
-/**
+/*
  * @brief Get the "val" field in the element.
  *
  * @param elem The pointer to the element.
@@ -63,7 +63,7 @@ ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem);
  * Non-inline functions.
  *
  **********************************************************************/
-/**
+/*
  * @brief Create the circular buffer.
  * Refer to "ia_css_circbuf.h" for details.
  */
@@ -88,7 +88,7 @@ ia_css_circbuf_create(ia_css_circbuf_t *cb,
        cb->elems = elems;
 }
 
-/**
+/*
  * @brief Destroy the circular buffer.
  * Refer to "ia_css_circbuf.h" for details.
  */
@@ -99,7 +99,7 @@ void ia_css_circbuf_destroy(ia_css_circbuf_t *cb)
        cb->elems = NULL;
 }
 
-/**
+/*
  * @brief Pop a value out of the circular buffer.
  * Refer to "ia_css_circbuf.h" for details.
  */
@@ -116,7 +116,7 @@ uint32_t ia_css_circbuf_pop(ia_css_circbuf_t *cb)
        return ret;
 }
 
-/**
+/*
  * @brief Extract a value out of the circular buffer.
  * Refer to "ia_css_circbuf.h" for details.
  */
@@ -166,7 +166,7 @@ uint32_t ia_css_circbuf_extract(ia_css_circbuf_t *cb, int offset)
        return val;
 }
 
-/**
+/*
  * @brief Peek an element from the circular buffer.
  * Refer to "ia_css_circbuf.h" for details.
  */
@@ -180,7 +180,7 @@ uint32_t ia_css_circbuf_peek(ia_css_circbuf_t *cb, int offset)
        return cb->elems[pos].val;
 }
 
-/**
+/*
  * @brief Get the value of an element from the circular buffer.
  * Refer to "ia_css_circbuf.h" for details.
  */
@@ -194,7 +194,7 @@ uint32_t ia_css_circbuf_peek_from_start(ia_css_circbuf_t *cb, int offset)
        return cb->elems[pos].val;
 }
 
-/** @brief increase size of a circular buffer.
+/* @brief increase size of a circular buffer.
  * Use 'CAUTION' before using this function. This was added to
  * support / fix issue with increasing size for tagger only
  * Please refer to "ia_css_circbuf.h" for details.
@@ -252,7 +252,7 @@ bool ia_css_circbuf_increase_size(
  * Inline functions.
  *
  ****************************************************************/
-/**
+/*
  * @brief Get the "val" field in the element.
  * Refer to "Forward declarations" for details.
  */
@@ -262,7 +262,7 @@ ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem)
        return elem->val;
 }
 
-/**
+/*
  * @brief Read the oldest element from the circular buffer.
  * Refer to "Forward declarations" for details.
  */
@@ -282,7 +282,7 @@ ia_css_circbuf_read(ia_css_circbuf_t *cb)
        return elem;
 }
 
-/**
+/*
  * @brief Shift a chunk of elements in the circular buffer.
  * Refer to "Forward declarations" for details.
  */
index 616789d9b3f644086b1bf142f4ae2bc68d4416dc..a6d650a9a1f4232042d6d0594dec6c7bf1cc2c7f 100644 (file)
@@ -19,7 +19,7 @@
 #include <ia_css_frame_public.h>       /* ia_css_frame_info */
 #include <ia_css_binary.h>             /* ia_css_binary_descr */
 
-/** @brief Get a binary descriptor for copy.
+/* @brief Get a binary descriptor for copy.
  *
  * @param[in] pipe
  * @param[out] copy_desc
@@ -36,7 +36,7 @@ extern void ia_css_pipe_get_copy_binarydesc(
        struct ia_css_frame_info *out_info,
        struct ia_css_frame_info *vf_info);
 
-/** @brief Get a binary descriptor for vfpp.
+/* @brief Get a binary descriptor for vfpp.
  *
  * @param[in] pipe
  * @param[out] vfpp_descr
@@ -51,7 +51,7 @@ extern void ia_css_pipe_get_vfpp_binarydesc(
                struct ia_css_frame_info *in_info,
                struct ia_css_frame_info *out_info);
 
-/** @brief Get numerator and denominator of bayer downscaling factor.
+/* @brief Get numerator and denominator of bayer downscaling factor.
  *
  * @param[in] bds_factor: The bayer downscaling factor.
  *             (= The bds_factor member in the sh_css_bds_factor structure.)
@@ -67,7 +67,7 @@ extern enum ia_css_err sh_css_bds_factor_get_numerator_denominator(
        unsigned int *bds_factor_numerator,
        unsigned int *bds_factor_denominator);
 
-/** @brief Get a binary descriptor for preview stage.
+/* @brief Get a binary descriptor for preview stage.
  *
  * @param[in] pipe
  * @param[out] preview_descr
@@ -86,7 +86,7 @@ extern enum ia_css_err ia_css_pipe_get_preview_binarydesc(
        struct ia_css_frame_info *out_info,
        struct ia_css_frame_info *vf_info);
 
-/** @brief Get a binary descriptor for video stage.
+/* @brief Get a binary descriptor for video stage.
  *
  * @param[in/out] pipe
  * @param[out] video_descr
@@ -105,7 +105,7 @@ extern enum ia_css_err ia_css_pipe_get_video_binarydesc(
        struct ia_css_frame_info *vf_info,
        int stream_config_left_padding);
 
-/** @brief Get a binary descriptor for yuv scaler stage.
+/* @brief Get a binary descriptor for yuv scaler stage.
  *
  * @param[in/out] pipe
  * @param[out] yuv_scaler_descr
@@ -124,7 +124,7 @@ void ia_css_pipe_get_yuvscaler_binarydesc(
        struct ia_css_frame_info *internal_out_info,
        struct ia_css_frame_info *vf_info);
 
-/** @brief Get a binary descriptor for capture pp stage.
+/* @brief Get a binary descriptor for capture pp stage.
  *
  * @param[in/out] pipe
  * @param[out] capture_pp_descr
@@ -140,7 +140,7 @@ extern void ia_css_pipe_get_capturepp_binarydesc(
        struct ia_css_frame_info *out_info,
        struct ia_css_frame_info *vf_info);
 
-/** @brief Get a binary descriptor for primary capture.
+/* @brief Get a binary descriptor for primary capture.
  *
  * @param[in] pipe
  * @param[out] prim_descr
@@ -158,7 +158,7 @@ extern void ia_css_pipe_get_primary_binarydesc(
        struct ia_css_frame_info *vf_info,
        unsigned int stage_idx);
 
-/** @brief Get a binary descriptor for pre gdc stage.
+/* @brief Get a binary descriptor for pre gdc stage.
  *
  * @param[in] pipe
  * @param[out] pre_gdc_descr
@@ -173,7 +173,7 @@ extern void ia_css_pipe_get_pre_gdc_binarydesc(
        struct ia_css_frame_info *in_info,
        struct ia_css_frame_info *out_info);
 
-/** @brief Get a binary descriptor for gdc stage.
+/* @brief Get a binary descriptor for gdc stage.
  *
  * @param[in] pipe
  * @param[out] gdc_descr
@@ -188,7 +188,7 @@ extern void ia_css_pipe_get_gdc_binarydesc(
        struct ia_css_frame_info *in_info,
        struct ia_css_frame_info *out_info);
 
-/** @brief Get a binary descriptor for post gdc.
+/* @brief Get a binary descriptor for post gdc.
  *
  * @param[in] pipe
  * @param[out] post_gdc_descr
@@ -205,7 +205,7 @@ extern void ia_css_pipe_get_post_gdc_binarydesc(
        struct ia_css_frame_info *out_info,
        struct ia_css_frame_info *vf_info);
 
-/** @brief Get a binary descriptor for de.
+/* @brief Get a binary descriptor for de.
  *
  * @param[in] pipe
  * @param[out] pre_de_descr
@@ -220,7 +220,7 @@ extern void ia_css_pipe_get_pre_de_binarydesc(
        struct ia_css_frame_info *in_info,
        struct ia_css_frame_info *out_info);
 
-/** @brief Get a binary descriptor for pre anr stage.
+/* @brief Get a binary descriptor for pre anr stage.
  *
  * @param[in] pipe
  * @param[out] pre_anr_descr
@@ -235,7 +235,7 @@ extern void ia_css_pipe_get_pre_anr_binarydesc(
        struct ia_css_frame_info *in_info,
        struct ia_css_frame_info *out_info);
 
-/** @brief Get a binary descriptor for ANR stage.
+/* @brief Get a binary descriptor for ANR stage.
  *
  * @param[in] pipe
  * @param[out] anr_descr
@@ -250,7 +250,7 @@ extern void ia_css_pipe_get_anr_binarydesc(
        struct ia_css_frame_info *in_info,
        struct ia_css_frame_info *out_info);
 
-/** @brief Get a binary descriptor for post anr stage.
+/* @brief Get a binary descriptor for post anr stage.
  *
  * @param[in] pipe
  * @param[out] post_anr_descr
@@ -267,7 +267,7 @@ extern void ia_css_pipe_get_post_anr_binarydesc(
        struct ia_css_frame_info *out_info,
        struct ia_css_frame_info *vf_info);
 
-/** @brief Get a binary descriptor for ldc stage.
+/* @brief Get a binary descriptor for ldc stage.
  *
  * @param[in/out] pipe
  * @param[out] capture_pp_descr
@@ -282,7 +282,7 @@ extern void ia_css_pipe_get_ldc_binarydesc(
        struct ia_css_frame_info *in_info,
        struct ia_css_frame_info *out_info);
 
-/** @brief Calculates the required BDS factor
+/* @brief Calculates the required BDS factor
  *
  * @param[in] input_res
  * @param[in] output_res
index ba8858759b300d03e281da379253ff2edb2815a6..155b6fb4722bafb5d49d5905ea137f46be47ef29 100644 (file)
@@ -18,7 +18,7 @@
 #include <ia_css_types.h>
 #include <ia_css_frame_public.h>
 
-/** @brief Get Input format bits per pixel based on stream configuration of this
+/* @brief Get Input format bits per pixel based on stream configuration of this
  * pipe.
  *
  * @param[in] pipe
index f8b2e458f87669646cd5d75c91454fded7fbc9f7..a8c27676a38b353a67853dceecea81c737601390 100644 (file)
@@ -22,7 +22,7 @@
 #include <ia_css_stream_public.h>
 #include <ia_css_stream_format.h>
 
-/** @brief convert "errno" error code to "ia_css_err" error code
+/* @brief convert "errno" error code to "ia_css_err" error code
  *
  * @param[in]  "errno" error code
  * @return     "ia_css_err" error code
@@ -31,7 +31,7 @@
 enum ia_css_err ia_css_convert_errno(
        int in_err);
 
-/** @brief check vf frame info.
+/* @brief check vf frame info.
  *
  * @param[in] info
  * @return     IA_CSS_SUCCESS or error code upon error.
@@ -40,7 +40,7 @@ enum ia_css_err ia_css_convert_errno(
 extern enum ia_css_err ia_css_util_check_vf_info(
        const struct ia_css_frame_info * const info);
 
-/** @brief check input configuration.
+/* @brief check input configuration.
  *
  * @param[in] stream_config
  * @param[in] must_be_raw
@@ -52,7 +52,7 @@ extern enum ia_css_err ia_css_util_check_input(
        bool must_be_raw,
        bool must_be_yuv);
 
-/** @brief check vf and out frame info.
+/* @brief check vf and out frame info.
  *
  * @param[in] out_info
  * @param[in] vf_info
@@ -63,7 +63,7 @@ extern enum ia_css_err ia_css_util_check_vf_out_info(
        const struct ia_css_frame_info * const out_info,
        const struct ia_css_frame_info * const vf_info);
 
-/** @brief check width and height
+/* @brief check width and height
  *
  * @param[in] width
  * @param[in] height
@@ -75,7 +75,7 @@ extern enum ia_css_err ia_css_util_check_res(
        unsigned int height);
 
 #ifdef ISP2401
-/** @brief compare resolutions (less or equal)
+/* @brief compare resolutions (less or equal)
  *
  * @param[in] a resolution
  * @param[in] b resolution
@@ -108,7 +108,7 @@ extern bool ia_css_util_resolution_is_even(
                const struct ia_css_resolution resolution);
 
 #endif
-/** @brief check width and height
+/* @brief check width and height
  *
  * @param[in] stream_format
  * @param[in] two_ppc
@@ -119,7 +119,7 @@ extern unsigned int ia_css_util_input_format_bpp(
        enum ia_css_stream_format stream_format,
        bool two_ppc);
 
-/** @brief check if input format it raw
+/* @brief check if input format is raw
  *
  * @param[in] stream_format
  * @return true if the input format is raw or false otherwise
@@ -128,7 +128,7 @@ extern unsigned int ia_css_util_input_format_bpp(
 extern bool ia_css_util_is_input_format_raw(
        enum ia_css_stream_format stream_format);
 
-/** @brief check if input format it yuv
+/* @brief check if input format is yuv
  *
  * @param[in] stream_format
  * @return true if the input format is yuv or false otherwise
index 6720ab55d6f5bc3e84fc62ae00cd859d738c1c82..9c0cb4a63862e0f08b7f0db7bf8e6de815e6ff3f 100644 (file)
@@ -277,6 +277,6 @@ static inline void csi_rx_be_ctrl_reg_store(
 
        ia_css_device_store_uint32(CSI_RX_BE_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
 }
-/** end of DLI */
+/* end of DLI */
 
 #endif /* __CSI_RX_PRIVATE_H_INCLUDED__ */
index 470c92d287fe550150c4027486ee00f5f1dc7e2d..4d07c2fe14696d344d7dc60091bb810531e83c92 100644 (file)
@@ -192,7 +192,7 @@ STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_dump_state(
                ia_css_print("IBUF controller ID %d Process ID %d isp_sync_state 0x%x\n", ID, i, state->proc_state[i].isp_sync_state);
        }
 }
-/** end of NCI */
+/* end of NCI */
 
 /*****************************************************
  *
@@ -227,7 +227,7 @@ STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_reg_store(
 
        ia_css_device_store_uint32(IBUF_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
 }
-/** end of DLI */
+/* end of DLI */
 
 
 #endif /* __IBUF_CTRL_PRIVATE_H_INCLUDED__ */
index 14d1d3b627a9dc6d8b10dfe109d43d6d31ec4c54..842ae340ae138b4d03cfe36e3e307db22d55f657 100644 (file)
@@ -26,7 +26,7 @@
 #include "isys_irq_private.h"
 #endif
 
-/** Public interface */
+/* Public interface */
 STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_status_enable(
        const isys_irq_ID_t     isys_irqc_id)
 {
index c17ce131c9e1c530fa405ba9e3ff6c128d7509f7..e69f39893bd2eab56fc61c9ffb5d7a7be311c714 100644 (file)
@@ -59,7 +59,7 @@ STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_state_dump(
                state->status, state->edge, state->mask, state->enable, state->level_no);
 }
 
-/** end of NCI */
+/* end of NCI */
 
 /* -------------------------------------------------------+
  |              Device level interface (DLI)              |
@@ -101,7 +101,7 @@ STORAGE_CLASS_ISYS2401_IRQ_C hrt_data isys_irqc_reg_load(
        return value;
 }
 
-/** end of DLI */
+/* end of DLI */
 
 #endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
 
index 1603a09b621ad7c9e6d4532971e82d3e1f36182f..f946105ddf43acbeb4e97c09a5d1e898369ad9a3 100644 (file)
@@ -122,7 +122,7 @@ STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_dump_state(
                stream2mmio_print_sid_state(&(state->sid_state[i]));
        }
 }
-/** end of NCI */
+/* end of NCI */
 
 /*****************************************************
  *
@@ -163,6 +163,6 @@ STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_reg_store(
        ia_css_device_store_uint32(STREAM2MMIO_CTRL_BASE[ID] +
                reg * sizeof(hrt_data), value);
 }
-/** end of DLI */
+/* end of DLI */
 
 #endif /* __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__ */
index 3f34b508f0bfe9178f401a6fd29622f83a4522fe..c5bf540eadf1120084237819688009428155ff1e 100644 (file)
@@ -160,5 +160,5 @@ STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_reg_store(
 
        ia_css_device_store_uint32(PIXELGEN_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
 }
-/** end of DLI */
+/* end of DLI */
 #endif /* __PIXELGEN_PRIVATE_H_INCLUDED__ */
index e7a734a9fc43cc42d13f35d135c100693a19e532..1be5c6956d65b27407635795b99b8d576d5dbe7b 100644 (file)
@@ -46,7 +46,7 @@ struct isys2401_dma_port_cfg_s {
        uint32_t cropping;
        uint32_t width;
  };
-/** end of DMA Port */
+/* end of DMA Port */
 
 /************************************************
  *
@@ -79,7 +79,7 @@ struct isys2401_dma_cfg_s {
        isys2401_dma_extension  extension;
        uint32_t                height;
 };
-/** end of DMA Device */
+/* end of DMA Device */
 
 /* isys2401_dma_channel limits per DMA ID */
 extern const isys2401_dma_channel N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID];
index 216813e42a0aa5bdc9b92435462f9dccb3f69c3d..0bf2feb8bbfb420f890f4eadf69c7ab9f188ef79 100644 (file)
@@ -86,6 +86,6 @@ struct pixelgen_prbs_cfg_s {
        sync_generator_cfg_t    sync_gen_cfg;
 };
 
-/** end of Pixel-generator: TPG. ("pixelgen_global.h") */
+/* end of Pixel-generator: TPG. ("pixelgen_global.h") */
 #endif /* __PIXELGEN_GLOBAL_H_INCLUDED__ */
 
index 9f7ecac4627361871ac27524aca54ed7ecf1e58e..d2e3a2deea2ecf373b4a05a2584bec76855e059a 100644 (file)
@@ -331,7 +331,7 @@ typedef enum {
        IBUF_CTRL2_ID,          /* map ISYS2401_IBUF_CNTRL_C */
        N_IBUF_CTRL_ID
 } ibuf_ctrl_ID_t;
-/** end of Input-buffer Controller */
+/* end of Input-buffer Controller */
 
 /*
  * Stream2MMIO.
@@ -364,7 +364,7 @@ typedef enum {
        STREAM2MMIO_SID7_ID,
        N_STREAM2MMIO_SID_ID
 } stream2mmio_sid_ID_t;
-/** end of Stream2MMIO */
+/* end of Stream2MMIO */
 
 /**
  * Input System 2401: CSI-MIPI receiver.
@@ -390,7 +390,7 @@ typedef enum {
        CSI_RX_DLANE3_ID,               /* map to DLANE3 in CSI RX */
        N_CSI_RX_DLANE_ID
 } csi_rx_fe_dlane_ID_t;
-/** end of CSI-MIPI receiver */
+/* end of CSI-MIPI receiver */
 
 typedef enum {
        ISYS2401_DMA0_ID = 0,
@@ -406,7 +406,7 @@ typedef enum {
        PIXELGEN2_ID,
        N_PIXELGEN_ID
 } pixelgen_ID_t;
-/** end of pixel-generator. ("system_global.h") */
+/* end of pixel-generator. ("system_global.h") */
 
 typedef enum {
        INPUT_SYSTEM_CSI_PORT0_ID = 0,
index 1f6a55ff5db88c77f827fd238aaeca47ec7576d5..efcd6e1679e8f5d4a0868505aa54d3208dd461ca 100644 (file)
@@ -31,7 +31,7 @@ more details.
 #ifndef __CSS_API_VERSION_H
 #define __CSS_API_VERSION_H
 
-/** @file
+/* @file
  * CSS API version file. This file contains the version number of the CSS-API.
  *
  * This file is generated from a set of input files describing the CSS-API
index 5a4eabf79ee238db51b16e51d88a0e259fab91e9..bcfd443f52022100158da33ed55c1ecc0d37dc0e 100644 (file)
@@ -21,7 +21,7 @@
 #endif /* __INLINE_GP_TIMER__ */
 #include "system_local.h"
 
-/** FIXME: not sure if reg_load(), reg_store() should be API.
+/* FIXME: not sure if reg_load(), reg_store() should be API.
  */
 static uint32_t
 gp_timer_reg_load(uint32_t reg);
index 3b5df85fc51082fc37428fc9de6ad2becb2df8d4..426d022d3a2604ad3c32712d0f5114d696f19de5 100644 (file)
@@ -73,7 +73,7 @@ extern void csi_rx_be_ctrl_get_state(
 extern void csi_rx_be_ctrl_dump_state(
                const csi_rx_backend_ID_t ID,
                csi_rx_be_ctrl_state_t *state);
-/** end of NCI */
+/* end of NCI */
 
 /*****************************************************
  *
@@ -130,6 +130,6 @@ extern void csi_rx_be_ctrl_reg_store(
        const csi_rx_backend_ID_t ID,
        const hrt_address reg,
        const hrt_data value);
-/** end of DLI */
+/* end of DLI */
 #endif /* USE_INPUT_SYSTEM_VERSION_2401 */
 #endif /* __CSI_RX_PUBLIC_H_INCLUDED__ */
index 1ac0e64e539cb30154c62d5785c16ac2a108d477..98ee9947fb8ef513c41b13809c110c98a0b8a531 100644 (file)
@@ -54,7 +54,7 @@ STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_get_proc_state(
 STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_dump_state(
                const ibuf_ctrl_ID_t ID,
                ibuf_ctrl_state_t *state);
-/** end of NCI */
+/* end of NCI */
 
 /*****************************************************
  *
@@ -87,7 +87,7 @@ STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_reg_store(
        const ibuf_ctrl_ID_t ID,
        const hrt_address reg,
        const hrt_data value);
-/** end of DLI */
+/* end of DLI */
 
 #endif /* USE_INPUT_SYSTEM_VERSION_2401 */
 #endif /* __IBUF_CTRL_PUBLIC_H_INCLUDED__ */
index a025ad562bd2a15f8dda642a4a70edb6f768c0a3..0d978e5911c0596666804181af9ec5d9b04145ef 100644 (file)
@@ -49,7 +49,7 @@
 
 /* Arithmetic */
 
-/** @brief bitwise AND
+/* @brief bitwise AND
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -63,7 +63,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_and(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief bitwise OR
+/* @brief bitwise OR
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -77,7 +77,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_or(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief bitwise XOR
+/* @brief bitwise XOR
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -91,7 +91,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_xor(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief bitwise inverse
+/* @brief bitwise inverse
  *
  * @param[in] _a       first argument
  *
@@ -105,7 +105,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_inv(
 
 /* Additive */
 
-/** @brief addition
+/* @brief addition
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -120,7 +120,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_add(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief subtraction
+/* @brief subtraction
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -135,7 +135,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_sub(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief saturated addition
+/* @brief saturated addition
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -150,7 +150,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_addsat(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief saturated subtraction
+/* @brief saturated subtraction
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -166,7 +166,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subsat(
     const tvector1w     _b);
 
 #ifdef ISP2401
-/** @brief Unsigned saturated subtraction
+/* @brief Unsigned saturated subtraction
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -182,7 +182,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w_unsigned OP_1w_subsat_u(
     const tvector1w_unsigned _b);
 
 #endif
-/** @brief subtraction with shift right and rounding
+/* @brief subtraction with shift right and rounding
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -202,7 +202,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subasr1(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Subtraction with shift right and rounding
+/* @brief Subtraction with shift right and rounding
  *
  * @param[in] _a       first operand
  * @param[in] _b       second operand
@@ -217,7 +217,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subhalfrnd(
     const tvector1w    _a,
     const tvector1w    _b);
 
-/** @brief Subtraction with shift right and no rounding
+/* @brief Subtraction with shift right and no rounding
  *
  * @param[in] _a       first operand
  * @param[in] _b       second operand
@@ -233,7 +233,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subhalf(
     const tvector1w    _b);
 
 
-/** @brief saturated absolute value
+/* @brief saturated absolute value
  *
  * @param[in] _a       input
  *
@@ -247,7 +247,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subhalf(
 STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_abs(
     const tvector1w     _a);
 
-/** @brief saturated absolute difference
+/* @brief saturated absolute difference
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -264,7 +264,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subabssat(
 
 /* Multiplicative */
 
-/** @brief doubling multiply
+/* @brief doubling multiply
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -281,7 +281,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector2w OP_1w_muld(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief integer multiply
+/* @brief integer multiply
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -298,7 +298,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mul(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief fractional saturating multiply
+/* @brief fractional saturating multiply
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -316,7 +316,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qmul(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief fractional saturating multiply with rounding
+/* @brief fractional saturating multiply with rounding
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -337,7 +337,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qrmul(
 
 /* Comparative */
 
-/** @brief equal
+/* @brief equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -351,7 +351,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_eq(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief not equal
+/* @brief not equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -365,7 +365,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_ne(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief less or equal
+/* @brief less or equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -379,7 +379,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_le(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief less than
+/* @brief less than
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -393,7 +393,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_lt(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief greater or equal
+/* @brief greater or equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -407,7 +407,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_ge(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief greater than
+/* @brief greater than
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -423,7 +423,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_gt(
 
 /* Shift */
 
-/** @brief arithmetic shift right
+/* @brief arithmetic shift right
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -441,7 +441,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asr(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief arithmetic shift right with rounding
+/* @brief arithmetic shift right with rounding
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -460,7 +460,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asrrnd(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief saturating arithmetic shift left
+/* @brief saturating arithmetic shift left
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -480,7 +480,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asl(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief saturating arithmetic shift left
+/* @brief saturating arithmetic shift left
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -493,7 +493,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_aslsat(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief logical shift left
+/* @brief logical shift left
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -510,7 +510,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lsl(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief logical shift right
+/* @brief logical shift right
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -528,7 +528,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lsr(
     const tvector1w     _b);
 
 #ifdef ISP2401
-/** @brief bidirectional saturating arithmetic shift
+/* @brief bidirectional saturating arithmetic shift
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -546,7 +546,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_ashift_sat(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief bidirectional non-saturating arithmetic shift
+/* @brief bidirectional non-saturating arithmetic shift
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -565,7 +565,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_ashift(
     const tvector1w     _b);
 
 
-/** @brief bidirectional logical shift
+/* @brief bidirectional logical shift
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -588,7 +588,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lshift(
 #endif
 /* Cast */
 
-/** @brief Cast from int to 1w
+/* @brief Cast from int to 1w
  *
  * @param[in] _a       input
  *
@@ -601,7 +601,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lshift(
 STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_int_cast_to_1w(
     const int           _a);
 
-/** @brief Cast from 1w to int
+/* @brief Cast from 1w to int
  *
  * @param[in] _a       input
  *
@@ -614,7 +614,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_int_cast_to_1w(
 STORAGE_CLASS_ISP_OP1W_FUNC_H int OP_1w_cast_to_int(
     const tvector1w      _a);
 
-/** @brief Cast from 1w to 2w
+/* @brief Cast from 1w to 2w
  *
  * @param[in] _a       input
  *
@@ -627,7 +627,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H int OP_1w_cast_to_int(
 STORAGE_CLASS_ISP_OP1W_FUNC_H tvector2w OP_1w_cast_to_2w(
     const tvector1w     _a);
 
-/** @brief Cast from 2w to 1w
+/* @brief Cast from 2w to 1w
  *
  * @param[in] _a       input
  *
@@ -641,7 +641,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_2w_cast_to_1w(
     const tvector2w    _a);
 
 
-/** @brief Cast from 2w to 1w with saturation
+/* @brief Cast from 2w to 1w with saturation
  *
  * @param[in] _a       input
  *
@@ -657,7 +657,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_2w_sat_cast_to_1w(
 
 /* clipping */
 
-/** @brief Clip asymmetrical
+/* @brief Clip asymmetrical
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -673,7 +673,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_clip_asym(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Clip zero
+/* @brief Clip zero
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -691,7 +691,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_clipz(
 
 /* division */
 
-/** @brief Truncated division
+/* @brief Truncated division
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -708,7 +708,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_div(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Fractional saturating divide
+/* @brief Fractional saturating divide
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -726,7 +726,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qdiv(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Modulo
+/* @brief Modulo
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -741,7 +741,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mod(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Unsigned integer Square root
+/* @brief Unsigned integer Square root
  *
  * @param[in] _a       input
  *
@@ -754,7 +754,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w_unsigned OP_1w_sqrt_u(
 
 /* Miscellaneous */
 
-/** @brief Multiplexer
+/* @brief Multiplexer
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -770,7 +770,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mux(
     const tvector1w     _b,
     const tflags           _c);
 
-/** @brief Average without rounding
+/* @brief Average without rounding
  *
  * @param[in] _a       first operand
  * @param[in] _b       second operand
@@ -786,7 +786,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w  OP_1w_avg(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Average with rounding
+/* @brief Average with rounding
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -802,7 +802,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_avgrnd(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Minimum
+/* @brief Minimum
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -816,7 +816,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_min(
     const tvector1w     _a,
     const tvector1w     _b);
 
-/** @brief Maximum
+/* @brief Maximum
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
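The briefs above only describe behaviour. As a rough scalar model of a few of the single-width operations (assuming a 16-bit element width and round-half-up rounding; the real routines operate on ISP vectors, and the OP_2w_* header that follows mirrors them at double precision):

	/* Scalar sketch of some OP_1w_* semantics, for intuition only. */
	#define ONE_W_MAX   32767
	#define ONE_W_MIN (-32768)

	static int op_1w_addsat_model(int a, int b)	/* saturated addition */
	{
		int s = a + b;

		if (s > ONE_W_MAX)
			return ONE_W_MAX;
		if (s < ONE_W_MIN)
			return ONE_W_MIN;
		return s;
	}

	static int op_1w_subasr1_model(int a, int b)	/* (a - b) >> 1, rounded */
	{
		return (a - b + 1) >> 1;
	}

	static int op_1w_avgrnd_model(int a, int b)	/* average with rounding */
	{
		return (a + b + 1) >> 1;
	}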
index cf7e7314842dec46940ec2b354c1513789df3498..7575d260b83748b51aab51433bef85984763ccc1 100644 (file)
@@ -48,7 +48,7 @@
 
 /* Arithmetic */
 
-/** @brief bitwise AND
+/* @brief bitwise AND
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -62,7 +62,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_and(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief bitwise OR
+/* @brief bitwise OR
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -76,7 +76,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_or(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief bitwise XOR
+/* @brief bitwise XOR
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -90,7 +90,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_xor(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief bitwise inverse
+/* @brief bitwise inverse
  *
  * @param[in] _a       first argument
  *
@@ -104,7 +104,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_inv(
 
 /* Additive */
 
-/** @brief addition
+/* @brief addition
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -119,7 +119,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_add(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief subtraction
+/* @brief subtraction
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -134,7 +134,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_sub(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief saturated addition
+/* @brief saturated addition
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -149,7 +149,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_addsat(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief saturated subtraction
+/* @brief saturated subtraction
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -164,7 +164,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subsat(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief subtraction with shift right and rounding
+/* @brief subtraction with shift right and rounding
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -184,7 +184,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subasr1(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief Subtraction with shift right and rounding
+/* @brief Subtraction with shift right and rounding
  *
  * @param[in] _a       first operand
  * @param[in] _b       second operand
@@ -199,7 +199,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subhalfrnd(
     const tvector2w    _a,
     const tvector2w    _b);
 
-/** @brief Subtraction with shift right and no rounding
+/* @brief Subtraction with shift right and no rounding
  *
  * @param[in] _a       first operand
  * @param[in] _b       second operand
@@ -214,7 +214,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subhalf(
     const tvector2w    _a,
     const tvector2w    _b);
 
-/** @brief saturated absolute value
+/* @brief saturated absolute value
  *
  * @param[in] _a       input
  *
@@ -228,7 +228,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subhalf(
 STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_abs(
     const tvector2w     _a);
 
-/** @brief saturated absolute difference
+/* @brief saturated absolute difference
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -245,7 +245,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subabssat(
 
 /* Multiplicative */
 
-/** @brief integer multiply
+/* @brief integer multiply
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -262,7 +262,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mul(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief fractional saturating multiply
+/* @brief fractional saturating multiply
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -279,7 +279,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_qmul(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief fractional saturating multiply with rounding
+/* @brief fractional saturating multiply with rounding
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -301,7 +301,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_qrmul(
 
 /* Comparative */
 
-/** @brief equal
+/* @brief equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -315,7 +315,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_eq(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief not equal
+/* @brief not equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -329,7 +329,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_ne(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief less or equal
+/* @brief less or equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -343,7 +343,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_le(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief less than
+/* @brief less than
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -357,7 +357,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_lt(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief greater or equal
+/* @brief greater or equal
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -371,7 +371,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_ge(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief greater than
+/* @brief greater than
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -387,7 +387,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_gt(
 
 /* Shift */
 
-/** @brief arithmetic shift right
+/* @brief arithmetic shift right
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -404,7 +404,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asr(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief arithmetic shift right with rounding
+/* @brief arithmetic shift right with rounding
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -423,7 +423,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asrrnd(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief saturating arithmetic shift left
+/* @brief saturating arithmetic shift left
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -443,7 +443,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asl(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief saturating arithmetic shift left
+/* @brief saturating arithmetic shift left
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -456,7 +456,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_aslsat(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief logical shift left
+/* @brief logical shift left
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -473,7 +473,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_lsl(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief logical shift right
+/* @brief logical shift right
  *
  * @param[in] _a       input
  * @param[in] _b       shift amount
@@ -492,7 +492,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_lsr(
 
 /* clipping */
 
-/** @brief Clip asymmetrical
+/* @brief Clip asymmetrical
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -507,7 +507,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_clip_asym(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief Clip zero
+/* @brief Clip zero
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -524,7 +524,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_clipz(
 
 /* division */
 
-/** @brief Truncated division
+/* @brief Truncated division
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -541,7 +541,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_div(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief Saturating truncated division
+/* @brief Saturating truncated division
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -559,7 +559,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector1w OP_2w_divh(
     const tvector2w     _a,
     const tvector1w     _b);
 
-/** @brief Modulo
+/* @brief Modulo
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -572,7 +572,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mod(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief Unsigned Integer Square root
+/* @brief Unsigned Integer Square root
  *
  * @param[in] _a       input
  *
@@ -585,7 +585,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector1w_unsigned OP_2w_sqrt_u(
 
 /* Miscellaneous */
 
-/** @brief Multiplexer
+/* @brief Multiplexer
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -601,7 +601,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mux(
     const tvector2w     _b,
     const tflags           _c);
 
-/** @brief Average without rounding
+/* @brief Average without rounding
  *
  * @param[in] _a       first operand
  * @param[in] _b       second operand
@@ -617,7 +617,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w  OP_2w_avg(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief Average with rounding
+/* @brief Average with rounding
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -633,7 +633,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_avgrnd(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief Minimum
+/* @brief Minimum
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
@@ -647,7 +647,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_min(
     const tvector2w     _a,
     const tvector2w     _b);
 
-/** @brief Maximum
+/* @brief Maximum
  *
  * @param[in] _a       first argument
  * @param[in] _b       second argument
index 5624cfcfa015354fd1a7792032f6ba0e2c7fd819..6c53ca9df96c3ef4733b99a09d75872f3515eee5 100644 (file)
@@ -43,7 +43,7 @@ STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_get_sid_state(
                const stream2mmio_ID_t ID,
                const stream2mmio_sid_ID_t sid_id,
                stream2mmio_sid_state_t *state);
-/** end of NCI */
+/* end of NCI */
 
 /*****************************************************
  *
@@ -96,6 +96,6 @@ STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_reg_store(
                const stream2mmio_ID_t ID,
                const hrt_address reg,
                const hrt_data value);
-/** end of DLI */
+/* end of DLI */
 
 #endif /* __ISYS_STREAM2MMIO_PUBLIC_H_INCLUDED__ */
index c0f3f3ea32d73c26aa7c3566629d5ab40b8a62c9..f597e07d7c4f9a61253db4954353bbcc67dd4c58 100644 (file)
@@ -41,7 +41,7 @@ STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_get_state(
 STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_dump_state(
                const pixelgen_ID_t ID,
                pixelgen_ctrl_state_t *state);
-/** end of NCI */
+/* end of NCI */
 
 /*****************************************************
  *
@@ -73,7 +73,7 @@ STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_reg_store(
        const pixelgen_ID_t ID,
        const hrt_address reg,
        const hrt_data value);
-/** end of DLI */
+/* end of DLI */
 
 #endif /* USE_INPUT_SYSTEM_VERSION_2401 */
 #endif /* __PIXELGEN_PUBLIC_H_INCLUDED__ */
index a202d6dce1060bb10e896eae1cafdf154e4de795..c1638c06407d240d7af6286ee449ca7fca2df1fd 100644 (file)
@@ -27,7 +27,7 @@
 
 #include "ref_vector_func_types.h"
 
-/** @brief Doubling multiply accumulate with saturation
+/* @brief Doubling multiply accumulate with saturation
  *
  * @param[in] acc accumulator
  * @param[in] a multiply input
@@ -44,7 +44,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector2w OP_1w_maccd_sat(
        tvector1w a,
        tvector1w b );
 
-/** @brief Doubling multiply accumulate
+/* @brief Doubling multiply accumulate
  *
  * @param[in] acc accumulator
  * @param[in] a multiply input
@@ -61,7 +61,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector2w OP_1w_maccd(
        tvector1w a,
        tvector1w b );
 
-/** @brief Re-aligning multiply
+/* @brief Re-aligning multiply
  *
  * @param[in] a multiply input
  * @param[in] b multiply input
@@ -78,7 +78,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_mul_realigning(
        tvector1w b,
        tscalar1w shift );
 
-/** @brief Leading bit index
+/* @brief Leading bit index
  *
  * @param[in] a        input
  *
@@ -92,7 +92,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_mul_realigning(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_lod(
                tvector1w a);
 
-/** @brief Config Unit Input Processing
+/* @brief Config Unit Input Processing
  *
  * @param[in] a            input
  * @param[in] input_scale   input scaling factor
@@ -111,7 +111,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_input_scaling_offset_clamping(
        tscalar1w_5bit_signed input_scale,
        tscalar1w_5bit_signed input_offset);
 
-/** @brief Config Unit Output Processing
+/* @brief Config Unit Output Processing
  *
  * @param[in] a             output
  * @param[in] output_scale   output scaling factor
@@ -127,7 +127,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_output_scaling_clamping(
        tvector1w a,
        tscalar1w_5bit_signed output_scale);
 
-/** @brief Config Unit Piecewise linear estimation
+/* @brief Config Unit Piecewise linear estimation
  *
  * @param[in] a                  input
  * @param[in] config_points   config parameter structure
@@ -143,7 +143,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_piecewise_estimation(
        tvector1w a,
        ref_config_points config_points);
 
-/** @brief Fast Config Unit
+/* @brief Fast Config Unit
  *
  * @param[in] x                input
  * @param[in] init_vectors     LUT data structure
@@ -161,7 +161,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H  tvector1w OP_1w_XCU(
        xcu_ref_init_vectors init_vectors);
 
 
-/** @brief LXCU
+/* @brief LXCU
  *
  * @param[in] x                input
  * @param[in] init_vectors     LUT data structure
@@ -180,7 +180,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_LXCU(
        tvector1w x,
        xcu_ref_init_vectors init_vectors);
 
-/** @brief Coring
+/* @brief Coring
  *
  * @param[in] coring_vec   Amount of coring based on brightness level
  * @param[in] filt_input   Vector of input pixels on which Coring is applied
@@ -196,7 +196,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w coring(
        tvector1w filt_input,
        tscalar1w m_CnrCoring0 );
 
-/** @brief Normalised FIR with coefficients [3,4,1]
+/* @brief Normalised FIR with coefficients [3,4,1]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -209,7 +209,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w coring(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_m90_nrm (
        const s_1w_1x3_matrix           m);
 
-/** @brief Normalised FIR with coefficients [1,4,3]
+/* @brief Normalised FIR with coefficients [1,4,3]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -222,7 +222,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_m90_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_p90_nrm (
        const s_1w_1x3_matrix           m);
 
-/** @brief Normalised FIR with coefficients [1,2,1]
+/* @brief Normalised FIR with coefficients [1,2,1]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -234,7 +234,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_p90_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm (
        const s_1w_1x3_matrix           m);
 
-/** @brief Normalised FIR with coefficients [13,16,3]
+/* @brief Normalised FIR with coefficients [13,16,3]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -246,7 +246,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph0 (
        const s_1w_1x3_matrix           m);
 
-/** @brief Normalised FIR with coefficients [9,16,7]
+/* @brief Normalised FIR with coefficients [9,16,7]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -258,7 +258,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph0 (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph1 (
        const s_1w_1x3_matrix           m);
 
-/** @brief Normalised FIR with coefficients [5,16,11]
+/* @brief Normalised FIR with coefficients [5,16,11]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -270,7 +270,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph1 (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph2 (
        const s_1w_1x3_matrix           m);
 
-/** @brief Normalised FIR with coefficients [1,16,15]
+/* @brief Normalised FIR with coefficients [1,16,15]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -282,7 +282,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph2 (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph3 (
        const s_1w_1x3_matrix           m);
 
-/** @brief Normalised FIR with programmable phase shift
+/* @brief Normalised FIR with programmable phase shift
  *
  * @param[in] m        1x3 matrix with pixels
  * @param[in] coeff    phase shift
@@ -295,7 +295,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph3 (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_calc_coeff (
        const s_1w_1x3_matrix           m, tscalar1w_3bit coeff);
 
-/** @brief 3 tap FIR with coefficients [1,1,1]
+/* @brief 3 tap FIR with coefficients [1,1,1]
  *
  * @param[in] m        1x3 matrix with pixels
  *
@@ -308,7 +308,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_9dB_nrm (
        const s_1w_1x3_matrix           m);
 
 #ifdef ISP2401
-/** @brief      symmetric 3 tap FIR acts as LPF or BSF
+/* @brief      symmetric 3 tap FIR acts as LPF or BSF
  *
  * @param[in] m 1x3 matrix with pixels
  * @param[in] k filter coefficient shift
@@ -336,7 +336,7 @@ sym_fir1x3m_lpf_bsf(s_1w_1x3_matrix m,
                    tscalar_bool bsf_flag);
 #endif
 
-/** @brief Normalised 2D FIR with coefficients  [1;2;1] * [1,2,1]
+/* @brief Normalised 2D FIR with coefficients  [1;2;1] * [1,2,1]
  *
  * @param[in] m        3x3 matrix with pixels
  *
@@ -353,7 +353,7 @@ sym_fir1x3m_lpf_bsf(s_1w_1x3_matrix m,
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_6dB_nrm (
        const s_1w_3x3_matrix           m);
 
-/** @brief Normalised 2D FIR with coefficients  [1;1;1] * [1,1,1]
+/* @brief Normalised 2D FIR with coefficients  [1;1;1] * [1,1,1]
  *
  * @param[in] m        3x3 matrix with pixels
  *
@@ -371,7 +371,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_6dB_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_9dB_nrm (
        const s_1w_3x3_matrix           m);
 
-/** @brief Normalised dual output 2D FIR with coefficients  [1;2;1] * [1,2,1]
+/* @brief Normalised dual output 2D FIR with coefficients  [1;2;1] * [1,2,1]
  *
  * @param[in] m        4x3 matrix with pixels
  *
@@ -391,7 +391,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_9dB_nrm (
  STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_6dB_out2x1_nrm (
        const s_1w_4x3_matrix           m);
 
-/** @brief Normalised dual output 2D FIR with coefficients [1;1;1] * [1,1,1]
+/* @brief Normalised dual output 2D FIR with coefficients [1;1;1] * [1,1,1]
  *
  * @param[in] m        4x3 matrix with pixels
  *
@@ -411,7 +411,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_9dB_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_9dB_out2x1_nrm (
        const s_1w_4x3_matrix           m);
 
-/** @brief Normalised 2D FIR 5x5
+/* @brief Normalised 2D FIR 5x5
  *
  * @param[in] m        5x5 matrix with pixels
  *
@@ -429,7 +429,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_9dB_out2x1_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_15dB_nrm (
        const s_1w_5x5_matrix   m);
 
-/** @brief Normalised FIR 1x5
+/* @brief Normalised FIR 1x5
  *
  * @param[in] m        1x5 matrix with pixels
  *
@@ -447,7 +447,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_15dB_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_12dB_nrm (
        const s_1w_1x5_matrix m);
 
-/** @brief Normalised 2D FIR 5x5
+/* @brief Normalised 2D FIR 5x5
  *
  * @param[in] m        5x5 matrix with pixels
  *
@@ -465,7 +465,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_12dB_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_12dB_nrm (
        const s_1w_5x5_matrix m);
 
-/** @brief Approximate averaging FIR 1x5
+/* @brief Approximate averaging FIR 1x5
  *
  * @param[in] m        1x5 matrix with pixels
  *
@@ -479,7 +479,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_12dB_nrm (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_box (
        s_1w_1x5_matrix m);
 
-/** @brief Approximate averaging FIR 1x9
+/* @brief Approximate averaging FIR 1x9
  *
  * @param[in] m        1x9 matrix with pixels
  *
@@ -493,7 +493,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_box (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x9m_box (
        s_1w_1x9_matrix m);
 
-/** @brief Approximate averaging FIR 1x11
+/* @brief Approximate averaging FIR 1x11
  *
  * @param[in] m        1x11 matrix with pixels
  *
@@ -507,7 +507,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x9m_box (
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x11m_box (
        s_1w_1x11_matrix m);
 
-/** @brief Symmetric 7 tap filter with normalization
+/* @brief Symmetric 7 tap filter with normalization
  *
  *  @param[in] in 1x7 matrix with pixels
  *  @param[in] coeff 1x4 matrix with coefficients
@@ -528,7 +528,7 @@ fir1x7m_sym_nrm(s_1w_1x7_matrix in,
                s_1w_1x4_matrix coeff,
                tvector1w out_shift);
 
-/** @brief Symmetric 7 tap filter with normalization at input side
+/* @brief Symmetric 7 tap filter with normalization at input side
  *
  *  @param[in] in 1x7 matrix with pixels
  *  @param[in] coeff 1x4 matrix with coefficients
@@ -549,7 +549,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
 fir1x7m_sym_innrm_approx(s_1w_1x7_matrix in,
                         s_1w_1x4_matrix coeff);
 
-/** @brief Symmetric 7 tap filter with normalization at output side
+/* @brief Symmetric 7 tap filter with normalization at output side
  *
  *  @param[in] in 1x7 matrix with pixels
  *  @param[in] coeff 1x4 matrix with coefficients
@@ -571,7 +571,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
 fir1x7m_sym_outnrm_approx(s_1w_1x7_matrix in,
                         s_1w_1x4_matrix coeff);
 
-/** @brief 4 tap filter with normalization
+/* @brief 4 tap filter with normalization
  *
  *  @param[in] in 1x4 matrix with pixels
  *  @param[in] coeff 1x4 matrix with coefficients
@@ -588,7 +588,7 @@ fir1x4m_nrm(s_1w_1x4_matrix in,
                s_1w_1x4_matrix coeff,
                tvector1w out_shift);
 
-/** @brief 4 tap filter with normalization for half pixel interpolation
+/* @brief 4 tap filter with normalization for half pixel interpolation
  *
  *  @param[in] in 1x4 matrix with pixels
  *
@@ -604,7 +604,7 @@ fir1x4m_nrm(s_1w_1x4_matrix in,
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
 fir1x4m_bicubic_bezier_half(s_1w_1x4_matrix in);
 
-/** @brief 4 tap filter with normalization for quarter pixel interpolation
+/* @brief 4 tap filter with normalization for quarter pixel interpolation
  *
  *  @param[in] in 1x4 matrix with pixels
  *  @param[in] coeff 1x4 matrix with coefficients
@@ -626,7 +626,7 @@ fir1x4m_bicubic_bezier_quarter(s_1w_1x4_matrix in,
                        s_1w_1x4_matrix coeff);
 
 
-/** @brief Symmetric 3 tap filter with normalization
+/* @brief Symmetric 3 tap filter with normalization
  *
  *  @param[in] in 1x3 matrix with pixels
  *  @param[in] coeff 1x2 matrix with coefficients
@@ -646,7 +646,7 @@ fir1x3m_sym_nrm(s_1w_1x3_matrix in,
                s_1w_1x2_matrix coeff,
                tvector1w out_shift);
 
-/** @brief Symmetric 3 tap filter with normalization
+/* @brief Symmetric 3 tap filter with normalization
  *
  *  @param[in] in 1x3 matrix with pixels
  *  @param[in] coeff 1x2 matrix with coefficients
@@ -666,7 +666,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
 fir1x3m_sym_nrm_approx(s_1w_1x3_matrix in,
                       s_1w_1x2_matrix coeff);
 
-/** @brief Mean of 1x3 matrix
+/* @brief Mean of 1x3 matrix
  *
  *  @param[in] m 1x3 matrix with pixels
  *
@@ -678,7 +678,7 @@ fir1x3m_sym_nrm_approx(s_1w_1x3_matrix in,
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x3m(
        s_1w_1x3_matrix m);
 
-/** @brief Mean of 3x3 matrix
+/* @brief Mean of 3x3 matrix
  *
  *  @param[in] m 3x3 matrix with pixels
  *
@@ -690,7 +690,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x3m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean3x3m(
        s_1w_3x3_matrix m);
 
-/** @brief Mean of 1x4 matrix
+/* @brief Mean of 1x4 matrix
  *
  *  @param[in] m 1x4 matrix with pixels
  *
@@ -701,7 +701,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean3x3m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x4m(
        s_1w_1x4_matrix m);
 
-/** @brief Mean of 4x4 matrix
+/* @brief Mean of 4x4 matrix
  *
  *  @param[in] m 4x4 matrix with pixels
  *
@@ -712,7 +712,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x4m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean4x4m(
        s_1w_4x4_matrix m);
 
-/** @brief Mean of 2x3 matrix
+/* @brief Mean of 2x3 matrix
  *
  *  @param[in] m 2x3 matrix with pixels
  *
@@ -724,7 +724,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean4x4m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean2x3m(
        s_1w_2x3_matrix m);
 
-/** @brief Mean of 1x5 matrix
+/* @brief Mean of 1x5 matrix
  *
  *  @param[in] m 1x5 matrix with pixels
  *
@@ -735,7 +735,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean2x3m(
 */
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x5m(s_1w_1x5_matrix m);
 
-/** @brief Mean of 1x6 matrix
+/* @brief Mean of 1x6 matrix
  *
  *  @param[in] m 1x6 matrix with pixels
  *
@@ -747,7 +747,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x5m(s_1w_1x5_matrix m);
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x6m(
        s_1w_1x6_matrix m);
 
-/** @brief Mean of 5x5 matrix
+/* @brief Mean of 5x5 matrix
  *
  *  @param[in] m 5x5 matrix with pixels
  *
@@ -759,7 +759,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x6m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean5x5m(
        s_1w_5x5_matrix m);
 
-/** @brief Mean of 6x6 matrix
+/* @brief Mean of 6x6 matrix
  *
  *  @param[in] m 6x6 matrix with pixels
  *
@@ -771,7 +771,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean5x5m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean6x6m(
        s_1w_6x6_matrix m);
 
-/** @brief Minimum of 4x4 matrix
+/* @brief Minimum of 4x4 matrix
  *
  *  @param[in] m 4x4 matrix with pixels
  *
@@ -783,7 +783,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean6x6m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w min4x4m(
        s_1w_4x4_matrix m);
 
-/** @brief Maximum of 4x4 matrix
+/* @brief Maximum of 4x4 matrix
  *
  *  @param[in] m 4x4 matrix with pixels
  *
@@ -795,7 +795,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w min4x4m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w max4x4m(
        s_1w_4x4_matrix m);
 
-/** @brief SAD between two 3x3 matrices
+/* @brief SAD between two 3x3 matrices
  *
  *  @param[in] a 3x3 matrix with pixels
  *
@@ -813,7 +813,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad3x3m_precise(
        s_1w_3x3_matrix a,
        s_1w_3x3_matrix b);
 
-/** @brief SAD between two 3x3 matrices
+/* @brief SAD between two 3x3 matrices
  *
  *  @param[in] a 3x3 matrix with pixels
  *
@@ -833,7 +833,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad3x3m(
        s_1w_3x3_matrix a,
        s_1w_3x3_matrix b);
 
-/** @brief SAD between two 5x5 matrices
+/* @brief SAD between two 5x5 matrices
  *
  *  @param[in] a 5x5 matrix with pixels
  *
@@ -847,7 +847,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad5x5m(
        s_1w_5x5_matrix a,
        s_1w_5x5_matrix b);
 
-/** @brief Absolute gradient between two sets of 1x5 matrices
+/* @brief Absolute gradient between two sets of 1x5 matrices
  *
  *  @param[in] m0 first set of 1x5 matrix with pixels
  *  @param[in] m1 second set of 1x5 matrix with pixels
@@ -860,7 +860,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad5x5m(
 STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
 absgrad1x5m(s_1w_1x5_matrix m0, s_1w_1x5_matrix m1);
 
-/** @brief Bi-linear Interpolation optimized(approximate)
+/* @brief Bi-linear Interpolation optimized(approximate)
  *
  * @param[in] a input0
  * @param[in] b input1
@@ -882,7 +882,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol_approx_c(
        tvector1w b,
        tscalar1w_weight c);
 
-/** @brief Bi-linear Interpolation optimized(approximate)
+/* @brief Bi-linear Interpolation optimized(approximate)
  *
  * @param[in] a input0
  * @param[in] b input1
@@ -904,7 +904,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol_approx(
        tvector1w b,
        tvector1w_weight c);
 
-/** @brief Bi-linear Interpolation
+/* @brief Bi-linear Interpolation
  *
  * @param[in] a input0
  * @param[in] b input1
@@ -925,7 +925,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol(
        tvector1w b,
        tscalar1w_weight c);
 
-/** @brief Generic Block Matching Algorithm
+/* @brief Generic Block Matching Algorithm
  * @param[in] search_window pointer to input search window of 16x16 pixels
  * @param[in] ref_block pointer to input reference block of 8x8 pixels, where N<=M
  * @param[in] output pointer to output sads
@@ -954,9 +954,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H int generic_block_matching_algorithm(
        tscalar1w_4bit_bma_shift shift);
 
 #ifndef ISP2401
-/** @brief OP_1w_asp_bma_16_1_32way
+/* @brief OP_1w_asp_bma_16_1_32way
 #else
-/** @brief OP_1w_asp_bma_16_1_32way_nomask
+/* @brief OP_1w_asp_bma_16_1_32way_nomask
 #endif
  *
  * @param[in] search_area input search window of 16x16 pixels
@@ -984,9 +984,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_16_1 OP_1w_asp_bma_16_1_32way_nomask(
        tscalar1w_4bit_bma_shift shift);
 
 #ifndef ISP2401
-/** @brief OP_1w_asp_bma_16_2_32way
+/* @brief OP_1w_asp_bma_16_2_32way
 #else
-/** @brief OP_1w_asp_bma_16_2_32way_nomask
+/* @brief OP_1w_asp_bma_16_2_32way_nomask
 #endif
  *
  * @param[in] search_area input search window of 16x16 pixels
@@ -1011,9 +1011,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_16_2 OP_1w_asp_bma_16_2_32way_nomask(
        ref_block_8x8 input_block,
        tscalar1w_4bit_bma_shift shift);
 #ifndef ISP2401
-/** @brief OP_1w_asp_bma_14_1_32way
+/* @brief OP_1w_asp_bma_14_1_32way
 #else
-/** @brief OP_1w_asp_bma_14_1_32way_nomask
+/* @brief OP_1w_asp_bma_14_1_32way_nomask
 #endif
  *
  * @param[in] search_area input search block of 16x16 pixels with search window of 14x14 pixels
@@ -1041,9 +1041,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_14_1 OP_1w_asp_bma_14_1_32way_nomask(
        tscalar1w_4bit_bma_shift shift);
 
 #ifndef ISP2401
-/** @brief OP_1w_asp_bma_14_2_32way
+/* @brief OP_1w_asp_bma_14_2_32way
 #else
-/** @brief OP_1w_asp_bma_14_2_32way_nomask
+/* @brief OP_1w_asp_bma_14_2_32way_nomask
 #endif
  *
  * @param[in] search_area input search block of 16x16 pixels with search window of 14x14 pixels
@@ -1069,7 +1069,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_14_2 OP_1w_asp_bma_14_2_32way_nomask(
        tscalar1w_4bit_bma_shift shift);
 
 #ifdef ISP2401
-/** @brief multiplex addition and passing
+/* @brief multiplex addition and passing
  *
  *  @param[in] _a first pixel
  *  @param[in] _b second pixel
@@ -1087,7 +1087,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_cond_add(
 
 #endif
 #ifdef HAS_bfa_unit
-/** @brief OP_1w_single_bfa_7x7
+/* @brief OP_1w_single_bfa_7x7
  *
  * @param[in] weights - spatial and range weight lut
  * @param[in] threshold - threshold plane, for range weight scaling
@@ -1115,7 +1115,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bfa_7x7_output OP_1w_single_bfa_7x7(
        tvector1w central_pix,
        s_1w_7x7_matrix src_plane);
 
-/** @brief OP_1w_joint_bfa_7x7
+/* @brief OP_1w_joint_bfa_7x7
  *
  * @param[in] weights - spatial and range weight lut
  * @param[in] threshold0 - 1st threshold plane, for range weight scaling
@@ -1149,7 +1149,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bfa_7x7_output OP_1w_joint_bfa_7x7(
        tvector1w central_pix1,
        s_1w_7x7_matrix src1_plane);
 
-/** @brief bbb_bfa_gen_spatial_weight_lut
+/* @brief bbb_bfa_gen_spatial_weight_lut
  *
  * @param[in] in - 7x7 matrix of spatial weights
  * @param[in] out - generated LUT
@@ -1163,7 +1163,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H void bbb_bfa_gen_spatial_weight_lut(
        s_1w_7x7_matrix in,
        tvector1w out[BFA_MAX_KWAY]);
 
-/** @brief bbb_bfa_gen_range_weight_lut
+/* @brief bbb_bfa_gen_range_weight_lut
  *
  * @param[in] in - input range weight,
  * @param[in] out - generated LUT
@@ -1184,7 +1184,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H void bbb_bfa_gen_range_weight_lut(
 #endif
 
 #ifdef ISP2401
-/** @brief OP_1w_imax32
+/* @brief OP_1w_imax32
  *
  * @param[in] src - structure that holds an array of 32 elements.
  *
@@ -1195,7 +1195,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H void bbb_bfa_gen_range_weight_lut(
 STORAGE_CLASS_REF_VECTOR_FUNC_H int OP_1w_imax32(
        imax32_ref_in_vector src);
 
-/** @brief OP_1w_imaxidx32
+/* @brief OP_1w_imaxidx32
  *
  * @param[in] src - structure that holds a vector of elements.
  *
index e85e5c889c1551ff402ff8458bbc00f237557633..6436dae0007e1c89b4b1a5a78e9032d900a60e1e 100644 (file)
@@ -168,7 +168,7 @@ static inline unsigned int round_half_down_mul(unsigned int a, unsigned int b)
 }
 #endif
 
-/** @brief Next Power of Two
+/* @brief Next Power of Two
  *
  *  @param[in] unsigned number
  *
index d80437c58bdecfa78368ae7e09d668c63e3b6ac6..f4d9674cdab64f47b8749260b97e27a6e495037c 100644 (file)
@@ -23,7 +23,7 @@
  */
 
 
-/** @brief Copy from src_buf to dest_buf.
+/* @brief Copy from src_buf to dest_buf.
  *
  * @param[out] dest_buf. Destination buffer to copy to
  * @param[in]  dest_size. The size of the destination buffer in bytes
@@ -53,7 +53,7 @@ static inline int memcpy_s(
        return 0;
 }
 
-/** @brief Get the length of the string, excluding the null terminator
+/* @brief Get the length of the string, excluding the null terminator
  *
  * @param[in]  src_str. The source string
  * @param[in]  max_len. Look only for max_len bytes in the string
@@ -78,7 +78,7 @@ static size_t strnlen_s(
        return ix;
 }
 
-/** @brief Copy string from src_str to dest_str
+/* @brief Copy string from src_str to dest_str
  *
  * @param[out] dest_str. Destination buffer to copy to
  * @param[in]  dest_size. The size of the destination buffer in bytes
@@ -120,7 +120,7 @@ static inline int strncpy_s(
        return 0;
 }
 
-/** @brief Copy string from src_str to dest_str
+/* @brief Copy string from src_str to dest_str
  *
  * @param[out] dest_str. Destination buffer to copy to
  * @param[in]  dest_size. The size of the destination buffer in bytes
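For orientation, a usage sketch of the bounded string helpers above; the parameter order follows the @param tags and the integer return convention shown for memcpy_s(), both of which should be treated as assumptions here:

	/* Sketch only: NAME_LEN and handle_truncation() are made up. */
	char dst[NAME_LEN];
	size_t n = strnlen_s(src, NAME_LEN);	/* length without the terminator */

	if (strncpy_s(dst, sizeof(dst), src, n) != 0)
		handle_truncation();		/* assumed non-zero means error */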
index 9aa8c168a8036df07f16182125afa298e6ed43f6..2cf1d58941bfc5f806c1f0920c9587a5f4f1ec59 100644 (file)
@@ -17,7 +17,7 @@
 #include <assert_support.h>
 #include "tag_local.h"
 
-/**
+/*
  * @brief      Creates the tag description from the given parameters.
  * @param[in]  num_captures
  * @param[in]  skip
@@ -39,7 +39,7 @@ sh_css_create_tag_descr(int num_captures,
        tag_descr->exp_id       = exp_id;
 }
 
-/**
+/*
  * @brief      Encodes the members of tag description into a 32-bit value.
  * @param[in]  tag             Pointer to the tag description
  * @return     (unsigned int)  Encoded 32-bit tag-info
index 2458b3767c90c77a3d065be1a03ee651b082f813..e44df6916d900b377cabd16470eac28ccbfbb13c 100644 (file)
@@ -16,7 +16,7 @@
 #ifndef _IA_CSS_H_
 #define _IA_CSS_H_
 
-/** @file
+/* @file
  * This file is the starting point of the CSS-API. It includes all CSS-API
  * header files.
  */
index a80a7dbaf712e333fc0543eeb468bf416723850e..080198796ad0d4848456e74fd8dc736915cea957 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_3A_H
 #define __IA_CSS_3A_H
 
-/** @file
+/* @file
  * This file contains types used for 3A statistics
  */
 
@@ -31,7 +31,7 @@ enum ia_css_3a_tables {
        IA_CSS_NUM_3A_TABLES
 };
 
-/** Structure that holds 3A statistics in the ISP internal
+/* Structure that holds 3A statistics in the ISP internal
  * format. Use ia_css_get_3a_statistics() to translate
  * this to the format used on the host (3A library).
  * */
@@ -48,13 +48,13 @@ struct ia_css_isp_3a_statistics {
        struct {
                ia_css_ptr rgby_tbl;
        } data_hmem;
-       uint32_t exp_id;     /**< exposure id, to match statistics to a frame,
+       uint32_t exp_id;     /** exposure id, to match statistics to a frame,
                                  see ia_css_event_public.h for more detail. */
-       uint32_t isp_config_id;/**< Unique ID to track which config was actually applied to a particular frame */
-       ia_css_ptr data_ptr; /**< pointer to base of all data */
-       uint32_t   size;     /**< total size of all data */
+       uint32_t isp_config_id;/** Unique ID to track which config was actually applied to a particular frame */
+       ia_css_ptr data_ptr; /** pointer to base of all data */
+       uint32_t   size;     /** total size of all data */
        uint32_t   dmem_size;
-       uint32_t   vmem_size; /**< both lo and hi have this size */
+       uint32_t   vmem_size; /** both lo and hi have this size */
        uint32_t   hmem_size;
 };
 #define SIZE_OF_DMEM_STRUCT                                            \
@@ -77,7 +77,7 @@ struct ia_css_isp_3a_statistics {
         SIZE_OF_IA_CSS_PTR +                                           \
         4 * sizeof(uint32_t))
 
-/** Map with host-side pointers to ISP-format statistics.
+/* Map with host-side pointers to ISP-format statistics.
  * These pointers can either be copies of ISP data or memory mapped
  * ISP pointers.
  * All of the data behind these pointers is allocated contiguously, the
@@ -85,17 +85,17 @@ struct ia_css_isp_3a_statistics {
  * point into this one block of data.
  */
 struct ia_css_isp_3a_statistics_map {
-       void                    *data_ptr; /**< Pointer to start of memory */
+       void                    *data_ptr; /** Pointer to start of memory */
        struct ia_css_3a_output *dmem_stats;
        uint16_t                *vmem_stats_hi;
        uint16_t                *vmem_stats_lo;
        struct ia_css_bh_table  *hmem_stats;
-       uint32_t                 size; /**< total size in bytes of data_ptr */
-       uint32_t                 data_allocated; /**< indicate whether data_ptr
+       uint32_t                 size; /** total size in bytes of data_ptr */
+       uint32_t                 data_allocated; /** indicate whether data_ptr
                                                    was allocated or not. */
 };
 
-/** @brief Copy and translate 3A statistics from an ISP buffer to a host buffer
+/* @brief Copy and translate 3A statistics from an ISP buffer to a host buffer
  * @param[out] host_stats Host buffer.
  * @param[in]  isp_stats ISP buffer.
  * @return     error value if temporary memory cannot be allocated
@@ -109,7 +109,7 @@ enum ia_css_err
 ia_css_get_3a_statistics(struct ia_css_3a_statistics           *host_stats,
                         const struct ia_css_isp_3a_statistics *isp_stats);
 
-/** @brief Translate 3A statistics from ISP format to host format.
+/* @brief Translate 3A statistics from ISP format to host format.
  * @param[out] host_stats host-format statistics
  * @param[in]  isp_stats  ISP-format statistics
  * @return     None
@@ -125,35 +125,35 @@ ia_css_translate_3a_statistics(
 
 /* Convenience functions for alloc/free of certain datatypes */
 
-/** @brief Allocate memory for the 3a statistics on the ISP
+/* @brief Allocate memory for the 3a statistics on the ISP
  * @param[in]  grid The grid.
  * @return             Pointer to the allocated 3a statistics buffer on the ISP
 */
 struct ia_css_isp_3a_statistics *
 ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid);
 
-/** @brief Free the 3a statistics memory on the isp
+/* @brief Free the 3a statistics memory on the isp
  * @param[in]  me Pointer to the 3a statistics buffer on the ISP.
  * @return             None
 */
 void
 ia_css_isp_3a_statistics_free(struct ia_css_isp_3a_statistics *me);
 
-/** @brief Allocate memory for the 3a statistics on the host
+/* @brief Allocate memory for the 3a statistics on the host
  * @param[in]  grid The grid.
  * @return             Pointer to the allocated 3a statistics buffer on the host
 */
 struct ia_css_3a_statistics *
 ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid);
 
-/** @brief Free the 3a statistics memory on the host
+/* @brief Free the 3a statistics memory on the host
  * @param[in]  me Pointer to the 3a statistics buffer on the host.
  * @return             None
  */
 void
 ia_css_3a_statistics_free(struct ia_css_3a_statistics *me);
 
-/** @brief Allocate a 3a statistics map structure
+/* @brief Allocate a 3a statistics map structure
  * @param[in]  isp_stats pointer to ISP 3a statistis struct
  * @param[in]  data_ptr  host-side pointer to ISP 3a statistics.
  * @return             Pointer to the allocated 3a statistics map
@@ -174,7 +174,7 @@ ia_css_isp_3a_statistics_map_allocate(
        const struct ia_css_isp_3a_statistics *isp_stats,
        void *data_ptr);
 
-/** @brief Free the 3a statistics map
+/* @brief Free the 3a statistics map
  * @param[in]  me Pointer to the 3a statistics map
  * @return             None
  *
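
The alloc/translate/free trio documented in this header pairs an ISP-format statistics buffer with a host-format one, with ia_css_get_3a_statistics() doing the copy and translation in a single call. A minimal usage sketch, assuming this header is ia_css_3a.h and that a valid struct ia_css_3a_grid_info has already been obtained from the pipe configuration (not shown here):

    #include "ia_css_3a.h"

    static void copy_3a_stats(const struct ia_css_3a_grid_info *grid)
    {
            struct ia_css_isp_3a_statistics *isp_stats;
            struct ia_css_3a_statistics *host_stats;

            isp_stats  = ia_css_isp_3a_statistics_allocate(grid);  /* ISP-format buffer */
            host_stats = ia_css_3a_statistics_allocate(grid);      /* host-format buffer */

            if (isp_stats && host_stats) {
                    /* ...hand isp_stats to the ISP and wait for the 3A statistics... */
                    if (ia_css_get_3a_statistics(host_stats, isp_stats) == IA_CSS_SUCCESS) {
                            /* host_stats now holds the host (3A library) format */
                    }
            }
            if (host_stats)
                    ia_css_3a_statistics_free(host_stats);
            if (isp_stats)
                    ia_css_isp_3a_statistics_free(isp_stats);
    }
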
index a2a1873aca83e09cddb7257b681cd198738d63db..138bc3bb4627e5056f1fc1e828142f3ab1f430f3 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef _IA_CSS_ACC_TYPES_H
 #define _IA_CSS_ACC_TYPES_H
 
-/** @file
+/* @file
  * This file contains types used for acceleration
  */
 
  * in the kernel and HAL.
 */
 
-/** Type of acceleration.
+/* Type of acceleration.
  */
 enum ia_css_acc_type {
-       IA_CSS_ACC_NONE,        /**< Normal binary */
-       IA_CSS_ACC_OUTPUT,      /**< Accelerator stage on output frame */
-       IA_CSS_ACC_VIEWFINDER,  /**< Accelerator stage on viewfinder frame */
-       IA_CSS_ACC_STANDALONE,  /**< Stand-alone acceleration */
+       IA_CSS_ACC_NONE,        /** Normal binary */
+       IA_CSS_ACC_OUTPUT,      /** Accelerator stage on output frame */
+       IA_CSS_ACC_VIEWFINDER,  /** Accelerator stage on viewfinder frame */
+       IA_CSS_ACC_STANDALONE,  /** Stand-alone acceleration */
 };
 
-/** Cells types
+/* Cells types
  */
 enum ia_css_cell_type {
        IA_CSS_SP0 = 0,
@@ -58,45 +58,45 @@ enum ia_css_cell_type {
        MAX_NUM_OF_CELLS
 };
 
-/** Firmware types.
+/* Firmware types.
  */
 enum ia_css_fw_type {
-       ia_css_sp_firmware,             /**< Firmware for the SP */
-       ia_css_isp_firmware,    /**< Firmware for the ISP */
-       ia_css_bootloader_firmware, /**< Firmware for the BootLoader */
-       ia_css_acc_firmware             /**< Firmware for accelrations */
+       ia_css_sp_firmware,             /** Firmware for the SP */
+       ia_css_isp_firmware,    /** Firmware for the ISP */
+       ia_css_bootloader_firmware, /** Firmware for the BootLoader */
+       ia_css_acc_firmware             /** Firmware for accelerations */
 };
 
 struct ia_css_blob_descr;
 
-/** Blob descriptor.
+/* Blob descriptor.
  * This structure describes an SP or ISP blob.
  * It describes the test, data and bss sections as well as position in a
  * firmware file.
  * For convenience, it contains dynamic data after loading.
  */
 struct ia_css_blob_info {
-       /**< Static blob data */
-       uint32_t offset;                /**< Blob offset in fw file */
-       struct ia_css_isp_param_memory_offsets memory_offsets;  /**< offset wrt hdr in bytes */
-       uint32_t prog_name_offset;  /**< offset wrt hdr in bytes */
-       uint32_t size;                  /**< Size of blob */
-       uint32_t padding_size;  /**< total cummulative of bytes added due to section alignment */
-       uint32_t icache_source; /**< Position of icache in blob */
-       uint32_t icache_size;   /**< Size of icache section */
-       uint32_t icache_padding;/**< bytes added due to icache section alignment */
-       uint32_t text_source;   /**< Position of text in blob */
-       uint32_t text_size;             /**< Size of text section */
-       uint32_t text_padding;  /**< bytes added due to text section alignment */
-       uint32_t data_source;   /**< Position of data in blob */
-       uint32_t data_target;   /**< Start of data in SP dmem */
-       uint32_t data_size;             /**< Size of text section */
-       uint32_t data_padding;  /**< bytes added due to data section alignment */
-       uint32_t bss_target;    /**< Start position of bss in SP dmem */
-       uint32_t bss_size;              /**< Size of bss section */
-       /**< Dynamic data filled by loader */
-       CSS_ALIGN(const void  *code, 8);                /**< Code section absolute pointer within fw, code = icache + text */
-       CSS_ALIGN(const void  *data, 8);                /**< Data section absolute pointer within fw, data = data + bss */
+       /** Static blob data */
+       uint32_t offset;                /** Blob offset in fw file */
+       struct ia_css_isp_param_memory_offsets memory_offsets;  /** offset wrt hdr in bytes */
+       uint32_t prog_name_offset;  /** offset wrt hdr in bytes */
+       uint32_t size;                  /** Size of blob */
+       uint32_t padding_size;  /** total cumulative bytes added due to section alignment */
+       uint32_t icache_source; /** Position of icache in blob */
+       uint32_t icache_size;   /** Size of icache section */
+       uint32_t icache_padding;/** bytes added due to icache section alignment */
+       uint32_t text_source;   /** Position of text in blob */
+       uint32_t text_size;             /** Size of text section */
+       uint32_t text_padding;  /** bytes added due to text section alignment */
+       uint32_t data_source;   /** Position of data in blob */
+       uint32_t data_target;   /** Start of data in SP dmem */
+       uint32_t data_size;             /** Size of data section */
+       uint32_t data_padding;  /** bytes added due to data section alignment */
+       uint32_t bss_target;    /** Start position of bss in SP dmem */
+       uint32_t bss_size;              /** Size of bss section */
+       /** Dynamic data filled by loader */
+       CSS_ALIGN(const void  *code, 8);                /** Code section absolute pointer within fw, code = icache + text */
+       CSS_ALIGN(const void  *data, 8);                /** Data section absolute pointer within fw, data = data + bss */
 };
 
 struct ia_css_binary_input_info {
@@ -140,9 +140,9 @@ struct ia_css_binary_s3a_info {
        uint32_t                fixed_s3a_deci_log;
 };
 
-/** DPC related binary info */
+/* DPC related binary info */
 struct ia_css_binary_dpc_info {
-       uint32_t                bnr_lite; /**< bnr lite enable flag */
+       uint32_t                bnr_lite; /** bnr lite enable flag */
 };
 
 struct ia_css_binary_iterator_info {
@@ -193,7 +193,7 @@ struct ia_css_binary_block_info {
        uint32_t        output_block_height;
 };
 
-/** Structure describing an ISP binary.
+/* Structure describing an ISP binary.
  * It describes the capabilities of a binary, like the maximum resolution,
  * support features, dma channels, uds features, etc.
  * This part is to be used by the SP.
@@ -210,7 +210,7 @@ struct ia_css_binary_info {
        struct ia_css_binary_dvs_info           dvs;
        struct ia_css_binary_vf_dec_info        vf_dec;
        struct ia_css_binary_s3a_info           s3a;
-       struct ia_css_binary_dpc_info           dpc_bnr; /**< DPC related binary info */
+       struct ia_css_binary_dpc_info           dpc_bnr; /** DPC related binary info */
        struct ia_css_binary_iterator_info      iterator;
        struct ia_css_binary_address_info       addresses;
        struct ia_css_binary_uds_info           uds;
@@ -269,7 +269,7 @@ struct ia_css_binary_info {
        } dma;
 };
 
-/** Structure describing an ISP binary.
+/* Structure describing an ISP binary.
  * It describes the capabilities of a binary, like the maximum resolution,
  * support features, dma channels, uds features, etc.
  */
@@ -281,8 +281,8 @@ struct ia_css_binary_xinfo {
        enum ia_css_acc_type         type;
        CSS_ALIGN(int32_t            num_output_formats, 8);
        enum ia_css_frame_format     output_formats[IA_CSS_FRAME_FORMAT_NUM];
-       CSS_ALIGN(int32_t            num_vf_formats, 8); /**< number of supported vf formats */
-       enum ia_css_frame_format     vf_formats[IA_CSS_FRAME_FORMAT_NUM]; /**< types of supported vf formats */
+       CSS_ALIGN(int32_t            num_vf_formats, 8); /** number of supported vf formats */
+       enum ia_css_frame_format     vf_formats[IA_CSS_FRAME_FORMAT_NUM]; /** types of supported vf formats */
        uint8_t                      num_output_pins;
        ia_css_ptr                   xmem_addr;
        CSS_ALIGN(const struct ia_css_blob_descr *blob, 8);
@@ -291,55 +291,55 @@ struct ia_css_binary_xinfo {
        CSS_ALIGN(struct ia_css_binary_xinfo *next, 8);
 };
 
-/** Structure describing the Bootloader (an ISP binary).
+/* Structure describing the Bootloader (an ISP binary).
  * It contains several address, either in ddr, isp_dmem or
  * the entry function in icache.
  */
 struct ia_css_bl_info {
-       uint32_t num_dma_cmds;  /**< Number of cmds sent by CSS */
-       uint32_t dma_cmd_list;  /**< Dma command list sent by CSS */
-       uint32_t sw_state;      /**< Polled from css */
+       uint32_t num_dma_cmds;  /** Number of cmds sent by CSS */
+       uint32_t dma_cmd_list;  /** Dma command list sent by CSS */
+       uint32_t sw_state;      /** Polled from css */
        /* Entry functions */
-       uint32_t bl_entry;      /**< The SP entry function */
+       uint32_t bl_entry;      /** The SP entry function */
 };
 
-/** Structure describing the SP binary.
+/* Structure describing the SP binary.
  * It contains several address, either in ddr, sp_dmem or
  * the entry function in pmem.
  */
 struct ia_css_sp_info {
-       uint32_t init_dmem_data; /**< data sect config, stored to dmem */
-       uint32_t per_frame_data; /**< Per frame data, stored to dmem */
-       uint32_t group;         /**< Per pipeline data, loaded by dma */
-       uint32_t output;                /**< SP output data, loaded by dmem */
-       uint32_t host_sp_queue; /**< Host <-> SP queues */
-       uint32_t host_sp_com;/**< Host <-> SP commands */
-       uint32_t isp_started;   /**< Polled from sensor thread, csim only */
-       uint32_t sw_state;      /**< Polled from css */
-       uint32_t host_sp_queues_initialized; /**< Polled from the SP */
-       uint32_t sleep_mode;  /**< different mode to halt SP */
-       uint32_t invalidate_tlb;                /**< inform SP to invalidate mmu TLB */
+       uint32_t init_dmem_data; /** data sect config, stored to dmem */
+       uint32_t per_frame_data; /** Per frame data, stored to dmem */
+       uint32_t group;         /** Per pipeline data, loaded by dma */
+       uint32_t output;                /** SP output data, loaded by dmem */
+       uint32_t host_sp_queue; /** Host <-> SP queues */
+       uint32_t host_sp_com;/** Host <-> SP commands */
+       uint32_t isp_started;   /** Polled from sensor thread, csim only */
+       uint32_t sw_state;      /** Polled from css */
+       uint32_t host_sp_queues_initialized; /** Polled from the SP */
+       uint32_t sleep_mode;  /** different mode to halt SP */
+       uint32_t invalidate_tlb;                /** inform SP to invalidate mmu TLB */
 #ifndef ISP2401
-       uint32_t stop_copy_preview;       /**< suspend copy and preview pipe when capture */
+       uint32_t stop_copy_preview;       /** suspend copy and preview pipe when capture */
 #endif
-       uint32_t debug_buffer_ddr_address;      /**< inform SP the address
+       uint32_t debug_buffer_ddr_address;      /** inform SP the address
        of DDR debug queue */
-       uint32_t perf_counter_input_system_error; /**< input system perf
+       uint32_t perf_counter_input_system_error; /** input system perf
        counter array */
 #ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
-       uint32_t debug_wait; /**< thread/pipe post mortem debug */
-       uint32_t debug_stage; /**< thread/pipe post mortem debug */
-       uint32_t debug_stripe; /**< thread/pipe post mortem debug */
+       uint32_t debug_wait; /** thread/pipe post mortem debug */
+       uint32_t debug_stage; /** thread/pipe post mortem debug */
+       uint32_t debug_stripe; /** thread/pipe post mortem debug */
 #endif
-       uint32_t threads_stack; /**< sp thread's stack pointers */
-       uint32_t threads_stack_size; /**< sp thread's stack sizes */
-       uint32_t curr_binary_id;        /**< current binary id */
-       uint32_t raw_copy_line_count;   /**< raw copy line counter */
-       uint32_t ddr_parameter_address; /**< acc param ddrptr, sp dmem */
-       uint32_t ddr_parameter_size;    /**< acc param size, sp dmem */
+       uint32_t threads_stack; /** sp thread's stack pointers */
+       uint32_t threads_stack_size; /** sp thread's stack sizes */
+       uint32_t curr_binary_id;        /** current binary id */
+       uint32_t raw_copy_line_count;   /** raw copy line counter */
+       uint32_t ddr_parameter_address; /** acc param ddrptr, sp dmem */
+       uint32_t ddr_parameter_size;    /** acc param size, sp dmem */
        /* Entry functions */
-       uint32_t sp_entry;      /**< The SP entry function */
-       uint32_t tagger_frames_addr;   /**< Base address of tagger state */
+       uint32_t sp_entry;      /** The SP entry function */
+       uint32_t tagger_frames_addr;   /** Base address of tagger state */
 };
 
 /* The following #if is there because this header file is also included
@@ -348,37 +348,37 @@ struct ia_css_sp_info {
    More permanent solution will be to refactor this include.
 */
 #if !defined(__ISP)
-/** Accelerator firmware information.
+/* Accelerator firmware information.
  */
 struct ia_css_acc_info {
-       uint32_t per_frame_data; /**< Dummy for now */
+       uint32_t per_frame_data; /** Dummy for now */
 };
 
-/** Firmware information.
+/* Firmware information.
  */
 union ia_css_fw_union {
-       struct ia_css_binary_xinfo      isp; /**< ISP info */
-       struct ia_css_sp_info           sp;  /**< SP info */
-       struct ia_css_bl_info           bl;  /**< Bootloader info */
-       struct ia_css_acc_info          acc; /**< Accelerator info */
+       struct ia_css_binary_xinfo      isp; /** ISP info */
+       struct ia_css_sp_info           sp;  /** SP info */
+       struct ia_css_bl_info           bl;  /** Bootloader info */
+       struct ia_css_acc_info          acc; /** Accelerator info */
 };
 
-/** Firmware information.
+/* Firmware information.
  */
 struct ia_css_fw_info {
-       size_t                   header_size; /**< size of fw header */
+       size_t                   header_size; /** size of fw header */
        CSS_ALIGN(uint32_t type, 8);
-       union ia_css_fw_union    info; /**< Binary info */
-       struct ia_css_blob_info  blob; /**< Blob info */
+       union ia_css_fw_union    info; /** Binary info */
+       struct ia_css_blob_info  blob; /** Blob info */
        /* Dynamic part */
        struct ia_css_fw_info   *next;
-       CSS_ALIGN(uint32_t       loaded, 8);    /**< Firmware has been loaded */
-       CSS_ALIGN(const uint8_t *isp_code, 8);  /**< ISP pointer to code */
-       /**< Firmware handle between user space and kernel */
+       CSS_ALIGN(uint32_t       loaded, 8);    /** Firmware has been loaded */
+       CSS_ALIGN(const uint8_t *isp_code, 8);  /** ISP pointer to code */
+       /** Firmware handle between user space and kernel */
        CSS_ALIGN(uint32_t      handle, 8);
-       /**< Sections to copy from/to ISP */
+       /** Sections to copy from/to ISP */
        struct ia_css_isp_param_css_segments mem_initializers;
-       /**< Initializer for local ISP memories */
+       /** Initializer for local ISP memories */
 };
 
 struct ia_css_blob_descr {
@@ -390,39 +390,39 @@ struct ia_css_blob_descr {
 
 struct ia_css_acc_fw;
 
-/** Structure describing the SP binary of a stand-alone accelerator.
+/* Structure describing the SP binary of a stand-alone accelerator.
  */
 struct ia_css_acc_sp {
-       void (*init)(struct ia_css_acc_fw *);   /**< init for crun */
-       uint32_t sp_prog_name_offset;           /**< program name offset wrt hdr in bytes */
-       uint32_t sp_blob_offset;                /**< blob offset wrt hdr in bytes */
-       void     *entry;                        /**< Address of sp entry point */
-       uint32_t *css_abort;                    /**< SP dmem abort flag */
-       void     *isp_code;                     /**< SP dmem address holding xmem
+       void (*init)(struct ia_css_acc_fw *);   /** init for crun */
+       uint32_t sp_prog_name_offset;           /** program name offset wrt hdr in bytes */
+       uint32_t sp_blob_offset;                /** blob offset wrt hdr in bytes */
+       void     *entry;                        /** Address of sp entry point */
+       uint32_t *css_abort;                    /** SP dmem abort flag */
+       void     *isp_code;                     /** SP dmem address holding xmem
                                                     address of isp code */
-       struct ia_css_fw_info fw;               /**< SP fw descriptor */
-       const uint8_t *code;                    /**< ISP pointer of allocated SP code */
+       struct ia_css_fw_info fw;               /** SP fw descriptor */
+       const uint8_t *code;                    /** ISP pointer of allocated SP code */
 };
 
-/** Acceleration firmware descriptor.
+/* Acceleration firmware descriptor.
   * This descriptor descibes either SP code (stand-alone), or
   * ISP code (a separate pipeline stage).
   */
 struct ia_css_acc_fw_hdr {
-       enum ia_css_acc_type type;      /**< Type of accelerator */
-       uint32_t        isp_prog_name_offset; /**< program name offset wrt
+       enum ia_css_acc_type type;      /** Type of accelerator */
+       uint32_t        isp_prog_name_offset; /** program name offset wrt
                                                   header in bytes */
-       uint32_t        isp_blob_offset;      /**< blob offset wrt header
+       uint32_t        isp_blob_offset;      /** blob offset wrt header
                                                   in bytes */
-       uint32_t        isp_size;             /**< Size of isp blob */
-       const uint8_t  *isp_code;             /**< ISP pointer to code */
-       struct ia_css_acc_sp  sp;  /**< Standalone sp code */
-       /**< Firmware handle between user space and kernel */
+       uint32_t        isp_size;             /** Size of isp blob */
+       const uint8_t  *isp_code;             /** ISP pointer to code */
+       struct ia_css_acc_sp  sp;  /** Standalone sp code */
+       /** Firmware handle between user space and kernel */
        uint32_t        handle;
-       struct ia_css_data parameters; /**< Current SP parameters */
+       struct ia_css_data parameters; /** Current SP parameters */
 };
 
-/** Firmware structure.
+/* Firmware structure.
   * This contains the header and actual blobs.
   * For standalone, it contains SP and ISP blob.
   * For a pipeline stage accelerator, it contains ISP code only.
@@ -430,7 +430,7 @@ struct ia_css_acc_fw_hdr {
   * header and computed using the access macros below.
   */
 struct ia_css_acc_fw {
-       struct ia_css_acc_fw_hdr header; /**< firmware header */
+       struct ia_css_acc_fw_hdr header; /** firmware header */
        /*
        int8_t   isp_progname[];          **< ISP program name
        int8_t   sp_progname[];   **< SP program name, stand-alone only
index b2ecf3618c1524dce6b902876f997b28b7772321..a0058eac7d5a12e67ce2c0d7a908acbfebd4a539 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_BUFFER_H
 #define __IA_CSS_BUFFER_H
 
-/** @file
+/* @file
  * This file contains datastructures and types for buffers used in CSS
  */
 
@@ -23,7 +23,7 @@
 #include "ia_css_types.h"
 #include "ia_css_timer.h"
 
-/** Enumeration of buffer types. Buffers can be queued and de-queued
+/* Enumeration of buffer types. Buffers can be queued and de-queued
  *  to hand them over between IA and ISP.
  */
 enum ia_css_buffer_type {
@@ -48,28 +48,28 @@ enum ia_css_buffer_type {
 
 /* Driver API is not SP/ISP visible, 64 bit types not supported on hivecc */
 #if !defined(__ISP)
-/** Buffer structure. This is a container structure that enables content
+/* Buffer structure. This is a container structure that enables content
  *  independent buffer queues and access functions.
  */
 struct ia_css_buffer {
-       enum ia_css_buffer_type type; /**< Buffer type. */
+       enum ia_css_buffer_type type; /** Buffer type. */
        unsigned int exp_id;
-       /**< exposure id for this buffer; 0 = not available
+       /** exposure id for this buffer; 0 = not available
             see ia_css_event_public.h for more detail. */
        union {
-               struct ia_css_isp_3a_statistics  *stats_3a;    /**< 3A statistics & optionally RGBY statistics. */
-               struct ia_css_isp_dvs_statistics *stats_dvs;   /**< DVS statistics. */
-               struct ia_css_isp_skc_dvs_statistics *stats_skc_dvs;  /**< SKC DVS statistics. */
-               struct ia_css_frame              *frame;       /**< Frame buffer. */
-               struct ia_css_acc_param          *custom_data; /**< Custom buffer. */
-               struct ia_css_metadata           *metadata;    /**< Sensor metadata. */
-       } data; /**< Buffer data pointer. */
-       uint64_t driver_cookie; /**< cookie for the driver */
-       struct ia_css_time_meas timing_data; /**< timing data (readings from the timer) */
-       struct ia_css_clock_tick isys_eof_clock_tick; /**< ISYS's end of frame timer tick*/
+               struct ia_css_isp_3a_statistics  *stats_3a;    /** 3A statistics & optionally RGBY statistics. */
+               struct ia_css_isp_dvs_statistics *stats_dvs;   /** DVS statistics. */
+               struct ia_css_isp_skc_dvs_statistics *stats_skc_dvs;  /** SKC DVS statistics. */
+               struct ia_css_frame              *frame;       /** Frame buffer. */
+               struct ia_css_acc_param          *custom_data; /** Custom buffer. */
+               struct ia_css_metadata           *metadata;    /** Sensor metadata. */
+       } data; /** Buffer data pointer. */
+       uint64_t driver_cookie; /** cookie for the driver */
+       struct ia_css_time_meas timing_data; /** timing data (readings from the timer) */
+       struct ia_css_clock_tick isys_eof_clock_tick; /** ISYS's end of frame timer tick */
 };
 
-/** @brief Dequeue param buffers from sp2host_queue
+/* @brief Dequeue param buffers from sp2host_queue
  *
  * @return                                       None
  *
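
struct ia_css_buffer is a tagged container: the type field selects which member of the data union is meaningful when the buffer is handed over between IA and ISP. A small sketch of wrapping an ISP 3A statistics buffer, assuming this header is ia_css_buffer.h and that the (truncated) enum above provides an IA_CSS_BUFFER_TYPE_3A_STATISTICS enumerator:

    #include "ia_css_buffer.h"

    static void wrap_3a_buffer(struct ia_css_buffer *buf,
                               struct ia_css_isp_3a_statistics *stats)
    {
            buf->type = IA_CSS_BUFFER_TYPE_3A_STATISTICS; /* assumed enumerator, not visible in this hunk */
            buf->data.stats_3a = stats;                   /* selects the matching union member */
            buf->exp_id = 0;                              /* 0 = exposure id not available */
    }
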
index a15d3e3683419b30f494406b0e8a2f2f37a7ad1d..021a313fab857079fba9f714da58f4621a263859 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_CONTROL_H
 #define __IA_CSS_CONTROL_H
 
-/** @file
+/* @file
  * This file contains functionality for starting and controlling CSS
  */
 
@@ -24,7 +24,7 @@
 #include <ia_css_firmware.h>
 #include <ia_css_irq.h>
 
-/** @brief Initialize the CSS API.
+/* @brief Initialize the CSS API.
  * @param[in]  env             Environment, provides functions to access the
  *                             environment in which the CSS code runs. This is
  *                             used for host side memory access and message
@@ -51,7 +51,7 @@ enum ia_css_err ia_css_init(
        uint32_t                 l1_base,
        enum ia_css_irq_type     irq_type);
 
-/** @brief Un-initialize the CSS API.
+/* @brief Un-initialize the CSS API.
  * @return     None
  *
  * This function deallocates all memory that has been allocated by the CSS API
@@ -66,7 +66,7 @@ enum ia_css_err ia_css_init(
 void
 ia_css_uninit(void);
 
-/** @brief Suspend CSS API for power down
+/* @brief Suspend CSS API for power down
+ * @return     success or failure code
  *
  * suspend shuts down the system by:
@@ -80,7 +80,7 @@ ia_css_uninit(void);
 enum ia_css_err
 ia_css_suspend(void);
 
-/** @brief Resume CSS API from power down
+/* @brief Resume CSS API from power down
  * @return     success or failure code
  *
  * After a power cycle, this function will bring the CSS API back into
@@ -91,7 +91,7 @@ ia_css_suspend(void);
 enum ia_css_err
 ia_css_resume(void);
 
-/** @brief Enable use of a separate queue for ISYS events.
+/* @brief Enable use of a separate queue for ISYS events.
  *
  * @param[in]  enable: enable or disable use of separate ISYS event queues.
  * @return             error if called when SP is running.
@@ -105,7 +105,7 @@ ia_css_resume(void);
 enum ia_css_err
 ia_css_enable_isys_event_queue(bool enable);
 
-/** @brief Test whether the ISP has started.
+/* @brief Test whether the ISP has started.
  *
  * @return     Boolean flag true if the ISP has started or false otherwise.
  *
@@ -114,7 +114,7 @@ ia_css_enable_isys_event_queue(bool enable);
 bool
 ia_css_isp_has_started(void);
 
-/** @brief Test whether the SP has initialized.
+/* @brief Test whether the SP has initialized.
  *
  * @return     Boolean flag true if the SP has initialized or false otherwise.
  *
@@ -123,7 +123,7 @@ ia_css_isp_has_started(void);
 bool
 ia_css_sp_has_initialized(void);
 
-/** @brief Test whether the SP has terminated.
+/* @brief Test whether the SP has terminated.
  *
  * @return     Boolean flag true if the SP has terminated or false otherwise.
  *
@@ -132,7 +132,7 @@ ia_css_sp_has_initialized(void);
 bool
 ia_css_sp_has_terminated(void);
 
-/** @brief start SP hardware
+/* @brief start SP hardware
  *
  * @return                     IA_CSS_SUCCESS or error code upon error.
  *
@@ -144,7 +144,7 @@ enum ia_css_err
 ia_css_start_sp(void);
 
 
-/** @brief stop SP hardware
+/* @brief stop SP hardware
  *
  * @return                     IA_CSS_SUCCESS or error code upon error.
  *
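
Read together, the control functions above imply a bring-up order: initialize and load firmware, start the SP, then poll until it reports itself initialized before queueing any work; ia_css_uninit() tears it down again. A rough sketch of the SP part only, assuming this header is ia_css_control.h (ia_css_init() is omitted because only part of its prototype is visible in this hunk):

    #include "ia_css_control.h"

    static enum ia_css_err bring_up_sp(void)
    {
            enum ia_css_err err;

            err = ia_css_start_sp();        /* upload and start the SP program */
            if (err != IA_CSS_SUCCESS)
                    return err;

            while (!ia_css_sp_has_initialized())
                    ;                       /* a real driver would sleep and time out here */

            return IA_CSS_SUCCESS;
    }
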
index 59459f7a9876ea6b7ad1f357891c0456ca72ea88..84a960b7abbc046169a92a44bac4e499bd8285d3 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef _IA_CSS_DEVICE_ACCESS_H
 #define _IA_CSS_DEVICE_ACCESS_H
 
-/** @file
+/* @file
  * File containing internal functions for the CSS-API to access the CSS device.
  */
 
index 147bf81959d3b1a5e113ac01497c778b49f62253..1f01534964e3e851035f417404bf7619d319517a 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_DVS_H
 #define __IA_CSS_DVS_H
 
-/** @file
+/* @file
  * This file contains types for DVS statistics
  */
 
@@ -31,7 +31,7 @@ enum dvs_statistics_type {
 };
 
 
-/** Structure that holds DVS statistics in the ISP internal
+/* Structure that holds DVS statistics in the ISP internal
  * format. Use ia_css_get_dvs_statistics() to translate
  * this to the format used on the host (DVS engine).
  * */
@@ -40,12 +40,12 @@ struct ia_css_isp_dvs_statistics {
        ia_css_ptr ver_proj;
        uint32_t   hor_size;
        uint32_t   ver_size;
-       uint32_t   exp_id;   /**< see ia_css_event_public.h for more detail */
+       uint32_t   exp_id;   /** see ia_css_event_public.h for more detail */
        ia_css_ptr data_ptr; /* base pointer containing all memory */
        uint32_t   size;     /* size of allocated memory in data_ptr */
 };
 
-/** Structure that holds SKC DVS statistics in the ISP internal
+/* Structure that holds SKC DVS statistics in the ISP internal
  * format. Use ia_css_dvs_statistics_get() to translate this to
  * the format used on the host.
  * */
@@ -82,7 +82,7 @@ union ia_css_dvs_statistics_host {
        struct ia_css_skc_dvs_statistics *p_skc_dvs_statistics_host;
 };
 
-/** @brief Copy DVS statistics from an ISP buffer to a host buffer.
+/* @brief Copy DVS statistics from an ISP buffer to a host buffer.
  * @param[in]  host_stats Host buffer
  * @param[in]  isp_stats ISP buffer
  * @return     error value if temporary memory cannot be allocated
@@ -100,7 +100,7 @@ enum ia_css_err
 ia_css_get_dvs_statistics(struct ia_css_dvs_statistics *host_stats,
                          const struct ia_css_isp_dvs_statistics *isp_stats);
 
-/** @brief Translate DVS statistics from ISP format to host format
+/* @brief Translate DVS statistics from ISP format to host format
  * @param[in]  host_stats Host buffer
  * @param[in]  isp_stats ISP buffer
  * @return     None
@@ -116,7 +116,7 @@ ia_css_translate_dvs_statistics(
                struct ia_css_dvs_statistics *host_stats,
                const struct ia_css_isp_dvs_statistics_map *isp_stats);
 
-/** @brief Copy DVS 2.0 statistics from an ISP buffer to a host buffer.
+/* @brief Copy DVS 2.0 statistics from an ISP buffer to a host buffer.
  * @param[in]  host_stats Host buffer
  * @param[in]  isp_stats ISP buffer
  * @return     error value if temporary memory cannot be allocated
@@ -134,7 +134,7 @@ enum ia_css_err
 ia_css_get_dvs2_statistics(struct ia_css_dvs2_statistics *host_stats,
                           const struct ia_css_isp_dvs_statistics *isp_stats);
 
-/** @brief Translate DVS2 statistics from ISP format to host format
+/* @brief Translate DVS2 statistics from ISP format to host format
  * @param[in]  host_stats Host buffer
  * @param[in]  isp_stats ISP buffer
  * @return             None
@@ -150,7 +150,7 @@ ia_css_translate_dvs2_statistics(
                struct ia_css_dvs2_statistics      *host_stats,
                const struct ia_css_isp_dvs_statistics_map *isp_stats);
 
-/** @brief Copy DVS statistics from an ISP buffer to a host buffer.
+/* @brief Copy DVS statistics from an ISP buffer to a host buffer.
  * @param[in] type - DVS statistics type
  * @param[in] host_stats Host buffer
  * @param[in] isp_stats ISP buffer
@@ -161,105 +161,105 @@ ia_css_dvs_statistics_get(enum dvs_statistics_type type,
                          union ia_css_dvs_statistics_host  *host_stats,
                          const union ia_css_dvs_statistics_isp *isp_stats);
 
-/** @brief Allocate the DVS statistics memory on the ISP
+/* @brief Allocate the DVS statistics memory on the ISP
  * @param[in]  grid The grid.
  * @return     Pointer to the allocated DVS statistics buffer on the ISP
 */
 struct ia_css_isp_dvs_statistics *
 ia_css_isp_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
 
-/** @brief Free the DVS statistics memory on the ISP
+/* @brief Free the DVS statistics memory on the ISP
  * @param[in]  me Pointer to the DVS statistics buffer on the ISP.
  * @return     None
 */
 void
 ia_css_isp_dvs_statistics_free(struct ia_css_isp_dvs_statistics *me);
 
-/** @brief Allocate the DVS 2.0 statistics memory
+/* @brief Allocate the DVS 2.0 statistics memory
  * @param[in]  grid The grid.
  * @return     Pointer to the allocated DVS statistics buffer on the ISP
 */
 struct ia_css_isp_dvs_statistics *
 ia_css_isp_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
 
-/** @brief Free the DVS 2.0 statistics memory
+/* @brief Free the DVS 2.0 statistics memory
  * @param[in]  me Pointer to the DVS statistics buffer on the ISP.
  * @return     None
 */
 void
 ia_css_isp_dvs2_statistics_free(struct ia_css_isp_dvs_statistics *me);
 
-/** @brief Allocate the DVS statistics memory on the host
+/* @brief Allocate the DVS statistics memory on the host
  * @param[in]  grid The grid.
  * @return     Pointer to the allocated DVS statistics buffer on the host
 */
 struct ia_css_dvs_statistics *
 ia_css_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
 
-/** @brief Free the DVS statistics memory on the host
+/* @brief Free the DVS statistics memory on the host
  * @param[in]  me Pointer to the DVS statistics buffer on the host.
  * @return     None
 */
 void
 ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me);
 
-/** @brief Allocate the DVS coefficients memory
+/* @brief Allocate the DVS coefficients memory
  * @param[in]  grid The grid.
  * @return     Pointer to the allocated DVS coefficients buffer
 */
 struct ia_css_dvs_coefficients *
 ia_css_dvs_coefficients_allocate(const struct ia_css_dvs_grid_info *grid);
 
-/** @brief Free the DVS coefficients memory
+/* @brief Free the DVS coefficients memory
  * @param[in]  me Pointer to the DVS coefficients buffer.
  * @return     None
  */
 void
 ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me);
 
-/** @brief Allocate the DVS 2.0 statistics memory on the host
+/* @brief Allocate the DVS 2.0 statistics memory on the host
  * @param[in]  grid The grid.
  * @return     Pointer to the allocated DVS 2.0 statistics buffer on the host
  */
 struct ia_css_dvs2_statistics *
 ia_css_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
 
-/** @brief Free the DVS 2.0 statistics memory
+/* @brief Free the DVS 2.0 statistics memory
  * @param[in]  me Pointer to the DVS 2.0 statistics buffer on the host.
  * @return     None
 */
 void
 ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me);
 
-/** @brief Allocate the DVS 2.0 coefficients memory
+/* @brief Allocate the DVS 2.0 coefficients memory
  * @param[in]  grid The grid.
  * @return     Pointer to the allocated DVS 2.0 coefficients buffer
 */
 struct ia_css_dvs2_coefficients *
 ia_css_dvs2_coefficients_allocate(const struct ia_css_dvs_grid_info *grid);
 
-/** @brief Free the DVS 2.0 coefficients memory
+/* @brief Free the DVS 2.0 coefficients memory
  * @param[in]  me Pointer to the DVS 2.0 coefficients buffer.
  * @return     None
 */
 void
 ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me);
 
-/** @brief Allocate the DVS 2.0 6-axis config memory
+/* @brief Allocate the DVS 2.0 6-axis config memory
  * @param[in]  stream The stream.
  * @return     Pointer to the allocated DVS 6axis configuration buffer
 */
 struct ia_css_dvs_6axis_config *
 ia_css_dvs2_6axis_config_allocate(const struct ia_css_stream *stream);
 
-/** @brief Free the DVS 2.0 6-axis config memory
+/* @brief Free the DVS 2.0 6-axis config memory
  * @param[in]  dvs_6axis_config Pointer to the DVS 6axis configuration buffer
  * @return     None
  */
 void
 ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config);
 
-/** @brief Allocate a dvs statistics map structure
+/* @brief Allocate a dvs statistics map structure
  * @param[in]  isp_stats pointer to ISP dvs statistis struct
  * @param[in]  data_ptr  host-side pointer to ISP dvs statistics.
  * @return     Pointer to the allocated dvs statistics map
@@ -280,7 +280,7 @@ ia_css_isp_dvs_statistics_map_allocate(
        const struct ia_css_isp_dvs_statistics *isp_stats,
        void *data_ptr);
 
-/** @brief Free the dvs statistics map
+/* @brief Free the dvs statistics map
  * @param[in]  me Pointer to the dvs statistics map
  * @return     None
  *
@@ -291,7 +291,7 @@ ia_css_isp_dvs_statistics_map_allocate(
 void
 ia_css_isp_dvs_statistics_map_free(struct ia_css_isp_dvs_statistics_map *me);
 
-/** @brief Allocate memory for the SKC DVS statistics on the ISP
+/* @brief Allocate memory for the SKC DVS statistics on the ISP
  * @return             Pointer to the allocated ACC DVS statistics buffer on the ISP
 */
 struct ia_css_isp_skc_dvs_statistics *ia_css_skc_dvs_statistics_allocate(void);
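
As with the 3A API, the DVS 2.0 functions above split the statistics into an ISP-format buffer and a host-format buffer, with ia_css_get_dvs2_statistics() copying and translating between them. A minimal sketch, assuming this header is ia_css_dvs.h and a struct ia_css_dvs_grid_info obtained elsewhere:

    #include "ia_css_dvs.h"

    static void copy_dvs2_stats(const struct ia_css_dvs_grid_info *grid)
    {
            struct ia_css_isp_dvs_statistics *isp_stats;
            struct ia_css_dvs2_statistics *host_stats;

            isp_stats  = ia_css_isp_dvs2_statistics_allocate(grid);
            host_stats = ia_css_dvs2_statistics_allocate(grid);

            if (isp_stats && host_stats &&
                ia_css_get_dvs2_statistics(host_stats, isp_stats) == IA_CSS_SUCCESS) {
                    /* host_stats is now in the host (DVS engine) format */
            }
            if (host_stats)
                    ia_css_dvs2_statistics_free(host_stats);
            if (isp_stats)
                    ia_css_isp_dvs2_statistics_free(isp_stats);
    }
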
index 1ae9daf0be767cf50e34037e05766261561a0ba8..8b0218ee658de09fd67e47e984c283090f07b2af 100644 (file)
 #include "ia_css_types.h"
 #include "ia_css_acc_types.h"
 
-/** @file
+/* @file
  * This file contains prototypes for functions that need to be provided to the
  * CSS-API host-code by the environment in which the CSS-API code runs.
  */
 
-/** Memory allocation attributes, for use in ia_css_css_mem_env. */
+/* Memory allocation attributes, for use in ia_css_css_mem_env. */
 enum ia_css_mem_attr {
        IA_CSS_MEM_ATTR_CACHED = 1 << 0,
        IA_CSS_MEM_ATTR_ZEROED = 1 << 1,
@@ -33,62 +33,62 @@ enum ia_css_mem_attr {
        IA_CSS_MEM_ATTR_CONTIGUOUS = 1 << 3,
 };
 
-/** Environment with function pointers for local IA memory allocation.
+/* Environment with function pointers for local IA memory allocation.
  *  This provides the CSS code with environment specific functionality
  *  for memory allocation of small local buffers such as local data structures.
  *  This is never expected to allocate more than one page of memory (4K bytes).
  */
 struct ia_css_cpu_mem_env {
        void (*flush)(struct ia_css_acc_fw *fw);
-       /**< Flush function to flush the cache for given accelerator. */
+       /** Flush function to flush the cache for given accelerator. */
 };
 
-/** Environment with function pointers to access the CSS hardware. This includes
+/* Environment with function pointers to access the CSS hardware. This includes
  *  registers and local memories.
  */
 struct ia_css_hw_access_env {
        void (*store_8)(hrt_address addr, uint8_t data);
-       /**< Store an 8 bit value into an address in the CSS HW address space.
+       /** Store an 8 bit value into an address in the CSS HW address space.
             The address must be an 8 bit aligned address. */
        void (*store_16)(hrt_address addr, uint16_t data);
-       /**< Store a 16 bit value into an address in the CSS HW address space.
+       /** Store a 16 bit value into an address in the CSS HW address space.
             The address must be a 16 bit aligned address. */
        void (*store_32)(hrt_address addr, uint32_t data);
-       /**< Store a 32 bit value into an address in the CSS HW address space.
+       /** Store a 32 bit value into an address in the CSS HW address space.
             The address must be a 32 bit aligned address. */
        uint8_t (*load_8)(hrt_address addr);
-       /**< Load an 8 bit value from an address in the CSS HW address
+       /** Load an 8 bit value from an address in the CSS HW address
             space. The address must be an 8 bit aligned address. */
        uint16_t (*load_16)(hrt_address addr);
-       /**< Load a 16 bit value from an address in the CSS HW address
+       /** Load a 16 bit value from an address in the CSS HW address
             space. The address must be a 16 bit aligned address. */
        uint32_t (*load_32)(hrt_address addr);
-       /**< Load a 32 bit value from an address in the CSS HW address
+       /** Load a 32 bit value from an address in the CSS HW address
             space. The address must be a 32 bit aligned address. */
        void (*store)(hrt_address addr, const void *data, uint32_t bytes);
-       /**< Store a number of bytes into a byte-aligned address in the CSS HW address space. */
+       /** Store a number of bytes into a byte-aligned address in the CSS HW address space. */
        void (*load)(hrt_address addr, void *data, uint32_t bytes);
-       /**< Load a number of bytes from a byte-aligned address in the CSS HW address space. */
+       /** Load a number of bytes from a byte-aligned address in the CSS HW address space. */
 };
 
-/** Environment with function pointers to print error and debug messages.
+/* Environment with function pointers to print error and debug messages.
  */
 struct ia_css_print_env {
        int (*debug_print)(const char *fmt, va_list args);
-       /**< Print a debug message. */
+       /** Print a debug message. */
        int (*error_print)(const char *fmt, va_list args);
-       /**< Print an error message.*/
+       /** Print an error message.*/
 };
 
-/** Environment structure. This includes function pointers to access several
+/* Environment structure. This includes function pointers to access several
  *  features provided by the environment in which the CSS API is used.
  *  This is used to run the camera IP in multiple platforms such as Linux,
  *  Windows and several simulation environments.
  */
 struct ia_css_env {
-       struct ia_css_cpu_mem_env   cpu_mem_env;   /**< local flush. */
-       struct ia_css_hw_access_env hw_access_env; /**< CSS HW access functions */
-       struct ia_css_print_env     print_env;     /**< Message printing env. */
+       struct ia_css_cpu_mem_env   cpu_mem_env;   /** local flush. */
+       struct ia_css_hw_access_env hw_access_env; /** CSS HW access functions */
+       struct ia_css_print_env     print_env;     /** Message printing env. */
 };
 
 #endif /* __IA_CSS_ENV_H */
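
struct ia_css_env is the glue the host supplies before anything else runs: a cache flush hook, raw register access, and print callbacks. A sketch of how a platform might populate it; the my_* callbacks are hypothetical stand-ins for platform-specific implementations, and only a subset of the hw_access_env hooks is shown:

    #include "ia_css_env.h"

    static void my_flush(struct ia_css_acc_fw *fw)
    {
            (void)fw;               /* flush CPU caches for this accelerator */
    }

    static void my_store_32(hrt_address addr, uint32_t data)
    {
            (void)addr; (void)data; /* write a 32-bit CSS register */
    }

    static uint32_t my_load_32(hrt_address addr)
    {
            (void)addr;             /* read a 32-bit CSS register */
            return 0;
    }

    static int my_print(const char *fmt, va_list args)
    {
            (void)fmt; (void)args;  /* a real driver would route this to vprintk() */
            return 0;
    }

    static void fill_env(struct ia_css_env *env)
    {
            env->cpu_mem_env.flush      = my_flush;
            env->hw_access_env.store_32 = my_store_32;
            env->hw_access_env.load_32  = my_load_32;
            env->print_env.debug_print  = my_print;
            env->print_env.error_print  = my_print;
            /* store_8/16, load_8/16, store and load must be filled in the same way */
    }
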
index 572e4e55c69e29cb405746d0894e4de4cd8046f4..cf895815ea31e0f598bb8480a332b57378ebfae5 100644 (file)
 #ifndef __IA_CSS_ERR_H
 #define __IA_CSS_ERR_H
 
-/** @file
+/* @file
  * This file contains possible return values for most
  * functions in the CSS-API.
  */
 
-/** Errors, these values are used as the return value for most
+/* Errors, these values are used as the return value for most
  *  functions in this API.
  */
 enum ia_css_err {
@@ -41,22 +41,22 @@ enum ia_css_err {
        IA_CSS_ERR_NOT_SUPPORTED
 };
 
-/** FW warnings. This enum contains a value for each warning that
+/* FW warnings. This enum contains a value for each warning that
  * the SP FW could indicate potential performance issue
  */
 enum ia_css_fw_warning {
        IA_CSS_FW_WARNING_NONE,
-       IA_CSS_FW_WARNING_ISYS_QUEUE_FULL, /** < CSS system delayed because of insufficient space in the ISys queue.
+       IA_CSS_FW_WARNING_ISYS_QUEUE_FULL, /* < CSS system delayed because of insufficient space in the ISys queue.
                This warning can be avoided by de-queing ISYS buffers more timely. */
-       IA_CSS_FW_WARNING_PSYS_QUEUE_FULL, /** < CSS system delayed because of insufficient space in the PSys queue.
+       IA_CSS_FW_WARNING_PSYS_QUEUE_FULL, /* < CSS system delayed because of insufficient space in the PSys queue.
                This warning can be avoided by de-queing PSYS buffers more timely. */
-       IA_CSS_FW_WARNING_CIRCBUF_ALL_LOCKED, /** < CSS system delayed because of insufficient available buffers.
+       IA_CSS_FW_WARNING_CIRCBUF_ALL_LOCKED, /* < CSS system delayed because of insufficient available buffers.
                This warning can be avoided by unlocking locked frame-buffers more timely. */
-       IA_CSS_FW_WARNING_EXP_ID_LOCKED, /** < Exposure ID skipped because the frame associated to it was still locked.
+       IA_CSS_FW_WARNING_EXP_ID_LOCKED, /* < Exposure ID skipped because the frame associated to it was still locked.
                This warning can be avoided by unlocking locked frame-buffers more timely. */
-       IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED, /** < Exposure ID cannot be found on the circular buffer.
+       IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED, /* < Exposure ID cannot be found on the circular buffer.
                This warning can be avoided by unlocking locked frame-buffers more timely. */
-       IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH, /** < Frame and param pair mismatched in tagger.
+       IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH, /* < Frame and param pair mismatched in tagger.
                This warning can be avoided by providing a param set for each frame. */
 };
 
index aaf349772abef324a7a296f065b0ece97346cc3d..036a2f03d3bd416850e253ade9951fe1f6cec9b4 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_EVENT_PUBLIC_H
 #define __IA_CSS_EVENT_PUBLIC_H
 
-/** @file
+/* @file
  * This file contains CSS-API events functionality
  */
 
@@ -24,7 +24,7 @@
 #include <ia_css_types.h>      /* ia_css_pipe */
 #include <ia_css_timer.h>      /* ia_css_timer */
 
-/** The event type, distinguishes the kind of events that
+/* The event type, distinguishes the kind of events that
  * can are generated by the CSS system.
  *
  * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
  */
 enum ia_css_event_type {
        IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE             = 1 << 0,
-       /**< Output frame ready. */
+       /** Output frame ready. */
        IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE      = 1 << 1,
-       /**< Second output frame ready. */
+       /** Second output frame ready. */
        IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE          = 1 << 2,
-       /**< Viewfinder Output frame ready. */
+       /** Viewfinder Output frame ready. */
        IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE   = 1 << 3,
-       /**< Second viewfinder Output frame ready. */
+       /** Second viewfinder Output frame ready. */
        IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE            = 1 << 4,
-       /**< Indication that 3A statistics are available. */
+       /** Indication that 3A statistics are available. */
        IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE           = 1 << 5,
-       /**< Indication that DIS statistics are available. */
+       /** Indication that DIS statistics are available. */
        IA_CSS_EVENT_TYPE_PIPELINE_DONE                 = 1 << 6,
-       /**< Pipeline Done event, sent after last pipeline stage. */
+       /** Pipeline Done event, sent after last pipeline stage. */
        IA_CSS_EVENT_TYPE_FRAME_TAGGED                  = 1 << 7,
-       /**< Frame tagged. */
+       /** Frame tagged. */
        IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE              = 1 << 8,
-       /**< Input frame ready. */
+       /** Input frame ready. */
        IA_CSS_EVENT_TYPE_METADATA_DONE                 = 1 << 9,
-       /**< Metadata ready. */
+       /** Metadata ready. */
        IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE          = 1 << 10,
-       /**< Indication that LACE statistics are available. */
+       /** Indication that LACE statistics are available. */
        IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE            = 1 << 11,
-       /**< Extension stage complete. */
+       /** Extension stage complete. */
        IA_CSS_EVENT_TYPE_TIMER                         = 1 << 12,
-       /**< Timer event for measuring the SP side latencies. It contains the
+       /** Timer event for measuring the SP side latencies. It contains the
              32-bit timer value from the SP */
        IA_CSS_EVENT_TYPE_PORT_EOF                      = 1 << 13,
-       /**< End Of Frame event, sent when in buffered sensor mode. */
+       /** End Of Frame event, sent when in buffered sensor mode. */
        IA_CSS_EVENT_TYPE_FW_WARNING                    = 1 << 14,
+       /** Performance warning encountered by FW */
+       /** Performance warning encounter by FW */
        IA_CSS_EVENT_TYPE_FW_ASSERT                     = 1 << 15,
-       /**< Assertion hit by FW */
+       /** Assertion hit by FW */
 };
 
 #define IA_CSS_EVENT_TYPE_NONE 0
 
-/** IA_CSS_EVENT_TYPE_ALL is a mask for all pipe related events.
+/* IA_CSS_EVENT_TYPE_ALL is a mask for all pipe related events.
  * The other events (such as PORT_EOF) cannot be enabled/disabled
  * and are hence excluded from this macro.
  */
@@ -89,7 +89,7 @@ enum ia_css_event_type {
         IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE         | \
         IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE)
 
-/** The event struct, container for the event type and its related values.
+/* The event struct, container for the event type and its related values.
  * Depending on the event type, either pipe or port will be filled.
  * Pipeline related events (like buffer/frame events) will return a valid and filled pipe handle.
  * For non pipeline related events (but i.e. stream specific, like EOF event), the port will be
@@ -97,14 +97,14 @@ enum ia_css_event_type {
  */
 struct ia_css_event {
        struct ia_css_pipe    *pipe;
-       /**< Pipe handle on which event happened, NULL for non pipe related
+       /** Pipe handle on which event happened, NULL for non pipe related
             events. */
        enum ia_css_event_type type;
-       /**< Type of Event, always valid/filled. */
+       /** Type of Event, always valid/filled. */
        uint8_t                port;
-       /**< Port number for EOF event (not valid for other events). */
+       /** Port number for EOF event (not valid for other events). */
        uint8_t                exp_id;
-       /**< Exposure id for EOF/FRAME_TAGGED/FW_WARNING event (not valid for other events)
+       /** Exposure id for EOF/FRAME_TAGGED/FW_WARNING event (not valid for other events)
             The exposure ID is unique only within a logical stream and it is
             only generated on systems that have an input system (such as 2400
             and 2401).
@@ -120,26 +120,26 @@ struct ia_css_event {
             in the exposure IDs. Therefor applications should not use this
             to detect frame drops. */
        uint32_t               fw_handle;
-       /**< Firmware Handle for ACC_STAGE_COMPLETE event (not valid for other
+       /** Firmware Handle for ACC_STAGE_COMPLETE event (not valid for other
             events). */
        enum ia_css_fw_warning fw_warning;
-       /**< Firmware warning code, only for WARNING events. */
+       /** Firmware warning code, only for WARNING events. */
        uint8_t                fw_assert_module_id;
-       /**< Firmware module id, only for ASSERT events, should be logged by driver. */
+       /** Firmware module id, only for ASSERT events, should be logged by driver. */
        uint16_t               fw_assert_line_no;
-       /**< Firmware line number, only for ASSERT events, should be logged by driver. */
+       /** Firmware line number, only for ASSERT events, should be logged by driver. */
        clock_value_t          timer_data;
-       /**< For storing the full 32-bit of the timer value. Valid only for TIMER
+       /** For storing the full 32-bit of the timer value. Valid only for TIMER
             event */
        uint8_t                timer_code;
-       /**< For storing the code of the TIMER event. Valid only for
+       /** For storing the code of the TIMER event. Valid only for
             TIMER event */
        uint8_t                timer_subcode;
-       /**< For storing the subcode of the TIMER event. Valid only
+       /** For storing the subcode of the TIMER event. Valid only
             for TIMER event */
 };
 
-/** @brief Dequeue a PSYS event from the CSS system.
+/* @brief Dequeue a PSYS event from the CSS system.
  *
  * @param[out] event   Pointer to the event struct which will be filled by
  *                      this function if an event is available.
@@ -156,7 +156,7 @@ struct ia_css_event {
 enum ia_css_err
 ia_css_dequeue_psys_event(struct ia_css_event *event);
 
-/** @brief Dequeue an event from the CSS system.
+/* @brief Dequeue an event from the CSS system.
  *
  * @param[out] event   Pointer to the event struct which will be filled by
  *                      this function if an event is available.
@@ -171,7 +171,7 @@ ia_css_dequeue_psys_event(struct ia_css_event *event);
 enum ia_css_err
 ia_css_dequeue_event(struct ia_css_event *event);
 
-/** @brief Dequeue an ISYS event from the CSS system.
+/* @brief Dequeue an ISYS event from the CSS system.
  *
  * @param[out] event   Pointer to the event struct which will be filled by
  *                      this function if an event is available.
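
The event API above is poll-based: ia_css_dequeue_psys_event() fills a struct ia_css_event as long as something is pending, and the type field tells the caller which of the other fields are valid. A rough sketch of a dequeue loop, assuming this header is ia_css_event_public.h:

    #include "ia_css_event_public.h"

    static void drain_psys_events(void)
    {
            struct ia_css_event event;

            while (ia_css_dequeue_psys_event(&event) == IA_CSS_SUCCESS) {
                    switch (event.type) {
                    case IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE:
                            /* output frame ready on event.pipe */
                            break;
                    case IA_CSS_EVENT_TYPE_PORT_EOF:
                            /* end of frame on port event.port, exposure event.exp_id */
                            break;
                    case IA_CSS_EVENT_TYPE_FW_WARNING:
                            /* performance warning, code in event.fw_warning */
                            break;
                    default:
                            break;
                    }
            }
    }
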
index 06d375a09be27b83705adc9feae8b1eaac263e79..d7d7f0a995e5c0370f3ab89c1c8381279f3abbc5 100644 (file)
 #ifndef __IA_CSS_FIRMWARE_H
 #define __IA_CSS_FIRMWARE_H
 
-/** @file
+/* @file
  * This file contains firmware loading/unloading support functionality
  */
 
 #include "ia_css_err.h"
 #include "ia_css_env.h"
 
-/** CSS firmware package structure.
+/* CSS firmware package structure.
  */
 struct ia_css_fw {
-       void        *data;  /**< pointer to the firmware data */
-       unsigned int bytes; /**< length in bytes of firmware data */
+       void        *data;  /** pointer to the firmware data */
+       unsigned int bytes; /** length in bytes of firmware data */
 };
 
-/** @brief Loads the firmware
+/* @brief Loads the firmware
  * @param[in]  env             Environment, provides functions to access the
  *                             environment in which the CSS code runs. This is
  *                             used for host side memory access and message
@@ -51,7 +51,7 @@ enum ia_css_err
 ia_css_load_firmware(const struct ia_css_env *env,
            const struct ia_css_fw  *fw);
 
-/** @brief Unloads the firmware
+/* @brief Unloads the firmware
  * @return     None
  *
  * This function unloads the firmware loaded by ia_css_load_firmware.
@@ -61,7 +61,7 @@ ia_css_load_firmware(const struct ia_css_env *env,
 void
 ia_css_unload_firmware(void);
 
-/** @brief Checks firmware version
+/* @brief Checks firmware version
  * @param[in]  fw      Firmware package containing the firmware for all
  *                     predefined ISP binaries.
  * @return             Returns true when the firmware version matches with the CSS
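
struct ia_css_fw above is just a (pointer, length) pair wrapping the firmware image; ia_css_load_firmware() and ia_css_unload_firmware() bracket the rest of the API. A minimal sketch, assuming the image has already been read into memory (e.g. via request_firmware() in a Linux driver) and a filled struct ia_css_env is available:

    #include "ia_css_firmware.h"

    static enum ia_css_err load_css_fw(const struct ia_css_env *env,
                                       void *image, unsigned int len)
    {
            struct ia_css_fw fw = {
                    .data  = image, /* pointer to the firmware data */
                    .bytes = len,   /* length in bytes of firmware data */
            };

            return ia_css_load_firmware(env, &fw);
            /* ia_css_unload_firmware() undoes this at teardown */
    }
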
index da9c60144c6dcaf6fe075ee626e7c49ed60ec301..e5ffc579aef1e09969dcc14fd4e1e5b220d7c044 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef _IA_CSS_FRAC_H
 #define _IA_CSS_FRAC_H
 
-/** @file
+/* @file
  * This file contains typedefs used for fractional numbers
  */
 
  * NOTE: the 16 bit fixed point types actually occupy 32 bits
  * to save on extension operations in the ISP code.
  */
-/** Unsigned fixed point value, 0 integer bits, 16 fractional bits */
+/* Unsigned fixed point value, 0 integer bits, 16 fractional bits */
 typedef uint32_t ia_css_u0_16;
-/** Unsigned fixed point value, 5 integer bits, 11 fractional bits */
+/* Unsigned fixed point value, 5 integer bits, 11 fractional bits */
 typedef uint32_t ia_css_u5_11;
-/** Unsigned fixed point value, 8 integer bits, 8 fractional bits */
+/* Unsigned fixed point value, 8 integer bits, 8 fractional bits */
 typedef uint32_t ia_css_u8_8;
-/** Signed fixed point value, 0 integer bits, 15 fractional bits */
+/* Signed fixed point value, 0 integer bits, 15 fractional bits */
 typedef int32_t ia_css_s0_15;
 
 #endif /* _IA_CSS_FRAC_H */
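
The typedefs above are plain binary fixed point: ia_css_u8_8, for example, carries 8 integer and 8 fractional bits, so a real value is scaled by 256 on the way in. A small sketch under that reading, assuming this header is ia_css_frac.h; the helper names are illustrative only:

    #include "ia_css_frac.h"

    /* 1.5 in u8.8 is 1.5 * 256 = 384 (0x0180) */
    static ia_css_u8_8 to_u8_8(unsigned int integer, unsigned int frac_256ths)
    {
            return (ia_css_u8_8)(((integer & 0xff) << 8) | (frac_256ths & 0xff));
    }

    /* 0.25 in u0.16 is 0.25 * 65536 = 16384 (0x4000) */
    static ia_css_u0_16 quarter_u0_16(void)
    {
            return (ia_css_u0_16)(65536 / 4);
    }
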
index d534fbd913803ab37812a4b09f98396d56ab00e3..2f177edc36ac5e7ec9b4311671ae51b643682ac0 100644 (file)
 #ifndef __IA_CSS_FRAME_FORMAT_H
 #define __IA_CSS_FRAME_FORMAT_H
 
-/** @file
+/* @file
  * This file contains information about formats supported in the ISP
  */
 
-/** Frame formats, some of these come from fourcc.org, others are
+/* Frame formats, some of these come from fourcc.org, others are
    better explained by video4linux2. The NV11 seems to be described only
    on MSDN pages, but even those seem to be gone now.
    Frames can come in many forms, the main categories are RAW, RGB and YUV
        - css/bxt_sandbox/isysapi/interface/ia_css_isysapi_fw_types.h
 */
 enum ia_css_frame_format {
-       IA_CSS_FRAME_FORMAT_NV11 = 0,   /**< 12 bit YUV 411, Y, UV plane */
-       IA_CSS_FRAME_FORMAT_NV12,       /**< 12 bit YUV 420, Y, UV plane */
-       IA_CSS_FRAME_FORMAT_NV12_16,    /**< 16 bit YUV 420, Y, UV plane */
-       IA_CSS_FRAME_FORMAT_NV12_TILEY, /**< 12 bit YUV 420, Intel proprietary tiled format, TileY */
-       IA_CSS_FRAME_FORMAT_NV16,       /**< 16 bit YUV 422, Y, UV plane */
-       IA_CSS_FRAME_FORMAT_NV21,       /**< 12 bit YUV 420, Y, VU plane */
-       IA_CSS_FRAME_FORMAT_NV61,       /**< 16 bit YUV 422, Y, VU plane */
-       IA_CSS_FRAME_FORMAT_YV12,       /**< 12 bit YUV 420, Y, V, U plane */
-       IA_CSS_FRAME_FORMAT_YV16,       /**< 16 bit YUV 422, Y, V, U plane */
-       IA_CSS_FRAME_FORMAT_YUV420,     /**< 12 bit YUV 420, Y, U, V plane */
-       IA_CSS_FRAME_FORMAT_YUV420_16,  /**< yuv420, 16 bits per subpixel */
-       IA_CSS_FRAME_FORMAT_YUV422,     /**< 16 bit YUV 422, Y, U, V plane */
-       IA_CSS_FRAME_FORMAT_YUV422_16,  /**< yuv422, 16 bits per subpixel */
-       IA_CSS_FRAME_FORMAT_UYVY,       /**< 16 bit YUV 422, UYVY interleaved */
-       IA_CSS_FRAME_FORMAT_YUYV,       /**< 16 bit YUV 422, YUYV interleaved */
-       IA_CSS_FRAME_FORMAT_YUV444,     /**< 24 bit YUV 444, Y, U, V plane */
-       IA_CSS_FRAME_FORMAT_YUV_LINE,   /**< Internal format, 2 y lines followed
+       IA_CSS_FRAME_FORMAT_NV11 = 0,   /** 12 bit YUV 411, Y, UV plane */
+       IA_CSS_FRAME_FORMAT_NV12,       /** 12 bit YUV 420, Y, UV plane */
+       IA_CSS_FRAME_FORMAT_NV12_16,    /** 16 bit YUV 420, Y, UV plane */
+       IA_CSS_FRAME_FORMAT_NV12_TILEY, /** 12 bit YUV 420, Intel proprietary tiled format, TileY */
+       IA_CSS_FRAME_FORMAT_NV16,       /** 16 bit YUV 422, Y, UV plane */
+       IA_CSS_FRAME_FORMAT_NV21,       /** 12 bit YUV 420, Y, VU plane */
+       IA_CSS_FRAME_FORMAT_NV61,       /** 16 bit YUV 422, Y, VU plane */
+       IA_CSS_FRAME_FORMAT_YV12,       /** 12 bit YUV 420, Y, V, U plane */
+       IA_CSS_FRAME_FORMAT_YV16,       /** 16 bit YUV 422, Y, V, U plane */
+       IA_CSS_FRAME_FORMAT_YUV420,     /** 12 bit YUV 420, Y, U, V plane */
+       IA_CSS_FRAME_FORMAT_YUV420_16,  /** yuv420, 16 bits per subpixel */
+       IA_CSS_FRAME_FORMAT_YUV422,     /** 16 bit YUV 422, Y, U, V plane */
+       IA_CSS_FRAME_FORMAT_YUV422_16,  /** yuv422, 16 bits per subpixel */
+       IA_CSS_FRAME_FORMAT_UYVY,       /** 16 bit YUV 422, UYVY interleaved */
+       IA_CSS_FRAME_FORMAT_YUYV,       /** 16 bit YUV 422, YUYV interleaved */
+       IA_CSS_FRAME_FORMAT_YUV444,     /** 24 bit YUV 444, Y, U, V plane */
+       IA_CSS_FRAME_FORMAT_YUV_LINE,   /** Internal format, 2 y lines followed
                                             by a uvinterleaved line */
-       IA_CSS_FRAME_FORMAT_RAW,        /**< RAW, 1 plane */
-       IA_CSS_FRAME_FORMAT_RGB565,     /**< 16 bit RGB, 1 plane. Each 3 sub
+       IA_CSS_FRAME_FORMAT_RAW,        /** RAW, 1 plane */
+       IA_CSS_FRAME_FORMAT_RGB565,     /** 16 bit RGB, 1 plane. Each 3 sub
                                             pixels are packed into one 16 bit
                                             value, 5 bits for R, 6 bits for G
                                             and 5 bits for B. */
-       IA_CSS_FRAME_FORMAT_PLANAR_RGB888, /**< 24 bit RGB, 3 planes */
-       IA_CSS_FRAME_FORMAT_RGBA888,    /**< 32 bit RGBA, 1 plane, A=Alpha
+       IA_CSS_FRAME_FORMAT_PLANAR_RGB888, /** 24 bit RGB, 3 planes */
+       IA_CSS_FRAME_FORMAT_RGBA888,    /** 32 bit RGBA, 1 plane, A=Alpha
                                             (alpha is unused) */
-       IA_CSS_FRAME_FORMAT_QPLANE6, /**< Internal, for advanced ISP */
-       IA_CSS_FRAME_FORMAT_BINARY_8,   /**< byte stream, used for jpeg. For
+       IA_CSS_FRAME_FORMAT_QPLANE6, /** Internal, for advanced ISP */
+       IA_CSS_FRAME_FORMAT_BINARY_8,   /** byte stream, used for jpeg. For
                                             frames of this type, we set the
                                             height to 1 and the width to the
                                             number of allocated bytes. */
-       IA_CSS_FRAME_FORMAT_MIPI,       /**< MIPI frame, 1 plane */
-       IA_CSS_FRAME_FORMAT_RAW_PACKED, /**< RAW, 1 plane, packed */
-       IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8,        /**< 8 bit per Y/U/V.
+       IA_CSS_FRAME_FORMAT_MIPI,       /** MIPI frame, 1 plane */
+       IA_CSS_FRAME_FORMAT_RAW_PACKED, /** RAW, 1 plane, packed */
+       IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8,        /** 8 bit per Y/U/V.
                                                           Y odd line; UYVY
                                                           interleaved even line */
-       IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8, /**< Legacy YUV420. UY odd
+       IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8, /** Legacy YUV420. UY odd
                                                           line; VY even line */
-       IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10       /**< 10 bit per Y/U/V. Y odd
+       IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10       /** 10 bit per Y/U/V. Y odd
                                                           line; UYVY interleaved
                                                           even line */
 };
@@ -95,7 +95,7 @@ enum ia_css_frame_format {
 /*       because of issues this would cause with the Clockwork code checking tool.               */
 #define IA_CSS_FRAME_FORMAT_NUM (IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 + 1)
 
-/** Number of valid output frame formats for ISP **/
+/* Number of valid output frame formats for ISP */
 #define IA_CSS_FRAME_OUT_FORMAT_NUM    (IA_CSS_FRAME_FORMAT_RGBA888 + 1)
 
 #endif /* __IA_CSS_FRAME_FORMAT_H */
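/* Illustrative sizing sketch (assumed helper, not part of this header): the
 * 12 bit YUV 420 formats such as IA_CSS_FRAME_FORMAT_NV12 carry 1.5 bytes
 * per pixel, so a caller sizing a buffer by hand could use:
 */
static unsigned int nv12_frame_bytes(unsigned int width, unsigned int height)
{
	return width * height * 3 / 2;  /* full Y plane + half-size UV plane */
}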
index 92f2389176b2e0f825cdc94cb954340fb8ead303..ba7a076c3afa7a251878821526ce9bd53ef35025 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_FRAME_PUBLIC_H
 #define __IA_CSS_FRAME_PUBLIC_H
 
-/** @file
+/* @file
  * This file contains structs to describe various frame-formats supported by the ISP.
  */
 
 #include "ia_css_frame_format.h"
 #include "ia_css_buffer.h"
 
-/** For RAW input, the bayer order needs to be specified separately. There
+/* For RAW input, the bayer order needs to be specified separately. There
  *  are 4 possible orders. The name is constructed by taking the first two
  *  colors on the first line and the first two colors from the second line.
  */
 enum ia_css_bayer_order {
-       IA_CSS_BAYER_ORDER_GRBG, /**< GRGRGRGRGR .. BGBGBGBGBG */
-       IA_CSS_BAYER_ORDER_RGGB, /**< RGRGRGRGRG .. GBGBGBGBGB */
-       IA_CSS_BAYER_ORDER_BGGR, /**< BGBGBGBGBG .. GRGRGRGRGR */
-       IA_CSS_BAYER_ORDER_GBRG, /**< GBGBGBGBGB .. RGRGRGRGRG */
+       IA_CSS_BAYER_ORDER_GRBG, /** GRGRGRGRGR .. BGBGBGBGBG */
+       IA_CSS_BAYER_ORDER_RGGB, /** RGRGRGRGRG .. GBGBGBGBGB */
+       IA_CSS_BAYER_ORDER_BGGR, /** BGBGBGBGBG .. GRGRGRGRGR */
+       IA_CSS_BAYER_ORDER_GBRG, /** GBGBGBGBGB .. RGRGRGRGRG */
 };
 #define IA_CSS_BAYER_ORDER_NUM (IA_CSS_BAYER_ORDER_GBRG + 1)
 
-/** Frame plane structure. This describes one plane in an image
+/* Frame plane structure. This describes one plane in an image
  *  frame buffer.
  */
 struct ia_css_frame_plane {
-       unsigned int height; /**< height of a plane in lines */
-       unsigned int width;  /**< width of a line, in DMA elements, note that
+       unsigned int height; /** height of a plane in lines */
+       unsigned int width;  /** width of a line, in DMA elements, note that
                                  for RGB565 the three subpixels are stored in
                                  one element. For all other formats this is
                                  the number of subpixels per line. */
-       unsigned int stride; /**< stride of a line in bytes */
-       unsigned int offset; /**< offset in bytes to start of frame data.
+       unsigned int stride; /** stride of a line in bytes */
+       unsigned int offset; /** offset in bytes to start of frame data.
                                  offset is wrt data field in ia_css_frame */
 };
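/* Illustrative addressing sketch (assumed helper, not part of this header):
 * for a plane with one byte per DMA element, the byte position of pixel
 * (x, y) inside the frame data follows directly from these fields.
 */
static unsigned int plane_pixel_offset(const struct ia_css_frame_plane *p,
				       unsigned int x, unsigned int y)
{
	return p->offset + y * p->stride + x;  /* offset and stride are in bytes */
}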
 
-/** Binary "plane". This is used to story binary streams such as jpeg
+/* Binary "plane". This is used to story binary streams such as jpeg
  *  images. This is not actually a real plane.
  */
 struct ia_css_frame_binary_plane {
-       unsigned int              size; /**< number of bytes in the stream */
-       struct ia_css_frame_plane data; /**< plane */
+       unsigned int              size; /** number of bytes in the stream */
+       struct ia_css_frame_plane data; /** plane */
 };
 
-/** Container for planar YUV frames. This contains 3 planes.
+/* Container for planar YUV frames. This contains 3 planes.
  */
 struct ia_css_frame_yuv_planes {
-       struct ia_css_frame_plane y; /**< Y plane */
-       struct ia_css_frame_plane u; /**< U plane */
-       struct ia_css_frame_plane v; /**< V plane */
+       struct ia_css_frame_plane y; /** Y plane */
+       struct ia_css_frame_plane u; /** U plane */
+       struct ia_css_frame_plane v; /** V plane */
 };
 
-/** Container for semi-planar YUV frames.
+/* Container for semi-planar YUV frames.
   */
 struct ia_css_frame_nv_planes {
-       struct ia_css_frame_plane y;  /**< Y plane */
-       struct ia_css_frame_plane uv; /**< UV plane */
+       struct ia_css_frame_plane y;  /** Y plane */
+       struct ia_css_frame_plane uv; /** UV plane */
 };
 
-/** Container for planar RGB frames. Each color has its own plane.
+/* Container for planar RGB frames. Each color has its own plane.
  */
 struct ia_css_frame_rgb_planes {
-       struct ia_css_frame_plane r; /**< Red plane */
-       struct ia_css_frame_plane g; /**< Green plane */
-       struct ia_css_frame_plane b; /**< Blue plane */
+       struct ia_css_frame_plane r; /** Red plane */
+       struct ia_css_frame_plane g; /** Green plane */
+       struct ia_css_frame_plane b; /** Blue plane */
 };
 
-/** Container for 6-plane frames. These frames are used internally
+/* Container for 6-plane frames. These frames are used internally
  *  in the advanced ISP only.
  */
 struct ia_css_frame_plane6_planes {
-       struct ia_css_frame_plane r;      /**< Red plane */
-       struct ia_css_frame_plane r_at_b; /**< Red at blue plane */
-       struct ia_css_frame_plane gr;     /**< Red-green plane */
-       struct ia_css_frame_plane gb;     /**< Blue-green plane */
-       struct ia_css_frame_plane b;      /**< Blue plane */
-       struct ia_css_frame_plane b_at_r; /**< Blue at red plane */
+       struct ia_css_frame_plane r;      /** Red plane */
+       struct ia_css_frame_plane r_at_b; /** Red at blue plane */
+       struct ia_css_frame_plane gr;     /** Red-green plane */
+       struct ia_css_frame_plane gb;     /** Blue-green plane */
+       struct ia_css_frame_plane b;      /** Blue plane */
+       struct ia_css_frame_plane b_at_r; /** Blue at red plane */
 };
 
 /* Crop info struct - stores the lines to be cropped in isp */
@@ -103,15 +103,15 @@ struct ia_css_crop_info {
        unsigned int start_line;
 };
 
-/** Frame info struct. This describes the contents of an image frame buffer.
+/* Frame info struct. This describes the contents of an image frame buffer.
   */
 struct ia_css_frame_info {
-       struct ia_css_resolution res; /**< Frame resolution (valid data) */
-       unsigned int padded_width; /**< stride of line in memory (in pixels) */
-       enum ia_css_frame_format format; /**< format of the frame data */
-       unsigned int raw_bit_depth; /**< number of valid bits per pixel,
+       struct ia_css_resolution res; /** Frame resolution (valid data) */
+       unsigned int padded_width; /** stride of line in memory (in pixels) */
+       enum ia_css_frame_format format; /** format of the frame data */
+       unsigned int raw_bit_depth; /** number of valid bits per pixel,
                                         only valid for RAW bayer frames */
-       enum ia_css_bayer_order raw_bayer_order; /**< bayer order, only valid
+       enum ia_css_bayer_order raw_bayer_order; /** bayer order, only valid
                                                      for RAW bayer frames */
        /* the params below are computed based on bayer_order
         * we can remove the raw_bayer_order if it is redundant
@@ -136,9 +136,9 @@ struct ia_css_frame_info {
  *  Specifies the DVS loop delay in "frame periods"
  */
 enum ia_css_frame_delay {
-       IA_CSS_FRAME_DELAY_0, /**< Frame delay = 0 */
-       IA_CSS_FRAME_DELAY_1, /**< Frame delay = 1 */
-       IA_CSS_FRAME_DELAY_2  /**< Frame delay = 2 */
+       IA_CSS_FRAME_DELAY_0, /** Frame delay = 0 */
+       IA_CSS_FRAME_DELAY_1, /** Frame delay = 1 */
+       IA_CSS_FRAME_DELAY_2  /** Frame delay = 2 */
 };
 
 enum ia_css_frame_flash_state {
@@ -147,13 +147,13 @@ enum ia_css_frame_flash_state {
        IA_CSS_FRAME_FLASH_STATE_FULL
 };
 
-/** Frame structure. This structure describes an image buffer or frame.
+/* Frame structure. This structure describes an image buffer or frame.
  *  This is the main structure used for all input and output images.
  */
 struct ia_css_frame {
-       struct ia_css_frame_info info; /**< info struct describing the frame */
-       ia_css_ptr   data;             /**< pointer to start of image data */
-       unsigned int data_bytes;       /**< size of image data in bytes */
+       struct ia_css_frame_info info; /** info struct describing the frame */
+       ia_css_ptr   data;             /** pointer to start of image data */
+       unsigned int data_bytes;       /** size of image data in bytes */
        /* LA: move this to ia_css_buffer */
        /*
         * -1 if data address is static during life time of pipeline
@@ -171,10 +171,10 @@ struct ia_css_frame {
        enum ia_css_buffer_type buf_type;
        enum ia_css_frame_flash_state flash_state;
        unsigned int exp_id;
-       /**< exposure id, see ia_css_event_public.h for more detail */
-       uint32_t isp_config_id; /**< Unique ID to track which config was actually applied to a particular frame */
-       bool valid; /**< First video output frame is not valid */
-       bool contiguous; /**< memory is allocated physically contiguously */
+       /** exposure id, see ia_css_event_public.h for more detail */
+       uint32_t isp_config_id; /** Unique ID to track which config was actually applied to a particular frame */
+       bool valid; /** First video output frame is not valid */
+       bool contiguous; /** memory is allocated physically contiguously */
        union {
                unsigned int    _initialisation_dummy;
                struct ia_css_frame_plane raw;
@@ -185,7 +185,7 @@ struct ia_css_frame {
                struct ia_css_frame_nv_planes nv;
                struct ia_css_frame_plane6_planes plane6;
                struct ia_css_frame_binary_plane binary;
-       } planes; /**< frame planes, select the right one based on
+       } planes; /** frame planes, select the right one based on
                       info.format */
 };
 
@@ -204,7 +204,7 @@ struct ia_css_frame {
        { 0 }                                   /* planes */ \
 }
 
-/** @brief Fill a frame with zeros
+/* @brief Fill a frame with zeros
  *
  * @param      frame           The frame.
  * @return     None
@@ -213,7 +213,7 @@ struct ia_css_frame {
  */
 void ia_css_frame_zero(struct ia_css_frame *frame);
 
-/** @brief Allocate a CSS frame structure
+/* @brief Allocate a CSS frame structure
  *
  * @param      frame           The allocated frame.
  * @param      width           The width (in pixels) of the frame.
@@ -234,7 +234,7 @@ ia_css_frame_allocate(struct ia_css_frame **frame,
                      unsigned int stride,
                      unsigned int raw_bit_depth);
 
-/** @brief Allocate a CSS frame structure using a frame info structure.
+/* @brief Allocate a CSS frame structure using a frame info structure.
  *
  * @param      frame   The allocated frame.
  * @param[in]  info    The frame info structure.
@@ -247,7 +247,7 @@ ia_css_frame_allocate(struct ia_css_frame **frame,
 enum ia_css_err
 ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
                                const struct ia_css_frame_info *info);
-/** @brief Free a CSS frame structure.
+/* @brief Free a CSS frame structure.
  *
  * @param[in]  frame   Pointer to the frame.
  * @return     None
@@ -258,7 +258,7 @@ ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
 void
 ia_css_frame_free(struct ia_css_frame *frame);
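/* Illustrative call sequence (assumed, not part of this header): allocate a
 * frame from a previously filled ia_css_frame_info and release it again when
 * it is no longer needed; 'info' is assumed to have been filled in.
 */
struct ia_css_frame *frame = NULL;
enum ia_css_err err = ia_css_frame_allocate_from_info(&frame, &info);
if (err == IA_CSS_SUCCESS)
	ia_css_frame_free(frame);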
 
-/** @brief Allocate a contiguous CSS frame structure
+/* @brief Allocate a contiguous CSS frame structure
  *
  * @param      frame           The allocated frame.
  * @param      width           The width (in pixels) of the frame.
@@ -280,7 +280,7 @@ ia_css_frame_allocate_contiguous(struct ia_css_frame **frame,
                                 unsigned int stride,
                                 unsigned int raw_bit_depth);
 
-/** @brief Allocate a contiguous CSS frame from a frame info structure.
+/* @brief Allocate a contiguous CSS frame from a frame info structure.
  *
  * @param      frame   The allocated frame.
  * @param[in]  info    The frame info structure.
@@ -296,7 +296,7 @@ enum ia_css_err
 ia_css_frame_allocate_contiguous_from_info(struct ia_css_frame **frame,
                                          const struct ia_css_frame_info *info);
 
-/** @brief Allocate a CSS frame structure using a frame info structure.
+/* @brief Allocate a CSS frame structure using a frame info structure.
  *
  * @param      frame   The allocated frame.
  * @param[in]  info    The frame info structure.
@@ -309,7 +309,7 @@ enum ia_css_err
 ia_css_frame_create_from_info(struct ia_css_frame **frame,
        const struct ia_css_frame_info *info);
 
-/** @brief Set a mapped data buffer to a CSS frame
+/* @brief Set a mapped data buffer to a CSS frame
  *
  * @param[in]  frame       Valid CSS frame pointer
  * @param[in]  mapped_data  Mapped data buffer to be assigned to the CSS frame
@@ -327,7 +327,7 @@ ia_css_frame_set_data(struct ia_css_frame *frame,
        const ia_css_ptr   mapped_data,
        size_t data_size_bytes);
 
-/** @brief Map an existing frame data pointer to a CSS frame.
+/* @brief Map an existing frame data pointer to a CSS frame.
  *
  * @param      frame           Pointer to the frame to be initialized
  * @param[in]  info            The frame info.
@@ -350,7 +350,7 @@ ia_css_frame_map(struct ia_css_frame **frame,
                 uint16_t attribute,
                 void *context);
 
-/** @brief Unmap a CSS frame structure.
+/* @brief Unmap a CSS frame structure.
  *
  * @param[in]  frame   Pointer to the CSS frame.
  * @return     None
index 8a17c3346caa0eb097d54f80a52c1bf4d21c6ba1..f415570a3da98a3e4080cb46b8c11270ec3a762c 100644 (file)
 #ifndef __IA_CSS_INPUT_PORT_H
 #define __IA_CSS_INPUT_PORT_H
 
-/** @file
+/* @file
  * This file contains information about the possible input ports for CSS
  */
 
-/** Enumeration of the physical input ports on the CSS hardware.
+/* Enumeration of the physical input ports on the CSS hardware.
  *  There are 3 MIPI CSI-2 ports.
  */
 enum ia_css_csi2_port {
@@ -28,39 +28,39 @@ enum ia_css_csi2_port {
        IA_CSS_CSI2_PORT2  /* Implicitly map to MIPI_PORT2_ID */
 };
 
-/** Backward compatible for CSS API 2.0 only
+/* Backward compatible for CSS API 2.0 only
  *  TO BE REMOVED when all drivers move to CSS API 2.1
  */
 #define        IA_CSS_CSI2_PORT_4LANE IA_CSS_CSI2_PORT0
 #define        IA_CSS_CSI2_PORT_1LANE IA_CSS_CSI2_PORT1
 #define        IA_CSS_CSI2_PORT_2LANE IA_CSS_CSI2_PORT2
 
-/** The CSI2 interface supports 2 types of compression or can
+/* The CSI2 interface supports 2 types of compression or can
  *  be run without compression.
  */
 enum ia_css_csi2_compression_type {
-       IA_CSS_CSI2_COMPRESSION_TYPE_NONE, /**< No compression */
-       IA_CSS_CSI2_COMPRESSION_TYPE_1,    /**< Compression scheme 1 */
-       IA_CSS_CSI2_COMPRESSION_TYPE_2     /**< Compression scheme 2 */
+       IA_CSS_CSI2_COMPRESSION_TYPE_NONE, /** No compression */
+       IA_CSS_CSI2_COMPRESSION_TYPE_1,    /** Compression scheme 1 */
+       IA_CSS_CSI2_COMPRESSION_TYPE_2     /** Compression scheme 2 */
 };
 
 struct ia_css_csi2_compression {
        enum ia_css_csi2_compression_type type;
-       /**< Compression used */
+       /** Compression used */
        unsigned int                      compressed_bits_per_pixel;
-       /**< Compressed bits per pixel (only when compression is enabled) */
+       /** Compressed bits per pixel (only when compression is enabled) */
        unsigned int                      uncompressed_bits_per_pixel;
-       /**< Uncompressed bits per pixel (only when compression is enabled) */
+       /** Uncompressed bits per pixel (only when compression is enabled) */
 };
 
-/** Input port structure.
+/* Input port structure.
  */
 struct ia_css_input_port {
-       enum ia_css_csi2_port port; /**< Physical CSI-2 port */
-       unsigned int num_lanes; /**< Number of lanes used (4-lane port only) */
-       unsigned int timeout;   /**< Timeout value */
-       unsigned int rxcount;   /**< Register value, should include all lanes */
-       struct ia_css_csi2_compression compression; /**< Compression used */
+       enum ia_css_csi2_port port; /** Physical CSI-2 port */
+       unsigned int num_lanes; /** Number of lanes used (4-lane port only) */
+       unsigned int timeout;   /** Timeout value */
+       unsigned int rxcount;   /** Register value, should include all lanes */
+       struct ia_css_csi2_compression compression; /** Compression used */
 };
 
 #endif /* __IA_CSS_INPUT_PORT_H */
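/* Illustrative configuration sketch (values assumed, not part of this
 * header): a 4-lane CSI-2 port running without compression.
 */
struct ia_css_input_port port_cfg = {
	.port      = IA_CSS_CSI2_PORT0,  /* the 4-lane physical port */
	.num_lanes = 4,
	.timeout   = 5000,               /* assumed timeout value */
	.rxcount   = 0x04040404,         /* assumed register value */
	.compression = {
		.type = IA_CSS_CSI2_COMPRESSION_TYPE_NONE,
	},
};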
index 416ca4d28732ea7c3dadd598653200a8c40104d7..10ef61178bb2bae95a1e65d873c973f75da2b7b2 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_IRQ_H
 #define __IA_CSS_IRQ_H
 
-/** @file
+/* @file
  * This file contains information for Interrupts/IRQs from CSS
  */
 
 #include "ia_css_pipe_public.h"
 #include "ia_css_input_port.h"
 
-/** Interrupt types, these enumerate all supported interrupt types.
+/* Interrupt types, these enumerate all supported interrupt types.
  */
 enum ia_css_irq_type {
-       IA_CSS_IRQ_TYPE_EDGE,  /**< Edge (level) sensitive interrupt */
-       IA_CSS_IRQ_TYPE_PULSE  /**< Pulse-shaped interrupt */
+       IA_CSS_IRQ_TYPE_EDGE,  /** Edge (level) sensitive interrupt */
+       IA_CSS_IRQ_TYPE_PULSE  /** Pulse-shaped interrupt */
 };
 
-/** Interrupt request type.
+/* Interrupt request type.
  *  When the CSS hardware generates an interrupt, a function in this API
  *  needs to be called to retrieve information about the interrupt.
  *  This interrupt type is part of this information and indicates what
@@ -46,55 +46,55 @@ enum ia_css_irq_type {
  */
 enum ia_css_irq_info {
        IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR            = 1 << 0,
-       /**< the css receiver has encountered an error */
+       /** the css receiver has encountered an error */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW    = 1 << 1,
-       /**< the FIFO in the csi receiver has overflown */
+       /** the FIFO in the csi receiver has overflown */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF              = 1 << 2,
-       /**< the css receiver received the start of frame */
+       /** the css receiver received the start of frame */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF              = 1 << 3,
-       /**< the css receiver received the end of frame */
+       /** the css receiver received the end of frame */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_SOL              = 1 << 4,
-       /**< the css receiver received the start of line */
+       /** the css receiver received the start of line */
        IA_CSS_IRQ_INFO_PSYS_EVENTS_READY             = 1 << 5,
-       /**< One or more events are available in the PSYS event queue */
+       /** One or more events are available in the PSYS event queue */
        IA_CSS_IRQ_INFO_EVENTS_READY = IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
-       /**< deprecated{obsolete version of IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
+       /** deprecated{obsolete version of IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
         * same functionality.} */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_EOL              = 1 << 6,
-       /**< the css receiver received the end of line */
+       /** the css receiver received the end of line */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_SIDEBAND_CHANGED = 1 << 7,
-       /**< the css receiver received a change in side band signals */
+       /** the css receiver received a change in side band signals */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_0      = 1 << 8,
-       /**< generic short packets (0) */
+       /** generic short packets (0) */
        IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_1      = 1 << 9,
-       /**< generic short packets (1) */
+       /** generic short packets (1) */
        IA_CSS_IRQ_INFO_IF_PRIM_ERROR                 = 1 << 10,
-       /**< the primary input formatter (A) has encountered an error */
+       /** the primary input formatter (A) has encountered an error */
        IA_CSS_IRQ_INFO_IF_PRIM_B_ERROR               = 1 << 11,
-       /**< the primary input formatter (B) has encountered an error */
+       /** the primary input formatter (B) has encountered an error */
        IA_CSS_IRQ_INFO_IF_SEC_ERROR                  = 1 << 12,
-       /**< the secondary input formatter has encountered an error */
+       /** the secondary input formatter has encountered an error */
        IA_CSS_IRQ_INFO_STREAM_TO_MEM_ERROR           = 1 << 13,
-       /**< the stream-to-memory device has encountered an error */
+       /** the stream-to-memory device has encountered an error */
        IA_CSS_IRQ_INFO_SW_0                          = 1 << 14,
-       /**< software interrupt 0 */
+       /** software interrupt 0 */
        IA_CSS_IRQ_INFO_SW_1                          = 1 << 15,
-       /**< software interrupt 1 */
+       /** software interrupt 1 */
        IA_CSS_IRQ_INFO_SW_2                          = 1 << 16,
-       /**< software interrupt 2 */
+       /** software interrupt 2 */
        IA_CSS_IRQ_INFO_ISP_BINARY_STATISTICS_READY   = 1 << 17,
-       /**< ISP binary statistics are ready */
+       /** ISP binary statistics are ready */
        IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR            = 1 << 18,
-       /**< the input system in in error */
+       /** the input system is in error */
        IA_CSS_IRQ_INFO_IF_ERROR                      = 1 << 19,
-       /**< the input formatter in in error */
+       /** the input formatter is in error */
        IA_CSS_IRQ_INFO_DMA_ERROR                     = 1 << 20,
-       /**< the dma in in error */
+       /** the dma is in error */
        IA_CSS_IRQ_INFO_ISYS_EVENTS_READY             = 1 << 21,
-       /**< end-of-frame events are ready in the isys_event queue */
+       /** end-of-frame events are ready in the isys_event queue */
 };
 
-/** CSS receiver error types. Whenever the CSS receiver has encountered
+/* CSS receiver error types. Whenever the CSS receiver has encountered
  *  an error, this enumeration is used to indicate which errors have occurred.
  *
  *  Note that multiple error flags can be enabled at once and that this is in
@@ -105,39 +105,39 @@ enum ia_css_irq_info {
  * different receiver types, or possibly none in case of tests systems.
  */
 enum ia_css_rx_irq_info {
-       IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN   = 1U << 0, /**< buffer overrun */
-       IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE = 1U << 1, /**< entering sleep mode */
-       IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE  = 1U << 2, /**< exited sleep mode */
-       IA_CSS_RX_IRQ_INFO_ECC_CORRECTED    = 1U << 3, /**< ECC corrected */
+       IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN   = 1U << 0, /** buffer overrun */
+       IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE = 1U << 1, /** entering sleep mode */
+       IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE  = 1U << 2, /** exited sleep mode */
+       IA_CSS_RX_IRQ_INFO_ECC_CORRECTED    = 1U << 3, /** ECC corrected */
        IA_CSS_RX_IRQ_INFO_ERR_SOT          = 1U << 4,
-                                               /**< Start of transmission */
-       IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC     = 1U << 5, /**< SOT sync (??) */
-       IA_CSS_RX_IRQ_INFO_ERR_CONTROL      = 1U << 6, /**< Control (??) */
-       IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE   = 1U << 7, /**< Double ECC */
-       IA_CSS_RX_IRQ_INFO_ERR_CRC          = 1U << 8, /**< CRC error */
-       IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID   = 1U << 9, /**< Unknown ID */
-       IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC   = 1U << 10,/**< Frame sync error */
-       IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA   = 1U << 11,/**< Frame data error */
-       IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1U << 12,/**< Timeout occurred */
-       IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC  = 1U << 13,/**< Unknown escape seq. */
-       IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC    = 1U << 14,/**< Line Sync error */
+                                               /** Start of transmission */
+       IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC     = 1U << 5, /** SOT sync (??) */
+       IA_CSS_RX_IRQ_INFO_ERR_CONTROL      = 1U << 6, /** Control (??) */
+       IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE   = 1U << 7, /** Double ECC */
+       IA_CSS_RX_IRQ_INFO_ERR_CRC          = 1U << 8, /** CRC error */
+       IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID   = 1U << 9, /** Unknown ID */
+       IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC   = 1U << 10,/** Frame sync error */
+       IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA   = 1U << 11,/** Frame data error */
+       IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1U << 12,/** Timeout occurred */
+       IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC  = 1U << 13,/** Unknown escape seq. */
+       IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC    = 1U << 14,/** Line Sync error */
        IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT     = 1U << 15,
 };
 
-/** Interrupt info structure. This structure contains information about an
+/* Interrupt info structure. This structure contains information about an
  *  interrupt. This needs to be used after an interrupt is received on the IA
  *  to perform the correct action.
  */
 struct ia_css_irq {
-       enum ia_css_irq_info type; /**< Interrupt type. */
-       unsigned int sw_irq_0_val; /**< In case of SW interrupt 0, value. */
-       unsigned int sw_irq_1_val; /**< In case of SW interrupt 1, value. */
-       unsigned int sw_irq_2_val; /**< In case of SW interrupt 2, value. */
+       enum ia_css_irq_info type; /** Interrupt type. */
+       unsigned int sw_irq_0_val; /** In case of SW interrupt 0, value. */
+       unsigned int sw_irq_1_val; /** In case of SW interrupt 1, value. */
+       unsigned int sw_irq_2_val; /** In case of SW interrupt 2, value. */
        struct ia_css_pipe *pipe;
-       /**< The image pipe that generated the interrupt. */
+       /** The image pipe that generated the interrupt. */
 };
 
-/** @brief Obtain interrupt information.
+/* @brief Obtain interrupt information.
  *
  * @param[out] info    Pointer to the interrupt info. The interrupt
 *                     information will be written to this info.
@@ -154,7 +154,7 @@ struct ia_css_irq {
 enum ia_css_err
 ia_css_irq_translate(unsigned int *info);
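/* Illustrative interrupt service sketch (assumed, not part of this header):
 * translate the pending interrupt and test one of the info bits declared
 * above before handling it.
 */
unsigned int irq_info = 0;

if (ia_css_irq_translate(&irq_info) == IA_CSS_SUCCESS &&
    (irq_info & IA_CSS_IRQ_INFO_PSYS_EVENTS_READY)) {
	/* events can now be dequeued from the PSYS event queue */
}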
 
-/** @brief Get CSI receiver error info.
+/* @brief Get CSI receiver error info.
  *
  * @param[out] irq_bits        Pointer to the interrupt bits. The interrupt
 *                     bits will be written to this info.
@@ -172,7 +172,7 @@ ia_css_irq_translate(unsigned int *info);
 void
 ia_css_rx_get_irq_info(unsigned int *irq_bits);
 
-/** @brief Get CSI receiver error info.
+/* @brief Get CSI receiver error info.
  *
  * @param[in]  port     Input port identifier.
  * @param[out] irq_bits        Pointer to the interrupt bits. The interrupt
@@ -188,7 +188,7 @@ ia_css_rx_get_irq_info(unsigned int *irq_bits);
 void
 ia_css_rx_port_get_irq_info(enum ia_css_csi2_port port, unsigned int *irq_bits);
 
-/** @brief Clear CSI receiver error info.
+/* @brief Clear CSI receiver error info.
  *
  * @param[in] irq_bits The bits that should be cleared from the CSI receiver
  *                     interrupt bits register.
@@ -205,7 +205,7 @@ ia_css_rx_port_get_irq_info(enum ia_css_csi2_port port, unsigned int *irq_bits);
 void
 ia_css_rx_clear_irq_info(unsigned int irq_bits);
 
-/** @brief Clear CSI receiver error info.
+/* @brief Clear CSI receiver error info.
  *
  * @param[in] port      Input port identifier.
  * @param[in] irq_bits The bits that should be cleared from the CSI receiver
@@ -220,7 +220,7 @@ ia_css_rx_clear_irq_info(unsigned int irq_bits);
 void
 ia_css_rx_port_clear_irq_info(enum ia_css_csi2_port port, unsigned int irq_bits);
 
-/** @brief Enable or disable specific interrupts.
+/* @brief Enable or disable specific interrupts.
  *
  * @param[in] type     The interrupt type that will be enabled/disabled.
  * @param[in] enable   enable or disable.
index c40c5a19bfe190a5580f10872489ccf2d8191485..8b674c98224c2b73111c664c071fd1f442557100 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_METADATA_H
 #define __IA_CSS_METADATA_H
 
-/** @file
+/* @file
  * This file contains structure for processing sensor metadata.
  */
 
 #include "ia_css_types.h"
 #include "ia_css_stream_format.h"
 
-/** Metadata configuration. This data structure contains necessary info
+/* Metadata configuration. This data structure contains necessary info
  *  to process sensor metadata.
  */
 struct ia_css_metadata_config {
-       enum ia_css_stream_format data_type; /**< Data type of CSI-2 embedded
+       enum ia_css_stream_format data_type; /** Data type of CSI-2 embedded
                        data. The default value is IA_CSS_STREAM_FORMAT_EMBEDDED. For
                        certain sensors, user can choose non-default data type for embedded
                        data. */
-       struct ia_css_resolution  resolution; /**< Resolution */
+       struct ia_css_resolution  resolution; /** Resolution */
 };
 
 struct ia_css_metadata_info {
-       struct ia_css_resolution resolution; /**< Resolution */
-       uint32_t                 stride;     /**< Stride in bytes */
-       uint32_t                 size;       /**< Total size in bytes */
+       struct ia_css_resolution resolution; /** Resolution */
+       uint32_t                 stride;     /** Stride in bytes */
+       uint32_t                 size;       /** Total size in bytes */
 };
 
 struct ia_css_metadata {
-       struct ia_css_metadata_info info;    /**< Layout info */
-       ia_css_ptr                  address; /**< CSS virtual address */
+       struct ia_css_metadata_info info;    /** Layout info */
+       ia_css_ptr                  address; /** CSS virtual address */
        uint32_t                    exp_id;
-       /**< Exposure ID, see ia_css_event_public.h for more detail */
+       /** Exposure ID, see ia_css_event_public.h for more detail */
 };
 #define SIZE_OF_IA_CSS_METADATA_STRUCT sizeof(struct ia_css_metadata)
 
-/** @brief Allocate a metadata buffer.
+/* @brief Allocate a metadata buffer.
  * @param[in]   metadata_info Metadata info struct, contains details on metadata buffers.
  * @return      Pointer of metadata buffer or NULL (if error)
  *
@@ -58,7 +58,7 @@ struct ia_css_metadata {
 struct ia_css_metadata *
 ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info);
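/* Illustrative call sequence (assumed, not part of this header): allocate a
 * metadata buffer from a metadata info struct and check for failure;
 * 'md_info' is assumed to have been filled in, and the matching free routine
 * is documented just below.
 */
struct ia_css_metadata *md = ia_css_metadata_allocate(&md_info);
if (!md)
	return -ENOMEM;  /* assumed error handling in the caller */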
 
-/** @brief Free a metadata buffer.
+/* @brief Free a metadata buffer.
  *
  * @param[in]  metadata        Pointer of metadata buffer.
  * @return     None
index fd2c01b60b28127b55928a664d24a2ebe53b53ca..f9c9cd76be97c7c52318701bbf413b8a9ff7c2f9 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_MIPI_H
 #define __IA_CSS_MIPI_H
 
-/** @file
+/* @file
  * This file contains MIPI support functionality
  */
 
 #include "ia_css_stream_format.h"
 #include "ia_css_input_port.h"
 
-/** Backward compatible for CSS API 2.0 only
+/* Backward compatible for CSS API 2.0 only
  * TO BE REMOVED when all drivers move to CSS API 2.1.
  */
-/** @brief Specify a CSS MIPI frame buffer.
+/* @brief Specify a CSS MIPI frame buffer.
  *
  * @param[in]  size_mem_words  The frame size in memory words (32B).
  * @param[in]  contiguous      Allocate memory physically contiguously or not.
@@ -42,7 +42,7 @@ ia_css_mipi_frame_specify(const unsigned int  size_mem_words,
                                const bool contiguous);
 
 #if !defined(HAS_NO_INPUT_SYSTEM)
-/** @brief Register size of a CSS MIPI frame for check during capturing.
+/* @brief Register size of a CSS MIPI frame for check during capturing.
  *
  * @param[in]  port    CSI-2 port this check is registered.
  * @param[in]  size_mem_words  The frame size in memory words (32B).
@@ -59,7 +59,7 @@ ia_css_mipi_frame_enable_check_on_size(const enum ia_css_csi2_port port,
                                const unsigned int      size_mem_words);
 #endif
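/* Illustrative sketch (values assumed, not part of this header): reserve a
 * MIPI frame buffer of a given size in 32 byte memory words and register the
 * same size for the capture-time check on port 0.
 */
const unsigned int mipi_words = 0x60000;  /* assumed frame size in memory words */

ia_css_mipi_frame_specify(mipi_words, false);
ia_css_mipi_frame_enable_check_on_size(IA_CSS_CSI2_PORT0, mipi_words);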
 
-/** @brief Calculate the size of a mipi frame.
+/* @brief Calculate the size of a mipi frame.
  *
  * @param[in]  width           The width (in pixels) of the frame.
  * @param[in]  height          The height (in lines) of the frame.
index 48f8855d61f6eb99422cb0aa35919c8562a01ebe..13c21056bfbf77fca1e71a8ceb37f41a79a6027c 100644 (file)
 #ifndef __IA_CSS_MMU_H
 #define __IA_CSS_MMU_H
 
-/** @file
+/* @file
  * This file contains one support function for invalidating the CSS MMU cache
  */
 
-/** @brief Invalidate the MMU internal cache.
+/* @brief Invalidate the MMU internal cache.
  * @return     None
  *
  * This function triggers an invalidation of the translate-look-aside
index 969840da52b2199af47b73b5fa8d8ac10c003a0a..de409638d009bc502f1ca522442dbcb48dcd1c0d 100644 (file)
 #ifndef __IA_CSS_MORPH_H
 #define __IA_CSS_MORPH_H
 
-/** @file
+/* @file
 * This file contains support for the morphing table
  */
 
 #include <ia_css_types.h>
 
-/** @brief Morphing table
+/* @brief Morphing table
  * @param[in]  width Width of the morphing table.
  * @param[in]  height Height of the morphing table.
  * @return             Pointer to the morphing table
@@ -29,7 +29,7 @@
 struct ia_css_morph_table *
 ia_css_morph_table_allocate(unsigned int width, unsigned int height);
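/* Illustrative call sequence (assumed, not part of this header): allocate a
 * morphing table and release it again; the free routine is documented just
 * below, its name is assumed here.
 */
struct ia_css_morph_table *morph = ia_css_morph_table_allocate(64, 48); /* assumed dimensions */
if (morph)
	ia_css_morph_table_free(morph);  /* assumed name of the free routine */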
 
-/** @brief Free the morph table
+/* @brief Free the morph table
  * @param[in]  me Pointer to the morph table.
  * @return             None
 */
index 733e0ef3afe8bdc0c430ec9a33bf73afd992a8ab..df0aad9a6ab91a1656360bddbcca0ce0d0bfd17c 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_PIPE_PUBLIC_H
 #define __IA_CSS_PIPE_PUBLIC_H
 
-/** @file
+/* @file
  * This file contains the public interface for CSS pipes.
  */
 
@@ -34,7 +34,7 @@ enum {
        IA_CSS_PIPE_MAX_OUTPUT_STAGE,
 };
 
-/** Enumeration of pipe modes. This mode can be used to create
+/* Enumeration of pipe modes. This mode can be used to create
  *  an image pipe for this mode. These pipes can be combined
  *  to configure and run streams on the ISP.
  *
@@ -42,12 +42,12 @@ enum {
  *  create a continuous capture stream.
  */
 enum ia_css_pipe_mode {
-       IA_CSS_PIPE_MODE_PREVIEW,       /**< Preview pipe */
-       IA_CSS_PIPE_MODE_VIDEO,         /**< Video pipe */
-       IA_CSS_PIPE_MODE_CAPTURE,       /**< Still capture pipe */
-       IA_CSS_PIPE_MODE_ACC,           /**< Accelerated pipe */
-       IA_CSS_PIPE_MODE_COPY,          /**< Copy pipe, only used for embedded/image data copying */
-       IA_CSS_PIPE_MODE_YUVPP,         /**< YUV post processing pipe, used for all use cases with YUV input,
+       IA_CSS_PIPE_MODE_PREVIEW,       /** Preview pipe */
+       IA_CSS_PIPE_MODE_VIDEO,         /** Video pipe */
+       IA_CSS_PIPE_MODE_CAPTURE,       /** Still capture pipe */
+       IA_CSS_PIPE_MODE_ACC,           /** Accelerated pipe */
+       IA_CSS_PIPE_MODE_COPY,          /** Copy pipe, only used for embedded/image data copying */
+       IA_CSS_PIPE_MODE_YUVPP,         /** YUV post processing pipe, used for all use cases with YUV input,
                                                                        for SoC sensor and external ISP */
 };
 /* Temporary define  */
@@ -58,10 +58,10 @@ enum ia_css_pipe_mode {
  * the order should match with definition in sh_css_defs.h
  */
 enum ia_css_pipe_version {
-       IA_CSS_PIPE_VERSION_1 = 1,              /**< ISP1.0 pipe */
-       IA_CSS_PIPE_VERSION_2_2 = 2,            /**< ISP2.2 pipe */
-       IA_CSS_PIPE_VERSION_2_6_1 = 3,          /**< ISP2.6.1 pipe */
-       IA_CSS_PIPE_VERSION_2_7 = 4             /**< ISP2.7 pipe */
+       IA_CSS_PIPE_VERSION_1 = 1,              /** ISP1.0 pipe */
+       IA_CSS_PIPE_VERSION_2_2 = 2,            /** ISP2.2 pipe */
+       IA_CSS_PIPE_VERSION_2_6_1 = 3,          /** ISP2.6.1 pipe */
+       IA_CSS_PIPE_VERSION_2_7 = 4             /** ISP2.7 pipe */
 };
 
 /**
@@ -71,79 +71,79 @@ enum ia_css_pipe_version {
  */
 struct ia_css_pipe_config {
        enum ia_css_pipe_mode mode;
-       /**< mode, indicates which mode the pipe should use. */
+       /** mode, indicates which mode the pipe should use. */
        enum ia_css_pipe_version isp_pipe_version;
-       /**< pipe version, indicates which imaging pipeline the pipe should use. */
+       /** pipe version, indicates which imaging pipeline the pipe should use. */
        struct ia_css_resolution input_effective_res;
-       /**< input effective resolution */
+       /** input effective resolution */
        struct ia_css_resolution bayer_ds_out_res;
-       /**< bayer down scaling */
+       /** bayer down scaling */
        struct ia_css_resolution capt_pp_in_res;
 #ifndef ISP2401
-       /**< bayer down scaling */
+       /** bayer down scaling */
 #else
-       /**< capture post processing input resolution */
+       /** capture post processing input resolution */
 #endif
        struct ia_css_resolution vf_pp_in_res;
 #ifndef ISP2401
-       /**< bayer down scaling */
+       /** bayer down scaling */
 #else
-       /**< view finder post processing input resolution */
+       /** view finder post processing input resolution */
        struct ia_css_resolution output_system_in_res;
-       /**< For IPU3 only: use output_system_in_res to specify what input resolution
+       /** For IPU3 only: use output_system_in_res to specify what input resolution
             will OSYS receive, this resolution is equal to the output resolution of GDC
             if not determined CSS will set output_system_in_res with main osys output pin resolution
             All other IPUs may ignore this property */
 #endif
        struct ia_css_resolution dvs_crop_out_res;
-       /**< dvs crop, video only, not in use yet. Use dvs_envelope below. */
+       /** dvs crop, video only, not in use yet. Use dvs_envelope below. */
        struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
-       /**< output of YUV scaling */
+       /** output of YUV scaling */
        struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
-       /**< output of VF YUV scaling */
+       /** output of VF YUV scaling */
        struct ia_css_fw_info *acc_extension;
-       /**< Pipeline extension accelerator */
+       /** Pipeline extension accelerator */
        struct ia_css_fw_info **acc_stages;
-       /**< Standalone accelerator stages */
+       /** Standalone accelerator stages */
        uint32_t num_acc_stages;
-       /**< Number of standalone accelerator stages */
+       /** Number of standalone accelerator stages */
        struct ia_css_capture_config default_capture_config;
-       /**< Default capture config for initial capture pipe configuration. */
-       struct ia_css_resolution dvs_envelope; /**< temporary */
+       /** Default capture config for initial capture pipe configuration. */
+       struct ia_css_resolution dvs_envelope; /** temporary */
        enum ia_css_frame_delay dvs_frame_delay;
-       /**< indicates the DVS loop delay in frame periods */
+       /** indicates the DVS loop delay in frame periods */
        int acc_num_execs;
-       /**< For acceleration pipes only: determine how many times the pipe
+       /** For acceleration pipes only: determine how many times the pipe
             should be run. Setting this to -1 means it will run until
             stopped. */
        bool enable_dz;
-       /**< Disabling digital zoom for a pipeline, if this is set to false,
+       /** Disabling digital zoom for a pipeline, if this is set to false,
             then setting a zoom factor will have no effect.
             In some use cases this provides better performance. */
        bool enable_dpc;
-       /**< Disabling "Defect Pixel Correction" for a pipeline, if this is set
+       /** Disabling "Defect Pixel Correction" for a pipeline, if this is set
             to false. In some use cases this provides better performance. */
        bool enable_vfpp_bci;
-       /**< Enabling BCI mode will cause yuv_scale binary to be picked up
+       /** Enabling BCI mode will cause yuv_scale binary to be picked up
             instead of vf_pp. This only applies to viewfinder post
             processing stages. */
 #ifdef ISP2401
        bool enable_luma_only;
-       /**< Enabling of monochrome mode for a pipeline. If enabled only luma processing
+       /** Enabling of monochrome mode for a pipeline. If enabled only luma processing
             will be done. */
        bool enable_tnr;
-       /**< Enabling of TNR (temporal noise reduction). This is only applicable to video
+       /** Enabling of TNR (temporal noise reduction). This is only applicable to video
             pipes. Non video-pipes should always set this parameter to false. */
 #endif
        struct ia_css_isp_config *p_isp_config;
-       /**< Pointer to ISP configuration */
+       /** Pointer to ISP configuration */
        struct ia_css_resolution gdc_in_buffer_res;
-       /**< GDC in buffer resolution. */
+       /** GDC in buffer resolution. */
        struct ia_css_point gdc_in_buffer_offset;
-       /**< GDC in buffer offset - indicates the pixel coordinates of the first valid pixel inside the buffer */
+       /** GDC in buffer offset - indicates the pixel coordinates of the first valid pixel inside the buffer */
 #ifdef ISP2401
        struct ia_css_coordinate internal_frame_origin_bqs_on_sctbl;
-       /**< Origin of internal frame positioned on shading table at shading correction in ISP.
+       /** Origin of internal frame positioned on shading table at shading correction in ISP.
             NOTE: Shading table is larger than or equal to internal frame.
                   Shading table has shading gains and internal frame has bayer data.
                   The origin of internal frame is used in shading correction in ISP
@@ -228,20 +228,20 @@ struct ia_css_pipe_config {
 
 #endif
 
-/** Pipe info, this struct describes properties of a pipe after it's stream has
+/* Pipe info, this struct describes properties of a pipe after its stream has
  * been created.
  * ~~~** DO NOT ADD NEW FIELD **~~~ This structure will be deprecated.
  *           - On the Behalf of CSS-API Committee.
  */
 struct ia_css_pipe_info {
        struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
-       /**< Info about output resolution. This contains the stride which
+       /** Info about output resolution. This contains the stride which
             should be used for memory allocation. */
        struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
-       /**< Info about viewfinder output resolution (optional). This contains
+       /** Info about viewfinder output resolution (optional). This contains
             the stride that should be used for memory allocation. */
        struct ia_css_frame_info raw_output_info;
-       /**< Raw output resolution. This indicates the resolution of the
+       /** Raw output resolution. This indicates the resolution of the
             RAW bayer output for pipes that support this. Currently, only the
             still capture pipes support this feature. When this resolution is
             smaller than the input resolution, cropping will be performed by
@@ -252,17 +252,17 @@ struct ia_css_pipe_info {
             the input resolution - 8x8. */
 #ifdef ISP2401
        struct ia_css_resolution output_system_in_res_info;
-       /**< For IPU3 only. Info about output system in resolution which is considered
+       /** For IPU3 only. Info about output system in resolution which is considered
             as gdc out resolution. */
 #endif
        struct ia_css_shading_info shading_info;
-       /**< After an image pipe is created, this field will contain the info
+       /** After an image pipe is created, this field will contain the info
             for the shading correction. */
        struct ia_css_grid_info  grid_info;
-       /**< After an image pipe is created, this field will contain the grid
+       /** After an image pipe is created, this field will contain the grid
             info for 3A and DVS. */
        int num_invalid_frames;
-       /**< The very first frames in a started stream do not contain valid data.
+       /** The very first frames in a started stream do not contain valid data.
             In this field, the CSS-firmware communicates to the host-driver how
             many initial frames will contain invalid data; this allows the
             host-driver to discard those initial invalid frames and start its
@@ -299,7 +299,7 @@ struct ia_css_pipe_info {
 
 #endif
 
-/** @brief Load default pipe configuration
+/* @brief Load default pipe configuration
  * @param[out] pipe_config The pipe configuration.
  * @return     None
  *
@@ -334,7 +334,7 @@ struct ia_css_pipe_info {
  */
 void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config);
 
-/** @brief Create a pipe
+/* @brief Create a pipe
  * @param[in]  config The pipe configuration.
  * @param[out] pipe The pipe.
  * @return     IA_CSS_SUCCESS or the error code.
@@ -346,7 +346,7 @@ enum ia_css_err
 ia_css_pipe_create(const struct ia_css_pipe_config *config,
                   struct ia_css_pipe **pipe);
 
-/** @brief Destroy a pipe
+/* @brief Destroy a pipe
  * @param[in]  pipe The pipe.
  * @return     IA_CSS_SUCCESS or the error code.
  *
@@ -355,7 +355,7 @@ ia_css_pipe_create(const struct ia_css_pipe_config *config,
 enum ia_css_err
 ia_css_pipe_destroy(struct ia_css_pipe *pipe);
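/* Illustrative pipe lifecycle sketch (assumed, not part of this header):
 * start from the defaults, pick a mode, create the pipe and destroy it again
 * on teardown.
 */
struct ia_css_pipe_config cfg;
struct ia_css_pipe *pipe = NULL;

ia_css_pipe_config_defaults(&cfg);
cfg.mode = IA_CSS_PIPE_MODE_PREVIEW;

if (ia_css_pipe_create(&cfg, &pipe) == IA_CSS_SUCCESS)
	ia_css_pipe_destroy(pipe);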
 
-/** @brief Provides information about a pipe
+/* @brief Provides information about a pipe
  * @param[in]  pipe The pipe.
  * @param[out] pipe_info The pipe information.
  * @return     IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS.
@@ -366,7 +366,7 @@ enum ia_css_err
 ia_css_pipe_get_info(const struct ia_css_pipe *pipe,
                     struct ia_css_pipe_info *pipe_info);
 
-/** @brief Configure a pipe with filter coefficients.
+/* @brief Configure a pipe with filter coefficients.
  * @param[in]  pipe    The pipe.
  * @param[in]  config  The pointer to ISP configuration.
  * @return             IA_CSS_SUCCESS or error code upon error.
@@ -378,7 +378,7 @@ enum ia_css_err
 ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe,
                                                   struct ia_css_isp_config *config);
 
-/** @brief Controls when the Event generator raises an IRQ to the Host.
+/* @brief Controls when the Event generator raises an IRQ to the Host.
  *
  * @param[in]  pipe    The pipe.
  * @param[in]  or_mask Binary or of enum ia_css_event_irq_mask_type. Each pipe
@@ -455,7 +455,7 @@ ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe,
                         unsigned int or_mask,
                         unsigned int and_mask);
 
-/** @brief Reads the current event IRQ mask from the CSS.
+/* @brief Reads the current event IRQ mask from the CSS.
  *
  * @param[in]  pipe The pipe.
  * @param[out] or_mask Current or_mask. The bits in this mask are a binary or
@@ -476,7 +476,7 @@ ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe,
                          unsigned int *or_mask,
                          unsigned int *and_mask);
 
-/** @brief Queue a buffer for an image pipe.
+/* @brief Queue a buffer for an image pipe.
  *
  * @param[in] pipe     The pipe that will own the buffer.
  * @param[in] buffer   Pointer to the buffer.
@@ -498,7 +498,7 @@ enum ia_css_err
 ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
                           const struct ia_css_buffer *buffer);
 
-/** @brief Dequeue a buffer from an image pipe.
+/* @brief Dequeue a buffer from an image pipe.
  *
  * @param[in]    pipe   The pipeline that the buffer queue belongs to.
  * @param[in,out] buffer The buffer is used to lookup the type which determines
@@ -519,7 +519,7 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
                           struct ia_css_buffer *buffer);
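/* Illustrative buffer exchange sketch (assumed, not part of this header):
 * hand a buffer to the pipe, then pick it up again once the corresponding
 * buffer-done event has been received; 'buf' is assumed to have its type and
 * data fields filled in.
 */
if (ia_css_pipe_enqueue_buffer(pipe, &buf) == IA_CSS_SUCCESS) {
	/* ... wait for the buffer-done event ... */
	ia_css_pipe_dequeue_buffer(pipe, &buf);
}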
 
 
-/** @brief  Set the state (Enable or Disable) of the Extension stage in the
+/* @brief  Set the state (Enable or Disable) of the Extension stage in the
  *          given pipe.
  * @param[in] pipe         Pipe handle.
  * @param[in] fw_handle    Extension firmware Handle (ia_css_fw_info.handle)
@@ -546,7 +546,7 @@ ia_css_pipe_set_qos_ext_state (struct ia_css_pipe *pipe,
                            uint32_t fw_handle,
                            bool  enable);
 
-/** @brief  Get the state (Enable or Disable) of the Extension stage in the
+/* @brief  Get the state (Enable or Disable) of the Extension stage in the
  *          given pipe.
  * @param[in]  pipe        Pipe handle.
  * @param[in]  fw_handle   Extension firmware Handle (ia_css_fw_info.handle)
@@ -573,7 +573,7 @@ ia_css_pipe_get_qos_ext_state (struct ia_css_pipe *pipe,
                            bool * enable);
 
 #ifdef ISP2401
-/** @brief  Update mapped CSS and ISP arguments for QoS pipe during SP runtime.
+/* @brief  Update mapped CSS and ISP arguments for QoS pipe during SP runtime.
  * @param[in] pipe             Pipe handle.
  * @param[in] fw_handle        Extension firmware Handle (ia_css_fw_info.handle).
  * @param[in] css_seg          Parameter memory descriptors for CSS segments.
@@ -595,7 +595,7 @@ ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe, uint32_t fw_hand
                        struct ia_css_isp_param_isp_segments *isp_seg);
 
 #endif
-/** @brief Get selected configuration settings
+/* @brief Get selected configuration settings
  * @param[in]  pipe    The pipe.
  * @param[out] config  Configuration settings.
  * @return             None
@@ -604,7 +604,7 @@ void
 ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
                             struct ia_css_isp_config *config);
 
-/** @brief Set the scaler lut on this pipe. A copy of lut is made in the inuit
+/* @brief Set the scaler lut on this pipe. A copy of lut is made in the inuit
  *         address space. So the LUT can be freed by caller.
  * @param[in]  pipe        Pipe handle.
 * @param[in]  lut         Look up table
@@ -623,7 +623,7 @@ ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
 enum ia_css_err
 ia_css_pipe_set_bci_scaler_lut( struct ia_css_pipe *pipe,
                                const void *lut);
-/** @brief Checking of DVS statistics ability
+/* @brief Checking of DVS statistics ability
  * @param[in]  pipe_info       The pipe info.
  * @return             true - has DVS statistics ability
  *                     false - otherwise
@@ -631,7 +631,7 @@ ia_css_pipe_set_bci_scaler_lut( struct ia_css_pipe *pipe,
 bool ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info);
 
 #ifdef ISP2401
-/** @brief Override the frameformat set on the output pins.
+/* @brief Override the frameformat set on the output pins.
  * @param[in]  pipe        Pipe handle.
  * @param[in]  output_pin  Pin index to set the format on
  *                         0 - main output pin
index 9b0eeb08ca0493bdb48e39030a041dc463506a34..6f24656b6cb46c0d675cd68f72743723455a5ad4 100644 (file)
 #ifndef __IA_CSS_PRBS_H
 #define __IA_CSS_PRBS_H
 
-/** @file
+/* @file
  * This file contains support for Pseudo Random Bit Sequence (PRBS) inputs
  */
 
-/** Enumerate the PRBS IDs.
+/* Enumerate the PRBS IDs.
  */
 enum ia_css_prbs_id {
        IA_CSS_PRBS_ID0,
@@ -44,10 +44,10 @@ enum ia_css_prbs_id {
  */
 struct ia_css_prbs_config {
        enum ia_css_prbs_id     id;
-       unsigned int            h_blank;        /**< horizontal blank */
-       unsigned int            v_blank;        /**< vertical blank */
-       int                     seed;   /**< random seed for the 1st 2-pixel-components/clock */
-       int                     seed1;  /**< random seed for the 2nd 2-pixel-components/clock */
+       unsigned int            h_blank;        /** horizontal blank */
+       unsigned int            v_blank;        /** vertical blank */
+       int                     seed;   /** random seed for the 1st 2-pixel-components/clock */
+       int                     seed1;  /** random seed for the 2nd 2-pixel-components/clock */
 };
 
 #endif /* __IA_CSS_PRBS_H */
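/* Illustrative configuration sketch (values assumed, not part of this
 * header): a PRBS test input on generator 0 with distinct seeds for the two
 * pixel-component pairs per clock.
 */
struct ia_css_prbs_config prbs = {
	.id      = IA_CSS_PRBS_ID0,
	.h_blank = 100,   /* assumed horizontal blank */
	.v_blank = 100,   /* assumed vertical blank */
	.seed    = 0x11,  /* assumed seed */
	.seed1   = 0x22,  /* assumed seed */
};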
index 19af4021b24c511091d98a7e591714e2c72501b5..9a167306611c9a95602ad30f2bf82bb2d92f78d6 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_PROPERTIES_H
 #define __IA_CSS_PROPERTIES_H
 
-/** @file
+/* @file
  * This file contains support for retrieving properties of some hardware the CSS system
  */
 
 
 struct ia_css_properties {
        int  gdc_coord_one;
-       bool l1_base_is_index; /**< Indicate whether the L1 page base
+       bool l1_base_is_index; /** Indicate whether the L1 page base
                                    is a page index or a byte address. */
        enum ia_css_vamem_type vamem_type;
 };
 
-/** @brief Get hardware properties
+/* @brief Get hardware properties
  * @param[in,out]      properties The hardware properties
  * @return     None
  *
index cb0f249e98c8b8dde2649e9297d433a49e058fd4..588f53d32b723688a4bbe5697839369eeac4c98d 100644 (file)
 #ifndef __IA_CSS_SHADING_H
 #define __IA_CSS_SHADING_H
 
-/** @file
+/* @file
  * This file contains support for setting the shading table for CSS
  */
 
 #include <ia_css_types.h>
 
-/** @brief Shading table
+/* @brief Shading table
  * @param[in]  width Width of the shading table.
  * @param[in]  height Height of the shading table.
  * @return             Pointer to the shading table
@@ -30,7 +30,7 @@ struct ia_css_shading_table *
 ia_css_shading_table_alloc(unsigned int width,
                           unsigned int height);
 
-/** @brief Free shading table
+/* @brief Free shading table
  * @param[in]  table Pointer to the shading table.
  * @return             None
 */
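
Not part of the patch: a minimal allocate/free sketch for the shading table API above. The free routine's declaration is cut off by the hunk; it is assumed here to be void ia_css_shading_table_free(struct ia_css_shading_table *table).

/* Sketch only: allocate a shading table, fill it, release it. */
static void example_shading_table_roundtrip(unsigned int width,
                                            unsigned int height)
{
        struct ia_css_shading_table *table;

        table = ia_css_shading_table_alloc(width, height);
        if (!table)
                return; /* allocation failed */

        /* ... fill in the per-channel shading gains here ... */

        ia_css_shading_table_free(table);       /* assumed prototype, see note above */
}
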
index 453fe4db0133ad1568b1ce6410902c580de69398..fb6e8c2ca8bfb011023a2c3488f66742c854719c 100644 (file)
@@ -48,7 +48,7 @@ struct ia_css_stream {
        bool                           started;
 };
 
-/** @brief Get a binary in the stream, which binary has the shading correction.
+/* @brief Get a binary in the stream, which binary has the shading correction.
  *
  * @param[in] stream: The stream.
  * @return     The binary which has the shading correction.
@@ -76,7 +76,7 @@ sh_css_invalidate_params(struct ia_css_stream *stream);
 const struct ia_css_fpn_table *
 ia_css_get_fpn_table(struct ia_css_stream *stream);
 
-/** @brief Get a pointer to the shading table.
+/* @brief Get a pointer to the shading table.
  *
  * @param[in] stream: The stream.
  * @return     The pointer to the shading table.
index ae608a9c9051f14759f3ee0e710559091238c9ce..f7e9020a86e14d979015ba46c87554d7e7efe30b 100644 (file)
 #ifndef __IA_CSS_STREAM_FORMAT_H
 #define __IA_CSS_STREAM_FORMAT_H
 
-/** @file
+/* @file
  * This file contains formats usable for ISP streaming input
  */
 
 #include <type_support.h> /* bool */
 
-/** The ISP streaming input interface supports the following formats.
+/* The ISP streaming input interface supports the following formats.
  *  These match the corresponding MIPI formats.
  */
 enum ia_css_stream_format {
-       IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY,    /**< 8 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_YUV420_8,  /**< 8 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_YUV420_10, /**< 10 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_YUV420_16, /**< 16 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_YUV422_8,  /**< UYVY..UYVY, 8 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_YUV422_10, /**< UYVY..UYVY, 10 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_YUV422_16, /**< UYVY..UYVY, 16 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_RGB_444,  /**< BGR..BGR, 4 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_RGB_555,  /**< BGR..BGR, 5 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_RGB_565,  /**< BGR..BGR, 5 bits B and R, 6 bits G */
-       IA_CSS_STREAM_FORMAT_RGB_666,  /**< BGR..BGR, 6 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_RGB_888,  /**< BGR..BGR, 8 bits per subpixel */
-       IA_CSS_STREAM_FORMAT_RAW_6,    /**< RAW data, 6 bits per pixel */
-       IA_CSS_STREAM_FORMAT_RAW_7,    /**< RAW data, 7 bits per pixel */
-       IA_CSS_STREAM_FORMAT_RAW_8,    /**< RAW data, 8 bits per pixel */
-       IA_CSS_STREAM_FORMAT_RAW_10,   /**< RAW data, 10 bits per pixel */
-       IA_CSS_STREAM_FORMAT_RAW_12,   /**< RAW data, 12 bits per pixel */
-       IA_CSS_STREAM_FORMAT_RAW_14,   /**< RAW data, 14 bits per pixel */
-       IA_CSS_STREAM_FORMAT_RAW_16,   /**< RAW data, 16 bits per pixel, which is
+       IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY,    /** 8 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_YUV420_8,  /** 8 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_YUV420_10, /** 10 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_YUV420_16, /** 16 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_YUV422_8,  /** UYVY..UYVY, 8 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_YUV422_10, /** UYVY..UYVY, 10 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_YUV422_16, /** UYVY..UYVY, 16 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_RGB_444,  /** BGR..BGR, 4 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_RGB_555,  /** BGR..BGR, 5 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_RGB_565,  /** BGR..BGR, 5 bits B and R, 6 bits G */
+       IA_CSS_STREAM_FORMAT_RGB_666,  /** BGR..BGR, 6 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_RGB_888,  /** BGR..BGR, 8 bits per subpixel */
+       IA_CSS_STREAM_FORMAT_RAW_6,    /** RAW data, 6 bits per pixel */
+       IA_CSS_STREAM_FORMAT_RAW_7,    /** RAW data, 7 bits per pixel */
+       IA_CSS_STREAM_FORMAT_RAW_8,    /** RAW data, 8 bits per pixel */
+       IA_CSS_STREAM_FORMAT_RAW_10,   /** RAW data, 10 bits per pixel */
+       IA_CSS_STREAM_FORMAT_RAW_12,   /** RAW data, 12 bits per pixel */
+       IA_CSS_STREAM_FORMAT_RAW_14,   /** RAW data, 14 bits per pixel */
+       IA_CSS_STREAM_FORMAT_RAW_16,   /** RAW data, 16 bits per pixel, which is
                                            not specified in CSI-MIPI standard*/
-       IA_CSS_STREAM_FORMAT_BINARY_8, /**< Binary byte stream, which is target at
+       IA_CSS_STREAM_FORMAT_BINARY_8, /** Binary byte stream, which is targeted at
                                            JPEG. */
 
-       /** CSI2-MIPI specific format: Generic short packet data. It is used to
+       /* CSI2-MIPI specific format: Generic short packet data. It is used to
         *  keep the timing information for the opening/closing of shutters,
         *  triggering of flashes and etc.
         */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT1,  /**< Generic Short Packet Code 1 */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT2,  /**< Generic Short Packet Code 2 */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT3,  /**< Generic Short Packet Code 3 */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT4,  /**< Generic Short Packet Code 4 */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT5,  /**< Generic Short Packet Code 5 */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT6,  /**< Generic Short Packet Code 6 */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT7,  /**< Generic Short Packet Code 7 */
-       IA_CSS_STREAM_FORMAT_GENERIC_SHORT8,  /**< Generic Short Packet Code 8 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT1,  /** Generic Short Packet Code 1 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT2,  /** Generic Short Packet Code 2 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT3,  /** Generic Short Packet Code 3 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT4,  /** Generic Short Packet Code 4 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT5,  /** Generic Short Packet Code 5 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT6,  /** Generic Short Packet Code 6 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT7,  /** Generic Short Packet Code 7 */
+       IA_CSS_STREAM_FORMAT_GENERIC_SHORT8,  /** Generic Short Packet Code 8 */
 
-       /** CSI2-MIPI specific format: YUV data.
+       /* CSI2-MIPI specific format: YUV data.
         */
-       IA_CSS_STREAM_FORMAT_YUV420_8_SHIFT,  /**< YUV420 8-bit (Chroma Shifted Pixel Sampling) */
-       IA_CSS_STREAM_FORMAT_YUV420_10_SHIFT, /**< YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+       IA_CSS_STREAM_FORMAT_YUV420_8_SHIFT,  /** YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+       IA_CSS_STREAM_FORMAT_YUV420_10_SHIFT, /** YUV420 10-bit (Chroma Shifted Pixel Sampling) */
 
-       /** CSI2-MIPI specific format: Generic long packet data
+       /* CSI2-MIPI specific format: Generic long packet data
         */
-       IA_CSS_STREAM_FORMAT_EMBEDDED, /**< Embedded 8-bit non Image Data */
+       IA_CSS_STREAM_FORMAT_EMBEDDED, /** Embedded 8-bit non Image Data */
 
-       /** CSI2-MIPI specific format: User defined byte-based data. For example,
+       /* CSI2-MIPI specific format: User defined byte-based data. For example,
         *  the data transmitter (e.g. the SoC sensor) can keep the JPEG data as
         *  the User Defined Data Type 4 and the MPEG data as the
         *  User Defined Data Type 7.
         */
-       IA_CSS_STREAM_FORMAT_USER_DEF1,  /**< User defined 8-bit data type 1 */
-       IA_CSS_STREAM_FORMAT_USER_DEF2,  /**< User defined 8-bit data type 2 */
-       IA_CSS_STREAM_FORMAT_USER_DEF3,  /**< User defined 8-bit data type 3 */
-       IA_CSS_STREAM_FORMAT_USER_DEF4,  /**< User defined 8-bit data type 4 */
-       IA_CSS_STREAM_FORMAT_USER_DEF5,  /**< User defined 8-bit data type 5 */
-       IA_CSS_STREAM_FORMAT_USER_DEF6,  /**< User defined 8-bit data type 6 */
-       IA_CSS_STREAM_FORMAT_USER_DEF7,  /**< User defined 8-bit data type 7 */
-       IA_CSS_STREAM_FORMAT_USER_DEF8,  /**< User defined 8-bit data type 8 */
+       IA_CSS_STREAM_FORMAT_USER_DEF1,  /** User defined 8-bit data type 1 */
+       IA_CSS_STREAM_FORMAT_USER_DEF2,  /** User defined 8-bit data type 2 */
+       IA_CSS_STREAM_FORMAT_USER_DEF3,  /** User defined 8-bit data type 3 */
+       IA_CSS_STREAM_FORMAT_USER_DEF4,  /** User defined 8-bit data type 4 */
+       IA_CSS_STREAM_FORMAT_USER_DEF5,  /** User defined 8-bit data type 5 */
+       IA_CSS_STREAM_FORMAT_USER_DEF6,  /** User defined 8-bit data type 6 */
+       IA_CSS_STREAM_FORMAT_USER_DEF7,  /** User defined 8-bit data type 7 */
+       IA_CSS_STREAM_FORMAT_USER_DEF8,  /** User defined 8-bit data type 8 */
 };
 
 #define        IA_CSS_STREAM_FORMAT_NUM        IA_CSS_STREAM_FORMAT_USER_DEF8
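
Not part of the patch: a small predicate over the enum above, relying only on the RAW_6..RAW_16 entries being declared consecutively as shown.

/* Sketch only: check whether a stream format carries RAW Bayer data. */
static bool example_format_is_raw(enum ia_css_stream_format fmt)
{
        return fmt >= IA_CSS_STREAM_FORMAT_RAW_6 &&
               fmt <= IA_CSS_STREAM_FORMAT_RAW_16;
}
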
index 2c8d9de10a59fdd337c463729c11fc9c6e3e894f..ca3203357ff5f2e68f36dc837781ca8b1a7f0ff6 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_STREAM_PUBLIC_H
 #define __IA_CSS_STREAM_PUBLIC_H
 
-/** @file
+/* @file
  * This file contains support for configuring and controlling streams
  */
 
 #include "ia_css_prbs.h"
 #include "ia_css_input_port.h"
 
-/** Input modes, these enumerate all supported input modes.
+/* Input modes, these enumerate all supported input modes.
  *  Note that not all ISP modes support all input modes.
  */
 enum ia_css_input_mode {
-       IA_CSS_INPUT_MODE_SENSOR, /**< data from sensor */
-       IA_CSS_INPUT_MODE_FIFO,   /**< data from input-fifo */
-       IA_CSS_INPUT_MODE_TPG,    /**< data from test-pattern generator */
-       IA_CSS_INPUT_MODE_PRBS,   /**< data from pseudo-random bit stream */
-       IA_CSS_INPUT_MODE_MEMORY, /**< data from a frame in memory */
-       IA_CSS_INPUT_MODE_BUFFERED_SENSOR /**< data is sent through mipi buffer */
+       IA_CSS_INPUT_MODE_SENSOR, /** data from sensor */
+       IA_CSS_INPUT_MODE_FIFO,   /** data from input-fifo */
+       IA_CSS_INPUT_MODE_TPG,    /** data from test-pattern generator */
+       IA_CSS_INPUT_MODE_PRBS,   /** data from pseudo-random bit stream */
+       IA_CSS_INPUT_MODE_MEMORY, /** data from a frame in memory */
+       IA_CSS_INPUT_MODE_BUFFERED_SENSOR /** data is sent through mipi buffer */
 };
 
-/** Structure of the MIPI buffer configuration
+/* Structure of the MIPI buffer configuration
  */
 struct ia_css_mipi_buffer_config {
-       unsigned int size_mem_words; /**< The frame size in the system memory
+       unsigned int size_mem_words; /** The frame size in the system memory
                                          words (32B) */
-       bool contiguous;             /**< Allocated memory physically
+       bool contiguous;             /** Allocated memory physically
                                          contiguously or not. \deprecated{Will be false always.}*/
-       unsigned int nof_mipi_buffers; /**< The number of MIPI buffers required for this
+       unsigned int nof_mipi_buffers; /** The number of MIPI buffers required for this
                                        stream */
 };
 
@@ -57,44 +57,44 @@ enum {
        IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH
 };
 
-/** This is input data configuration for one MIPI data type. We can have
+/* This is input data configuration for one MIPI data type. We can have
  *  multiple of this in one virtual channel.
  */
 struct ia_css_stream_isys_stream_config {
-       struct ia_css_resolution  input_res; /**< Resolution of input data */
-       enum ia_css_stream_format format; /**< Format of input stream. This data
+       struct ia_css_resolution  input_res; /** Resolution of input data */
+       enum ia_css_stream_format format; /** Format of input stream. This data
                                               format will be mapped to MIPI data
                                               type internally. */
-       int linked_isys_stream_id; /**< default value is -1, other value means
+       int linked_isys_stream_id; /** default value is -1, other value means
                                                        current isys_stream shares the same buffer with
                                                        indicated isys_stream*/
-       bool valid; /**< indicate whether other fields have valid value */
+       bool valid; /** indicate whether other fields have valid value */
 };
 
 struct ia_css_stream_input_config {
-       struct ia_css_resolution  input_res; /**< Resolution of input data */
-       struct ia_css_resolution  effective_res; /**< Resolution of input data.
+       struct ia_css_resolution  input_res; /** Resolution of input data */
+       struct ia_css_resolution  effective_res; /** Resolution of input data.
                                                        Used for CSS 2400/1 System and deprecated for other
                                                        systems (replaced by input_effective_res in
                                                        ia_css_pipe_config) */
-       enum ia_css_stream_format format; /**< Format of input stream. This data
+       enum ia_css_stream_format format; /** Format of input stream. This data
                                               format will be mapped to MIPI data
                                               type internally. */
-       enum ia_css_bayer_order bayer_order; /**< Bayer order for RAW streams */
+       enum ia_css_bayer_order bayer_order; /** Bayer order for RAW streams */
 };
 
 
-/** Input stream description. This describes how input will flow into the
+/* Input stream description. This describes how input will flow into the
  *  CSS. This is used to program the CSS hardware.
  */
 struct ia_css_stream_config {
-       enum ia_css_input_mode    mode; /**< Input mode */
+       enum ia_css_input_mode    mode; /** Input mode */
        union {
-               struct ia_css_input_port  port; /**< Port, for sensor only. */
-               struct ia_css_tpg_config  tpg;  /**< TPG configuration */
-               struct ia_css_prbs_config prbs; /**< PRBS configuration */
-       } source; /**< Source of input data */
-       unsigned int          channel_id; /**< Channel on which input data
+               struct ia_css_input_port  port; /** Port, for sensor only. */
+               struct ia_css_tpg_config  tpg;  /** TPG configuration */
+               struct ia_css_prbs_config prbs; /** PRBS configuration */
+       } source; /** Source of input data */
+       unsigned int          channel_id; /** Channel on which input data
                                                   will arrive. Use this field
                                                   to specify virtual channel id.
                                                   Valid values are: 0, 1, 2, 3 */
@@ -110,29 +110,29 @@ struct ia_css_stream_config {
  * and will be deprecated. In the future, all platforms will use the N*N method
         */
 #endif
-       unsigned int sensor_binning_factor; /**< Binning factor used by sensor
+       unsigned int sensor_binning_factor; /** Binning factor used by sensor
                                                 to produce image data. This is
                                                 used for shading correction. */
-       unsigned int pixels_per_clock; /**< Number of pixels per clock, which can be
+       unsigned int pixels_per_clock; /** Number of pixels per clock, which can be
                                            1, 2 or 4. */
-       bool online; /**< offline will activate RAW copy on SP, use this for
+       bool online; /** offline will activate RAW copy on SP, use this for
                          continuous capture. */
                /* ISYS2401 usage: ISP receives data directly from sensor, no copy. */
-       unsigned init_num_cont_raw_buf; /**< initial number of raw buffers to
+       unsigned init_num_cont_raw_buf; /** initial number of raw buffers to
                                             allocate */
-       unsigned target_num_cont_raw_buf; /**< total number of raw buffers to
+       unsigned target_num_cont_raw_buf; /** total number of raw buffers to
                                             allocate */
-       bool pack_raw_pixels; /**< Pack pixels in the raw buffers */
-       bool continuous; /**< Use SP copy feature to continuously capture frames
+       bool pack_raw_pixels; /** Pack pixels in the raw buffers */
+       bool continuous; /** Use SP copy feature to continuously capture frames
                              to system memory and run pipes in offline mode */
-       bool disable_cont_viewfinder; /**< disable continous viewfinder for ZSL use case */
-       int32_t flash_gpio_pin; /**< pin on which the flash is connected, -1 for no flash */
-       int left_padding; /**< The number of input-formatter left-paddings, -1 for default from binary.*/
-       struct ia_css_mipi_buffer_config mipi_buffer_config; /**< mipi buffer configuration */
-       struct ia_css_metadata_config   metadata_config;     /**< Metadata configuration. */
-       bool ia_css_enable_raw_buffer_locking; /**< Enable Raw Buffer Locking for HALv3 Support */
+       bool disable_cont_viewfinder; /** disable continuous viewfinder for ZSL use case */
+       int32_t flash_gpio_pin; /** pin on which the flash is connected, -1 for no flash */
+       int left_padding; /** The number of input-formatter left-paddings, -1 for default from binary.*/
+       struct ia_css_mipi_buffer_config mipi_buffer_config; /** mipi buffer configuration */
+       struct ia_css_metadata_config   metadata_config;     /** Metadata configuration. */
+       bool ia_css_enable_raw_buffer_locking; /** Enable Raw Buffer Locking for HALv3 Support */
        bool lock_all;
-       /**< Lock all RAW buffers (true) or lock only buffers processed by
+       /** Lock all RAW buffers (true) or lock only buffers processed by
             video or preview pipe (false).
             This setting needs to be enabled to allow raw buffer locking
             without continuous viewfinder. */
@@ -140,15 +140,15 @@ struct ia_css_stream_config {
 
 struct ia_css_stream;
 
-/** Stream info, this struct describes properties of a stream after it has been
+/* Stream info, this struct describes properties of a stream after it has been
  *  created.
  */
 struct ia_css_stream_info {
        struct ia_css_metadata_info metadata_info;
-       /**< Info about the metadata layout, this contains the stride. */
+       /** Info about the metadata layout, this contains the stride. */
 };
 
-/** @brief Load default stream configuration
+/* @brief Load default stream configuration
  * @param[in,out]      stream_config The stream configuration.
  * @return     None
  *
@@ -165,7 +165,7 @@ void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config);
  * create the internal structures and fill in the configuration data and pipes
  */
 
- /** @brief Creates a stream
+ /* @brief Creates a stream
  * @param[in]  stream_config The stream configuration.
  * @param[in]  num_pipes The number of pipes to incorporate in the stream.
  * @param[in]  pipes The pipes.
@@ -180,7 +180,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
                                         struct ia_css_pipe *pipes[],
                                         struct ia_css_stream **stream);
 
-/** @brief Destroys a stream
+/* @brief Destroys a stream
  * @param[in]  stream The stream.
  * @return     IA_CSS_SUCCESS or the error code.
  *
@@ -189,7 +189,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
 enum ia_css_err
 ia_css_stream_destroy(struct ia_css_stream *stream);
 
-/** @brief Provides information about a stream
+/* @brief Provides information about a stream
  * @param[in]  stream The stream.
  * @param[out] stream_info The information about the stream.
  * @return     IA_CSS_SUCCESS or the error code.
@@ -200,7 +200,7 @@ enum ia_css_err
 ia_css_stream_get_info(const struct ia_css_stream *stream,
                       struct ia_css_stream_info *stream_info);
 
-/** @brief load (rebuild) a stream that was unloaded.
+/* @brief load (rebuild) a stream that was unloaded.
  * @param[in]  stream The stream
  * @return             IA_CSS_SUCCESS or the error code
  *
@@ -210,7 +210,7 @@ ia_css_stream_get_info(const struct ia_css_stream *stream,
 enum ia_css_err
 ia_css_stream_load(struct ia_css_stream *stream);
 
-/** @brief Starts the stream.
+/* @brief Starts the stream.
  * @param[in]  stream The stream.
  * @return IA_CSS_SUCCESS or the error code.
  *
@@ -223,7 +223,7 @@ ia_css_stream_load(struct ia_css_stream *stream);
 enum ia_css_err
 ia_css_stream_start(struct ia_css_stream *stream);
 
-/** @brief Stop the stream.
+/* @brief Stop the stream.
  * @param[in]  stream The stream.
  * @return     IA_CSS_SUCCESS or the error code.
  *
@@ -233,7 +233,7 @@ ia_css_stream_start(struct ia_css_stream *stream);
 enum ia_css_err
 ia_css_stream_stop(struct ia_css_stream *stream);
 
-/** @brief Check if a stream has stopped
+/* @brief Check if a stream has stopped
  * @param[in]  stream The stream.
  * @return     boolean flag
  *
@@ -242,7 +242,7 @@ ia_css_stream_stop(struct ia_css_stream *stream);
 bool
 ia_css_stream_has_stopped(struct ia_css_stream *stream);
 
-/** @brief     destroy a stream according to the stream seed previosly saved in the seed array.
+/* @brief      Destroy a stream according to the stream seed previously saved in the seed array.
  * @param[in]  stream The stream.
  * @return     IA_CSS_SUCCESS (no other errors are generated now)
  *
@@ -251,7 +251,7 @@ ia_css_stream_has_stopped(struct ia_css_stream *stream);
 enum ia_css_err
 ia_css_stream_unload(struct ia_css_stream *stream);
 
-/** @brief Returns stream format
+/* @brief Returns stream format
  * @param[in]  stream The stream.
  * @return     format of the stream
  *
@@ -260,7 +260,7 @@ ia_css_stream_unload(struct ia_css_stream *stream);
 enum ia_css_stream_format
 ia_css_stream_get_format(const struct ia_css_stream *stream);
 
-/** @brief Check if the stream is configured for 2 pixels per clock
+/* @brief Check if the stream is configured for 2 pixels per clock
  * @param[in]  stream The stream.
  * @return     boolean flag
  *
@@ -270,7 +270,7 @@ ia_css_stream_get_format(const struct ia_css_stream *stream);
 bool
 ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream);
 
-/** @brief Sets the output frame stride (at the last pipe)
+/* @brief Sets the output frame stride (at the last pipe)
  * @param[in]  stream The stream
  * @param[in]  output_padded_width - the output buffer stride.
  * @return     ia_css_err
@@ -280,7 +280,7 @@ ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream);
 enum ia_css_err
 ia_css_stream_set_output_padded_width(struct ia_css_stream *stream, unsigned int output_padded_width);
 
-/** @brief Return max number of continuous RAW frames.
+/* @brief Return max number of continuous RAW frames.
  * @param[in]  stream The stream.
  * @param[out] buffer_depth The maximum number of continuous RAW frames.
  * @return     IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS
@@ -291,7 +291,7 @@ ia_css_stream_set_output_padded_width(struct ia_css_stream *stream, unsigned int
 enum ia_css_err
 ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_depth);
 
-/** @brief Set nr of continuous RAW frames to use.
+/* @brief Set nr of continuous RAW frames to use.
  *
  * @param[in]  stream The stream.
  * @param[in]  buffer_depth    Number of frames to set.
@@ -302,7 +302,7 @@ ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_dep
 enum ia_css_err
 ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth);
 
-/** @brief Get number of continuous RAW frames to use.
+/* @brief Get number of continuous RAW frames to use.
  * @param[in]  stream The stream.
  * @param[out] buffer_depth The number of frames to use
  * @return     IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS
@@ -315,7 +315,7 @@ ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth);
 
 /* ===== CAPTURE ===== */
 
-/** @brief Configure the continuous capture
+/* @brief Configure the continuous capture
  *
  * @param[in]  stream          The stream.
  * @param[in]  num_captures    The number of RAW frames to be processed to
@@ -347,7 +347,7 @@ ia_css_stream_capture(struct ia_css_stream *stream,
                        unsigned int skip,
                        int offset);
 
-/** @brief Specify which raw frame to tag based on exp_id found in frame info
+/* @brief Specify which raw frame to tag based on exp_id found in frame info
  *
  * @param[in]  stream The stream.
  * @param[in]  exp_id  The exposure id of the raw frame to tag.
@@ -363,7 +363,7 @@ ia_css_stream_capture_frame(struct ia_css_stream *stream,
 
 /* ===== VIDEO ===== */
 
-/** @brief Send streaming data into the css input FIFO
+/* @brief Send streaming data into the css input FIFO
  *
  * @param[in]  stream  The stream.
  * @param[in]  data    Pointer to the pixels to be send.
@@ -395,7 +395,7 @@ ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
                               unsigned int width,
                               unsigned int height);
 
-/** @brief Start an input frame on the CSS input FIFO.
+/* @brief Start an input frame on the CSS input FIFO.
  *
  * @param[in]  stream The stream.
  * @return     None
@@ -411,7 +411,7 @@ ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
 void
 ia_css_stream_start_input_frame(const struct ia_css_stream *stream);
 
-/** @brief Send a line of input data into the CSS input FIFO.
+/* @brief Send a line of input data into the CSS input FIFO.
  *
  * @param[in]  stream          The stream.
  * @param[in]  data    Array of the first line of image data.
@@ -435,7 +435,7 @@ ia_css_stream_send_input_line(const struct ia_css_stream *stream,
                              const unsigned short *data2,
                              unsigned int width2);
 
-/** @brief Send a line of input embedded data into the CSS input FIFO.
+/* @brief Send a line of input embedded data into the CSS input FIFO.
  *
  * @param[in]  stream     Pointer of the stream.
  * @param[in]  format     Format of the embedded data.
@@ -457,7 +457,7 @@ ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
                              const unsigned short *data,
                              unsigned int width);
 
-/** @brief End an input frame on the CSS input FIFO.
+/* @brief End an input frame on the CSS input FIFO.
  *
  * @param[in]  stream  The stream.
  * @return     None
@@ -467,7 +467,7 @@ ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
 void
 ia_css_stream_end_input_frame(const struct ia_css_stream *stream);
 
-/** @brief send a request flash command to SP
+/* @brief send a request flash command to SP
  *
  * @param[in]  stream The stream.
  * @return     None
@@ -481,7 +481,7 @@ ia_css_stream_end_input_frame(const struct ia_css_stream *stream);
 void
 ia_css_stream_request_flash(struct ia_css_stream *stream);
 
-/** @brief Configure a stream with filter coefficients.
+/* @brief Configure a stream with filter coefficients.
  *        @deprecated {Replaced by
  *                                ia_css_pipe_set_isp_config_on_pipe()}
  *
@@ -503,7 +503,7 @@ ia_css_stream_set_isp_config_on_pipe(struct ia_css_stream *stream,
                             const struct ia_css_isp_config *config,
                             struct ia_css_pipe *pipe);
 
-/** @brief Configure a stream with filter coefficients.
+/* @brief Configure a stream with filter coefficients.
  *        @deprecated {Replaced by
  *                                ia_css_pipe_set_isp_config()}
  * @param[in]  stream  The stream.
@@ -523,7 +523,7 @@ ia_css_stream_set_isp_config(
        struct ia_css_stream *stream,
        const struct ia_css_isp_config *config);
 
-/** @brief Get selected configuration settings
+/* @brief Get selected configuration settings
  * @param[in]  stream  The stream.
  * @param[out] config  Configuration settings.
  * @return             None
@@ -532,7 +532,7 @@ void
 ia_css_stream_get_isp_config(const struct ia_css_stream *stream,
                             struct ia_css_isp_config *config);
 
-/** @brief allocate continuous raw frames for continuous capture
+/* @brief allocate continuous raw frames for continuous capture
  * @param[in]  stream The stream.
  * @return IA_CSS_SUCCESS or error code.
  *
@@ -544,7 +544,7 @@ ia_css_stream_get_isp_config(const struct ia_css_stream *stream,
 enum ia_css_err
 ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream);
 
-/** @brief allocate continuous raw frames for continuous capture
+/* @brief allocate continuous raw frames for continuous capture
  * @param[in]  stream The stream.
  * @return     IA_CSS_SUCCESS or error code.
  *
@@ -555,7 +555,7 @@ ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream);
 enum ia_css_err
 ia_css_update_continuous_frames(struct ia_css_stream *stream);
 
-/** @brief ia_css_unlock_raw_frame . unlock a raw frame (HALv3 Support)
+/* @brief ia_css_unlock_raw_frame . unlock a raw frame (HALv3 Support)
  * @param[in]  stream The stream.
  * @param[in]   exp_id exposure id that uniquely identifies the locked Raw Frame Buffer
  * @return      ia_css_err IA_CSS_SUCCESS or error code
@@ -567,7 +567,7 @@ ia_css_update_continuous_frames(struct ia_css_stream *stream);
 enum ia_css_err
 ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id);
 
-/** @brief ia_css_en_dz_capt_pipe . Enable/Disable digital zoom for capture pipe
+/* @brief ia_css_en_dz_capt_pipe . Enable/Disable digital zoom for capture pipe
  * @param[in]   stream The stream.
  * @param[in]   enable - true, disable - false
  * @return      None
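
Not part of the patch: a minimal sketch of the create/start/stop/destroy sequence for the stream API declared above, for one already-created pipe. Error unwinding is kept minimal, and the second parameter of ia_css_stream_create() is assumed to be the num_pipes count documented in its @param list.

/* Sketch only: typical stream lifecycle for a single pipe. */
static enum ia_css_err example_stream_lifecycle(struct ia_css_pipe *pipe)
{
        struct ia_css_stream_config config;
        struct ia_css_stream *stream = NULL;
        struct ia_css_pipe *pipes[1] = { pipe };
        enum ia_css_err err;

        ia_css_stream_config_defaults(&config);
        config.mode = IA_CSS_INPUT_MODE_BUFFERED_SENSOR; /* data via MIPI buffer */

        err = ia_css_stream_create(&config, 1, pipes, &stream);
        if (err != IA_CSS_SUCCESS)
                return err;

        err = ia_css_stream_start(stream);
        if (err != IA_CSS_SUCCESS)
                goto out_destroy;

        /* ... queue buffers and process frames here ... */

        err = ia_css_stream_stop(stream);
        while (err == IA_CSS_SUCCESS && !ia_css_stream_has_stopped(stream))
                ;       /* a real caller would sleep between polls instead of spinning */

out_destroy:
        ia_css_stream_destroy(stream);
        return err;
}
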
index 575bb28b4becc09810b2009386fec9dbdfbe3166..b256d7c88716c471f54375192fcc8d3961c711c2 100644 (file)
@@ -31,47 +31,47 @@ more details.
 #ifndef __IA_CSS_TIMER_H
 #define __IA_CSS_TIMER_H
 
-/** @file
+/* @file
  * Timer interface definitions
  */
 #include <type_support.h>              /* for uint32_t */
 #include "ia_css_err.h"
 
-/** @brief timer reading definition */
+/* @brief timer reading definition */
 typedef uint32_t clock_value_t;
 
-/** @brief 32 bit clock tick,(timestamp based on timer-value of CSS-internal timer)*/
+/* @brief 32 bit clock tick,(timestamp based on timer-value of CSS-internal timer)*/
 struct ia_css_clock_tick {
-       clock_value_t ticks; /**< measured time in ticks.*/
+       clock_value_t ticks; /** measured time in ticks.*/
 };
 
-/** @brief TIMER event codes */
+/* @brief TIMER event codes */
 enum ia_css_tm_event {
        IA_CSS_TM_EVENT_AFTER_INIT,
-       /**< Timer Event after Initialization */
+       /** Timer Event after Initialization */
        IA_CSS_TM_EVENT_MAIN_END,
-       /**< Timer Event after end of Main */
+       /** Timer Event after end of Main */
        IA_CSS_TM_EVENT_THREAD_START,
-       /**< Timer Event after thread start */
+       /** Timer Event after thread start */
        IA_CSS_TM_EVENT_FRAME_PROC_START,
-       /**< Timer Event after Frame Process Start */
+       /** Timer Event after Frame Process Start */
        IA_CSS_TM_EVENT_FRAME_PROC_END
-       /**< Timer Event after Frame Process End */
+       /** Timer Event after Frame Process End */
 };
 
-/** @brief code measurement common struct */
+/* @brief code measurement common struct */
 struct ia_css_time_meas {
-       clock_value_t   start_timer_value;      /**< measured time in ticks */
-       clock_value_t   end_timer_value;        /**< measured time in ticks */
+       clock_value_t   start_timer_value;      /** measured time in ticks */
+       clock_value_t   end_timer_value;        /** measured time in ticks */
 };
 
 /**@brief SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT checks to ensure correct alignment for struct ia_css_clock_tick. */
 #define SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT sizeof(clock_value_t)
-/** @brief checks to ensure correct alignment for ia_css_time_meas. */
+/* @brief checks to ensure correct alignment for ia_css_time_meas. */
 #define SIZE_OF_IA_CSS_TIME_MEAS_STRUCT (sizeof(clock_value_t) \
                                        + sizeof(clock_value_t))
 
-/** @brief API to fetch timer count directly
+/* @brief API to fetch timer count directly
 *
 * @param curr_ts [out] measured count value
 * @return IA_CSS_SUCCESS if success
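
Not part of the patch: a one-line helper derived from struct ia_css_time_meas above; counter wrap-around is deliberately ignored here.

/* Sketch only: elapsed ticks between the two recorded timer values. */
static clock_value_t example_elapsed_ticks(const struct ia_css_time_meas *m)
{
        return m->end_timer_value - m->start_timer_value;
}
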
index 9238a3317a464cdddc4faa629e9aa9e6c72d079a..81498bd7485b7405e1825963cc8713d2c654fb60 100644 (file)
 #ifndef __IA_CSS_TPG_H
 #define __IA_CSS_TPG_H
 
-/** @file
+/* @file
  * This file contains support for the test pattern generator (TPG)
  */
 
-/** Enumerate the TPG IDs.
+/* Enumerate the TPG IDs.
  */
 enum ia_css_tpg_id {
        IA_CSS_TPG_ID0,
@@ -35,7 +35,7 @@ enum ia_css_tpg_id {
  */
 #define N_CSS_TPG_IDS (IA_CSS_TPG_ID2+1)
 
-/** Enumerate the TPG modes.
+/* Enumerate the TPG modes.
  */
 enum ia_css_tpg_mode {
        IA_CSS_TPG_MODE_RAMP,
@@ -44,7 +44,7 @@ enum ia_css_tpg_mode {
        IA_CSS_TPG_MODE_MONO
 };
 
-/** @brief Configure the test pattern generator.
+/* @brief Configure the test pattern generator.
  *
  * Configure the Test Pattern Generator, the way these values are used to
  * generate the pattern can be seen in the HRT extension for the test pattern
index 5fec3d5c89d88186828f9692c4818dbf104b366d..725b90072cfe620081180dbb61e93aa7cee4fcfd 100644 (file)
@@ -16,7 +16,7 @@
 #ifndef _IA_CSS_TYPES_H
 #define _IA_CSS_TYPES_H
 
-/** @file
+/* @file
  * This file contains types used for the ia_css parameters.
  * These types are in a separate file because they are expected
  * to be used in software layers that do not access the CSS API
@@ -58,7 +58,7 @@
 #include "isp/kernels/output/output_1.0/ia_css_output_types.h"
 
 #define IA_CSS_DVS_STAT_GRID_INFO_SUPPORTED
-/**< Should be removed after Driver adaptation will be done */
+/** Should be removed after Driver adaptation will be done */
 
 #define IA_CSS_VERSION_MAJOR    2
 #define IA_CSS_VERSION_MINOR    0
@@ -69,8 +69,8 @@
 /* Min and max exposure IDs. These macros are here to allow
  * the drivers to get this information. Changing these macros
  * constitutes a CSS API change. */
-#define IA_CSS_ISYS_MIN_EXPOSURE_ID 1   /**< Minimum exposure ID */
-#define IA_CSS_ISYS_MAX_EXPOSURE_ID 250 /**< Maximum exposure ID */
+#define IA_CSS_ISYS_MIN_EXPOSURE_ID 1   /** Minimum exposure ID */
+#define IA_CSS_ISYS_MAX_EXPOSURE_ID 250 /** Maximum exposure ID */
 
 /* opaque types */
 struct ia_css_isp_parameters;
@@ -79,72 +79,72 @@ struct ia_css_memory_offsets;
 struct ia_css_config_memory_offsets;
 struct ia_css_state_memory_offsets;
 
-/** Virtual address within the CSS address space. */
+/* Virtual address within the CSS address space. */
 typedef uint32_t ia_css_ptr;
 
-/** Generic resolution structure.
+/* Generic resolution structure.
  */
 struct ia_css_resolution {
-       uint32_t width;  /**< Width */
-       uint32_t height; /**< Height */
+       uint32_t width;  /** Width */
+       uint32_t height; /** Height */
 };
 
-/** Generic coordinate structure.
+/* Generic coordinate structure.
  */
 struct ia_css_coordinate {
-       int32_t x;      /**< Value of a coordinate on the horizontal axis */
-       int32_t y;      /**< Value of a coordinate on the vertical axis */
+       int32_t x;      /** Value of a coordinate on the horizontal axis */
+       int32_t y;      /** Value of a coordinate on the vertical axis */
 };
 
-/** Vector with signed values. This is used to indicate motion for
+/* Vector with signed values. This is used to indicate motion for
  * Digital Image Stabilization.
  */
 struct ia_css_vector {
-       int32_t x; /**< horizontal motion (in pixels) */
-       int32_t y; /**< vertical motion (in pixels) */
+       int32_t x; /** horizontal motion (in pixels) */
+       int32_t y; /** vertical motion (in pixels) */
 };
 
 /* Short hands */
 #define IA_CSS_ISP_DMEM IA_CSS_ISP_DMEM0
 #define IA_CSS_ISP_VMEM IA_CSS_ISP_VMEM0
 
-/** CSS data descriptor */
+/* CSS data descriptor */
 struct ia_css_data {
-       ia_css_ptr address; /**< CSS virtual address */
-       uint32_t   size;    /**< Disabled if 0 */
+       ia_css_ptr address; /** CSS virtual address */
+       uint32_t   size;    /** Disabled if 0 */
 };
 
-/** Host data descriptor */
+/* Host data descriptor */
 struct ia_css_host_data {
-       char      *address; /**< Host address */
-       uint32_t   size;    /**< Disabled if 0 */
+       char      *address; /** Host address */
+       uint32_t   size;    /** Disabled if 0 */
 };
 
-/** ISP data descriptor */
+/* ISP data descriptor */
 struct ia_css_isp_data {
-       uint32_t   address; /**< ISP address */
-       uint32_t   size;    /**< Disabled if 0 */
+       uint32_t   address; /** ISP address */
+       uint32_t   size;    /** Disabled if 0 */
 };
 
-/** Shading Correction types. */
+/* Shading Correction types. */
 enum ia_css_shading_correction_type {
 #ifndef ISP2401
-       IA_CSS_SHADING_CORRECTION_TYPE_1 /**< Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400) */
+       IA_CSS_SHADING_CORRECTION_TYPE_1 /** Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400) */
 #else
-       IA_CSS_SHADING_CORRECTION_NONE,  /**< Shading Correction is not processed in the pipe. */
-       IA_CSS_SHADING_CORRECTION_TYPE_1 /**< Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400/2401) */
+       IA_CSS_SHADING_CORRECTION_NONE,  /** Shading Correction is not processed in the pipe. */
+       IA_CSS_SHADING_CORRECTION_TYPE_1 /** Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400/2401) */
 #endif
 
-       /**< More shading correction types can be added in the future. */
+       /** More shading correction types can be added in the future. */
 };
 
-/** Shading Correction information. */
+/* Shading Correction information. */
 struct ia_css_shading_info {
-       enum ia_css_shading_correction_type type; /**< Shading Correction type. */
+       enum ia_css_shading_correction_type type; /** Shading Correction type. */
 
-       union { /** Shading Correction information of each Shading Correction types. */
+       union { /* Shading Correction information of each Shading Correction types. */
 
-               /** Shading Correction information of IA_CSS_SHADING_CORRECTION_TYPE_1.
+               /* Shading Correction information of IA_CSS_SHADING_CORRECTION_TYPE_1.
                 *
                 *  This structure contains the information necessary to generate
                 *  the shading table required in the isp.
@@ -288,20 +288,20 @@ struct ia_css_shading_info {
                 */
                struct {
 #ifndef ISP2401
-                       uint32_t enable;        /**< Shading correction enabled.
+                       uint32_t enable;        /** Shading correction enabled.
                                                     0:disabled, 1:enabled */
-                       uint32_t num_hor_grids; /**< Number of data points per line
+                       uint32_t num_hor_grids; /** Number of data points per line
                                                     per color on shading table. */
-                       uint32_t num_ver_grids; /**< Number of lines of data points
+                       uint32_t num_ver_grids; /** Number of lines of data points
                                                     per color on shading table. */
-                       uint32_t bqs_per_grid_cell; /**< Grid cell size
+                       uint32_t bqs_per_grid_cell; /** Grid cell size
                                                in BQ(Bayer Quad) unit.
                                                (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
                                                Valid values are 8,16,32,64. */
 #else
-                       uint32_t num_hor_grids; /**< Number of data points per line per color on shading table. */
-                       uint32_t num_ver_grids; /**< Number of lines of data points per color on shading table. */
-                       uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ unit.
+                       uint32_t num_hor_grids; /** Number of data points per line per color on shading table. */
+                       uint32_t num_ver_grids; /** Number of lines of data points per color on shading table. */
+                       uint32_t bqs_per_grid_cell; /** Grid cell size in BQ unit.
                                                         NOTE: bqs = size in BQ(Bayer Quad) unit.
                                                               1BQ means {Gr,R,B,Gb} (2x2 pixels).
                                                               Horizontal 1 bqs corresponds to horizontal 2 pixels.
@@ -310,13 +310,13 @@ struct ia_css_shading_info {
                        uint32_t bayer_scale_hor_ratio_in;
                        uint32_t bayer_scale_hor_ratio_out;
 #ifndef ISP2401
-                       /**< Horizontal ratio of bayer scaling
+                       /** Horizontal ratio of bayer scaling
                        between input width and output width, for the scaling
                        which should be done before shading correction.
                          output_width = input_width * bayer_scale_hor_ratio_out
                                                / bayer_scale_hor_ratio_in */
 #else
-                               /**< Horizontal ratio of bayer scaling between input width and output width,
+                               /** Horizontal ratio of bayer scaling between input width and output width,
                                     for the scaling which should be done before shading correction.
                                        output_width = input_width * bayer_scale_hor_ratio_out
                                                                        / bayer_scale_hor_ratio_in + 0.5 */
@@ -324,30 +324,30 @@ struct ia_css_shading_info {
                        uint32_t bayer_scale_ver_ratio_in;
                        uint32_t bayer_scale_ver_ratio_out;
 #ifndef ISP2401
-                       /**< Vertical ratio of bayer scaling
+                       /** Vertical ratio of bayer scaling
                        between input height and output height, for the scaling
                        which should be done before shading correction.
                          output_height = input_height * bayer_scale_ver_ratio_out
                                                / bayer_scale_ver_ratio_in */
                        uint32_t sc_bayer_origin_x_bqs_on_shading_table;
-                       /**< X coordinate (in bqs) of bayer origin on shading table.
+                       /** X coordinate (in bqs) of bayer origin on shading table.
                        This indicates the left-most pixel of bayer
                        (not include margin) inputted to the shading correction.
                        This corresponds to the left-most pixel of bayer
                        inputted to isp from sensor. */
                        uint32_t sc_bayer_origin_y_bqs_on_shading_table;
-                       /**< Y coordinate (in bqs) of bayer origin on shading table.
+                       /** Y coordinate (in bqs) of bayer origin on shading table.
                        This indicates the top pixel of bayer
                        (not include margin) inputted to the shading correction.
                        This corresponds to the top pixel of bayer
                        inputted to isp from sensor. */
 #else
-                               /**< Vertical ratio of bayer scaling between input height and output height,
+                               /** Vertical ratio of bayer scaling between input height and output height,
                                     for the scaling which should be done before shading correction.
                                        output_height = input_height * bayer_scale_ver_ratio_out
                                                                        / bayer_scale_ver_ratio_in + 0.5 */
                        struct ia_css_resolution isp_input_sensor_data_res_bqs;
-                               /**< Sensor data size (in bqs) inputted to ISP. This is the size BEFORE bayer scaling.
+                               /** Sensor data size (in bqs) inputted to ISP. This is the size BEFORE bayer scaling.
                                     NOTE: This is NOT the size of the physical sensor size.
                                           CSS requests the driver that ISP inputs sensor data
                                           by the size of isp_input_sensor_data_res_bqs.
@@ -357,22 +357,22 @@ struct ia_css_shading_info {
                                           ISP assumes the area of isp_input_sensor_data_res_bqs
                                           is centered on the physical sensor. */
                        struct ia_css_resolution sensor_data_res_bqs;
-                               /**< Sensor data size (in bqs) at shading correction.
+                               /** Sensor data size (in bqs) at shading correction.
                                     This is the size AFTER bayer scaling. */
                        struct ia_css_coordinate sensor_data_origin_bqs_on_sctbl;
-                               /**< Origin of sensor data area positioned on shading table at shading correction.
+                               /** Origin of sensor data area positioned on shading table at shading correction.
                                     The coordinate x,y should be positive values. */
 #endif
                } type_1;
 
-               /**< More structures can be added here when more shading correction types will be added
+               /** More structures can be added here when more shading correction types will be added
                     in the future. */
        } info;
 };
 
 #ifndef ISP2401
 
-/** Default Shading Correction information of Shading Correction Type 1. */
+/* Default Shading Correction information of Shading Correction Type 1. */
 #define DEFAULT_SHADING_INFO_TYPE_1 \
 { \
        IA_CSS_SHADING_CORRECTION_TYPE_1,       /* type */ \
@@ -394,7 +394,7 @@ struct ia_css_shading_info {
 
 #else
 
-/** Default Shading Correction information of Shading Correction Type 1. */
+/* Default Shading Correction information of Shading Correction Type 1. */
 #define DEFAULT_SHADING_INFO_TYPE_1 \
 { \
        IA_CSS_SHADING_CORRECTION_TYPE_1,       /* type */ \
@@ -416,27 +416,27 @@ struct ia_css_shading_info {
 
 #endif
 
-/** Default Shading Correction information. */
+/* Default Shading Correction information. */
 #define DEFAULT_SHADING_INFO   DEFAULT_SHADING_INFO_TYPE_1
 
-/** structure that describes the 3A and DIS grids */
+/* structure that describes the 3A and DIS grids */
 struct ia_css_grid_info {
-       /** \name ISP input size
+       /* \name ISP input size
          * that is visible for user
          * @{
          */
        uint32_t isp_in_width;
        uint32_t isp_in_height;
-       /** @}*/
+       /* @}*/
 
-       struct ia_css_3a_grid_info  s3a_grid; /**< 3A grid info */
+       struct ia_css_3a_grid_info  s3a_grid; /** 3A grid info */
        union ia_css_dvs_grid_u dvs_grid;
-               /**< All types of DVS statistics grid info union */
+               /** All types of DVS statistics grid info union */
 
        enum ia_css_vamem_type vamem_type;
 };
 
-/** defaults for ia_css_grid_info structs */
+/* defaults for ia_css_grid_info structs */
 #define DEFAULT_GRID_INFO \
 { \
        0,                              /* isp_in_width */ \
@@ -446,25 +446,25 @@ struct ia_css_grid_info {
        IA_CSS_VAMEM_TYPE_1             /* vamem_type */ \
 }
 
-/** Morphing table, used for geometric distortion and chromatic abberration
+/* Morphing table, used for geometric distortion and chromatic aberration
  *  correction (GDCAC, also called GDC).
  *  This table describes the imperfections introduced by the lens, the
  *  advanced ISP can correct for these imperfections using this table.
  */
 struct ia_css_morph_table {
-       uint32_t enable; /**< To disable GDC, set this field to false. The
+       uint32_t enable; /** To disable GDC, set this field to false. The
                          coordinates fields can be set to NULL in this case. */
-       uint32_t height; /**< Table height */
-       uint32_t width;  /**< Table width */
+       uint32_t height; /** Table height */
+       uint32_t width;  /** Table width */
        uint16_t *coordinates_x[IA_CSS_MORPH_TABLE_NUM_PLANES];
-       /**< X coordinates that describe the sensor imperfection */
+       /** X coordinates that describe the sensor imperfection */
        uint16_t *coordinates_y[IA_CSS_MORPH_TABLE_NUM_PLANES];
-       /**< Y coordinates that describe the sensor imperfection */
+       /** Y coordinates that describe the sensor imperfection */
 };
 
 struct ia_css_dvs_6axis_config {
        unsigned int exp_id;
-       /**< Exposure ID, see ia_css_event_public.h for more detail */
+       /** Exposure ID, see ia_css_event_public.h for more detail */
        uint32_t width_y;
        uint32_t height_y;
        uint32_t width_uv;
@@ -479,16 +479,16 @@ struct ia_css_dvs_6axis_config {
  * This specifies the coordinates (x,y)
  */
 struct ia_css_point {
-       int32_t x; /**< x coordinate */
-       int32_t y; /**< y coordinate */
+       int32_t x; /** x coordinate */
+       int32_t y; /** y coordinate */
 };
 
 /**
  * This specifies the region
  */
 struct ia_css_region {
-       struct ia_css_point origin; /**< Starting point coordinates for the region */
-       struct ia_css_resolution resolution; /**< Region resolution */
+       struct ia_css_point origin; /** Starting point coordinates for the region */
+       struct ia_css_resolution resolution; /** Region resolution */
 };
 
 /**
@@ -509,30 +509,30 @@ struct ia_css_region {
  * y + height <= effective input height
  */
 struct ia_css_dz_config {
-       uint32_t dx; /**< Horizontal zoom factor */
-       uint32_t dy; /**< Vertical zoom factor */
-       struct ia_css_region zoom_region; /**< region for zoom */
+       uint32_t dx; /** Horizontal zoom factor */
+       uint32_t dy; /** Vertical zoom factor */
+       struct ia_css_region zoom_region; /** region for zoom */
 };
 
-/** The still capture mode, this can be RAW (simply copy sensor input to DDR),
+/* The still capture mode, this can be RAW (simply copy sensor input to DDR),
  *  Primary ISP, the Advanced ISP (GDC) or the low-light ISP (ANR).
  */
 enum ia_css_capture_mode {
-       IA_CSS_CAPTURE_MODE_RAW,      /**< no processing, copy data only */
-       IA_CSS_CAPTURE_MODE_BAYER,    /**< bayer processing, up to demosaic */
-       IA_CSS_CAPTURE_MODE_PRIMARY,  /**< primary ISP */
-       IA_CSS_CAPTURE_MODE_ADVANCED, /**< advanced ISP (GDC) */
-       IA_CSS_CAPTURE_MODE_LOW_LIGHT /**< low light ISP (ANR) */
+       IA_CSS_CAPTURE_MODE_RAW,      /** no processing, copy data only */
+       IA_CSS_CAPTURE_MODE_BAYER,    /** bayer processing, up to demosaic */
+       IA_CSS_CAPTURE_MODE_PRIMARY,  /** primary ISP */
+       IA_CSS_CAPTURE_MODE_ADVANCED, /** advanced ISP (GDC) */
+       IA_CSS_CAPTURE_MODE_LOW_LIGHT /** low light ISP (ANR) */
 };
 
 struct ia_css_capture_config {
-       enum ia_css_capture_mode mode; /**< Still capture mode */
-       uint32_t enable_xnr;           /**< Enable/disable XNR */
+       enum ia_css_capture_mode mode; /** Still capture mode */
+       uint32_t enable_xnr;           /** Enable/disable XNR */
        uint32_t enable_raw_output;
-       bool enable_capture_pp_bli;    /**< Enable capture_pp_bli mode */
+       bool enable_capture_pp_bli;    /** Enable capture_pp_bli mode */
 };
 
-/** default settings for ia_css_capture_config structs */
+/* default settings for ia_css_capture_config structs */
 #define DEFAULT_CAPTURE_CONFIG \
 { \
        IA_CSS_CAPTURE_MODE_PRIMARY,    /* mode (capture) */ \
@@ -542,7 +542,7 @@ struct ia_css_capture_config {
 }
 
 
-/** ISP filter configuration. This is a collection of configurations
+/* ISP filter configuration. This is a collection of configurations
  *  for each of the ISP filters (modules).
  *
  *  NOTE! The contents of all pointers is copied when get or set with the
@@ -557,98 +557,98 @@ struct ia_css_capture_config {
  *    ["ISP block", 2only] : ISP block is used only for ISP2.
  */
 struct ia_css_isp_config {
-       struct ia_css_wb_config   *wb_config;   /**< White Balance
+       struct ia_css_wb_config   *wb_config;   /** White Balance
                                                        [WB1, 1&2] */
-       struct ia_css_cc_config   *cc_config;   /**< Color Correction
+       struct ia_css_cc_config   *cc_config;   /** Color Correction
                                                        [CSC1, 1only] */
-       struct ia_css_tnr_config  *tnr_config;  /**< Temporal Noise Reduction
+       struct ia_css_tnr_config  *tnr_config;  /** Temporal Noise Reduction
                                                        [TNR1, 1&2] */
-       struct ia_css_ecd_config  *ecd_config;  /**< Eigen Color Demosaicing
+       struct ia_css_ecd_config  *ecd_config;  /** Eigen Color Demosaicing
                                                        [DE2, 2only] */
-       struct ia_css_ynr_config  *ynr_config;  /**< Y(Luma) Noise Reduction
+       struct ia_css_ynr_config  *ynr_config;  /** Y(Luma) Noise Reduction
                                                        [YNR2&YEE2, 2only] */
-       struct ia_css_fc_config   *fc_config;   /**< Fringe Control
+       struct ia_css_fc_config   *fc_config;   /** Fringe Control
                                                        [FC2, 2only] */
-       struct ia_css_formats_config   *formats_config; /**< Formats Control for main output
+       struct ia_css_formats_config   *formats_config; /** Formats Control for main output
                                                        [FORMATS, 1&2] */
-       struct ia_css_cnr_config  *cnr_config;  /**< Chroma Noise Reduction
+       struct ia_css_cnr_config  *cnr_config;  /** Chroma Noise Reduction
                                                        [CNR2, 2only] */
-       struct ia_css_macc_config *macc_config; /**< MACC
+       struct ia_css_macc_config *macc_config; /** MACC
                                                        [MACC2, 2only] */
-       struct ia_css_ctc_config  *ctc_config;  /**< Chroma Tone Control
+       struct ia_css_ctc_config  *ctc_config;  /** Chroma Tone Control
                                                        [CTC2, 2only] */
-       struct ia_css_aa_config   *aa_config;   /**< YUV Anti-Aliasing
+       struct ia_css_aa_config   *aa_config;   /** YUV Anti-Aliasing
                                                        [AA2, 2only]
                                                        (not used currently) */
-       struct ia_css_aa_config   *baa_config;  /**< Bayer Anti-Aliasing
+       struct ia_css_aa_config   *baa_config;  /** Bayer Anti-Aliasing
                                                        [BAA2, 1&2] */
-       struct ia_css_ce_config   *ce_config;   /**< Chroma Enhancement
+       struct ia_css_ce_config   *ce_config;   /** Chroma Enhancement
                                                        [CE1, 1only] */
        struct ia_css_dvs_6axis_config *dvs_6axis_config;
-       struct ia_css_ob_config   *ob_config;  /**< Objective Black
+       struct ia_css_ob_config   *ob_config;  /** Objective Black
                                                        [OB1, 1&2] */
-       struct ia_css_dp_config   *dp_config;  /**< Defect Pixel Correction
+       struct ia_css_dp_config   *dp_config;  /** Defect Pixel Correction
                                                        [DPC1/DPC2, 1&2] */
-       struct ia_css_nr_config   *nr_config;  /**< Noise Reduction
+       struct ia_css_nr_config   *nr_config;  /** Noise Reduction
                                                        [BNR1&YNR1&CNR1, 1&2]*/
-       struct ia_css_ee_config   *ee_config;  /**< Edge Enhancement
+       struct ia_css_ee_config   *ee_config;  /** Edge Enhancement
                                                        [YEE1, 1&2] */
-       struct ia_css_de_config   *de_config;  /**< Demosaic
+       struct ia_css_de_config   *de_config;  /** Demosaic
                                                        [DE1, 1only] */
-       struct ia_css_gc_config   *gc_config;  /**< Gamma Correction (for YUV)
+       struct ia_css_gc_config   *gc_config;  /** Gamma Correction (for YUV)
                                                        [GC1, 1only] */
-       struct ia_css_anr_config  *anr_config; /**< Advanced Noise Reduction */
-       struct ia_css_3a_config   *s3a_config; /**< 3A Statistics config */
-       struct ia_css_xnr_config  *xnr_config; /**< eXtra Noise Reduction */
-       struct ia_css_dz_config   *dz_config;  /**< Digital Zoom */
-       struct ia_css_cc_config *yuv2rgb_cc_config; /**< Color Correction
+       struct ia_css_anr_config  *anr_config; /** Advanced Noise Reduction */
+       struct ia_css_3a_config   *s3a_config; /** 3A Statistics config */
+       struct ia_css_xnr_config  *xnr_config; /** eXtra Noise Reduction */
+       struct ia_css_dz_config   *dz_config;  /** Digital Zoom */
+       struct ia_css_cc_config *yuv2rgb_cc_config; /** Color Correction
                                                        [CCM2, 2only] */
-       struct ia_css_cc_config *rgb2yuv_cc_config; /**< Color Correction
+       struct ia_css_cc_config *rgb2yuv_cc_config; /** Color Correction
                                                        [CSC2, 2only] */
-       struct ia_css_macc_table  *macc_table;  /**< MACC
+       struct ia_css_macc_table  *macc_table;  /** MACC
                                                        [MACC1/MACC2, 1&2]*/
-       struct ia_css_gamma_table *gamma_table; /**< Gamma Correction (for YUV)
+       struct ia_css_gamma_table *gamma_table; /** Gamma Correction (for YUV)
                                                        [GC1, 1only] */
-       struct ia_css_ctc_table   *ctc_table;   /**< Chroma Tone Control
+       struct ia_css_ctc_table   *ctc_table;   /** Chroma Tone Control
                                                        [CTC1, 1only] */
 
-       /** \deprecated */
-       struct ia_css_xnr_table   *xnr_table;   /**< eXtra Noise Reduction
+       /* \deprecated */
+       struct ia_css_xnr_table   *xnr_table;   /** eXtra Noise Reduction
                                                        [XNR1, 1&2] */
-       struct ia_css_rgb_gamma_table *r_gamma_table;/**< sRGB Gamma Correction
+       struct ia_css_rgb_gamma_table *r_gamma_table;/** sRGB Gamma Correction
                                                        [GC2, 2only] */
-       struct ia_css_rgb_gamma_table *g_gamma_table;/**< sRGB Gamma Correction
+       struct ia_css_rgb_gamma_table *g_gamma_table;/** sRGB Gamma Correction
                                                        [GC2, 2only] */
-       struct ia_css_rgb_gamma_table *b_gamma_table;/**< sRGB Gamma Correction
+       struct ia_css_rgb_gamma_table *b_gamma_table;/** sRGB Gamma Correction
                                                        [GC2, 2only] */
-       struct ia_css_vector      *motion_vector; /**< For 2-axis DVS */
+       struct ia_css_vector      *motion_vector; /** For 2-axis DVS */
        struct ia_css_shading_table *shading_table;
        struct ia_css_morph_table   *morph_table;
-       struct ia_css_dvs_coefficients *dvs_coefs; /**< DVS 1.0 coefficients */
-       struct ia_css_dvs2_coefficients *dvs2_coefs; /**< DVS 2.0 coefficients */
+       struct ia_css_dvs_coefficients *dvs_coefs; /** DVS 1.0 coefficients */
+       struct ia_css_dvs2_coefficients *dvs2_coefs; /** DVS 2.0 coefficients */
        struct ia_css_capture_config   *capture_config;
        struct ia_css_anr_thres   *anr_thres;
-       /** @deprecated{Old shading settings, see bugzilla bz675 for details} */
+       /* @deprecated{Old shading settings, see bugzilla bz675 for details} */
        struct ia_css_shading_settings *shading_settings;
-       struct ia_css_xnr3_config *xnr3_config; /**< eXtreme Noise Reduction v3 */
-       /** comment from Lasse: Be aware how this feature will affect coordinate
+       struct ia_css_xnr3_config *xnr3_config; /** eXtreme Noise Reduction v3 */
+       /* comment from Lasse: Be aware how this feature will affect coordinate
         *  normalization in different parts of the system. (e.g. face detection,
         *  touch focus, 3A statistics and windows of interest, shading correction,
         *  DVS, GDC) from IQ tool level and application level down to ISP FW level.
         *  The risk for regression is not in the individual blocks, but in how they
         *  integrate together. */
-       struct ia_css_output_config   *output_config;   /**< Main Output Mirroring, flipping */
+       struct ia_css_output_config   *output_config;   /** Main Output Mirroring, flipping */
 
 #ifdef ISP2401
-       struct ia_css_tnr3_kernel_config         *tnr3_config;           /**< TNR3 config */
+       struct ia_css_tnr3_kernel_config         *tnr3_config;           /** TNR3 config */
 #endif
-       struct ia_css_scaler_config              *scaler_config;         /**< Skylake: scaler config (optional) */
-       struct ia_css_formats_config             *formats_config_display;/**< Formats control for viewfinder/display output (optional)
+       struct ia_css_scaler_config              *scaler_config;         /** Skylake: scaler config (optional) */
+       struct ia_css_formats_config             *formats_config_display;/** Formats control for viewfinder/display output (optional)
                                                                                [OSYS, n/a] */
-       struct ia_css_output_config              *output_config_display; /**< Viewfinder/display output mirroring, flipping (optional) */
+       struct ia_css_output_config              *output_config_display; /** Viewfinder/display output mirroring, flipping (optional) */
 
-       struct ia_css_frame                      *output_frame;          /**< Output frame the config is to be applied to (optional) */
-       uint32_t                        isp_config_id;  /**< Unique ID to track which config was actually applied to a particular frame */
+       struct ia_css_frame                      *output_frame;          /** Output frame the config is to be applied to (optional) */
+       uint32_t                        isp_config_id;  /** Unique ID to track which config was actually applied to a particular frame */
 };
 
 #endif /* _IA_CSS_TYPES_H */
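
The structure above is a bundle of per-kernel override pointers. A minimal sketch of how a caller might use it, assuming the usual CSS convention that a NULL pointer means "leave that kernel's parameters unchanged"; the helper name below is hypothetical and not part of the CSS API:

    #include <string.h>
    #include "ia_css_types.h"

    /* Hypothetical helper: build a config that overrides only the digital
     * zoom kernel; every other pointer stays NULL, i.e. untouched. */
    static void build_zoom_only_config(struct ia_css_isp_config *cfg,
                                       struct ia_css_dz_config *dz)
    {
            memset(cfg, 0, sizeof(*cfg));   /* no kernel overrides by default */
            cfg->dz_config = dz;            /* override digital zoom only */
    }
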
index 48c59896e8479fa2185cf50e1b5e067bba35e5e8..1e88901e0b8299fa7e9d9017132037c97465c43d 100644 (file)
 #ifndef __IA_CSS_VERSION_H
 #define __IA_CSS_VERSION_H
 
-/** @file
+/* @file
  * This file contains functions to retrieve CSS-API version information
  */
 
 #include <ia_css_err.h>
 
-/** a common size for the version arrays */
+/* a common size for the version arrays */
 #define MAX_VERSION_SIZE       500
 
-/** @brief Retrieves the current CSS version
+/* @brief Retrieves the current CSS version
  * @param[out] version         A pointer to a buffer where to put the generated
  *                             version string. NULL is ignored.
  * @param[in]  max_size        Size of the version buffer. If version string
index 834eedbbeeff651e42baf9309abcb9e930c784b2..0b95bf9b9aaf8ebba856bf562724ac32a756673c 100644 (file)
 #ifndef __IA_CSS_AA2_TYPES_H
 #define __IA_CSS_AA2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Anti-Aliasing parameters.
 */
 
 
-/** Anti-Aliasing configuration.
+/* Anti-Aliasing configuration.
  *
  *  This structure is used both for YUV AA and Bayer AA.
  *
@@ -39,7 +39,7 @@
  *     ISP2: BAA2 is used.
  */
 struct ia_css_aa_config {
-       uint16_t strength;      /**< Strength of the filter.
+       uint16_t strength;      /** Strength of the filter.
                                        u0.13, [0,8191],
                                        default/ineffective 0 */
 };
index e205574098f2cf6b3c6260bdebcbf5f5a3accbd8..dc317a85736932cd805f1a0b216d608c84632e30 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_ANR_TYPES_H
 #define __IA_CSS_ANR_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Advanced Noise Reduction kernel v1
 */
 
 #define ANR_BPP                 10
 #define ANR_ELEMENT_BITS        ((CEIL_DIV(ANR_BPP, 8))*8)
 
-/** Advanced Noise Reduction configuration.
+/* Advanced Noise Reduction configuration.
  *  This is also known as Low-Light.
  */
 struct ia_css_anr_config {
-       int32_t threshold; /**< Threshold */
+       int32_t threshold; /** Threshold */
        int32_t thresholds[4*4*4];
        int32_t factors[3];
 };
index 3832ada433ec4b5aa2404baaceeece9476d1c8ba..9b611315392ccc37cf06d6987b36c61b6942c726 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_ANR2_TYPES_H
 #define __IA_CSS_ANR2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Advanced Noise Reduction kernel v2
 */
 
@@ -23,7 +23,7 @@
 
 #define ANR_PARAM_SIZE          13
 
-/** Advanced Noise Reduction (ANR) thresholds */
+/* Advanced Noise Reduction (ANR) thresholds */
 struct ia_css_anr_thres {
        int16_t data[13*64];
 };
index 4a289853367ab1acc0c5e0466035fac61aa0c697..312141793fd23806fd83c6f4e4ddf3e0576de5b6 100644 (file)
@@ -18,7 +18,7 @@
 #include "vmem.h"
 #include "ia_css_anr2_types.h"
 
-/** Advanced Noise Reduction (ANR) thresholds */
+/* Advanced Noise Reduction (ANR) thresholds */
 
 struct ia_css_isp_anr2_params {
        VMEM_ARRAY(data, ANR_PARAM_SIZE*ISP_VEC_NELEMS);
index 75ca7606b95cfdd81e1b01e826fea1d0b1b206df..a0d355454aa39c38fc1cc049644322c52c43c035 100644 (file)
@@ -27,7 +27,7 @@
 #define BAYER_QUAD_HEIGHT 2
 #define NOF_BAYER_VECTORS 4
 
-/** bayer load/store */
+/* bayer load/store */
 struct sh_css_isp_bayer_ls_isp_config {
        uint32_t base_address[NUM_BAYER_LS];
        uint32_t width[NUM_BAYER_LS];
index 9ae27a9e0baa546f81a4e88c96622da665355756..ec1688e7352d17fb2b5379c40f6c5c22c04922cc 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_BH_TYPES_H
 #define __IA_CSS_BH_TYPES_H
 
-/** Number of elements in the BH table.
+/* Number of elements in the BH table.
   * Should be consistent with hmem.h
   */
 #define IA_CSS_HMEM_BH_TABLE_SIZE      ISP_HIST_DEPTH
@@ -27,7 +27,7 @@
 #define BH_COLOR_Y     (3)
 #define BH_COLOR_NUM   (4)
 
-/** BH table */
+/* BH table */
 struct ia_css_bh_table {
        uint32_t hmem[ISP_HIST_COMPONENTS][IA_CSS_HMEM_BH_UNIT_SIZE];
 };
index 219fb835cb26bd12020ce7ad5a344fb9ab1d8cdc..87e0f19c856b2e7e0ee05f5750ca6e63f61eed23 100644 (file)
 #ifndef __IA_CSS_BNLM_TYPES_H
 #define __IA_CSS_BNLM_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Bayer Non-Linear Mean parameters.
 */
 
 #include "type_support.h" /* int32_t */
 
-/** Bayer Non-Linear Mean configuration
+/* Bayer Non-Linear Mean configuration
  *
  * \brief BNLM public parameters.
  * \details Struct with all parameters for the BNLM kernel that can be set
  * ISP2.6.1: BNLM is used.
  */
 struct ia_css_bnlm_config {
-       bool            rad_enable;     /**< Enable a radial dependency in a weight calculation */
-       int32_t         rad_x_origin;   /**< Initial x coordinate for a radius calculation */
-       int32_t         rad_y_origin;   /**< Initial x coordinate for a radius calculation */
+       bool            rad_enable;     /** Enable a radial dependency in a weight calculation */
+       int32_t         rad_x_origin;   /** Initial x coordinate for a radius calculation */
+       int32_t         rad_y_origin;   /** Initial y coordinate for a radius calculation */
        /* Threshold on the average of the weights; if the average is below this, the pixel is not denoised */
        int32_t         avg_min_th;
        /* Minimum weight for denoising; if the maximum weight is below this, the pixel is not denoised */
        int32_t         max_min_th;
 
        /**@{*/
-       /** Coefficient for approximation, in the form of (1 + x / N)^N,
+       /* Coefficient for approximation, in the form of (1 + x / N)^N,
         * that fits the first-order exp() to default exp_lut in BNLM sheet
         * */
        int32_t         exp_coeff_a;
@@ -48,55 +48,55 @@ struct ia_css_bnlm_config {
        uint32_t        exp_exponent;
        /**@}*/
 
-       int32_t nl_th[3];       /**< Detail thresholds */
+       int32_t nl_th[3];       /** Detail thresholds */
 
-       /** Index for n-th maximum candidate weight for each detail group */
+       /* Index for n-th maximum candidate weight for each detail group */
        int32_t match_quality_max_idx[4];
 
        /**@{*/
-       /** A lookup table for 1/sqrt(1+mu) approximation */
+       /* A lookup table for 1/sqrt(1+mu) approximation */
        int32_t mu_root_lut_thr[15];
        int32_t mu_root_lut_val[16];
        /**@}*/
        /**@{*/
-       /** A lookup table for SAD normalization */
+       /* A lookup table for SAD normalization */
        int32_t sad_norm_lut_thr[15];
        int32_t sad_norm_lut_val[16];
        /**@}*/
        /**@{*/
-       /** A lookup table that models a weight's dependency on textures */
+       /* A lookup table that models a weight's dependency on textures */
        int32_t sig_detail_lut_thr[15];
        int32_t sig_detail_lut_val[16];
        /**@}*/
        /**@{*/
-       /** A lookup table that models a weight's dependency on a pixel's radial distance */
+       /* A lookup table that models a weight's dependency on a pixel's radial distance */
        int32_t sig_rad_lut_thr[15];
        int32_t sig_rad_lut_val[16];
        /**@}*/
        /**@{*/
-       /** A lookup table to control denoise power depending on a pixel's radial distance */
+       /* A lookup table to control denoise power depending on a pixel's radial distance */
        int32_t rad_pow_lut_thr[15];
        int32_t rad_pow_lut_val[16];
        /**@}*/
        /**@{*/
-       /** Non linear transfer functions to calculate the blending coefficient depending on detail group */
-       /** detail group 0 */
+       /* Non linear transfer functions to calculate the blending coefficient depending on detail group */
+       /* detail group 0 */
        /**@{*/
        int32_t nl_0_lut_thr[15];
        int32_t nl_0_lut_val[16];
        /**@}*/
        /**@{*/
-       /** detail group 1 */
+       /* detail group 1 */
        int32_t nl_1_lut_thr[15];
        int32_t nl_1_lut_val[16];
        /**@}*/
        /**@{*/
-       /** detail group 2 */
+       /* detail group 2 */
        int32_t nl_2_lut_thr[15];
        int32_t nl_2_lut_val[16];
        /**@}*/
        /**@{*/
-       /** detail group 3 */
+       /* detail group 3 */
        int32_t nl_3_lut_thr[15];
        int32_t nl_3_lut_val[16];
        /**@}*/
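
The exp_coeff_*/exp_exponent fields above refer to a first-order approximation of exp() of the form (1 + x/N)^N. A plain floating-point illustration of why that form converges to exp(x) as N grows; this is only a sketch of the math, not the kernel's fixed-point arithmetic:

    #include <math.h>
    #include <stdio.h>

    /* (1 + x/N)^N approaches exp(x) for large N; the BNLM kernel uses a
     * fixed-point variant of this approximation. */
    static double approx_exp(double x, int n)
    {
            return pow(1.0 + x / n, n);
    }

    int main(void)
    {
            printf("exp(1.0)      = %f\n", exp(1.0));
            printf("(1 + 1/8)^8   = %f\n", approx_exp(1.0, 8));
            printf("(1 + 1/64)^64 = %f\n", approx_exp(1.0, 64));
            return 0;
    }
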
index be80f705d8a173525c96e9f0be1da56256b7bde2..551bd0ed3bacc087fdebda3bf553b89251a2fd48 100644 (file)
 #ifndef __IA_CSS_BNR2_2_TYPES_H
 #define __IA_CSS_BNR2_2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Bayer Noise Reduction parameters.
 */
 
 #include "type_support.h" /* int32_t */
 
-/** Bayer Noise Reduction 2.2 configuration
+/* Bayer Noise Reduction 2.2 configuration
  *
  * \brief BNR2_2 public parameters.
  * \details Struct with all parameters for the BNR2.2 kernel that can be set
  */
 struct ia_css_bnr2_2_config {
        /**@{*/
-       /** Directional variance gain for R/G/B components in dark region */
+       /* Directional variance gain for R/G/B components in dark region */
        int32_t d_var_gain_r;
        int32_t d_var_gain_g;
        int32_t d_var_gain_b;
        /**@}*/
        /**@{*/
-       /** Slope of Directional variance gain between dark and bright region */
+       /* Slope of Directional variance gain between dark and bright region */
        int32_t d_var_gain_slope_r;
        int32_t d_var_gain_slope_g;
        int32_t d_var_gain_slope_b;
        /**@}*/
        /**@{*/
-       /** Non-Directional variance gain for R/G/B components in dark region */
+       /* Non-Directional variance gain for R/G/B components in dark region */
        int32_t n_var_gain_r;
        int32_t n_var_gain_g;
        int32_t n_var_gain_b;
        /**@}*/
        /**@{*/
-       /** Slope of Non-Directional variance gain between dark and bright region */
+       /* Slope of Non-Directional variance gain between dark and bright region */
        int32_t n_var_gain_slope_r;
        int32_t n_var_gain_slope_g;
        int32_t n_var_gain_slope_b;
        /**@}*/
 
-       int32_t dir_thres;              /**< Threshold for directional filtering */
-       int32_t dir_thres_w;            /**< Threshold width for directional filtering */
-       int32_t var_offset_coef;        /**< Variance offset coefficient */
-       int32_t dir_gain;               /**< Gain for directional coefficient */
-       int32_t detail_gain;            /**< Gain for low contrast texture control */
-       int32_t detail_gain_divisor;    /**< Gain divisor for low contrast texture control */
-       int32_t detail_level_offset;    /**< Bias value for low contrast texture control */
-       int32_t d_var_th_min;           /**< Minimum clipping value for directional variance*/
-       int32_t d_var_th_max;           /**< Maximum clipping value for diretional variance*/
-       int32_t n_var_th_min;           /**< Minimum clipping value for non-directional variance*/
-       int32_t n_var_th_max;           /**< Maximum clipping value for non-directional variance*/
+       int32_t dir_thres;              /** Threshold for directional filtering */
+       int32_t dir_thres_w;            /** Threshold width for directional filtering */
+       int32_t var_offset_coef;        /** Variance offset coefficient */
+       int32_t dir_gain;               /** Gain for directional coefficient */
+       int32_t detail_gain;            /** Gain for low contrast texture control */
+       int32_t detail_gain_divisor;    /** Gain divisor for low contrast texture control */
+       int32_t detail_level_offset;    /** Bias value for low contrast texture control */
+       int32_t d_var_th_min;           /** Minimum clipping value for directional variance*/
+       int32_t d_var_th_max;           /** Maximum clipping value for directional variance */
+       int32_t n_var_th_min;           /** Minimum clipping value for non-directional variance*/
+       int32_t n_var_th_max;           /** Maximum clipping value for non-directional variance*/
 };
 
 #endif /* __IA_CSS_BNR2_2_TYPES_H */
index 6df6c2be9a70db6b4fd38704798c21e7f1e09130..3ebc069d8ada9f3f1d0e64db4d4c5ee845dcce95 100644 (file)
 #ifndef __IA_CSS_CNR2_TYPES_H
 #define __IA_CSS_CNR2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Chroma Noise Reduction (CNR) parameters
 */
 
-/** Chroma Noise Reduction configuration.
+/* Chroma Noise Reduction configuration.
  *
  *  Small sensitivity of edge means strong smoothness and NR performance.
  *  If you see blurred color on vertical edges,
  *  ISP2: CNR2 is used for Still.
  */
 struct ia_css_cnr_config {
-       uint16_t coring_u;      /**< Coring level of U.
+       uint16_t coring_u;      /** Coring level of U.
                                u0.13, [0,8191], default/ineffective 0 */
-       uint16_t coring_v;      /**< Coring level of V.
+       uint16_t coring_v;      /** Coring level of V.
                                u0.13, [0,8191], default/ineffective 0 */
-       uint16_t sense_gain_vy; /**< Sensitivity of horizontal edge of Y.
+       uint16_t sense_gain_vy; /** Sensitivity of horizontal edge of Y.
                                u13.0, [0,8191], default 100, ineffective 8191 */
-       uint16_t sense_gain_vu; /**< Sensitivity of horizontal edge of U.
+       uint16_t sense_gain_vu; /** Sensitivity of horizontal edge of U.
                                u13.0, [0,8191], default 100, ineffective 8191 */
-       uint16_t sense_gain_vv; /**< Sensitivity of horizontal edge of V.
+       uint16_t sense_gain_vv; /** Sensitivity of horizontal edge of V.
                                u13.0, [0,8191], default 100, ineffective 8191 */
-       uint16_t sense_gain_hy; /**< Sensitivity of vertical edge of Y.
+       uint16_t sense_gain_hy; /** Sensitivity of vertical edge of Y.
                                u13.0, [0,8191], default 50, ineffective 8191 */
-       uint16_t sense_gain_hu; /**< Sensitivity of vertical edge of U.
+       uint16_t sense_gain_hu; /** Sensitivity of vertical edge of U.
                                u13.0, [0,8191], default 50, ineffective 8191 */
-       uint16_t sense_gain_hv; /**< Sensitivity of vertical edge of V.
+       uint16_t sense_gain_hv; /** Sensitivity of vertical edge of V.
                                u13.0, [0,8191], default 50, ineffective 8191 */
 };
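
The field comments above quote per-field defaults. A configuration built from those quoted defaults, purely as an illustration of how the structure is filled in:

    /* CNR configuration using the default values quoted in the comments
     * above (coring 0, sense_gain_v* 100, sense_gain_h* 50). */
    static const struct ia_css_cnr_config cnr_defaults = {
            .coring_u      = 0,
            .coring_v      = 0,
            .sense_gain_vy = 100,
            .sense_gain_vu = 100,
            .sense_gain_vv = 100,
            .sense_gain_hy = 50,
            .sense_gain_hu = 50,
            .sense_gain_hv = 50,
    };
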
 
index 3f11442500f036cc4fd157bc54b517d4c3746415..47a38fd65950be7a1367f79ee10d5d31c03f04f8 100644 (file)
  *
  */
 struct ia_css_conversion_config {
-       uint32_t en;     /**< en parameter */
-       uint32_t dummy0; /**< dummy0 dummy parameter 0 */
-       uint32_t dummy1; /**< dummy1 dummy parameter 1 */
-       uint32_t dummy2; /**< dummy2 dummy parameter 2 */
+       uint32_t en;     /** en parameter */
+       uint32_t dummy0; /** dummy0 dummy parameter 0 */
+       uint32_t dummy1; /** dummy1 dummy parameter 1 */
+       uint32_t dummy2; /** dummy2 dummy parameter 2 */
 };
 
 #endif /* __IA_CSS_CONVERSION_TYPES_H */
index 8bfc8dad37a87ef20d577266c0e9a05338a569cd..0f1812cdd92ab47a2416562da69af13da420f028 100644 (file)
@@ -19,7 +19,7 @@
 #include "dma.h"
 #include "sh_css_internal.h" /* sh_css_crop_pos */
 
-/** Crop frame */
+/* Crop frame */
 struct sh_css_isp_crop_isp_config {
        uint32_t width_a_over_b;
        struct dma_port_config port_b;
index 8091ad4d46024b02c690895c0b6dcc55eb90a501..b5d454225f891a79b65c4b2f01cb50c95eb56392 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_CROP_TYPES_H
 #define __IA_CSS_CROP_TYPES_H
 
-/** Crop frame
+/* Crop frame
  *
  *  ISP block: crop frame
  */
index 54ced072467f9903ce70313dc3dc29c1daa4fbe5..10404380c63731e432496fc62696acd508c20554 100644 (file)
 #ifndef __IA_CSS_CSC_TYPES_H
 #define __IA_CSS_CSC_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Color Space Conversion parameters.
 */
 
-/** Color Correction configuration.
+/* Color Correction configuration.
  *
  *  This structure is used for 3 cases.
  *  ("YCgCo" is the output format of Demosaic.)
@@ -68,9 +68,9 @@
  *     4096    -3430   -666
  */
 struct ia_css_cc_config {
-       uint32_t fraction_bits;/**< Fractional bits of matrix.
+       uint32_t fraction_bits;/** Fractional bits of matrix.
                                        u8.0, [0,13] */
-       int32_t matrix[3 * 3]; /**< Conversion matrix.
+       int32_t matrix[3 * 3]; /** Conversion matrix.
                                        s[13-fraction_bits].[fraction_bits],
                                        [-8192,8191] */
 };
index c66e823618f608a866f7432193a96a513637e7f7..ad7040c9d7cbf8944642167598422cda9990cc53 100644 (file)
 
 /*VMEM Luma params*/
 struct ia_css_isp_ctc2_vmem_params {
-       /**< Gains by Y(Luma) at Y = 0.0,Y_X1, Y_X2, Y_X3, Y_X4*/
+       /** Gains by Y(Luma) at Y = 0.0, Y_X1, Y_X2, Y_X3, Y_X4 */
        VMEM_ARRAY(y_x, ISP_VEC_NELEMS);
-       /** kneepoints by Y(Luma) 0.0, y_x1, y_x2, y _x3, y_x4*/
+       /* kneepoints by Y(Luma) 0.0, y_x1, y_x2, y_x3, y_x4 */
        VMEM_ARRAY(y_y, ISP_VEC_NELEMS);
-       /** Slopes of lines interconnecting
+       /* Slopes of lines interconnecting
         *  0.0 -> y_x1 -> y_x2 -> y_x3 -> y_x4 -> 1.0 */
        VMEM_ARRAY(e_y_slope, ISP_VEC_NELEMS);
 };
@@ -34,15 +34,15 @@ struct ia_css_isp_ctc2_vmem_params {
 /*DMEM Chroma params*/
 struct ia_css_isp_ctc2_dmem_params {
 
-       /** Gains by UV(Chroma) under kneepoints uv_x0 and uv_x1*/
+       /* Gains by UV(Chroma) under kneepoints uv_x0 and uv_x1*/
        int32_t uv_y0;
        int32_t uv_y1;
 
-       /** Kneepoints by UV(Chroma)- uv_x0 and uv_x1*/
+       /* Kneepoints by UV(Chroma)- uv_x0 and uv_x1*/
        int32_t uv_x0;
        int32_t uv_x1;
 
-       /** Slope of line interconnecting uv_x0 -> uv_x1*/
+       /* Slope of line interconnecting uv_x0 -> uv_x1*/
        int32_t uv_dydx;
 
 };
index 7b75f01e2ad2ab85b7691ea8c6c78b75d4a1641a..1222cf33e8511757084e733490014869be11182f 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_CTC2_TYPES_H
 #define __IA_CSS_CTC2_TYPES_H
 
-/** Chroma Tone Control configuration.
+/* Chroma Tone Control configuration.
 *
 *  ISP block: CTC2 (CTC by polygonal approximation)
 * (ISP1: CTC1 (CTC by look-up table) is used.)
@@ -24,7 +24,7 @@
 */
 struct ia_css_ctc2_config {
 
-       /**< Gains by Y(Luma) at Y =0.0,Y_X1, Y_X2, Y_X3, Y_X4 and Y_X5
+       /** Gains by Y(Luma) at Y = 0.0, Y_X1, Y_X2, Y_X3, Y_X4 and Y_X5
        *   --default/ineffective value: 4096(0.5f)
        */
        int32_t y_y0;
@@ -33,19 +33,19 @@ struct ia_css_ctc2_config {
        int32_t y_y3;
        int32_t y_y4;
        int32_t y_y5;
-       /** 1st-4th  kneepoints by Y(Luma) --default/ineffective value:n/a
+       /* 1st-4th  kneepoints by Y(Luma) --default/ineffective value:n/a
        *   requirement: 0.0 < y_x1 < y_x2 < y_x3 < y_x4 < 1.0
        */
        int32_t y_x1;
        int32_t y_x2;
        int32_t y_x3;
        int32_t y_x4;
-       /** Gains by UV(Chroma) under threholds uv_x0 and uv_x1
+       /* Gains by UV(Chroma) under thresholds uv_x0 and uv_x1
        *   --default/ineffective value: 4096(0.5f)
        */
        int32_t uv_y0;
        int32_t uv_y1;
-       /** Minimum and Maximum Thresholds by UV(Chroma)- uv_x0 and uv_x1
+       /* Minimum and Maximum Thresholds by UV(Chroma)- uv_x0 and uv_x1
        *   --default/ineffective value: n/a
        */
        int32_t uv_x0;
index 1da215bb966d15d166b2ccbaf5421b8c81178e00..4ac47ce10566f0770151374c6b4a9f3106de3428 100644 (file)
 #ifndef __IA_CSS_CTC_TYPES_H
 #define __IA_CSS_CTC_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Chroma Tone Control parameters.
 */
 
-/** Fractional bits for CTC gain (used only for ISP1).
+/* Fractional bits for CTC gain (used only for ISP1).
  *
  *  IA_CSS_CTC_COEF_SHIFT(=13) includes not only the fractional bits
  *  of gain(=8), but also the bits(=5) to convert chroma
  */
 #define IA_CSS_CTC_COEF_SHIFT          13
 
-/** Number of elements in the CTC table. */
+/* Number of elements in the CTC table. */
 #define IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2      10
-/** Number of elements in the CTC table. */
+/* Number of elements in the CTC table. */
 #define IA_CSS_VAMEM_1_CTC_TABLE_SIZE           (1U<<IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2)
 
-/** Number of elements in the CTC table. */
+/* Number of elements in the CTC table. */
 #define IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2      8
-/** Number of elements in the CTC table. */
+/* Number of elements in the CTC table. */
 #define IA_CSS_VAMEM_2_CTC_TABLE_SIZE           ((1U<<IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2) + 1)
 
 enum ia_css_vamem_type {
@@ -47,44 +47,44 @@ enum ia_css_vamem_type {
        IA_CSS_VAMEM_TYPE_2
 };
 
-/** Chroma Tone Control configuration.
+/* Chroma Tone Control configuration.
  *
  *  ISP block: CTC2 (CTC by polygonal line approximation)
  * (ISP1: CTC1 (CTC by look-up table) is used.)
  *  ISP2: CTC2 is used.
  */
 struct ia_css_ctc_config {
-       uint16_t y0;    /**< 1st kneepoint gain.
+       uint16_t y0;    /** 1st kneepoint gain.
                                u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
                                default/ineffective 4096(0.5) */
-       uint16_t y1;    /**< 2nd kneepoint gain.
+       uint16_t y1;    /** 2nd kneepoint gain.
                                u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
                                default/ineffective 4096(0.5) */
-       uint16_t y2;    /**< 3rd kneepoint gain.
+       uint16_t y2;    /** 3rd kneepoint gain.
                                u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
                                default/ineffective 4096(0.5) */
-       uint16_t y3;    /**< 4th kneepoint gain.
+       uint16_t y3;    /** 4th kneepoint gain.
                                u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
                                default/ineffective 4096(0.5) */
-       uint16_t y4;    /**< 5th kneepoint gain.
+       uint16_t y4;    /** 5th kneepoint gain.
                                u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
                                default/ineffective 4096(0.5) */
-       uint16_t y5;    /**< 6th kneepoint gain.
+       uint16_t y5;    /** 6th kneepoint gain.
                                u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
                                default/ineffective 4096(0.5) */
-       uint16_t ce_gain_exp;   /**< Common exponent of y-axis gain.
+       uint16_t ce_gain_exp;   /** Common exponent of y-axis gain.
                                u8.0, [0,13],
                                default/ineffective 1 */
-       uint16_t x1;    /**< 2nd kneepoint luma.
+       uint16_t x1;    /** 2nd kneepoint luma.
                                u0.13, [0,8191], constraints: 0<x1<x2,
                                default/ineffective 1024 */
-       uint16_t x2;    /**< 3rd kneepoint luma.
+       uint16_t x2;    /** 3rd kneepoint luma.
                                u0.13, [0,8191], constraints: x1<x2<x3,
                                default/ineffective 2048 */
-       uint16_t x3;    /**< 4th kneepoint luma.
+       uint16_t x3;    /** 4th kneepoint luma.
                                u0.13, [0,8191], constraints: x2<x3<x4,
                                default/ineffective 6144 */
-       uint16_t x4;    /**< 5tn kneepoint luma.
+       uint16_t x4;    /** 5th kneepoint luma.
                                u0.13, [0,8191], constraints: x3<x4<8191,
                                default/ineffective 7168 */
 };
@@ -94,7 +94,7 @@ union ia_css_ctc_data {
        uint16_t vamem_2[IA_CSS_VAMEM_2_CTC_TABLE_SIZE];
 };
 
-/** CTC table, used for Chroma Tone Control.
+/* CTC table, used for Chroma Tone Control.
  *
  *  ISP block: CTC1 (CTC by look-up table)
  *  ISP1: CTC1 is used.
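
The ia_css_ctc_config fields above describe a gain curve with kneepoint gains y0..y5 placed at luma 0, x1..x4 and full scale. A sketch of evaluating that curve by linear interpolation; the endpoint handling and integer scaling are assumptions for illustration, not the ISP's fixed-point implementation:

    /* Piecewise-linear CTC gain lookup over kneepoints at luma
     * 0, x1, x2, x3, x4, 8191 (u0.13 range). */
    static uint16_t ctc_gain(const struct ia_css_ctc_config *c, uint16_t luma)
    {
            const uint16_t x[6] = { 0, c->x1, c->x2, c->x3, c->x4, 8191 };
            const uint16_t y[6] = { c->y0, c->y1, c->y2, c->y3, c->y4, c->y5 };
            int i;

            for (i = 0; i < 5; i++) {
                    if (luma <= x[i + 1]) {
                            int32_t dy = (int32_t)y[i + 1] - (int32_t)y[i];
                            int32_t dx = (int32_t)x[i + 1] - (int32_t)x[i];
                            return y[i] + dy * (int32_t)(luma - x[i]) / dx;
                    }
            }
            return y[5];
    }
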
index 525c838d5a99c113f3453d3b91db7c3dabcd8fab..803be68abc541cf5b3a5c9a81e7426eb45a5832f 100644 (file)
 #ifndef __IA_CSS_DE_TYPES_H
 #define __IA_CSS_DE_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Demosaic (bayer-to-YCgCo) parameters.
 */
 
-/** Demosaic (bayer-to-YCgCo) configuration.
+/* Demosaic (bayer-to-YCgCo) configuration.
  *
  *  ISP block: DE1
  *  ISP1: DE1 is used.
  * (ISP2: DE2 is used.)
  */
 struct ia_css_de_config {
-       ia_css_u0_16 pixelnoise; /**< Pixel noise used in moire elimination.
+       ia_css_u0_16 pixelnoise; /** Pixel noise used in moire elimination.
                                u0.16, [0,65535],
                                default 0, ineffective 0 */
-       ia_css_u0_16 c1_coring_threshold; /**< Coring threshold for C1.
+       ia_css_u0_16 c1_coring_threshold; /** Coring threshold for C1.
                                This is the same as nr_config.threshold_cb.
                                u0.16, [0,65535],
                                default 128(0.001953125), ineffective 0 */
-       ia_css_u0_16 c2_coring_threshold; /**< Coring threshold for C2.
+       ia_css_u0_16 c2_coring_threshold; /** Coring threshold for C2.
                                This is the same as nr_config.threshold_cr.
                                u0.16, [0,65535],
                                default 128(0.001953125), ineffective 0 */
index eac1b27798574de3b1f26983ef6cbc988a573e88..50bdde419bb1247708aae4a46ea8cc94556c5146 100644 (file)
 #ifndef __IA_CSS_DE2_TYPES_H
 #define __IA_CSS_DE2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Demosaicing parameters.
 */
 
-/** Eigen Color Demosaicing configuration.
+/* Eigen Color Demosaicing configuration.
  *
  *  ISP block: DE2
  * (ISP1: DE1 is used.)
  *  ISP2: DE2 is used.
  */
 struct ia_css_ecd_config {
-       uint16_t zip_strength;  /**< Strength of zipper reduction.
+       uint16_t zip_strength;  /** Strength of zipper reduction.
                                u0.13, [0,8191],
                                default 5489(0.67), ineffective 0 */
-       uint16_t fc_strength;   /**< Strength of false color reduction.
+       uint16_t fc_strength;   /** Strength of false color reduction.
                                u0.13, [0,8191],
                                default 8191(almost 1.0), ineffective 0 */
-       uint16_t fc_debias;     /**< Prevent color change
+       uint16_t fc_debias;     /** Prevent color change
                                     on noise or Gr/Gb imbalance.
                                u0.13, [0,8191],
                                default 0, ineffective 0 */
index b5d7b6b175b642d19400c36c67eda9c2b6ad3360..1bf6dcef7dc76764a9cde48121807b45c4cda76b 100644 (file)
 #ifndef __IA_CSS_DP_TYPES_H
 #define __IA_CSS_DP_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Defect Pixel Correction (DPC) parameters.
 */
 
 
-/** Defect Pixel Correction configuration.
+/* Defect Pixel Correction configuration.
  *
  *  ISP block: DPC1 (DPC after WB)
  *             DPC2 (DPC before WB)
  *  ISP2: DPC2 is used.
  */
 struct ia_css_dp_config {
-       ia_css_u0_16 threshold; /**< The threshold of defect pixel correction,
+       ia_css_u0_16 threshold; /** The threshold of defect pixel correction,
                              representing the permissible difference of
                              intensity between one pixel and its
                              surrounding pixels. Smaller values result
                                in more frequent pixel corrections.
                                u0.16, [0,65535],
                                default 8192, ineffective 65535 */
-       ia_css_u8_8 gain;        /**< The sensitivity of mis-correction. ISP will
+       ia_css_u8_8 gain;        /** The sensitivity of mis-correction. ISP will
                              miss a lot of defects if the value is set
                                too large.
                                u8.8, [0,65535],
index b2c974196ce8201324a434141668941866bdcdfb..6727682d287ffdb4dcc67144b97dea3d0034b081 100644 (file)
 #ifndef __IA_CSS_DPC2_TYPES_H
 #define __IA_CSS_DPC2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Defect Pixel Correction 2 (DPC2) parameters.
 */
 
 #include "type_support.h"
 
 /**@{*/
-/** Floating point constants for different metrics. */
+/* Floating point constants for different metrics. */
 #define METRIC1_ONE_FP (1<<12)
 #define METRIC2_ONE_FP (1<<5)
 #define METRIC3_ONE_FP (1<<12)
@@ -30,7 +30,7 @@
 /**@}*/
 
 /**@{*/
-/** Defect Pixel Correction 2 configuration.
+/* Defect Pixel Correction 2 configuration.
  *
  * \brief DPC2 public parameters.
  * \details Struct with all parameters for the Defect Pixel Correction 2
index 4d0abfe4d0fd2c2ba4d048208a37dd79a77f30be..66a7e58659c0b92d11b0934a56886aab8b1c8fa9 100644 (file)
@@ -30,7 +30,7 @@
 #ifdef ISP2401
 
 #endif
-/** dvserence frame */
+/* DVS reference frame */
 struct sh_css_isp_dvs_isp_config {
        uint32_t num_horizontal_blocks;
        uint32_t num_vertical_blocks;
index 216c54a21ea59d3b3683eaa196fb3e966bac7574..30772d217fb2731c73adf69c2e6d062de0d749b4 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_DVS_TYPES_H
 #define __IA_CSS_DVS_TYPES_H
 
-/** DVS frame
+/* DVS frame
  *
  *  ISP block: dvs frame
  */
index 07651f0ac558be7ee64dc0389f3f985b2ac71ae3..32e91824a5e5a0f86f73e1926618bf99cc3606f8 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_EED1_8_TYPES_H
 #define __IA_CSS_EED1_8_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Edge Enhanced Demosaic parameters.
 */
 
  */
 #define IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS  9
 
-/** Edge Enhanced Demosaic configuration
+/* Edge Enhanced Demosaic configuration
  *
  * ISP2.6.1: EED1_8 is used.
  */
 
 struct ia_css_eed1_8_config {
-       int32_t rbzp_strength;  /**< Strength of zipper reduction. */
-
-       int32_t fcstrength;     /**< Strength of false color reduction. */
-       int32_t fcthres_0;      /**< Threshold to prevent chroma coring due to noise or green disparity in dark region. */
-       int32_t fcthres_1;      /**< Threshold to prevent chroma coring due to noise or green disparity in bright region. */
-       int32_t fc_sat_coef;    /**< How much color saturation to maintain in high color saturation region. */
-       int32_t fc_coring_prm;  /**< Chroma coring coefficient for tint color suppression. */
-
-       int32_t aerel_thres0;   /**< Threshold for Non-Directional Reliability at dark region. */
-       int32_t aerel_gain0;    /**< Gain for Non-Directional Reliability at dark region. */
-       int32_t aerel_thres1;   /**< Threshold for Non-Directional Reliability at bright region. */
-       int32_t aerel_gain1;    /**< Gain for Non-Directional Reliability at bright region. */
-
-       int32_t derel_thres0;   /**< Threshold for Directional Reliability at dark region. */
-       int32_t derel_gain0;    /**< Gain for Directional Reliability at dark region. */
-       int32_t derel_thres1;   /**< Threshold for Directional Reliability at bright region. */
-       int32_t derel_gain1;    /**< Gain for Directional Reliability at bright region. */
-
-       int32_t coring_pos0;    /**< Positive Edge Coring Threshold in dark region. */
-       int32_t coring_pos1;    /**< Positive Edge Coring Threshold in bright region. */
-       int32_t coring_neg0;    /**< Negative Edge Coring Threshold in dark region. */
-       int32_t coring_neg1;    /**< Negative Edge Coring Threshold in bright region. */
-
-       int32_t gain_exp;       /**< Common Exponent of Gain. */
-       int32_t gain_pos0;      /**< Gain for Positive Edge in dark region. */
-       int32_t gain_pos1;      /**< Gain for Positive Edge in bright region. */
-       int32_t gain_neg0;      /**< Gain for Negative Edge in dark region. */
-       int32_t gain_neg1;      /**< Gain for Negative Edge in bright region. */
-
-       int32_t pos_margin0;    /**< Margin for Positive Edge in dark region. */
-       int32_t pos_margin1;    /**< Margin for Positive Edge in bright region. */
-       int32_t neg_margin0;    /**< Margin for Negative Edge in dark region. */
-       int32_t neg_margin1;    /**< Margin for Negative Edge in bright region. */
-
-       int32_t dew_enhance_seg_x[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS];               /**< Segment data for directional edge weight: X. */
-       int32_t dew_enhance_seg_y[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS];               /**< Segment data for directional edge weight: Y. */
-       int32_t dew_enhance_seg_slope[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)];     /**< Segment data for directional edge weight: Slope. */
-       int32_t dew_enhance_seg_exp[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)];       /**< Segment data for directional edge weight: Exponent. */
-       int32_t dedgew_max;     /**< Max Weight for Directional Edge. */
+       int32_t rbzp_strength;  /** Strength of zipper reduction. */
+
+       int32_t fcstrength;     /** Strength of false color reduction. */
+       int32_t fcthres_0;      /** Threshold to prevent chroma coring due to noise or green disparity in dark region. */
+       int32_t fcthres_1;      /** Threshold to prevent chroma coring due to noise or green disparity in bright region. */
+       int32_t fc_sat_coef;    /** How much color saturation to maintain in high color saturation region. */
+       int32_t fc_coring_prm;  /** Chroma coring coefficient for tint color suppression. */
+
+       int32_t aerel_thres0;   /** Threshold for Non-Directional Reliability at dark region. */
+       int32_t aerel_gain0;    /** Gain for Non-Directional Reliability at dark region. */
+       int32_t aerel_thres1;   /** Threshold for Non-Directional Reliability at bright region. */
+       int32_t aerel_gain1;    /** Gain for Non-Directional Reliability at bright region. */
+
+       int32_t derel_thres0;   /** Threshold for Directional Reliability at dark region. */
+       int32_t derel_gain0;    /** Gain for Directional Reliability at dark region. */
+       int32_t derel_thres1;   /** Threshold for Directional Reliability at bright region. */
+       int32_t derel_gain1;    /** Gain for Directional Reliability at bright region. */
+
+       int32_t coring_pos0;    /** Positive Edge Coring Threshold in dark region. */
+       int32_t coring_pos1;    /** Positive Edge Coring Threshold in bright region. */
+       int32_t coring_neg0;    /** Negative Edge Coring Threshold in dark region. */
+       int32_t coring_neg1;    /** Negative Edge Coring Threshold in bright region. */
+
+       int32_t gain_exp;       /** Common Exponent of Gain. */
+       int32_t gain_pos0;      /** Gain for Positive Edge in dark region. */
+       int32_t gain_pos1;      /** Gain for Positive Edge in bright region. */
+       int32_t gain_neg0;      /** Gain for Negative Edge in dark region. */
+       int32_t gain_neg1;      /** Gain for Negative Edge in bright region. */
+
+       int32_t pos_margin0;    /** Margin for Positive Edge in dark region. */
+       int32_t pos_margin1;    /** Margin for Positive Edge in bright region. */
+       int32_t neg_margin0;    /** Margin for Negative Edge in dark region. */
+       int32_t neg_margin1;    /** Margin for Negative Edge in bright region. */
+
+       int32_t dew_enhance_seg_x[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS];               /** Segment data for directional edge weight: X. */
+       int32_t dew_enhance_seg_y[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS];               /** Segment data for directional edge weight: Y. */
+       int32_t dew_enhance_seg_slope[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)];     /** Segment data for directional edge weight: Slope. */
+       int32_t dew_enhance_seg_exp[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)];       /** Segment data for directional edge weight: Exponent. */
+       int32_t dedgew_max;     /** Max Weight for Directional Edge. */
 };
 
 #endif /* __IA_CSS_EED1_8_TYPES_H */
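
The dew_enhance_seg_* arrays above define a piecewise-linear directional-edge-weight curve (nine points, eight slopes). A sketch of evaluating such a curve; it deliberately ignores the per-segment exponent and any fixed-point scaling, so it is an assumption-laden illustration rather than the kernel's arithmetic:

    /* Evaluate the directional edge weight curve given by seg_x/seg_y/seg_slope
     * at position x; fixed-point scaling via dew_enhance_seg_exp is ignored. */
    static int32_t dew_enhance_eval(const struct ia_css_eed1_8_config *c, int32_t x)
    {
            int i;

            if (x <= c->dew_enhance_seg_x[0])
                    return c->dew_enhance_seg_y[0];
            for (i = 0; i < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1; i++) {
                    if (x < c->dew_enhance_seg_x[i + 1])
                            return c->dew_enhance_seg_y[i] +
                                   c->dew_enhance_seg_slope[i] *
                                   (x - c->dew_enhance_seg_x[i]);
            }
            return c->dew_enhance_seg_y[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1];
    }
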
index df1565a5914c18886d65a581868a9635e9886c02..49479572b40d231a6248227d7f2fb83c0224b989 100644 (file)
 #ifndef __IA_CSS_FORMATS_TYPES_H
 #define __IA_CSS_FORMATS_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for output format parameters.
 */
 
 #include "type_support.h"
 
-/** Formats configuration.
+/* Formats configuration.
  *
  *  ISP block: FORMATS
  *  ISP1: FORMATS is used.
  *  ISP2: FORMATS is used.
  */
 struct ia_css_formats_config {
-       uint32_t video_full_range_flag; /**< selects the range of YUV output.
+       uint32_t video_full_range_flag; /** selects the range of YUV output.
                                u8.0, [0,1],
                                default 1, ineffective n/a\n
                                1 - full range, luma 0-255, chroma 0-255\n
index 5a2f0c06a80d3c4d4b4a60f2acaf516289bfc5b5..ef287fa3c428a0caac7573611a550061478006bd 100644 (file)
 #ifndef __IA_CSS_FPN_TYPES_H
 #define __IA_CSS_FPN_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Fixed Pattern Noise parameters.
 */
 
-/** Fixed Pattern Noise table.
+/* Fixed Pattern Noise table.
  *
  *  This contains the fixed patterns noise values
  *  obtained from a black frame capture.
  */
 
 struct ia_css_fpn_table {
-       int16_t *data;          /**< Table content (fixed patterns noise).
+       int16_t *data;          /** Table content (fixed patterns noise).
                                        u0.[13-shift], [0,63] */
-       uint32_t width;         /**< Table width (in pixels).
+       uint32_t width;         /** Table width (in pixels).
                                        This is the input frame width. */
-       uint32_t height;        /**< Table height (in pixels).
+       uint32_t height;        /** Table height (in pixels).
                                        This is the input frame height. */
-       uint32_t shift;         /**< Common exponent of table content.
+       uint32_t shift;         /** Common exponent of table content.
                                        u8.0, [0,13] */
-       uint32_t enabled;       /**< Fpn is enabled.
+       uint32_t enabled;       /** Fpn is enabled.
                                        bool */
 };
 
index dd9f0eda3353119a96b5a6b4ba436089e6a0de6b..594807fe2925b88d51b493df50527a2e42e5336f 100644 (file)
 #ifndef __IA_CSS_GC_TYPES_H
 #define __IA_CSS_GC_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Gamma Correction parameters.
 */
 
 #include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h"  /* FIXME: Needed for ia_css_vamem_type */
 
-/** Fractional bits for GAMMA gain */
+/* Fractional bits for GAMMA gain */
 #define IA_CSS_GAMMA_GAIN_K_SHIFT      13
 
-/** Number of elements in the gamma table. */
+/* Number of elements in the gamma table. */
 #define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2    10
 #define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE         (1U<<IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2)
 
-/** Number of elements in the gamma table. */
+/* Number of elements in the gamma table. */
 #define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2    8
 #define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE         ((1U<<IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2) + 1)
 
-/** Gamma table, used for Y(Luma) Gamma Correction.
+/* Gamma table, used for Y(Luma) Gamma Correction.
  *
  *  ISP block: GC1 (YUV Gamma Correction)
  *  ISP1: GC1 is used.
  * (ISP2: GC2(sRGB Gamma Correction) is used.)
  */
-/**< IA_CSS_VAMEM_TYPE_1(ISP2300) or
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
      IA_CSS_VAMEM_TYPE_2(ISP2400) */
 union ia_css_gc_data {
        uint16_t vamem_1[IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE];
-       /**< Y(Luma) Gamma table on vamem type 1. u0.8, [0,255] */
+       /** Y(Luma) Gamma table on vamem type 1. u0.8, [0,255] */
        uint16_t vamem_2[IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE];
-       /**< Y(Luma) Gamma table on vamem type 2. u0.8, [0,255] */
+       /** Y(Luma) Gamma table on vamem type 2. u0.8, [0,255] */
 };
 
 struct ia_css_gamma_table {
@@ -52,22 +52,22 @@ struct ia_css_gamma_table {
        union ia_css_gc_data data;
 };
 
-/** Gamma Correction configuration (used only for YUV Gamma Correction).
+/* Gamma Correction configuration (used only for YUV Gamma Correction).
  *
  *  ISP block: GC1 (YUV Gamma Correction)
  *  ISP1: GC1 is used.
  * (ISP2: GC2 (sRGB Gamma Correction) is used.)
   */
 struct ia_css_gc_config {
-       uint16_t gain_k1; /**< Gain to adjust U after YUV Gamma Correction.
+       uint16_t gain_k1; /** Gain to adjust U after YUV Gamma Correction.
                                u0.16, [0,65535],
                                default/ineffective 19000(0.29) */
-       uint16_t gain_k2; /**< Gain to adjust V after YUV Gamma Correction.
+       uint16_t gain_k2; /** Gain to adjust V after YUV Gamma Correction.
                                u0.16, [0,65535],
                                default/ineffective 19000(0.29) */
 };
 
-/** Chroma Enhancement configuration.
+/* Chroma Enhancement configuration.
  *
  *  This parameter specifies range of chroma output level.
  *  The standard range is [0,255] or [16,240].
@@ -77,20 +77,20 @@ struct ia_css_gc_config {
  * (ISP2: CE1 is not used.)
  */
 struct ia_css_ce_config {
-       uint8_t uv_level_min; /**< Minimum of chroma output level.
+       uint8_t uv_level_min; /** Minimum of chroma output level.
                                u0.8, [0,255], default/ineffective 0 */
-       uint8_t uv_level_max; /**< Maximum of chroma output level.
+       uint8_t uv_level_max; /** Maximum of chroma output level.
                                u0.8, [0,255], default/ineffective 255 */
 };
 
-/** Multi-Axes Color Correction (MACC) configuration.
+/* Multi-Axes Color Correction (MACC) configuration.
  *
  *  ISP block: MACC2 (MACC by matrix and exponent(ia_css_macc_config))
  * (ISP1: MACC1 (MACC by only matrix) is used.)
  *  ISP2: MACC2 is used.
  */
 struct ia_css_macc_config {
-       uint8_t exp;    /**< Common exponent of ia_css_macc_table.
+       uint8_t exp;    /** Common exponent of ia_css_macc_table.
                                u8.0, [0,13], default 1, ineffective 1 */
 };
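
A sketch of filling the VAMEM-type-2 luma gamma table described above with a plain 1/2.2 gamma curve, assuming the u0.8 [0,255] entry range quoted in the comments; this is illustrative only and is not the driver's own default table:

    #include <math.h>

    /* Fill the VAMEM type 2 luma gamma table with a 1/2.2 gamma curve;
     * entries are u0.8 values in [0,255]. */
    static void fill_gamma_vamem2(struct ia_css_gamma_table *t)
    {
            int i;

            for (i = 0; i < IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE; i++) {
                    double in = (double)i / (IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE - 1);
                    t->data.vamem_2[i] = (uint16_t)(255.0 * pow(in, 1.0 / 2.2) + 0.5);
            }
    }
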
 
index e439583bdfb6f2e8ae0d5b7f7f0bb20225a6c012..fab7467d30a5500a56fc34ce6dd9905013a13820 100644 (file)
 
 #include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h"  /* FIXME: needed for ia_css_vamem_type */
 
-/** @file
+/* @file
 * CSS-API header file for Gamma Correction parameters.
 */
 
-/** sRGB Gamma table, used for sRGB Gamma Correction.
+/* sRGB Gamma table, used for sRGB Gamma Correction.
  *
  *  ISP block: GC2 (sRGB Gamma Correction)
  * (ISP1: GC1(YUV Gamma Correction) is used.)
  *  ISP2: GC2 is used.
  */
 
-/** Number of elements in the sRGB gamma table. */
+/* Number of elements in the sRGB gamma table. */
 #define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2 8
 #define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE      (1U<<IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2)
 
-/** Number of elements in the sRGB gamma table. */
+/* Number of elements in the sRGB gamma table. */
 #define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2    8
 #define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE     ((1U<<IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2) + 1)
 
-/**< IA_CSS_VAMEM_TYPE_1(ISP2300) or
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
      IA_CSS_VAMEM_TYPE_2(ISP2400) */
 union ia_css_rgb_gamma_data {
        uint16_t vamem_1[IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE];
-       /**< RGB Gamma table on vamem type1. This table is not used,
+       /** RGB Gamma table on vamem type1. This table is not used,
                because sRGB Gamma Correction is not implemented for ISP2300. */
        uint16_t vamem_2[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE];
-               /**< RGB Gamma table on vamem type2. u0.12, [0,4095] */
+               /** RGB Gamma table on vamem type2. u0.12, [0,4095] */
 };
 
 struct ia_css_rgb_gamma_table {
index c3345b32e3e6e6f300374a9739de59f49e194933..26464421b077e0df7fe39c1a1e5de1773d0f51c7 100644 (file)
  * \detail Currently HDR parameters are used only for testing purposes
  */
 struct ia_css_hdr_irradiance_params {
-       int test_irr;                                          /**< Test parameter */
-       int match_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];  /**< Histogram matching shift parameter */
-       int match_mul[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];    /**< Histogram matching multiplication parameter */
-       int thr_low[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];      /**< Weight map soft threshold low bound parameter */
-       int thr_high[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];     /**< Weight map soft threshold high bound parameter */
-       int thr_coeff[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];    /**< Soft threshold linear function coefficien */
-       int thr_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];    /**< Soft threshold precision shift parameter */
-       int weight_bpp;                                        /**< Weight map bits per pixel */
+       int test_irr;                                          /** Test parameter */
+       int match_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];  /** Histogram matching shift parameter */
+       int match_mul[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];    /** Histogram matching multiplication parameter */
+       int thr_low[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];      /** Weight map soft threshold low bound parameter */
+       int thr_high[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];     /** Weight map soft threshold high bound parameter */
+       int thr_coeff[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];    /** Soft threshold linear function coefficient */
+       int thr_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];    /** Soft threshold precision shift parameter */
+       int weight_bpp;                                        /** Weight map bits per pixel */
 };
 
 /**
@@ -39,7 +39,7 @@ struct ia_css_hdr_irradiance_params {
  * \detail Currently HDR parameters are used only for testing purposes
  */
 struct ia_css_hdr_deghost_params {
-       int test_deg; /**< Test parameter */
+       int test_deg; /** Test parameter */
 };
 
 /**
@@ -47,7 +47,7 @@ struct ia_css_hdr_deghost_params {
  * \detail Currently HDR parameters are used only for testing purposes
  */
 struct ia_css_hdr_exclusion_params {
-       int test_excl; /**< Test parameter */
+       int test_excl; /** Test parameter */
 };
 
 /**
@@ -56,9 +56,9 @@ struct ia_css_hdr_exclusion_params {
  * the CSS API. Currently, only test parameters are defined.
  */
 struct ia_css_hdr_config {
-       struct ia_css_hdr_irradiance_params irradiance; /**< HDR irradiance paramaters */
-       struct ia_css_hdr_deghost_params    deghost;    /**< HDR deghosting parameters */
-       struct ia_css_hdr_exclusion_params  exclusion; /**< HDR exclusion parameters */
+       struct ia_css_hdr_irradiance_params irradiance; /** HDR irradiance parameters */
+       struct ia_css_hdr_deghost_params    deghost;    /** HDR deghosting parameters */
+       struct ia_css_hdr_exclusion_params  exclusion; /** HDR exclusion parameters */
 };
 
 #endif /* __IA_CSS_HDR_TYPES_H */
index 3d510bf5886a95c7dc2aaec8345cbc29fbf36e67..9cd31c2c0253b8bbf774785d79edc076d105e9cf 100644 (file)
 #ifndef __IA_CSS_MACC1_5_TYPES_H
 #define __IA_CSS_MACC1_5_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Multi-Axis Color Conversion algorithm parameters.
 */
 
-/** Multi-Axis Color Conversion configuration
+/* Multi-Axis Color Conversion configuration
  *
  * ISP2.6.1: MACC1_5 is used.
  */
 
 
-/** Number of axes in the MACC table. */
+/* Number of axes in the MACC table. */
 #define IA_CSS_MACC_NUM_AXES           16
-/** Number of coefficients per MACC axes. */
+/* Number of coefficients per MACC axes. */
 #define IA_CSS_MACC_NUM_COEFS          4
 
-/** Multi-Axes Color Correction (MACC) table.
+/* Multi-Axes Color Correction (MACC) table.
  *
  *  ISP block: MACC (MACC by only matrix)
  *             MACC1_5 (MACC by matrix and exponent(ia_css_macc_config))
  */
 struct ia_css_macc1_5_table {
        int16_t data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES];
-       /**< 16 of 2x2 matix
+       /** 16 of 2x2 matrix
          MACC1_5: s[macc_config.exp].[13-macc_config.exp], [-8192,8191]
            default/ineffective: (s1.12)
                16 of "identity 2x2 matix" {4096,0,0,4096} */
 };
 
-/** Multi-Axes Color Correction (MACC) configuration.
+/* Multi-Axes Color Correction (MACC) configuration.
  *
  *  ISP block: MACC1_5 (MACC by matrix and exponent(ia_css_macc_config))
  *  ISP2: MACC1_5 is used.
  */
 struct ia_css_macc1_5_config {
-       uint8_t exp;    /**< Common exponent of ia_css_macc_table.
+       uint8_t exp;    /** Common exponent of ia_css_macc_table.
                                u8.0, [0,13], default 1, ineffective 1 */
 };
 
index a25581c6f3ac4cb82ac3de70f1853a6a6cdb7002..2c9e5a8ceb9880e93b315d0e1bc0f3e20ba53af0 100644 (file)
 #ifndef __IA_CSS_MACC_TYPES_H
 #define __IA_CSS_MACC_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Multi-Axis Color Correction (MACC) parameters.
 */
 
-/** Number of axes in the MACC table. */
+/* Number of axes in the MACC table. */
 #define IA_CSS_MACC_NUM_AXES           16
-/** Number of coefficients per MACC axes. */
+/* Number of coefficients per MACC axes. */
 #define IA_CSS_MACC_NUM_COEFS          4
-/** The number of planes in the morphing table. */
+/* The number of planes in the morphing table. */
 
-/** Multi-Axis Color Correction (MACC) table.
+/* Multi-Axis Color Correction (MACC) table.
  *
  *  ISP block: MACC1 (MACC by only matrix)
  *             MACC2 (MACC by matrix and exponent(ia_css_macc_config))
@@ -51,7 +51,7 @@
 
 struct ia_css_macc_table {
        int16_t data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES];
-       /**< 16 of 2x2 matix
+       /** 16 of 2x2 matrix
          MACC1: s2.13, [-65536,65535]
            default/ineffective:
                16 of "identity 2x2 matix" {8192,0,0,8192}
index eeaadfeb5a1ec4be4942523885007e58cbaa354b..d981394c1c1146dd27ba919204ac1d2c60ada9c8 100644 (file)
 #ifndef __IA_CSS_OB2_TYPES_H
 #define __IA_CSS_OB2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Optical Black algorithm parameters.
 */
 
-/** Optical Black configuration
+/* Optical Black configuration
  *
  * ISP2.6.1: OB2 is used.
  */
 #include "ia_css_frac.h"
 
 struct ia_css_ob2_config {
-       ia_css_u0_16 level_gr;    /**< Black level for GR pixels.
+       ia_css_u0_16 level_gr;    /** Black level for GR pixels.
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
-       ia_css_u0_16  level_r;     /**< Black level for R pixels.
+       ia_css_u0_16  level_r;     /** Black level for R pixels.
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
-       ia_css_u0_16  level_b;     /**< Black level for B pixels.
+       ia_css_u0_16  level_b;     /** Black level for B pixels.
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
-       ia_css_u0_16  level_gb;    /**< Black level for GB pixels.
+       ia_css_u0_16  level_gb;    /** Black level for GB pixels.
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
 };
index 88459b6c003d180f35f9eb82def73e29c0b1b6d9..a9717b8f44acc744786592080e48412c4888dbef 100644 (file)
 #ifndef __IA_CSS_OB_TYPES_H
 #define __IA_CSS_OB_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Optical Black level parameters.
 */
 
 #include "ia_css_frac.h"
 
-/** Optical black mode.
+/* Optical black mode.
  */
 enum ia_css_ob_mode {
-       IA_CSS_OB_MODE_NONE,    /**< OB has no effect. */
-       IA_CSS_OB_MODE_FIXED,   /**< Fixed OB */
-       IA_CSS_OB_MODE_RASTER   /**< Raster OB */
+       IA_CSS_OB_MODE_NONE,    /** OB has no effect. */
+       IA_CSS_OB_MODE_FIXED,   /** Fixed OB */
+       IA_CSS_OB_MODE_RASTER   /** Raster OB */
 };
 
-/** Optical Black level configuration.
+/* Optical Black level configuration.
  *
  *  ISP block: OB1
  *  ISP1: OB1 is used.
  *  ISP2: OB1 is used.
  */
 struct ia_css_ob_config {
-       enum ia_css_ob_mode mode; /**< Mode (None / Fixed / Raster).
+       enum ia_css_ob_mode mode; /** Mode (None / Fixed / Raster).
                                        enum, [0,2],
                                        default 1, ineffective 0 */
-       ia_css_u0_16 level_gr;    /**< Black level for GR pixels
+       ia_css_u0_16 level_gr;    /** Black level for GR pixels
                                        (used for Fixed Mode only).
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
-       ia_css_u0_16 level_r;     /**< Black level for R pixels
+       ia_css_u0_16 level_r;     /** Black level for R pixels
                                        (used for Fixed Mode only).
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
-       ia_css_u0_16 level_b;     /**< Black level for B pixels
+       ia_css_u0_16 level_b;     /** Black level for B pixels
                                        (used for Fixed Mode only).
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
-       ia_css_u0_16 level_gb;    /**< Black level for GB pixels
+       ia_css_u0_16 level_gb;    /** Black level for GB pixels
                                        (used for Fixed Mode only).
                                        u0.16, [0,65535],
                                        default/ineffective 0 */
-       uint16_t start_position; /**< Start position of OB area
+       uint16_t start_position; /** Start position of OB area
                                        (used for Raster Mode only).
                                        u16.0, [0,63],
                                        default/ineffective 0 */
-       uint16_t end_position;  /**< End position of OB area
+       uint16_t end_position;  /** End position of OB area
                                        (used for Raster Mode only).
                                        u16.0, [0,63],
                                        default/ineffective 0 */
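
An illustrative aside (outside the patch): the u0.16 encoding of the black levels above is easy to get wrong. The sketch assumes the usual convention that u0.16 is a fraction of the full sensor code range, so a raw black level scales by 65536 / 2^bit_depth; the helper and the 10-bit example are hypothetical.

#include <stdint.h>

/* Hypothetical conversion: express a raw black level (e.g. 64 on a
 * 10-bit sensor) as the u0.16 fraction [0,65535] expected by the
 * level_gr/level_r/level_b/level_gb fields above.
 * Assumption: u0.16 represents a fraction of the full sensor range. */
static uint16_t ob_level_u0_16(uint32_t raw_black_level, unsigned int bit_depth)
{
        uint32_t v = (raw_black_level << 16) >> bit_depth;

        return (uint16_t)(v > 65535 ? 65535 : v);
}

/* Example: ob_level_u0_16(64, 10) == 4096, i.e. 64/1024 of full scale. */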
index 26ec27e085c1da60a6b3d035deda02f24b3b5906..eb7defa4114563a23bae8f9c8991585db334f7b8 100644 (file)
@@ -19,7 +19,7 @@
 #include "dma.h"
 #include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */
 
-/** output frame */
+/* output frame */
 struct sh_css_isp_output_isp_config {
        uint32_t width_a_over_b;
        uint32_t height;
index 4335ac28b31d76fec890dff2e4fbb0e208f8a0df..9c7342fb8145f379ec394567a5a9f5db7727f6f1 100644 (file)
 #ifndef __IA_CSS_OUTPUT_TYPES_H
 #define __IA_CSS_OUTPUT_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for parameters of output frames.
 */
 
-/** Output frame
+/* Output frame
  *
  *  ISP block: output frame
  */
@@ -40,8 +40,8 @@ struct ia_css_output1_configuration {
 };
 
 struct ia_css_output_config {
-       uint8_t enable_hflip;  /**< enable horizontal output mirroring */
-       uint8_t enable_vflip;  /**< enable vertical output mirroring */
+       uint8_t enable_hflip;  /** enable horizontal output mirroring */
+       uint8_t enable_vflip;  /** enable vertical output mirroring */
 };
 
 #endif /* __IA_CSS_OUTPUT_TYPES_H */
index 955fd472a2414b939023b789fa35c76c22f3860b..62d371841619b0104ebd917cea589d1b3b75c077 100644 (file)
@@ -18,7 +18,7 @@
 #include <ia_css_frame_public.h>
 #include "sh_css_internal.h"
 
-/** qplane frame
+/* qplane frame
  *
  *  ISP block: qplane frame
  */
index 54f8c299d22730092c10fcd80720fbafd6a0a2cd..5c0b8febd79aeac36909f87dddba98e45c8e4be6 100644 (file)
@@ -18,7 +18,7 @@
 #include <ia_css_frame_public.h>
 #include "sh_css_internal.h"
 
-/** Raw frame
+/* Raw frame
  *
  *  ISP block: Raw frame
  */
index 1f1b72a417d1d9ed3e51c8b8920154e19b23c787..026443b999a647500ae9b55105f0afd2fab50e98 100644 (file)
@@ -19,7 +19,7 @@
 #include "sh_css_defs.h"
 #include "dma.h"
 
-/** Reference frame */
+/* Reference frame */
 struct ia_css_ref_configuration {
        const struct ia_css_frame *ref_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
        uint32_t dvs_frame_delay;
index ce0eaeeee9c6ef3fac5528812a88ebc0b1138666..4750fba268b938ea19e84009447b19cfa8459068 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_REF_TYPES_H
 #define __IA_CSS_REF_TYPES_H
 
-/** Reference frame
+/* Reference frame
  *
  *  ISP block: reference frame
  */
index f57ed1ec5981a4f17c19fff115bfcdbcc6c818c1..8d674d2c6427515c5e2e926b07846e6f2ac578d7 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_S3A_TYPES_H
 #define __IA_CSS_S3A_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for 3A statistics parameters.
 */
 
 #include "../../../../components/stats_3a/src/stats_3a_public.h"
 #endif
 
-/** 3A configuration. This configures the 3A statistics collection
+/* 3A configuration. This configures the 3A statistics collection
  *  module.
  */
  
-/** 3A statistics grid
+/* 3A statistics grid
  *
  *  ISP block: S3A1 (3A Support for 3A ver.1 (Histogram is not used for AE))
  *             S3A2 (3A Support for 3A ver.2 (Histogram is used for AE))
 struct ia_css_3a_grid_info {
 
 #if defined(SYSTEM_css_skycam_c0_system)
-       uint32_t ae_enable;                                     /**< ae enabled in binary,
+       uint32_t ae_enable;                                     /** ae enabled in binary,
                                                                   0:disabled, 1:enabled */
-       struct ae_public_config_grid_config     ae_grd_info;    /**< see description in ae_public.h*/
+       struct ae_public_config_grid_config     ae_grd_info;    /** see description in ae_public.h*/
 
-       uint32_t awb_enable;                                    /**< awb enabled in binary,
+       uint32_t awb_enable;                                    /** awb enabled in binary,
                                                                   0:disabled, 1:enabled */
-       struct awb_public_config_grid_config    awb_grd_info;   /**< see description in awb_public.h*/
+       struct awb_public_config_grid_config    awb_grd_info;   /** see description in awb_public.h*/
 
-       uint32_t af_enable;                                     /**< af enabled in binary,
+       uint32_t af_enable;                                     /** af enabled in binary,
                                                                   0:disabled, 1:enabled */
-       struct af_public_grid_config            af_grd_info;    /**< see description in af_public.h*/
+       struct af_public_grid_config            af_grd_info;    /** see description in af_public.h*/
 
-       uint32_t awb_fr_enable;                                 /**< awb_fr enabled in binary,
+       uint32_t awb_fr_enable;                                 /** awb_fr enabled in binary,
                                                                   0:disabled, 1:enabled */
-       struct awb_fr_public_grid_config        awb_fr_grd_info;/**< see description in awb_fr_public.h*/
+       struct awb_fr_public_grid_config        awb_fr_grd_info;/** see description in awb_fr_public.h*/
   
-        uint32_t elem_bit_depth;    /**< TODO:Taken from BYT  - need input from AIQ
+        uint32_t elem_bit_depth;    /** TODO:Taken from BYT  - need input from AIQ
                                        if needed for SKC
                                        Bit depth of element used
                                        to calculate 3A statistics.
@@ -63,34 +63,34 @@ struct ia_css_3a_grid_info {
                                        bayer bit depth in DSP. */
 
 #else
-       uint32_t enable;            /**< 3A statistics enabled.
+       uint32_t enable;            /** 3A statistics enabled.
                                        0:disabled, 1:enabled */
-       uint32_t use_dmem;          /**< DMEM or VMEM determines layout.
+       uint32_t use_dmem;          /** DMEM or VMEM determines layout.
                                        0:3A statistics are stored to VMEM,
                                        1:3A statistics are stored to DMEM */
-       uint32_t has_histogram;     /**< Statistics include histogram.
+       uint32_t has_histogram;     /** Statistics include histogram.
                                        0:no histogram, 1:has histogram */
-       uint32_t width;             /**< Width of 3A grid table.
+       uint32_t width;             /** Width of 3A grid table.
                                        (= Horizontal number of grid cells
                                        in table, which cells have effective
                                        statistics.) */
-       uint32_t height;            /**< Height of 3A grid table.
+       uint32_t height;            /** Height of 3A grid table.
                                        (= Vertical number of grid cells
                                        in table, which cells have effective
                                        statistics.) */
-       uint32_t aligned_width;     /**< Horizontal stride (for alloc).
+       uint32_t aligned_width;     /** Horizontal stride (for alloc).
                                        (= Horizontal number of grid cells
                                        in table, which means
                                        the allocated width.) */
-       uint32_t aligned_height;    /**< Vertical stride (for alloc).
+       uint32_t aligned_height;    /** Vertical stride (for alloc).
                                        (= Vertical number of grid cells
                                        in table, which means
                                        the allocated height.) */
-       uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ(Bayer Quad) unit.
+       uint32_t bqs_per_grid_cell; /** Grid cell size in BQ(Bayer Quad) unit.
                                        (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
                                        Valid values are 8,16,32,64. */
-       uint32_t deci_factor_log2;  /**< log2 of bqs_per_grid_cell. */
-       uint32_t elem_bit_depth;    /**< Bit depth of element used
+       uint32_t deci_factor_log2;  /** log2 of bqs_per_grid_cell. */
+       uint32_t elem_bit_depth;    /** Bit depth of element used
                                        to calculate 3A statistics.
                                        This is 13, which is the normalized
                                        bayer bit depth in DSP. */
@@ -148,7 +148,7 @@ struct ia_css_3a_grid_info {
  * However, that will require driver/ 3A lib modifications.
  */
 
-/** 3A configuration. This configures the 3A statistics collection
+/* 3A configuration. This configures the 3A statistics collection
  *  module.
  *
  *  ae_y_*: Coefficients to calculate luminance from bayer.
@@ -167,38 +167,38 @@ struct ia_css_3a_grid_info {
  *  ISP2: S3A2 and SDVS2 are used.
  */
 struct ia_css_3a_config {
-       ia_css_u0_16 ae_y_coef_r;       /**< Weight of R for Y.
+       ia_css_u0_16 ae_y_coef_r;       /** Weight of R for Y.
                                                u0.16, [0,65535],
                                                default/ineffective 25559 */
-       ia_css_u0_16 ae_y_coef_g;       /**< Weight of G for Y.
+       ia_css_u0_16 ae_y_coef_g;       /** Weight of G for Y.
                                                u0.16, [0,65535],
                                                default/ineffective 32768 */
-       ia_css_u0_16 ae_y_coef_b;       /**< Weight of B for Y.
+       ia_css_u0_16 ae_y_coef_b;       /** Weight of B for Y.
                                                u0.16, [0,65535],
                                                default/ineffective 7209 */
-       ia_css_u0_16 awb_lg_high_raw;   /**< AWB level gate high for raw.
+       ia_css_u0_16 awb_lg_high_raw;   /** AWB level gate high for raw.
                                                u0.16, [0,65535],
                                                default 65472(=1023*64),
                                                ineffective 65535 */
-       ia_css_u0_16 awb_lg_low;        /**< AWB level gate low.
+       ia_css_u0_16 awb_lg_low;        /** AWB level gate low.
                                                u0.16, [0,65535],
                                                default 64(=1*64),
                                                ineffective 0 */
-       ia_css_u0_16 awb_lg_high;       /**< AWB level gate high.
+       ia_css_u0_16 awb_lg_high;       /** AWB level gate high.
                                                u0.16, [0,65535],
                                                default 65535,
                                                ineffective 65535 */
-       ia_css_s0_15 af_fir1_coef[7];   /**< AF FIR coefficients of fir1.
+       ia_css_s0_15 af_fir1_coef[7];   /** AF FIR coefficients of fir1.
                                                s0.15, [-32768,32767],
                                default/ineffective
                                -6689,-12207,-32768,32767,12207,6689,0 */
-       ia_css_s0_15 af_fir2_coef[7];   /**< AF FIR coefficients of fir2.
+       ia_css_s0_15 af_fir2_coef[7];   /** AF FIR coefficients of fir2.
                                                s0.15, [-32768,32767],
                                default/ineffective
                                2053,0,-18437,32767,-18437,2053,0 */
 };
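
To make the ae_y_* weights above concrete (sketch only, not part of the diff): with u0.16 weights, luminance is a weighted sum of the bayer channels with the scale removed by a 16-bit shift, and the defaults 25559 + 32768 + 7209 sum to exactly 65536 (1.0). The helper is hypothetical, and treating G as the Gr/Gb average is an assumption; the ISP's exact ordering and rounding may differ.

#include <stdint.h>

/* Hypothetical host-side model of the AE luminance computation:
 * Y = r * ae_y_coef_r + g * ae_y_coef_g + b * ae_y_coef_b,
 * with the u0.16 weights removed again by the >> 16. */
static uint32_t ae_luma(uint16_t r, uint16_t gr, uint16_t gb, uint16_t b,
                        uint16_t coef_r, uint16_t coef_g, uint16_t coef_b)
{
        uint32_t g = ((uint32_t)gr + gb) / 2;   /* assumed Gr/Gb average */
        uint64_t y = (uint64_t)r * coef_r + (uint64_t)g * coef_g +
                     (uint64_t)b * coef_b;

        return (uint32_t)(y >> 16);
}

/* With the default weights (sum 65536 == 1.0), a uniform gray input maps
 * to the same gray level. */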
 
-/** 3A statistics. This structure describes the data stored
+/* 3A statistics. This structure describes the data stored
  *  in each 3A grid point.
  *
  *  ISP block: S3A1 (3A Support for 3A ver.1) (Histogram is not used for AE)
@@ -209,43 +209,43 @@ struct ia_css_3a_config {
  *  ISP2: S3A2 is used.
  */
 struct ia_css_3a_output {
-       int32_t ae_y;    /**< Sum of Y in a statistics window, for AE.
+       int32_t ae_y;    /** Sum of Y in a statistics window, for AE.
                                (u19.13) */
-       int32_t awb_cnt; /**< Number of effective pixels
+       int32_t awb_cnt; /** Number of effective pixels
                                in a statistics window.
                                Pixels passed by the AWB level gate check are
                                judged as "effective". (u32) */
-       int32_t awb_gr;  /**< Sum of Gr in a statistics window, for AWB.
+       int32_t awb_gr;  /** Sum of Gr in a statistics window, for AWB.
                                All Gr pixels (not only for effective pixels)
                                are summed. (u19.13) */
-       int32_t awb_r;   /**< Sum of R in a statistics window, for AWB.
+       int32_t awb_r;   /** Sum of R in a statistics window, for AWB.
                                All R pixels (not only for effective pixels)
                                are summed. (u19.13) */
-       int32_t awb_b;   /**< Sum of B in a statistics window, for AWB.
+       int32_t awb_b;   /** Sum of B in a statistics window, for AWB.
                                All B pixels (not only for effective pixels)
                                are summed. (u19.13) */
-       int32_t awb_gb;  /**< Sum of Gb in a statistics window, for AWB.
+       int32_t awb_gb;  /** Sum of Gb in a statistics window, for AWB.
                                All Gb pixels (not only for effective pixels)
                                are summed. (u19.13) */
-       int32_t af_hpf1; /**< Sum of |Y| following high pass filter af_fir1
+       int32_t af_hpf1; /** Sum of |Y| following high pass filter af_fir1
                                within a statistics window, for AF. (u19.13) */
-       int32_t af_hpf2; /**< Sum of |Y| following high pass filter af_fir2
+       int32_t af_hpf2; /** Sum of |Y| following high pass filter af_fir2
                                within a statistics window, for AF. (u19.13) */
 };
 
 
-/** 3A Statistics. This structure describes the statistics that are generated
+/* 3A Statistics. This structure describes the statistics that are generated
  *  using the provided configuration (ia_css_3a_config).
  */
 struct ia_css_3a_statistics {
-       struct ia_css_3a_grid_info    grid;     /**< grid info contains the dimensions of the 3A grid */
-       struct ia_css_3a_output      *data;     /**< the pointer to 3a_output[grid.width * grid.height]
+       struct ia_css_3a_grid_info    grid;     /** grid info contains the dimensions of the 3A grid */
+       struct ia_css_3a_output      *data;     /** the pointer to 3a_output[grid.width * grid.height]
                                                     containing the 3A statistics */
-       struct ia_css_3a_rgby_output *rgby_data;/**< the pointer to 3a_rgby_output[256]
+       struct ia_css_3a_rgby_output *rgby_data;/** the pointer to 3a_rgby_output[256]
                                                     containing the histogram */
 };
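
The pointer comments above imply the buffer sizes a host has to provide: grid.width * grid.height entries for the per-cell statistics and 256 entries for the histogram. A minimal, self-contained sizing sketch (not part of the patch); only element sizes are passed in so no kernel structs need to be redefined, and the helper name is hypothetical.

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical allocation helper based on the comments above:
 * - data     : grid.width * grid.height entries of ia_css_3a_output
 * - rgby_data: 256 entries of ia_css_3a_rgby_output (one per histogram bin)
 * Returns 0 on success, -1 on allocation failure. */
static int s3a_alloc(uint32_t grid_width, uint32_t grid_height,
                     size_t output_elem_size, size_t rgby_elem_size,
                     void **data, void **rgby_data)
{
        *data = calloc((size_t)grid_width * grid_height, output_elem_size);
        *rgby_data = calloc(256, rgby_elem_size);
        if (!*data || !*rgby_data) {
                free(*data);
                free(*rgby_data);
                *data = *rgby_data = NULL;
                return -1;
        }
        return 0;
}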
 
-/** Histogram (Statistics for AE).
+/* Histogram (Statistics for AE).
  *
  *  4 histograms(r,g,b,y),
  *  256 bins for each histogram, unsigned 24bit value for each bin.
@@ -256,10 +256,10 @@ struct ia_css_3a_statistics {
  *  ISP2: HIST2 is used.
  */
 struct ia_css_3a_rgby_output {
-       uint32_t r;     /**< Number of R of one bin of the histogram R. (u24) */
-       uint32_t g;     /**< Number of G of one bin of the histogram G. (u24) */
-       uint32_t b;     /**< Number of B of one bin of the histogram B. (u24) */
-       uint32_t y;     /**< Number of Y of one bin of the histogram Y. (u24) */
+       uint32_t r;     /** Number of R of one bin of the histogram R. (u24) */
+       uint32_t g;     /** Number of G of one bin of the histogram G. (u24) */
+       uint32_t b;     /** Number of B of one bin of the histogram B. (u24) */
+       uint32_t y;     /** Number of Y of one bin of the histogram Y. (u24) */
 };
 
 #endif /* __IA_CSS_S3A_TYPES_H */
index 8b2b56b0310b6f27de2f1d748acf9b7fda1b96cf..9aa019539f479c7d55457ea29878d7aa74da4db8 100644 (file)
@@ -22,7 +22,7 @@
 
 #define NUM_S3A_LS 1
 
-/** s3a statistics store */
+/* s3a statistics store */
 #ifdef ISP2401
 struct ia_css_s3a_stat_ls_configuration {
        uint32_t s3a_grid_size_log2;
index 44e3c43a5d4a6d503b2c5dcbaf05372e27d8e85b..b35ac3e4009be38ecb15ed6d2c824434d730d1c9 100644 (file)
@@ -32,7 +32,7 @@ ia_css_sc_dump(
        unsigned level);
 
 #ifdef ISP2401
-/** @brief Configure the shading correction.
+/* @brief Configure the shading correction.
  * @param[out] to      Parameters used in the shading correction kernel in the isp.
  * @param[in]  from    Parameters passed from the host.
  * @param[in]  size    Size of the sh_css_isp_sc_isp_config structure.
@@ -45,7 +45,7 @@ ia_css_sc_config(
        const struct ia_css_sc_configuration *from,
        unsigned size);
 
-/** @brief Configure the shading correction.
+/* @brief Configure the shading correction.
  * @param[in]  binary  The binary, which has the shading correction.
  * @param[in]  internal_frame_origin_x_bqs_on_sctbl
  *                     X coordinate (in bqs) of the origin of the internal frame on the shading table.
index 5a833bc48af1f58509786b79e9af6cba34c39a1d..30ce499ac8cfc1d3264eb2c0d68007b2c6e38bab 100644 (file)
 #ifndef __IA_CSS_SC_TYPES_H
 #define __IA_CSS_SC_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Lens Shading Correction (SC) parameters.
 */
 
 
-/** Number of color planes in the shading table. */
+/* Number of color planes in the shading table. */
 #define IA_CSS_SC_NUM_COLORS           4
 
-/** The 4 colors that a shading table consists of.
+/* The 4 colors that a shading table consists of.
  *  For each color we store a grid of values.
  */
 enum ia_css_sc_color {
-       IA_CSS_SC_COLOR_GR, /**< Green on a green-red line */
-       IA_CSS_SC_COLOR_R,  /**< Red */
-       IA_CSS_SC_COLOR_B,  /**< Blue */
-       IA_CSS_SC_COLOR_GB  /**< Green on a green-blue line */
+       IA_CSS_SC_COLOR_GR, /** Green on a green-red line */
+       IA_CSS_SC_COLOR_R,  /** Red */
+       IA_CSS_SC_COLOR_B,  /** Blue */
+       IA_CSS_SC_COLOR_GB  /** Green on a green-blue line */
 };
 
-/** Lens Shading Correction table.
+/* Lens Shading Correction table.
  *
  *  This describes the color shading artefacts
  *  introduced by lens imperfections. To correct artefacts,
@@ -64,39 +64,39 @@ enum ia_css_sc_color {
  *  ISP2: SC1 is used.
  */
 struct ia_css_shading_table {
-       uint32_t enable; /**< Set to false for no shading correction.
+       uint32_t enable; /** Set to false for no shading correction.
                          The data field can be NULL when enable == true */
 /* ------ deprecated(bz675) : from ------ */
-       uint32_t sensor_width;  /**< Native sensor width in pixels. */
-       uint32_t sensor_height; /**< Native sensor height in lines.
+       uint32_t sensor_width;  /** Native sensor width in pixels. */
+       uint32_t sensor_height; /** Native sensor height in lines.
                When shading_settings.enable_shading_table_conversion is set
                as 0, sensor_width and sensor_height are NOT used.
                These are used only in the legacy shading table conversion
                in the css, when shading_settings.
                enable_shading_table_conversion is set as 1. */
 /* ------ deprecated(bz675) : to ------ */
-       uint32_t width;  /**< Number of data points per line per color.
+       uint32_t width;  /** Number of data points per line per color.
                                u8.0, [0,81] */
-       uint32_t height; /**< Number of lines of data points per color.
+       uint32_t height; /** Number of lines of data points per color.
                                u8.0, [0,61] */
-       uint32_t fraction_bits; /**< Bits of fractional part in the data
+       uint32_t fraction_bits; /** Bits of fractional part in the data
                                points.
                                u8.0, [0,13] */
        uint16_t *data[IA_CSS_SC_NUM_COLORS];
-       /**< Table data, one array for each color.
+       /** Table data, one array for each color.
             Use ia_css_sc_color to index this array.
             u[13-fraction_bits].[fraction_bits], [0,8191] */
 };
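
For illustration only (outside the diff): with the u[13-fraction_bits].[fraction_bits] encoding documented above, a gain of 1.0 is simply 1 << fraction_bits, and each color plane holds width * height data points. The fill helper below is hypothetical.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical: fill one color plane of a shading table with gain 1.0.
 * plane must point to width * height uint16_t entries; 1.0 encodes as
 * 1 << fraction_bits (representable only while fraction_bits < 13,
 * since the table values are 13 bits wide). */
static void sc_plane_fill_unity(uint16_t *plane, uint32_t width,
                                uint32_t height, uint32_t fraction_bits)
{
        uint16_t unity = (uint16_t)(1u << fraction_bits);
        size_t i, n = (size_t)width * height;

        for (i = 0; i < n; i++)
                plane[i] = unity;
}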
 
 /* ------ deprecated(bz675) : from ------ */
-/** Shading Correction settings.
+/* Shading Correction settings.
  *
  *  NOTE:
  *  This structure should be removed when the shading table conversion is
  *  removed from the css.
  */
 struct ia_css_shading_settings {
-       uint32_t enable_shading_table_conversion; /**< Set to 0,
+       uint32_t enable_shading_table_conversion; /** Set to 0,
                if the conversion of the shading table should be disabled
                in the css. (default 1)
                  0: The shading table is directly sent to the isp.
@@ -119,14 +119,14 @@ struct ia_css_shading_settings {
 
 #ifdef ISP2401
 
-/** Shading Correction configuration.
+/* Shading Correction configuration.
  *
  *  NOTE: The shading table size is larger than or equal to the internal frame size.
  */
 struct ia_css_sc_configuration {
-       uint32_t internal_frame_origin_x_bqs_on_sctbl; /**< Origin X (in bqs) of internal frame on shading table. */
-       uint32_t internal_frame_origin_y_bqs_on_sctbl; /**< Origin Y (in bqs) of internal frame on shading table. */
-                                               /**< NOTE: bqs = size in BQ(Bayer Quad) unit.
+       uint32_t internal_frame_origin_x_bqs_on_sctbl; /** Origin X (in bqs) of internal frame on shading table. */
+       uint32_t internal_frame_origin_y_bqs_on_sctbl; /** Origin Y (in bqs) of internal frame on shading table. */
+                                               /** NOTE: bqs = size in BQ(Bayer Quad) unit.
                                                        1BQ means {Gr,R,B,Gb}(2x2 pixels).
                                                        Horizontal 1 bqs corresponds to horizontal 2 pixels.
                                                        Vertical 1 bqs corresponds to vertical 2 pixels. */
index 295dc60b778c1016c3b556ddbaa48c4f7f0c4237..031983c357e4732865fe1f07a2012eba116682a4 100644 (file)
 #ifndef __IA_CSS_SDIS_COMMON_TYPES_H
 #define __IA_CSS_SDIS_COMMON_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for DVS statistics parameters.
 */
 
 #include <type_support.h>
 
-/** DVS statistics grid dimensions in number of cells.
+/* DVS statistics grid dimensions in number of cells.
  */
 
 struct ia_css_dvs_grid_dim {
-       uint32_t width;         /**< Width of DVS grid table in cells */
-       uint32_t height;        /**< Height of DVS grid table in cells */
+       uint32_t width;         /** Width of DVS grid table in cells */
+       uint32_t height;        /** Height of DVS grid table in cells */
 };
 
-/** DVS statistics dimensions in number of cells for
+/* DVS statistics dimensions in number of cells for
  * grid, coefficient and projection.
  */
 
@@ -55,7 +55,7 @@ struct ia_css_sdis_info {
                0,      /* dis_deci_factor_log2 */ \
        }
 
-/** DVS statistics grid
+/* DVS statistics grid
  *
  *  ISP block: SDVS1 (DIS/DVS Support for DIS/DVS ver.1 (2-axes))
  *             SDVS2 (DVS Support for DVS ver.2 (6-axes))
@@ -63,23 +63,23 @@ struct ia_css_sdis_info {
  *  ISP2: SDVS2 is used.
  */
 struct ia_css_dvs_grid_res {
-       uint32_t width;         /**< Width of DVS grid table.
+       uint32_t width;         /** Width of DVS grid table.
                                        (= Horizontal number of grid cells
                                        in table, which cells have effective
                                        statistics.)
                                        For DVS1, this is equal to
                                         the number of vertical statistics. */
-       uint32_t aligned_width; /**< Stride of each grid line.
+       uint32_t aligned_width; /** Stride of each grid line.
                                        (= Horizontal number of grid cells
                                        in table, which means
                                        the allocated width.) */
-       uint32_t height;        /**< Height of DVS grid table.
+       uint32_t height;        /** Height of DVS grid table.
                                        (= Vertical number of grid cells
                                        in table, which cells have effective
                                        statistics.)
                                        For DVS1, This is equal to
                                        the number of horizontal statistics. */
-       uint32_t aligned_height;/**< Stride of each grid column.
+       uint32_t aligned_height;/** Stride of each grid column.
                                        (= Vertical number of grid cells
                                        in table, which means
                                        the allocated height.) */
@@ -89,125 +89,125 @@ struct ia_css_dvs_grid_res {
  * However, that implies driver I/F changes
  */
 struct ia_css_dvs_grid_info {
-       uint32_t enable;        /**< DVS statistics enabled.
+       uint32_t enable;        /** DVS statistics enabled.
                                        0:disabled, 1:enabled */
-       uint32_t width;         /**< Width of DVS grid table.
+       uint32_t width;         /** Width of DVS grid table.
                                        (= Horizontal number of grid cells
                                        in table, which cells have effective
                                        statistics.)
                                        For DVS1, this is equal to
                                         the number of vertical statistics. */
-       uint32_t aligned_width; /**< Stride of each grid line.
+       uint32_t aligned_width; /** Stride of each grid line.
                                        (= Horizontal number of grid cells
                                        in table, which means
                                        the allocated width.) */
-       uint32_t height;        /**< Height of DVS grid table.
+       uint32_t height;        /** Height of DVS grid table.
                                        (= Vertical number of grid cells
                                        in table, which cells have effective
                                        statistics.)
                                        For DVS1, This is equal to
                                        the number of horizontal statistics. */
-       uint32_t aligned_height;/**< Stride of each grid column.
+       uint32_t aligned_height;/** Stride of each grid column.
                                        (= Vertical number of grid cells
                                        in table, which means
                                        the allocated height.) */
-       uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ(Bayer Quad) unit.
+       uint32_t bqs_per_grid_cell; /** Grid cell size in BQ(Bayer Quad) unit.
                                        (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
                                        For DVS1, valid value is 64.
                                        For DVS2, valid value is only 64,
                                        currently. */
-       uint32_t num_hor_coefs; /**< Number of horizontal coefficients. */
-       uint32_t num_ver_coefs; /**< Number of vertical coefficients. */
+       uint32_t num_hor_coefs; /** Number of horizontal coefficients. */
+       uint32_t num_ver_coefs; /** Number of vertical coefficients. */
 };
 
-/** Number of DVS statistics levels
+/* Number of DVS statistics levels
  */
 #define IA_CSS_DVS_STAT_NUM_OF_LEVELS  3
 
-/** DVS statistics generated by accelerator global configuration
+/* DVS statistics generated by accelerator global configuration
  */
 struct dvs_stat_public_dvs_global_cfg {
        unsigned char kappa;
-       /**< DVS statistics global configuration - kappa */
+       /** DVS statistics global configuration - kappa */
        unsigned char match_shift;
-       /**< DVS statistics global configuration - match_shift */
+       /** DVS statistics global configuration - match_shift */
        unsigned char ybin_mode;
-       /**< DVS statistics global configuration - y binning mode */
+       /** DVS statistics global configuration - y binning mode */
 };
 
-/** DVS statistics generated by accelerator level grid
+/* DVS statistics generated by accelerator level grid
  *  configuration
  */
 struct dvs_stat_public_dvs_level_grid_cfg {
        unsigned char grid_width;
-       /**< DVS statistics grid width */
+       /** DVS statistics grid width */
        unsigned char grid_height;
-       /**< DVS statistics grid height */
+       /** DVS statistics grid height */
        unsigned char block_width;
-       /**< DVS statistics block width */
+       /** DVS statistics block width */
        unsigned char block_height;
-       /**< DVS statistics block  height */
+       /** DVS statistics block  height */
 };
 
-/** DVS statistics generated by accelerator level grid start
+/* DVS statistics generated by accelerator level grid start
  *  configuration
  */
 struct dvs_stat_public_dvs_level_grid_start {
        unsigned short x_start;
-       /**< DVS statistics level x start */
+       /** DVS statistics level x start */
        unsigned short y_start;
-       /**< DVS statistics level y start */
+       /** DVS statistics level y start */
        unsigned char enable;
-       /**< DVS statistics level enable */
+       /** DVS statistics level enable */
 };
 
-/** DVS statistics generated by accelerator level grid end
+/* DVS statistics generated by accelerator level grid end
  *  configuration
  */
 struct dvs_stat_public_dvs_level_grid_end {
        unsigned short x_end;
-       /**< DVS statistics level x end */
+       /** DVS statistics level x end */
        unsigned short y_end;
-       /**< DVS statistics level y end */
+       /** DVS statistics level y end */
 };
 
-/** DVS statistics generated by accelerator Feature Extraction
+/* DVS statistics generated by accelerator Feature Extraction
  *  Region Of Interest (FE-ROI) configuration
  */
 struct dvs_stat_public_dvs_level_fe_roi_cfg {
        unsigned char x_start;
-       /**< DVS statistics fe-roi level x start */
+       /** DVS statistics fe-roi level x start */
        unsigned char y_start;
-       /**< DVS statistics fe-roi level y start */
+       /** DVS statistics fe-roi level y start */
        unsigned char x_end;
-       /**< DVS statistics fe-roi level x end */
+       /** DVS statistics fe-roi level x end */
        unsigned char y_end;
-       /**< DVS statistics fe-roi level y end */
+       /** DVS statistics fe-roi level y end */
 };
 
-/** DVS statistics generated by accelerator public configuration
+/* DVS statistics generated by accelerator public configuration
  */
 struct dvs_stat_public_dvs_grd_cfg {
        struct dvs_stat_public_dvs_level_grid_cfg    grd_cfg;
-       /**< DVS statistics level grid configuration */
+       /** DVS statistics level grid configuration */
        struct dvs_stat_public_dvs_level_grid_start  grd_start;
-       /**< DVS statistics level grid start configuration */
+       /** DVS statistics level grid start configuration */
        struct dvs_stat_public_dvs_level_grid_end    grd_end;
-       /**< DVS statistics level grid end configuration */
+       /** DVS statistics level grid end configuration */
 };
 
-/** DVS statistics grid generated by accelerator
+/* DVS statistics grid generated by accelerator
  */
 struct ia_css_dvs_stat_grid_info {
        struct dvs_stat_public_dvs_global_cfg       dvs_gbl_cfg;
-       /**< DVS statistics global configuration (kappa, match, binning) */
+       /** DVS statistics global configuration (kappa, match, binning) */
        struct dvs_stat_public_dvs_grd_cfg       grd_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS];
-       /**< DVS statistics grid configuration (blocks and grids) */
+       /** DVS statistics grid configuration (blocks and grids) */
        struct dvs_stat_public_dvs_level_fe_roi_cfg fe_roi_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS];
-       /**< DVS statistics FE ROI (region of interest) configuration */
+       /** DVS statistics FE ROI (region of interest) configuration */
 };
 
-/** DVS statistics generated by accelerator default grid info
+/* DVS statistics generated by accelerator default grid info
  */
 #define DEFAULT_DVS_GRID_INFO { \
 { \
@@ -219,14 +219,14 @@ struct ia_css_dvs_stat_grid_info {
 }
 
 
-/** Union that holds all types of DVS statistics grid info in
+/* Union that holds all types of DVS statistics grid info in
  *  CSS format
  * */
 union ia_css_dvs_grid_u {
        struct ia_css_dvs_stat_grid_info dvs_stat_grid_info;
-       /**< DVS statistics produced by accelerator grid info */
+       /** DVS statistics produced by accelerator grid info */
        struct ia_css_dvs_grid_info dvs_grid_info;
-       /**< DVS (DVS1/DVS2) grid info */
+       /** DVS (DVS1/DVS2) grid info */
 };
 
 #endif /* __IA_CSS_SDIS_COMMON_TYPES_H */
index d408b58a027d957c06624e595d6ec4bde543d848..d2ee57008fb6ad87c07eea76112b8608f909706a 100644 (file)
 #ifndef __IA_CSS_SDIS_TYPES_H
 #define __IA_CSS_SDIS_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for DVS statistics parameters.
 */
 
-/** Number of DVS coefficient types */
+/* Number of DVS coefficient types */
 #define IA_CSS_DVS_NUM_COEF_TYPES      6
 
 #ifndef PIPE_GENERATION
 #include "isp/kernels/sdis/common/ia_css_sdis_common_types.h"
 #endif
 
-/** DVS 1.0 Coefficients.
+/* DVS 1.0 Coefficients.
  *  This structure describes the coefficients that are needed for the dvs statistics.
  */
 
 struct ia_css_dvs_coefficients {
-       struct ia_css_dvs_grid_info grid;/**< grid info contains the dimensions of the dvs grid */
-       int16_t *hor_coefs;     /**< the pointer to int16_t[grid.num_hor_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
+       struct ia_css_dvs_grid_info grid;/** grid info contains the dimensions of the dvs grid */
+       int16_t *hor_coefs;     /** the pointer to int16_t[grid.num_hor_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
                                     containing the horizontal coefficients */
-       int16_t *ver_coefs;     /**< the pointer to int16_t[grid.num_ver_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
+       int16_t *ver_coefs;     /** the pointer to int16_t[grid.num_ver_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
                                     containing the vertical coefficients */
 };
 
-/** DVS 1.0 Statistics.
+/* DVS 1.0 Statistics.
  *  This structure describes the statistics that are generated using the provided coefficients.
  */
 
 struct ia_css_dvs_statistics {
-       struct ia_css_dvs_grid_info grid;/**< grid info contains the dimensions of the dvs grid */
-       int32_t *hor_proj;      /**< the pointer to int16_t[grid.height * IA_CSS_DVS_NUM_COEF_TYPES]
+       struct ia_css_dvs_grid_info grid;/** grid info contains the dimensions of the dvs grid */
+       int32_t *hor_proj;      /** the pointer to int16_t[grid.height * IA_CSS_DVS_NUM_COEF_TYPES]
                                     containing the horizontal projections */
-       int32_t *ver_proj;      /**< the pointer to int16_t[grid.width * IA_CSS_DVS_NUM_COEF_TYPES]
+       int32_t *ver_proj;      /** the pointer to int16_t[grid.width * IA_CSS_DVS_NUM_COEF_TYPES]
                                     containing the vertical projections */
 };
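
A quick size summary for the buffers above (illustrative sketch, not part of the patch): the comments give the element counts per buffer, using IA_CSS_DVS_NUM_COEF_TYPES (6) defined earlier in this header. Note the hor_proj/ver_proj comments still say int16_t although the fields are declared int32_t *; the sketch sizes the projections from the declared field types and the counts in the comments. The helper names are hypothetical.

#include <stdint.h>
#include <stdlib.h>

#define IA_CSS_DVS_NUM_COEF_TYPES 6     /* as defined in the header above */

/* Hypothetical DVS 1.0 buffer sizing, from the comments above:
 *   hor_coefs: num_hor_coefs * IA_CSS_DVS_NUM_COEF_TYPES int16_t entries
 *   ver_coefs: num_ver_coefs * IA_CSS_DVS_NUM_COEF_TYPES int16_t entries
 *   hor_proj : grid.height   * IA_CSS_DVS_NUM_COEF_TYPES int32_t entries
 *   ver_proj : grid.width    * IA_CSS_DVS_NUM_COEF_TYPES int32_t entries */
static int16_t *dvs_alloc_coefs(uint32_t num_coefs)
{
        return calloc((size_t)num_coefs * IA_CSS_DVS_NUM_COEF_TYPES,
                      sizeof(int16_t));
}

static int32_t *dvs_alloc_proj(uint32_t grid_dim)
{
        return calloc((size_t)grid_dim * IA_CSS_DVS_NUM_COEF_TYPES,
                      sizeof(int32_t));
}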
 
index 7db7dd10fe0081bc3ce4325f55f3a2e72d40e383..2a0bc403174667b1f3e55ba72620cdad47258639 100644 (file)
 #ifndef __IA_CSS_SDIS2_TYPES_H
 #define __IA_CSS_SDIS2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for DVS statistics parameters.
 */
 
-/** Number of DVS coefficient types */
+/* Number of DVS coefficient types */
 #define IA_CSS_DVS2_NUM_COEF_TYPES     4
 
 #ifndef PIPE_GENERATION
 #include "isp/kernels/sdis/common/ia_css_sdis_common_types.h"
 #endif
 
-/** DVS 2.0 Coefficient types. This structure contains 4 pointers to
+/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
  *  arrays that contain the coefficients for each type.
  */
 struct ia_css_dvs2_coef_types {
-       int16_t *odd_real; /**< real part of the odd coefficients*/
-       int16_t *odd_imag; /**< imaginary part of the odd coefficients*/
-       int16_t *even_real;/**< real part of the even coefficients*/
-       int16_t *even_imag;/**< imaginary part of the even coefficients*/
+       int16_t *odd_real; /** real part of the odd coefficients*/
+       int16_t *odd_imag; /** imaginary part of the odd coefficients*/
+       int16_t *even_real;/** real part of the even coefficients*/
+       int16_t *even_imag;/** imaginary part of the even coefficients*/
 };
 
-/** DVS 2.0 Coefficients. This structure describes the coefficients that are needed for the dvs statistics.
+/* DVS 2.0 Coefficients. This structure describes the coefficients that are needed for the dvs statistics.
  *  e.g. hor_coefs.odd_real is the pointer to int16_t[grid.num_hor_coefs] containing the horizontal odd real 
  *  coefficients.
  */
 struct ia_css_dvs2_coefficients {
-       struct ia_css_dvs_grid_info grid;        /**< grid info contains the dimensions of the dvs grid */
-       struct ia_css_dvs2_coef_types hor_coefs; /**< struct with pointers that contain the horizontal coefficients */
-       struct ia_css_dvs2_coef_types ver_coefs; /**< struct with pointers that contain the vertical coefficients */
+       struct ia_css_dvs_grid_info grid;        /** grid info contains the dimensions of the dvs grid */
+       struct ia_css_dvs2_coef_types hor_coefs; /** struct with pointers that contain the horizontal coefficients */
+       struct ia_css_dvs2_coef_types ver_coefs; /** struct with pointers that contain the vertical coefficients */
 };
 
-/** DVS 2.0 Statistic types. This structure contains 4 pointers to
+/* DVS 2.0 Statistic types. This structure contains 4 pointers to
  *  arrays that contain the statistics for each type.
  */
 struct ia_css_dvs2_stat_types {
-       int32_t *odd_real; /**< real part of the odd statistics*/
-       int32_t *odd_imag; /**< imaginary part of the odd statistics*/
-       int32_t *even_real;/**< real part of the even statistics*/
-       int32_t *even_imag;/**< imaginary part of the even statistics*/
+       int32_t *odd_real; /** real part of the odd statistics*/
+       int32_t *odd_imag; /** imaginary part of the odd statistics*/
+       int32_t *even_real;/** real part of the even statistics*/
+       int32_t *even_imag;/** imaginary part of the even statistics*/
 };
 
-/** DVS 2.0 Statistics. This structure describes the statistics that are generated using the provided coefficients.
+/* DVS 2.0 Statistics. This structure describes the statistics that are generated using the provided coefficients.
  *  e.g. hor_prod.odd_real is the pointer to int16_t[grid.aligned_height][grid.aligned_width] containing 
  *  the horizontal odd real statistics. Valid statistics data area is int16_t[0..grid.height-1][0..grid.width-1]
  */
 struct ia_css_dvs2_statistics {
-       struct ia_css_dvs_grid_info grid;       /**< grid info contains the dimensions of the dvs grid */
-       struct ia_css_dvs2_stat_types hor_prod; /**< struct with pointers that contain the horizontal statistics */
-       struct ia_css_dvs2_stat_types ver_prod; /**< struct with pointers that contain the vertical statistics */
+       struct ia_css_dvs_grid_info grid;       /** grid info contains the dimensions of the dvs grid */
+       struct ia_css_dvs2_stat_types hor_prod; /** struct with pointers that contain the horizontal statistics */
+       struct ia_css_dvs2_stat_types ver_prod; /** struct with pointers that contain the vertical statistics */
 };
 
 #endif /* __IA_CSS_SDIS2_TYPES_H */
index cc47a50e5ad55b620ef67b415356fe13c373db06..91ea8dd4651d6f9b25f4dd81104987450e42e33e 100644 (file)
 #ifndef __IA_CSS_TDF_TYPES_H
 #define __IA_CSS_TDF_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Transform Domain Filter parameters.
 */
 
 #include "type_support.h"
 
-/** Transform Domain Filter configuration
+/* Transform Domain Filter configuration
  *
  * \brief TDF public parameters.
  * \details Struct with all parameters for the TDF kernel that can be set
  * ISP2.6.1: TDF is used.
  */
 struct ia_css_tdf_config {
-       int32_t thres_flat_table[64];   /**< Final optimized strength table of NR for flat region. */
-       int32_t thres_detail_table[64]; /**< Final optimized strength table of NR for detail region. */
-       int32_t epsilon_0;              /**< Coefficient to control variance for dark area (for flat region). */
-       int32_t epsilon_1;              /**< Coefficient to control variance for bright area (for flat region). */
-       int32_t eps_scale_text;         /**< Epsilon scaling coefficient for texture region. */
-       int32_t eps_scale_edge;         /**< Epsilon scaling coefficient for edge region. */
-       int32_t sepa_flat;              /**< Threshold to judge flat (edge < m_Flat_thre). */
-       int32_t sepa_edge;              /**< Threshold to judge edge (edge > m_Edge_thre). */
-       int32_t blend_flat;             /**< Blending ratio at flat region. */
-       int32_t blend_text;             /**< Blending ratio at texture region. */
-       int32_t blend_edge;             /**< Blending ratio at edge region. */
-       int32_t shading_gain;           /**< Gain of Shading control. */
-       int32_t shading_base_gain;      /**< Base Gain of Shading control. */
-       int32_t local_y_gain;           /**< Gain of local luminance control. */
-       int32_t local_y_base_gain;      /**< Base gain of local luminance control. */
-       int32_t rad_x_origin;           /**< Initial x coord. for radius computation. */
-       int32_t rad_y_origin;           /**< Initial y coord. for radius computation. */
+       int32_t thres_flat_table[64];   /** Final optimized strength table of NR for flat region. */
+       int32_t thres_detail_table[64]; /** Final optimized strength table of NR for detail region. */
+       int32_t epsilon_0;              /** Coefficient to control variance for dark area (for flat region). */
+       int32_t epsilon_1;              /** Coefficient to control variance for bright area (for flat region). */
+       int32_t eps_scale_text;         /** Epsilon scaling coefficient for texture region. */
+       int32_t eps_scale_edge;         /** Epsilon scaling coefficient for edge region. */
+       int32_t sepa_flat;              /** Threshold to judge flat (edge < m_Flat_thre). */
+       int32_t sepa_edge;              /** Threshold to judge edge (edge > m_Edge_thre). */
+       int32_t blend_flat;             /** Blending ratio at flat region. */
+       int32_t blend_text;             /** Blending ratio at texture region. */
+       int32_t blend_edge;             /** Blending ratio at edge region. */
+       int32_t shading_gain;           /** Gain of Shading control. */
+       int32_t shading_base_gain;      /** Base Gain of Shading control. */
+       int32_t local_y_gain;           /** Gain of local luminance control. */
+       int32_t local_y_base_gain;      /** Base gain of local luminance control. */
+       int32_t rad_x_origin;           /** Initial x coord. for radius computation. */
+       int32_t rad_y_origin;           /** Initial y coord. for radius computation. */
 };
 
 #endif /* __IA_CSS_TDF_TYPES_H */
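
To illustrate the role of the sepa_* and blend_* fields above (sketch only, outside the diff): an edge metric below sepa_flat is treated as flat, above sepa_edge as edge, and anything in between as texture, each region using its own blending ratio. The classification helper is hypothetical; the ISP computes the edge metric itself.

#include <stdint.h>

/* Hypothetical per-region blend selection mirroring the comments above:
 * flat   : edge_metric <  sepa_flat
 * edge   : edge_metric >  sepa_edge
 * texture: everything in between */
static int32_t tdf_select_blend(int32_t edge_metric,
                                int32_t sepa_flat, int32_t sepa_edge,
                                int32_t blend_flat, int32_t blend_text,
                                int32_t blend_edge)
{
        if (edge_metric < sepa_flat)
                return blend_flat;
        if (edge_metric > sepa_edge)
                return blend_edge;
        return blend_text;
}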
index 135563f52174512e0aed99050731aff54496edd5..223423f8c40b2dd247401b3c129519449c614d84 100644 (file)
@@ -16,7 +16,7 @@ more details.
 #ifndef _IA_CSS_TNR3_TYPES_H
 #define _IA_CSS_TNR3_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Temporal Noise Reduction v3 (TNR3) kernel
 */
 
@@ -27,7 +27,7 @@ more details.
  */
 #define TNR3_NUM_SEGMENTS    3
 
-/** Temporal Noise Reduction v3 (TNR3) configuration.
+/* Temporal Noise Reduction v3 (TNR3) configuration.
  * The parameter to this kernel is fourfold
  * 1. Three piecewise linear graphs (one for each plane) with three segments
  * each. Each line graph has Luma values on the x axis and sigma values for
@@ -44,17 +44,17 @@ more details.
  * 4. Selection of the reference frame buffer to be used for noise reduction.
  */
 struct ia_css_tnr3_kernel_config {
-       unsigned int maxfb_y;                        /**< Maximum Feedback Gain for Y */
-       unsigned int maxfb_u;                        /**< Maximum Feedback Gain for U */
-       unsigned int maxfb_v;                        /**< Maximum Feedback Gain for V */
-       unsigned int round_adj_y;                    /**< Rounding Adjust for Y */
-       unsigned int round_adj_u;                    /**< Rounding Adjust for U */
-       unsigned int round_adj_v;                    /**< Rounding Adjust for V */
-       unsigned int knee_y[TNR3_NUM_SEGMENTS - 1];  /**< Knee points */
-       unsigned int sigma_y[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for Y at points Y0, Y1, Y2, Y3 */
-       unsigned int sigma_u[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for U at points U0, U1, U2, U3 */
-       unsigned int sigma_v[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for V at points V0, V1, V2, V3 */
-       unsigned int ref_buf_select;                 /**< Selection of the reference buffer */
+       unsigned int maxfb_y;                        /** Maximum Feedback Gain for Y */
+       unsigned int maxfb_u;                        /** Maximum Feedback Gain for U */
+       unsigned int maxfb_v;                        /** Maximum Feedback Gain for V */
+       unsigned int round_adj_y;                    /** Rounding Adjust for Y */
+       unsigned int round_adj_u;                    /** Rounding Adjust for U */
+       unsigned int round_adj_v;                    /** Rounding Adjust for V */
+       unsigned int knee_y[TNR3_NUM_SEGMENTS - 1];  /** Knee points */
+       unsigned int sigma_y[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for Y at points Y0, Y1, Y2, Y3 */
+       unsigned int sigma_u[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for U at points U0, U1, U2, U3 */
+       unsigned int sigma_v[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for V at points V0, V1, V2, V3 */
+       unsigned int ref_buf_select;                 /** Selection of the reference buffer */
 };
 
 #endif
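
The piecewise-linear description above can be made concrete with a small sketch (not part of the patch). It assumes the four sigma values correspond to luma breakpoints Y0 = 0, Y1 = knee_y[0], Y2 = knee_y[1] and Y3 = the maximum luma value; that mapping is an assumption, as is the helper itself.

/* Hypothetical evaluation of one TNR3 sigma curve: linear interpolation
 * between (y[i], sigma[i]) breakpoints.
 * Assumed breakpoints: {0, knee[0], knee[1], y_max} paired with sigma[0..3]. */
static unsigned int tnr3_sigma_at(unsigned int luma, unsigned int y_max,
                                  const unsigned int knee[2],
                                  const unsigned int sigma[4])
{
        unsigned int x[4] = { 0, knee[0], knee[1], y_max };
        int i;

        if (luma >= y_max)
                return sigma[3];
        for (i = 0; i < 3; i++) {
                if (luma <= x[i + 1]) {
                        long dy = (long)sigma[i + 1] - (long)sigma[i];
                        long dx = (long)x[i + 1] - (long)x[i];
                        long off = (long)luma - (long)x[i];

                        /* linear interpolation within segment i */
                        return (unsigned int)((long)sigma[i] +
                                              (dx ? dy * off / dx : 0));
                }
        }
        return sigma[3];
}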
index 4fd35e6ccd704ceb4a6aac9467d30b83cd70c9b2..9bbc9ab2e6c0d1a5ff5b176d07c5a6945bb580df 100644 (file)
 #ifndef __IA_CSS_TNR_TYPES_H
 #define __IA_CSS_TNR_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Temporal Noise Reduction (TNR) parameters.
 */
 
-/** Temporal Noise Reduction (TNR) configuration.
+/* Temporal Noise Reduction (TNR) configuration.
  *
  *  When difference between current frame and previous frame is less than or
  *  equal to threshold, TNR works and current frame is mixed
 
 
 struct ia_css_tnr_config {
-       ia_css_u0_16 gain; /**< Interpolation ratio of current frame
+       ia_css_u0_16 gain; /** Interpolation ratio of current frame
                                and previous frame.
                                gain=0.0 -> previous frame is outputted.
                                gain=1.0 -> current frame is outputted.
                                u0.16, [0,65535],
                        default 32768(0.5), ineffective 65535(almost 1.0) */
-       ia_css_u0_16 threshold_y; /**< Threshold to enable interpolation of Y.
+       ia_css_u0_16 threshold_y; /** Threshold to enable interpolation of Y.
                                If difference between current frame and
                                previous frame is greater than threshold_y,
                                TNR for Y is disabled.
                                u0.16, [0,65535], default/ineffective 0 */
-       ia_css_u0_16 threshold_uv; /**< Threshold to enable interpolation of
+       ia_css_u0_16 threshold_uv; /** Threshold to enable interpolation of
                                U/V.
                                If difference between current frame and
                                previous frame is greater than threshold_uv,
index df5d37c8c946401c01347711add4ef153034d41b..9df4e12f6c2c7f1379caed574e4a55d7becb1164 100644 (file)
@@ -23,9 +23,9 @@
 
 #define VFDEC_BITS_PER_PIXEL   GAMMA_OUTPUT_BITS
 
-/** Viewfinder decimation */
+/* Viewfinder decimation */
 struct sh_css_isp_vf_isp_config {
-       uint32_t vf_downscale_bits; /**< Log VF downscale value */
+       uint32_t vf_downscale_bits; /** Log VF downscale value */
        uint32_t enable;
        struct ia_css_frame_sp_info info;
        struct {
index d8cfdfbc8c0bd1af7f938f88985516d8367ab29a..e3efafa279ffc3c5a62bf9054a909061e2d6935a 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_VF_TYPES_H
 #define __IA_CSS_VF_TYPES_H
 
-/** Viewfinder decimation
+/* Viewfinder decimation
  *
  *  ISP block: vfeven_horizontal_downscale
  */
@@ -24,7 +24,7 @@
 #include <type_support.h>
 
 struct ia_css_vf_configuration {
-       uint32_t vf_downscale_bits; /**< Log VF downscale value */
+       uint32_t vf_downscale_bits; /** Log VF downscale value */
        const struct ia_css_frame_info *info;
 };
 
index 6bcfa274be884c3aa95cf4efb6e59a18580ff12d..bf98734d057e5e0e7280ad8882647e78a4a054a9 100644 (file)
 #ifndef __IA_CSS_WB_TYPES_H
 #define __IA_CSS_WB_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for White Balance parameters.
 */
 
 
-/** White Balance configuration (Gain Adjust).
+/* White Balance configuration (Gain Adjust).
  *
  *  ISP block: WB1
  *  ISP1: WB1 is used.
  *  ISP2: WB1 is used.
  */
 struct ia_css_wb_config {
-       uint32_t integer_bits; /**< Common exponent of gains.
+       uint32_t integer_bits; /** Common exponent of gains.
                                u8.0, [0,3],
                                default 1, ineffective 1 */
-       uint32_t gr;    /**< Significand of Gr gain.
+       uint32_t gr;    /** Significand of Gr gain.
                                u[integer_bits].[16-integer_bits], [0,65535],
                                default/ineffective 32768(u1.15, 1.0) */
-       uint32_t r;     /**< Significand of R gain.
+       uint32_t r;     /** Significand of R gain.
                                u[integer_bits].[16-integer_bits], [0,65535],
                                default/ineffective 32768(u1.15, 1.0) */
-       uint32_t b;     /**< Significand of B gain.
+       uint32_t b;     /** Significand of B gain.
                                u[integer_bits].[16-integer_bits], [0,65535],
                                default/ineffective 32768(u1.15, 1.0) */
-       uint32_t gb;    /**< Significand of Gb gain.
+       uint32_t gb;    /** Significand of Gb gain.
                                u[integer_bits].[16-integer_bits], [0,65535],
                                default/ineffective 32768(u1.15, 1.0) */
 };
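
As a concrete reading of the encoding above (sketch only, outside the diff): each gain is a 16-bit significand with integer_bits integer bits, so its value is significand / 2^(16 - integer_bits), and the default 32768 with integer_bits = 1 is exactly 1.0. The helper applying such a gain to a pixel is hypothetical.

#include <stdint.h>

/* Hypothetical application of one WB gain encoded as
 * u[integer_bits].[16 - integer_bits]:
 * out = in * significand / 2^(16 - integer_bits), saturated to 16 bits. */
static uint16_t wb_apply_gain(uint16_t in, uint32_t significand,
                              uint32_t integer_bits)
{
        uint32_t shift = 16 - integer_bits;     /* integer_bits is in [0,3] */
        uint32_t out = (uint32_t)(((uint64_t)in * significand) >> shift);

        return (uint16_t)(out > 0xffff ? 0xffff : out);
}

/* Example: wb_apply_gain(x, 32768, 1) == x, i.e. gain 1.0. */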
index 3018100f6f767b15d14a1f539b4be65ae6a1fded..abcb531f51ccdf6c1073cf09862e90685bd6640b 100644 (file)
@@ -21,7 +21,7 @@
 #include "ia_css_xnr.host.h"
 
 const struct ia_css_xnr_config default_xnr_config = {
-       /** default threshold 6400 translates to 25 on ISP. */
+       /* default threshold 6400 translates to 25 on ISP. */
        6400
 };
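
A small arithmetic check of the comment above (not part of the patch): the threshold is stored as u0.16 but the ISP consumes it in the 8-bit YUV range documented in this kernel, so 6400 scales down by 2^8 to exactly 25. The shift-based conversion below is an assumption based on those two ranges, and the helper name is hypothetical.

#include <stdint.h>

/* Hypothetical u0.16 -> ISP threshold conversion: with 8-bit YUV the
 * 16-bit threshold is reduced by 16 - 8 = 8 bits, so 6400 >> 8 == 25. */
static uint8_t xnr_threshold_to_isp(uint16_t threshold_u0_16,
                                    unsigned int yuv_bits)
{
        return (uint8_t)(threshold_u0_16 >> (16 - yuv_bits));
}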
 
index 806c9f8f0e2eb68fd638a7bbb842f63ec0d5fd0a..a5caebbe2f8481825f6aed6e743df9ac9af0002f 100644 (file)
@@ -41,7 +41,7 @@ struct sh_css_isp_xnr_vamem_params {
 };
 
 struct sh_css_isp_xnr_params {
-       /** XNR threshold.
+       /* XNR threshold.
         * type:u0.16 but actual valid range is:[0,255]
         * valid range is dependent on SH_CSS_ISP_YUV_BITS (currently 8bits)
         * default: 25 */
index 89e8b0f17e8cf90184c458ac0b08625ff767fd9e..d2b634211a3fadcefb1fd7f4758a6dec51208479 100644 (file)
 #ifndef __IA_CSS_XNR_TYPES_H
 #define __IA_CSS_XNR_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Extra Noise Reduction (XNR) parameters.
 */
 
-/** XNR table.
+/* XNR table.
  *
  *  NOTE: The driver does not need to set this table,
  *        because the default values are set inside the css.
  *
  */
 
-/** Number of elements in the xnr table. */
+/* Number of elements in the xnr table. */
 #define IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2      6
-/** Number of elements in the xnr table. */
+/* Number of elements in the xnr table. */
 #define IA_CSS_VAMEM_1_XNR_TABLE_SIZE           (1U<<IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2)
 
-/** Number of elements in the xnr table. */
+/* Number of elements in the xnr table. */
 #define IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2      6
-/** Number of elements in the xnr table. */
+/* Number of elements in the xnr table. */
 #define IA_CSS_VAMEM_2_XNR_TABLE_SIZE          (1U<<IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2)
 
-/**< IA_CSS_VAMEM_TYPE_1(ISP2300) or
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
      IA_CSS_VAMEM_TYPE_2(ISP2400) */
 union ia_css_xnr_data {
        uint16_t vamem_1[IA_CSS_VAMEM_1_XNR_TABLE_SIZE];
-       /**< Coefficients table on vamem type1. u0.12, [0,4095] */
+       /** Coefficients table on vamem type1. u0.12, [0,4095] */
        uint16_t vamem_2[IA_CSS_VAMEM_2_XNR_TABLE_SIZE];
-       /**< Coefficients table on vamem type2. u0.12, [0,4095] */
+       /** Coefficients table on vamem type2. u0.12, [0,4095] */
 };
 
 struct ia_css_xnr_table {
@@ -61,7 +61,7 @@ struct ia_css_xnr_table {
 };
 
 struct ia_css_xnr_config {
-       /** XNR threshold.
+       /* XNR threshold.
         * type:u0.16 valid range:[0,65535]
         * default: 6400 */
        uint16_t threshold;
index 8f14d10806515c9bec9f10cb9b12da61e25740f4..669200caf72e6b765819086e68f818b5ea4ee1ca 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef __IA_CSS_XNR3_TYPES_H
 #define __IA_CSS_XNR3_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Extra Noise Reduction (XNR) parameters.
 */
 
  * IA_CSS_XNR3_SIGMA_SCALE.
  */
 struct ia_css_xnr3_sigma_params {
-       int y0;     /**< Sigma for Y range similarity in dark area */
-       int y1;     /**< Sigma for Y range similarity in bright area */
-       int u0;     /**< Sigma for U range similarity in dark area */
-       int u1;     /**< Sigma for U range similarity in bright area */
-       int v0;     /**< Sigma for V range similarity in dark area */
-       int v1;     /**< Sigma for V range similarity in bright area */
+       int y0;     /** Sigma for Y range similarity in dark area */
+       int y1;     /** Sigma for Y range similarity in bright area */
+       int u0;     /** Sigma for U range similarity in dark area */
+       int u1;     /** Sigma for U range similarity in bright area */
+       int v0;     /** Sigma for V range similarity in dark area */
+       int v1;     /** Sigma for V range similarity in bright area */
 };
 
 /**
@@ -64,10 +64,10 @@ struct ia_css_xnr3_sigma_params {
  * with IA_CSS_XNR3_CORING_SCALE. The ineffective value is 0.
  */
 struct ia_css_xnr3_coring_params {
-       int u0;     /**< Coring threshold of U channel in dark area */
-       int u1;     /**< Coring threshold of U channel in bright area */
-       int v0;     /**< Coring threshold of V channel in dark area */
-       int v1;     /**< Coring threshold of V channel in bright area */
+       int u0;     /** Coring threshold of U channel in dark area */
+       int u1;     /** Coring threshold of U channel in bright area */
+       int v0;     /** Coring threshold of V channel in dark area */
+       int v1;     /** Coring threshold of V channel in bright area */
 };
 
 /**
@@ -81,7 +81,7 @@ struct ia_css_xnr3_coring_params {
  * value of 0.0 bypasses the entire xnr3 filter.
  */
 struct ia_css_xnr3_blending_params {
-       int strength;   /**< Blending strength */
+       int strength;   /** Blending strength */
 };
 
 /**
@@ -90,9 +90,9 @@ struct ia_css_xnr3_blending_params {
  * from the CSS API.
  */
 struct ia_css_xnr3_config {
-       struct ia_css_xnr3_sigma_params    sigma;    /**< XNR3 sigma parameters */
-       struct ia_css_xnr3_coring_params   coring;   /**< XNR3 coring parameters */
-       struct ia_css_xnr3_blending_params blending; /**< XNR3 blending parameters */
+       struct ia_css_xnr3_sigma_params    sigma;    /** XNR3 sigma parameters */
+       struct ia_css_xnr3_coring_params   coring;   /** XNR3 coring parameters */
+       struct ia_css_xnr3_blending_params blending; /** XNR3 blending parameters */
 };
 
 #endif /* __IA_CSS_XNR3_TYPES_H */
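
Taken together, the three sub-structures above form ia_css_xnr3_config, which is filled in like any other nested aggregate. A sketch of a designated initializer, assuming the header file name matches its include guard; the zero values are placeholders rather than tuned settings (per the comments above, zero coring is ineffective and a zero blending strength bypasses the filter entirely):

#include "ia_css_xnr3_types.h"	/* assumed file name, from __IA_CSS_XNR3_TYPES_H */

static const struct ia_css_xnr3_config example_xnr3_config = {
	.sigma = {
		.y0 = 0, .y1 = 0,	/* Y range sigma, dark / bright area */
		.u0 = 0, .u1 = 0,	/* U range sigma, dark / bright area */
		.v0 = 0, .v1 = 0,	/* V range sigma, dark / bright area */
	},
	.coring = {
		.u0 = 0, .u1 = 0,	/* U coring thresholds (0 = ineffective) */
		.v0 = 0, .v1 = 0,	/* V coring thresholds (0 = ineffective) */
	},
	.blending = {
		.strength = 0,		/* 0 bypasses the xnr3 filter */
	},
};
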
index 3f46655bee57fd9af3d52f3a8f6209d04a349bac..3f8589a5a43a8b3cc6add4b9a1b53de7f703d0ed 100644 (file)
 #ifndef __IA_CSS_YNR_TYPES_H
 #define __IA_CSS_YNR_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Noise Reduction (BNR) and YCC Noise Reduction (YNR,CNR).
 */
 
-/** Configuration used by Bayer Noise Reduction (BNR) and
+/* Configuration used by Bayer Noise Reduction (BNR) and
  *  YCC Noise Reduction (YNR,CNR).
  *
  *  ISP block: BNR1, YNR1, CNR1
  *        BNR1,YNR2,CNR2 are used for Still.
  */
 struct ia_css_nr_config {
-       ia_css_u0_16 bnr_gain;     /**< Strength of noise reduction (BNR).
+       ia_css_u0_16 bnr_gain;     /** Strength of noise reduction (BNR).
                                u0.16, [0,65535],
                                default 14336(0.21875), ineffective 0 */
-       ia_css_u0_16 ynr_gain;     /**< Strength of noise reduction (YNR).
+       ia_css_u0_16 ynr_gain;     /** Strength of noise reduction (YNR).
                                u0.16, [0,65535],
                                default 14336(0.21875), ineffective 0 */
-       ia_css_u0_16 direction;    /**< Sensitivity of edge (BNR).
+       ia_css_u0_16 direction;    /** Sensitivity of edge (BNR).
                                u0.16, [0,65535],
                                default 512(0.0078125), ineffective 0 */
-       ia_css_u0_16 threshold_cb; /**< Coring threshold for Cb (CNR).
+       ia_css_u0_16 threshold_cb; /** Coring threshold for Cb (CNR).
                                This is the same as
                                de_config.c1_coring_threshold.
                                u0.16, [0,65535],
                                default 0(0), ineffective 0 */
-       ia_css_u0_16 threshold_cr; /**< Coring threshold for Cr (CNR).
+       ia_css_u0_16 threshold_cr; /** Coring threshold for Cr (CNR).
                                This is the same as
                                de_config.c2_coring_threshold.
                                u0.16, [0,65535],
                                default 0(0), ineffective 0 */
 };
 
-/** Edge Enhancement (sharpen) configuration.
+/* Edge Enhancement (sharpen) configuration.
  *
  *  ISP block: YEE1
  *  ISP1: YEE1 is used.
@@ -57,24 +57,24 @@ struct ia_css_nr_config {
  *       (YEE2 is used for Still.)
  */
 struct ia_css_ee_config {
-       ia_css_u5_11 gain;        /**< The strength of sharpness.
+       ia_css_u5_11 gain;        /** The strength of sharpness.
                                        u5.11, [0,65535],
                                        default 8192(4.0), ineffective 0 */
-       ia_css_u8_8 threshold;    /**< The threshold that divides noises from
+       ia_css_u8_8 threshold;    /** The threshold that divides noises from
                                        edge.
                                        u8.8, [0,65535],
                                        default 256(1.0), ineffective 65535 */
-       ia_css_u5_11 detail_gain; /**< The strength of sharpness in pell-mell
+       ia_css_u5_11 detail_gain; /** The strength of sharpness in pell-mell
                                        area.
                                        u5.11, [0,65535],
                                        default 2048(1.0), ineffective 0 */
 };
 
-/** YNR and YEE (sharpen) configuration.
+/* YNR and YEE (sharpen) configuration.
  */
 struct ia_css_yee_config {
-       struct ia_css_nr_config nr; /**< The NR configuration. */
-       struct ia_css_ee_config ee; /**< The EE configuration. */
+       struct ia_css_nr_config nr; /** The NR configuration. */
+       struct ia_css_ee_config ee; /** The EE configuration. */
 };
 
 #endif /* __IA_CSS_YNR_TYPES_H */
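
The gain and threshold fields above are documented in unsigned fixed-point notation: uM.N means M integer bits and N fractional bits, so the stored value divided by 2^N gives the real number (the u5.11 default gain of 8192 is 4.0, and the u8.8 threshold default of 256 is 1.0). A small standalone sketch of that conversion, in plain C with no driver headers involved:

#include <stdio.h>
#include <stdint.h>

/* Convert an unsigned fixed-point value with 'frac_bits' fractional bits
 * (u5.11 -> frac_bits = 11, u8.8 -> frac_bits = 8) to a double. */
static double ufix_to_double(uint16_t raw, unsigned int frac_bits)
{
	return (double)raw / (double)(1u << frac_bits);
}

int main(void)
{
	/* Defaults quoted in the ia_css_ee_config comments above. */
	printf("gain        (u5.11) 8192 -> %.4f\n", ufix_to_double(8192, 11)); /* 4.0000 */
	printf("threshold   (u8.8)   256 -> %.4f\n", ufix_to_double(256, 8));   /* 1.0000 */
	printf("detail_gain (u5.11) 2048 -> %.4f\n", ufix_to_double(2048, 11)); /* 1.0000 */
	return 0;
}
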
index e0a0b10ac5fa540c0e569e3f4b57f3ca2047ec54..83161a24207d9b50e69df725781efd6d5b62fe34 100644 (file)
 #ifndef __IA_CSS_YNR2_TYPES_H
 #define __IA_CSS_YNR2_TYPES_H
 
-/** @file
+/* @file
 * CSS-API header file for Y(Luma) Noise Reduction.
 */
 
-/** Y(Luma) Noise Reduction configuration.
+/* Y(Luma) Noise Reduction configuration.
  *
  *  ISP block: YNR2 & YEE2
  * (ISP1: YNR1 and YEE1 are used.)
  *  ISP2: YNR2 and YEE2 are used for Still.
  */
 struct ia_css_ynr_config {
-       uint16_t edge_sense_gain_0;   /**< Sensitivity of edge in dark area.
+       uint16_t edge_sense_gain_0;   /** Sensitivity of edge in dark area.
                                        u13.0, [0,8191],
                                        default 1000, ineffective 0 */
-       uint16_t edge_sense_gain_1;   /**< Sensitivity of edge in bright area.
+       uint16_t edge_sense_gain_1;   /** Sensitivity of edge in bright area.
                                        u13.0, [0,8191],
                                        default 1000, ineffective 0 */
-       uint16_t corner_sense_gain_0; /**< Sensitivity of corner in dark area.
+       uint16_t corner_sense_gain_0; /** Sensitivity of corner in dark area.
                                        u13.0, [0,8191],
                                        default 1000, ineffective 0 */
-       uint16_t corner_sense_gain_1; /**< Sensitivity of corner in bright area.
+       uint16_t corner_sense_gain_1; /** Sensitivity of corner in bright area.
                                        u13.0, [0,8191],
                                        default 1000, ineffective 0 */
 };
 
-/** Fringe Control configuration.
+/* Fringe Control configuration.
  *
  *  ISP block: FC2 (FC2 is used with YNR2/YEE2.)
  * (ISP1: FC2 is not used.)
@@ -49,43 +49,43 @@ struct ia_css_ynr_config {
  *  ISP2: FC2 is used for Still.
  */
 struct ia_css_fc_config {
-       uint8_t  gain_exp;   /**< Common exponent of gains.
+       uint8_t  gain_exp;   /** Common exponent of gains.
                                u8.0, [0,13],
                                default 1, ineffective 0 */
-       uint16_t coring_pos_0; /**< Coring threshold for positive edge in dark area.
+       uint16_t coring_pos_0; /** Coring threshold for positive edge in dark area.
                                u0.13, [0,8191],
                                default 0(0), ineffective 0 */
-       uint16_t coring_pos_1; /**< Coring threshold for positive edge in bright area.
+       uint16_t coring_pos_1; /** Coring threshold for positive edge in bright area.
                                u0.13, [0,8191],
                                default 0(0), ineffective 0 */
-       uint16_t coring_neg_0; /**< Coring threshold for negative edge in dark area.
+       uint16_t coring_neg_0; /** Coring threshold for negative edge in dark area.
                                u0.13, [0,8191],
                                default 0(0), ineffective 0 */
-       uint16_t coring_neg_1; /**< Coring threshold for negative edge in bright area.
+       uint16_t coring_neg_1; /** Coring threshold for negative edge in bright area.
                                u0.13, [0,8191],
                                default 0(0), ineffective 0 */
-       uint16_t gain_pos_0; /**< Gain for positive edge in dark area.
+       uint16_t gain_pos_0; /** Gain for positive edge in dark area.
                                u0.13, [0,8191],
                                default 4096(0.5), ineffective 0 */
-       uint16_t gain_pos_1; /**< Gain for positive edge in bright area.
+       uint16_t gain_pos_1; /** Gain for positive edge in bright area.
                                u0.13, [0,8191],
                                default 4096(0.5), ineffective 0 */
-       uint16_t gain_neg_0; /**< Gain for negative edge in dark area.
+       uint16_t gain_neg_0; /** Gain for negative edge in dark area.
                                u0.13, [0,8191],
                                default 4096(0.5), ineffective 0 */
-       uint16_t gain_neg_1; /**< Gain for negative edge in bright area.
+       uint16_t gain_neg_1; /** Gain for negative edge in bright area.
                                u0.13, [0,8191],
                                default 4096(0.5), ineffective 0 */
-       uint16_t crop_pos_0; /**< Limit for positive edge in dark area.
+       uint16_t crop_pos_0; /** Limit for positive edge in dark area.
                                u0.13, [0,8191],
                                default/ineffective 8191(almost 1.0) */
-       uint16_t crop_pos_1; /**< Limit for positive edge in bright area.
+       uint16_t crop_pos_1; /** Limit for positive edge in bright area.
                                u0.13, [0,8191],
                                default/ineffective 8191(almost 1.0) */
-       int16_t  crop_neg_0; /**< Limit for negative edge in dark area.
+       int16_t  crop_neg_0; /** Limit for negative edge in dark area.
                                s0.13, [-8192,0],
                                default/ineffective -8192(-1.0) */
-       int16_t  crop_neg_1; /**< Limit for negative edge in bright area.
+       int16_t  crop_neg_1; /** Limit for negative edge in bright area.
                                s0.13, [-8192,0],
                                default/ineffective -8192(-1.0) */
 };
index 63a8703c9c44bc0a4e1f8b8527bd0ac7bc8734e6..c9ff0cb2493aa3fe13faa148565082c5891b8b78 100644 (file)
@@ -24,7 +24,7 @@
  */
 #define NUM_YUV_LS 2
 
-/** YUV load/store */
+/* YUV load/store */
 struct sh_css_isp_yuv_ls_isp_config {
        unsigned base_address[NUM_YUV_LS];
        unsigned width[NUM_YUV_LS];
index e814f1bf19f792d3c1d848f59145d15964a4453d..6512a1ceb9d34d1cc3f5f6ddcee6ef093f9b1436 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
index c65194619a34a832febcbb395a82b82c11980bf2..5a58abe2b2333fbed5a0573a34d396e38e41f3e1 100644 (file)
@@ -269,7 +269,7 @@ enum ia_css_err
 ia_css_binary_find(struct ia_css_binary_descr *descr,
                   struct ia_css_binary *binary);
 
-/** @brief Get the shading information of the specified shading correction type.
+/* @brief Get the shading information of the specified shading correction type.
  *
  * @param[in] binary: The isp binary which has the shading correction.
  * @param[in] type: The shading correction type.
index e028e460ae4c3c4d901967a4f2da7dd9d54b7676..295e07049393225696836e36b5af270d5596e2fb 100644 (file)
@@ -972,7 +972,7 @@ ia_css_binary_uninit(void)
        return IA_CSS_SUCCESS;
 }
 
-/** @brief Compute decimation factor for 3A statistics and shading correction.
+/* @brief Compute decimation factor for 3A statistics and shading correction.
  *
  * @param[in]  width   Frame width in pixels.
  * @param[in]  height  Frame height in pixels.
index 42d9a85088585092ab64b0b09aed0340f05acc7c..e50d9f2e2609e408dca2c90519c6806243c6aa8f 100644 (file)
@@ -152,7 +152,7 @@ void ia_css_queue_map(
                unmap_buffer_type_to_queue_id(thread_id, buf_type);
 }
 
-/**
+/*
  * @brief Query the internal queue ID.
  */
 bool ia_css_query_internal_queue_id(
index 3c8dcfd4bbc6bc70e300e491c15b8bb86cd7c095..4b28b2a0863a70528c321a1b11874e203d84786c 100644 (file)
@@ -54,21 +54,21 @@ extern unsigned int ia_css_debug_trace_level;
  *  Values can be combined to dump a combination of sets.
  */
 enum ia_css_debug_enable_param_dump {
-       IA_CSS_DEBUG_DUMP_FPN = 1 << 0, /**< FPN table */
-       IA_CSS_DEBUG_DUMP_OB = 1 << 1,  /**< OB table */
-       IA_CSS_DEBUG_DUMP_SC = 1 << 2,  /**< Shading table */
-       IA_CSS_DEBUG_DUMP_WB = 1 << 3,  /**< White balance */
-       IA_CSS_DEBUG_DUMP_DP = 1 << 4,  /**< Defect Pixel */
-       IA_CSS_DEBUG_DUMP_BNR = 1 << 5,  /**< Bayer Noise Reductions */
-       IA_CSS_DEBUG_DUMP_S3A = 1 << 6,  /**< 3A Statistics */
-       IA_CSS_DEBUG_DUMP_DE = 1 << 7,  /**< De Mosaicing */
-       IA_CSS_DEBUG_DUMP_YNR = 1 << 8,  /**< Luma Noise Reduction */
-       IA_CSS_DEBUG_DUMP_CSC = 1 << 9,  /**< Color Space Conversion */
-       IA_CSS_DEBUG_DUMP_GC = 1 << 10,  /**< Gamma Correction */
-       IA_CSS_DEBUG_DUMP_TNR = 1 << 11,  /**< Temporal Noise Reduction */
-       IA_CSS_DEBUG_DUMP_ANR = 1 << 12,  /**< Advanced Noise Reduction */
-       IA_CSS_DEBUG_DUMP_CE = 1 << 13,  /**< Chroma Enhancement */
-       IA_CSS_DEBUG_DUMP_ALL = 1 << 14  /**< Dump all device parameters */
+       IA_CSS_DEBUG_DUMP_FPN = 1 << 0, /** FPN table */
+       IA_CSS_DEBUG_DUMP_OB = 1 << 1,  /** OB table */
+       IA_CSS_DEBUG_DUMP_SC = 1 << 2,  /** Shading table */
+       IA_CSS_DEBUG_DUMP_WB = 1 << 3,  /** White balance */
+       IA_CSS_DEBUG_DUMP_DP = 1 << 4,  /** Defect Pixel */
+       IA_CSS_DEBUG_DUMP_BNR = 1 << 5,  /** Bayer Noise Reductions */
+       IA_CSS_DEBUG_DUMP_S3A = 1 << 6,  /** 3A Statistics */
+       IA_CSS_DEBUG_DUMP_DE = 1 << 7,  /** De Mosaicing */
+       IA_CSS_DEBUG_DUMP_YNR = 1 << 8,  /** Luma Noise Reduction */
+       IA_CSS_DEBUG_DUMP_CSC = 1 << 9,  /** Color Space Conversion */
+       IA_CSS_DEBUG_DUMP_GC = 1 << 10,  /** Gamma Correction */
+       IA_CSS_DEBUG_DUMP_TNR = 1 << 11,  /** Temporal Noise Reduction */
+       IA_CSS_DEBUG_DUMP_ANR = 1 << 12,  /** Advanced Noise Reduction */
+       IA_CSS_DEBUG_DUMP_CE = 1 << 13,  /** Chroma Enhancement */
+       IA_CSS_DEBUG_DUMP_ALL = 1 << 14  /** Dump all device parameters */
 };
 
 #define IA_CSS_ERROR(fmt, ...) \
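
The dump selectors above are independent bit flags, so callers OR several of them into one mask and a dump routine tests each bit on its own. A minimal sketch; the short enum copies and the dump_mask variable are illustrative only, not driver code:

#include <stdint.h>

/* Illustrative copies of three selectors from the enum above. */
enum {
	DUMP_FPN = 1 << 0,	/* FPN table */
	DUMP_OB  = 1 << 1,	/* OB table */
	DUMP_SC  = 1 << 2,	/* Shading table */
};

int main(void)
{
	uint32_t dump_mask = DUMP_FPN | DUMP_OB | DUMP_SC;	/* combine sets */

	if (dump_mask & DUMP_SC) {
		/* ...dump only the shading table here... */
	}
	return 0;
}
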
index 0fa7cb2423d863de2bcee3d86f7c3baecdfc84c3..dd1127a21494d866f1311d9c96c55a39fbcbdfa0 100644 (file)
@@ -1617,7 +1617,7 @@ void ia_css_debug_print_sp_debug_state(const struct sh_css_sp_debug_state
 
 #elif SP_DEBUG == SP_DEBUG_TRACE
 
-/**
+/*
  * This is just an example how TRACE_FILE_ID (see ia_css_debug.sp.h) will
  * me mapped on the file name string.
  *
@@ -2267,7 +2267,7 @@ void ia_css_debug_dump_debug_info(const char *context)
        return;
 }
 
-/** this function is for debug use, it can make SP go to sleep
+/* this function is for debug use, it can make SP go to sleep
   state after each frame, then user can dump the stable SP dmem.
   this function can be called after ia_css_start_sp()
   and before sh_css_init_buffer_queues()
@@ -2526,7 +2526,7 @@ void ia_css_debug_dump_ddr_debug_queue(void)
 }
 */
 
-/**
+/*
  * @brief Initialize the debug mode.
  * Refer to "ia_css_debug.h" for more details.
  */
@@ -2537,7 +2537,7 @@ bool ia_css_debug_mode_init(void)
        return rc;
 }
 
-/**
+/*
  * @brief Disable the DMA channel.
  * Refer to "ia_css_debug.h" for more details.
  */
@@ -2552,7 +2552,7 @@ ia_css_debug_mode_disable_dma_channel(int dma_id,
        return rc;
 }
 
-/**
+/*
  * @brief Enable the DMA channel.
  * Refer to "ia_css_debug.h" for more details.
  */
index 2698c3e1adb0787fad774fbfa544bfce208c6811..239c06730bf4f27a2145ca99c9ebe59745835f82 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
@@ -52,7 +52,7 @@ more details.
 
 #include "ia_css_queue.h"      /* host_sp_enqueue_XXX */
 #include "ia_css_event.h"      /* ia_css_event_encode */
-/**
+/*
  * @brief Encode the information into the software-event.
  * Refer to "sw_event_public.h" for details.
  */
index 56d6858890ec1dc2d0289947a651c517b39002fa..913a4bf7a34fc24522f762b2e52530b569e3a917 100644 (file)
@@ -37,7 +37,7 @@ int ia_css_eventq_recv(
        return error;
 }
 
-/**
+/*
  * @brief The Host sends the event to the SP.
  * Refer to "sh_css_sp.h" for details.
  */
index c7e07b79f4e55be5d91ac0f974ae61685fa9d872..89ad8080ceb126888dbebe2bff3477de7cf91f73 100644 (file)
@@ -41,7 +41,7 @@ more details.
 /*********************************************************************
 ****   Frame INFO APIs
 **********************************************************************/
-/** @brief Sets the given width and alignment to the frame info
+/* @brief Sets the given width and alignment to the frame info
  *
  * @param
  * @param[in]  info        The info to which parameters would set
@@ -53,7 +53,7 @@ void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
        unsigned int width,
        unsigned int min_padded_width);
 
-/** @brief Sets the given format to the frame info
+/* @brief Sets the given format to the frame info
  *
  * @param
  * @param[in]  info        The info to which parameters would set
@@ -63,7 +63,7 @@ void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
 void ia_css_frame_info_set_format(struct ia_css_frame_info *info,
        enum ia_css_frame_format format);
 
-/** @brief Sets the frame info with the given parameters
+/* @brief Sets the frame info with the given parameters
  *
  * @param
  * @param[in]  info        The info to which parameters would set
@@ -79,7 +79,7 @@ void ia_css_frame_info_init(struct ia_css_frame_info *info,
        enum ia_css_frame_format format,
        unsigned int aligned);
 
-/** @brief Checks whether 2 frame infos has the same resolution
+/* @brief Checks whether 2 frame infos have the same resolution
  *
  * @param
  * @param[in]  frame_a         The first frame to be compared
@@ -90,7 +90,7 @@ bool ia_css_frame_info_is_same_resolution(
        const struct ia_css_frame_info *info_a,
        const struct ia_css_frame_info *info_b);
 
-/** @brief Check the frame info is valid
+/* @brief Check the frame info is valid
  *
  * @param
  * @param[in]  info       The frame attributes to be initialized
@@ -102,7 +102,7 @@ enum ia_css_err ia_css_frame_check_info(const struct ia_css_frame_info *info);
 ****   Frame APIs
 **********************************************************************/
 
-/** @brief Initialize the plane depending on the frame type
+/* @brief Initialize the plane depending on the frame type
  *
  * @param
  * @param[in]  frame           The frame attributes to be initialized
@@ -110,7 +110,7 @@ enum ia_css_err ia_css_frame_check_info(const struct ia_css_frame_info *info);
  */
 enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame);
 
-/** @brief Free an array of frames
+/* @brief Free an array of frames
  *
  * @param
  * @param[in]  num_frames      The number of frames to be freed in the array
@@ -120,7 +120,7 @@ enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame);
 void ia_css_frame_free_multiple(unsigned int num_frames,
        struct ia_css_frame **frames_array);
 
-/** @brief Allocate a CSS frame structure of given size in bytes..
+/* @brief Allocate a CSS frame structure of given size in bytes.
  *
  * @param      frame   The allocated frame.
  * @param[in]  size_bytes      The frame size in bytes.
@@ -135,7 +135,7 @@ enum ia_css_err ia_css_frame_allocate_with_buffer_size(
        const unsigned int size_bytes,
        const bool contiguous);
 
-/** @brief Check whether 2 frames are same type
+/* @brief Check whether 2 frames are same type
  *
  * @param
  * @param[in]  frame_a         The first frame to be compared
@@ -146,7 +146,7 @@ bool ia_css_frame_is_same_type(
        const struct ia_css_frame *frame_a,
        const struct ia_css_frame *frame_b);
 
-/** @brief Configure a dma port from frame info
+/* @brief Configure a dma port from frame info
  *
  * @param
  * @param[in]  config         The DAM port configuration
@@ -158,7 +158,7 @@ void ia_css_dma_configure_from_info(
        const struct ia_css_frame_info *info);
 
 #ifdef ISP2401
-/** @brief Finds the cropping resolution
+/* @brief Finds the cropping resolution
  * This function finds the maximum cropping resolution in an input image keeping
  * the aspect ratio for the given output resolution.Calculates the coordinates
  * for cropping from the center and returns the starting pixel location of the
index f1a943cf04c01a17dd29cbda7158d0622ce00373..5faa89ad8a230e31c2ca8853cc945dd83cc1ebf5 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
index 11d3995ba0dba8e8090f172bbc28e79413ff8779..adefa57820a4ac46a59467ed6e2c06792161af98 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
index d9a5f3e9283acbe29f58416b2f9b39b99fe39d00..8dc74927e9a2fb997e031189610f7088e94cf7bd 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
index 8e651b80345a0ea47862c78fdfa3ef1359d75bf3..2283dd1c1c9bdc6eadaf13bbfdad5721e5cae275 100644 (file)
@@ -53,7 +53,7 @@ enum ia_css_param_class {
 };
 #define IA_CSS_NUM_PARAM_CLASSES (IA_CSS_PARAM_CLASS_STATE + 1)
 
-/** ISP parameter descriptor */
+/* ISP parameter descriptor */
 struct ia_css_isp_parameter {
        uint32_t offset; /* Offset in isp_<mem>)parameters, etc. */
        uint32_t size;   /* Disabled if 0 */
@@ -77,10 +77,10 @@ struct ia_css_isp_param_isp_segments {
 
 /* Memory offsets in binary info */
 struct ia_css_isp_param_memory_offsets {
-       uint32_t offsets[IA_CSS_NUM_PARAM_CLASSES];  /**< offset wrt hdr in bytes */
+       uint32_t offsets[IA_CSS_NUM_PARAM_CLASSES];  /** offset wrt hdr in bytes */
 };
 
-/** Offsets for ISP kernel parameters per isp memory.
+/* Offsets for ISP kernel parameters per isp memory.
  * Only relevant for standard ISP binaries, not ACC or SP.
  */
 union ia_css_all_memory_offsets {
index 832d9e16edebf45813dd224f00cf7092ea0d882b..f793ce125f02a85c494d71ef3c597adcefc01d2b 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
index 02bf908d94e6c0a25f458ee909ab4fda454f6e71..4cf2defe9ef032589a144a68240224f2caca6f21 100644 (file)
@@ -44,7 +44,7 @@ more details.
  * Virtual Input System. (Input System 2401)
  */
 typedef input_system_cfg_t     ia_css_isys_descr_t;
-/** end of Virtual Input System */
+/* end of Virtual Input System */
 #endif
 
 #if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
@@ -112,7 +112,7 @@ unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits);
 
 #endif /* #if !defined(USE_INPUT_SYSTEM_VERSION_2401) */
 
-/** @brief Translate format and compression to format type.
+/* @brief Translate format and compression to format type.
  *
  * @param[in]  input_format    The input format.
  * @param[in]  compression     The compression scheme.
@@ -195,7 +195,7 @@ extern void ia_css_isys_stream2mmio_sid_rmgr_release(
        stream2mmio_ID_t        stream2mmio,
        stream2mmio_sid_ID_t    *sid);
 
-/** end of Virtual Input System */
+/* end of Virtual Input System */
 #endif
 
 #endif                         /* __IA_CSS_ISYS_H__ */
index d1d4f79c00f19c704dd795999c5ef6acde8d9125..3b04dc51335a50a859805bc5f0999cb0ac999eba 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
index faef97672eac285b7e69f3b0373126eba3d7295e..d8c3b75d7faca3b8502cdd2f2123d3e39ab7bd4c 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
  * Support for Intel Camera Imaging ISP subsystem.
  * Copyright (c) 2010 - 2015, Intel Corporation.
  *
index 5032627342d95993297b0f85a97ad64dc059b68d..4def4a542b7db3b524d7c473bb23e51669d4e77c 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
index 239ef310bdeb6f653eb345ed06513eaae0e88e05..4122084fd237443fe4fe39672c55366af9553fc9 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
index a93c7f44ff1283e18ba1c26821a9cbdbfb0dace5..222b294c0ab000152f5148ea49a9f5324db2475d 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
index 46a157f64343c21e176efc976fcea11140b0efbc..70f6cb5e5918d185fb9f23b1746e39c808c3bd39 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
index 0f1e8a2f6b104a45e682e5126161376b213b1471..90922a7acefdcaca2f6aba26fed2c60c5adc6cb2 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
@@ -166,7 +166,7 @@ static int32_t calculate_stride(
        bool    raw_packed,
        int32_t align_in_bytes);
 
-/** end of Forwarded Declaration */
+/* end of Forwarded Declaration */
 
 /**************************************************
  *
@@ -292,7 +292,7 @@ ia_css_isys_error_t ia_css_isys_stream_calculate_cfg(
        return rc;
 }
 
-/** end of Public Methods */
+/* end of Public Methods */
 
 /**************************************************
  *
@@ -894,5 +894,5 @@ static csi_mipi_packet_type_t get_csi_mipi_packet_type(
 
        return packet_type;
 }
-/** end of Private Methods */
+/* end of Private Methods */
 #endif
index 90646f5f888568299a6c55b5218828fc4bd08ded..e64936e2d46e5b7fe9d9d6ed83a1dbb6e9027464 100644 (file)
@@ -103,7 +103,7 @@ struct ia_css_pipeline_stage_desc {
        struct ia_css_frame *vf_frame;
 };
 
-/** @brief initialize the pipeline module
+/* @brief initialize the pipeline module
  *
  * @return    None
  *
@@ -112,7 +112,7 @@ struct ia_css_pipeline_stage_desc {
  */
 void ia_css_pipeline_init(void);
 
-/** @brief initialize the pipeline structure with default values
+/* @brief initialize the pipeline structure with default values
  *
  * @param[out] pipeline  structure to be initialized with defaults
  * @param[in] pipe_id
@@ -129,7 +129,7 @@ enum ia_css_err ia_css_pipeline_create(
        unsigned int pipe_num,
        unsigned int dvs_frame_delay);
 
-/** @brief destroy a pipeline
+/* @brief destroy a pipeline
  *
  * @param[in] pipeline
  * @return    None
@@ -138,7 +138,7 @@ enum ia_css_err ia_css_pipeline_create(
 void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline);
 
 
-/** @brief Starts a pipeline
+/* @brief Starts a pipeline
  *
  * @param[in] pipe_id
  * @param[in] pipeline
@@ -148,7 +148,7 @@ void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline);
 void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
                           struct ia_css_pipeline *pipeline);
 
-/** @brief Request to stop a pipeline
+/* @brief Request to stop a pipeline
  *
  * @param[in] pipeline
  * @return                     IA_CSS_SUCCESS or error code upon error.
@@ -156,7 +156,7 @@ void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
  */
 enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline);
 
-/** @brief Check whether pipeline has stopped
+/* @brief Check whether pipeline has stopped
  *
  * @param[in] pipeline
  * @return    true if the pipeline has stopped
@@ -164,7 +164,7 @@ enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline);
  */
 bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipe);
 
-/** @brief clean all the stages pipeline and make it as new
+/* @brief clean all the stages pipeline and make it as new
  *
  * @param[in] pipeline
  * @return    None
@@ -172,7 +172,7 @@ bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipe);
  */
 void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline);
 
-/** @brief Add a stage to pipeline.
+/* @brief Add a stage to pipeline.
  *
  * @param     pipeline               Pointer to the pipeline to be added to.
  * @param[in] stage_desc       The description of the stage
@@ -188,7 +188,7 @@ enum ia_css_err ia_css_pipeline_create_and_add_stage(
                        struct ia_css_pipeline_stage_desc *stage_desc,
                        struct ia_css_pipeline_stage **stage);
 
-/** @brief Finalize the stages in a pipeline
+/* @brief Finalize the stages in a pipeline
  *
  * @param     pipeline               Pointer to the pipeline to be added to.
  * @return                     None
@@ -198,7 +198,7 @@ enum ia_css_err ia_css_pipeline_create_and_add_stage(
 void ia_css_pipeline_finalize_stages(struct ia_css_pipeline *pipeline,
                        bool continuous);
 
-/** @brief gets a stage from the pipeline
+/* @brief gets a stage from the pipeline
  *
  * @param[in] pipeline
  * @return                     IA_CSS_SUCCESS or error code upon error.
@@ -208,7 +208,7 @@ enum ia_css_err ia_css_pipeline_get_stage(struct ia_css_pipeline *pipeline,
                          int mode,
                          struct ia_css_pipeline_stage **stage);
 
-/** @brief Gets a pipeline stage corresponding Firmware handle from the pipeline
+/* @brief Gets a pipeline stage corresponding Firmware handle from the pipeline
  *
  * @param[in] pipeline
  * @param[in] fw_handle
@@ -221,7 +221,7 @@ enum ia_css_err ia_css_pipeline_get_stage_from_fw(struct ia_css_pipeline *pipeli
                          uint32_t fw_handle,
                          struct ia_css_pipeline_stage **stage);
 
-/** @brief Gets the Firmware handle correponding the stage num from the pipeline
+/*     @brief  Gets the Firmware handle corresponding to the stage num from the pipeline
  *
  * @param[in] pipeline
  * @param[in] stage_num
@@ -234,7 +234,7 @@ enum ia_css_err ia_css_pipeline_get_fw_from_stage(struct ia_css_pipeline *pipeli
                          uint32_t stage_num,
                          uint32_t *fw_handle);
 
-/** @brief gets the output stage from the pipeline
+/* @brief gets the output stage from the pipeline
  *
  * @param[in] pipeline
  * @return                     IA_CSS_SUCCESS or error code upon error.
@@ -245,7 +245,7 @@ enum ia_css_err ia_css_pipeline_get_output_stage(
                        int mode,
                        struct ia_css_pipeline_stage **stage);
 
-/** @brief Checks whether the pipeline uses params
+/* @brief Checks whether the pipeline uses params
  *
  * @param[in] pipeline
  * @return    true if the pipeline uses params
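
Read in order, the prototypes above describe a pipeline's life cycle: create it, add stages from stage descriptors, finalize the stages, start it, then request a stop and poll until it reports stopped. A rough usage sketch built only from the prototypes visible in this hunk; the header name is assumed from context, the pipeline, pipe id and stage descriptor are presumed to have been prepared elsewhere (e.g. via ia_css_pipeline_create()), and error handling is trimmed:

#include "ia_css_pipeline.h"	/* assumed header name for the prototypes above */

/* Hypothetical caller: drive an already-created pipeline through one run. */
static enum ia_css_err run_pipeline_once(struct ia_css_pipeline *pipeline,
					 enum ia_css_pipe_id pipe_id,
					 struct ia_css_pipeline_stage_desc *stage_desc)
{
	struct ia_css_pipeline_stage *stage;
	enum ia_css_err err;

	err = ia_css_pipeline_create_and_add_stage(pipeline, stage_desc, &stage);
	if (err != IA_CSS_SUCCESS)
		return err;

	ia_css_pipeline_finalize_stages(pipeline, /* continuous */ false);
	ia_css_pipeline_start(pipe_id, pipeline);

	/* ... frames are processed here ... */

	err = ia_css_pipeline_request_stop(pipeline);
	while (!ia_css_pipeline_has_stopped(pipeline))
		;	/* a real caller would sleep between polls */

	ia_css_pipeline_clean(pipeline);
	return err;
}
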
index 62d13978475d87b1d482df5df8f79e1c09aea4fa..8f93d29d1c5173c36c1f9581ec84b2a3a045a609 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
@@ -114,7 +114,7 @@ void ia_css_pipeline_map(unsigned int pipe_num, bool map)
        IA_CSS_LEAVE_PRIVATE("void");
 }
 
-/** @brief destroy a pipeline
+/* @brief destroy a pipeline
  *
  * @param[in] pipeline
  * @return    None
@@ -187,7 +187,7 @@ void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
              "ia_css_pipeline_start() leave: return_void\n");
 }
 
-/**
+/*
  * @brief Query the SP thread ID.
  * Refer to "sh_css_internal.h" for details.
  */
@@ -285,7 +285,7 @@ void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline)
        IA_CSS_LEAVE_PRIVATE("void");
 }
 
-/** @brief Add a stage to pipeline.
+/* @brief Add a stage to pipeline.
  *
  * @param       pipeline      Pointer to the pipeline to be added to.
  * @param[in]   stage_desc    The description of the stage
index e50a0f8137530bbbd60bb230c4e6a2b771cbe77c..aaf2e247cafbe0d2ed2eb6fbc02219e3b49257fb 100644 (file)
@@ -51,7 +51,7 @@ typedef struct ia_css_queue ia_css_queue_t;
 /*****************************************************************************
  * Queue Public APIs
  *****************************************************************************/
-/** @brief Initialize a local queue instance.
+/* @brief Initialize a local queue instance.
  *
  * @param[out] qhandle. Handle to queue instance for use with API
  * @param[in]  desc.   Descriptor with queue properties filled-in
@@ -63,7 +63,7 @@ extern int ia_css_queue_local_init(
                        ia_css_queue_t *qhandle,
                        ia_css_queue_local_t *desc);
 
-/** @brief Initialize a remote queue instance
+/* @brief Initialize a remote queue instance
  *
  * @param[out] qhandle. Handle to queue instance for use with API
  * @param[in]  desc.   Descriptor with queue properties filled-in
@@ -74,7 +74,7 @@ extern int ia_css_queue_remote_init(
                        ia_css_queue_t *qhandle,
                        ia_css_queue_remote_t *desc);
 
-/** @brief Uninitialize a queue instance
+/* @brief Uninitialize a queue instance
  *
  * @param[in]  qhandle. Handle to queue instance
  * @return     0 - Successful uninit.
@@ -83,7 +83,7 @@ extern int ia_css_queue_remote_init(
 extern int ia_css_queue_uninit(
                        ia_css_queue_t *qhandle);
 
-/** @brief Enqueue an item in the queue instance
+/* @brief Enqueue an item in the queue instance
  *
  * @param[in]  qhandle. Handle to queue instance
  * @param[in]  item.    Object to be enqueued.
@@ -96,7 +96,7 @@ extern int ia_css_queue_enqueue(
                        ia_css_queue_t *qhandle,
                        uint32_t item);
 
-/** @brief Dequeue an item from the queue instance
+/* @brief Dequeue an item from the queue instance
  *
  * @param[in]  qhandle. Handle to queue instance
  * @param[out] item.    Object to be dequeued into this item.
@@ -110,7 +110,7 @@ extern int ia_css_queue_dequeue(
                        ia_css_queue_t *qhandle,
                        uint32_t *item);
 
-/** @brief Check if the queue is empty
+/* @brief Check if the queue is empty
  *
  * @param[in]  qhandle.  Handle to queue instance
  * @param[in]  is_empty  True if empty, False if not.
@@ -123,7 +123,7 @@ extern int ia_css_queue_is_empty(
                        ia_css_queue_t *qhandle,
                        bool *is_empty);
 
-/** @brief Check if the queue is full
+/* @brief Check if the queue is full
  *
  * @param[in]  qhandle.  Handle to queue instance
  * @param[in]  is_full   True if Full, False if not.
@@ -136,7 +136,7 @@ extern int ia_css_queue_is_full(
                        ia_css_queue_t *qhandle,
                        bool *is_full);
 
-/** @brief Get used space in the queue
+/* @brief Get used space in the queue
  *
  * @param[in]  qhandle.  Handle to queue instance
  * @param[in]  size      Number of available elements in the queue
@@ -148,7 +148,7 @@ extern int ia_css_queue_get_used_space(
                        ia_css_queue_t *qhandle,
                        uint32_t *size);
 
-/** @brief Get free space in the queue
+/* @brief Get free space in the queue
  *
  * @param[in]  qhandle.  Handle to queue instance
  * @param[in]  size      Number of free elements in the queue
@@ -160,7 +160,7 @@ extern int ia_css_queue_get_free_space(
                        ia_css_queue_t *qhandle,
                        uint32_t *size);
 
-/** @brief Peek at an element in the queue
+/* @brief Peek at an element in the queue
  *
  * @param[in]  qhandle.  Handle to queue instance
  * @param[in]  offset   Offset of element to peek,
@@ -175,7 +175,7 @@ extern int ia_css_queue_peek(
                uint32_t offset,
                uint32_t *element);
 
-/** @brief Get the usable size for the queue
+/* @brief Get the usable size for the queue
  *
  * @param[in]  qhandle. Handle to queue instance
  * @param[out] size     Size value to be returned here.
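
All of the queue calls above follow one pattern: an int return that is 0 on success, operating on a handle that was bound to a local or remote descriptor at init time. A hedged round-trip sketch using only the prototypes visible in this hunk; initialization via ia_css_queue_local_init() or ia_css_queue_remote_init() is assumed to have happened elsewhere, since the descriptor types are not shown here:

#include "ia_css_queue.h"

/* Hypothetical helper: push one token onto an already-initialized queue,
 * check emptiness, then pop the token back. */
static int queue_roundtrip(ia_css_queue_t *q)
{
	uint32_t item = 0;
	bool is_empty = true;
	int err;

	err = ia_css_queue_enqueue(q, 0x1234);		/* token value is arbitrary */
	if (err)
		return err;

	err = ia_css_queue_is_empty(q, &is_empty);	/* expect is_empty == false */
	if (err)
		return err;

	return ia_css_queue_dequeue(q, &item);		/* item should now be 0x1234 */
}
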
index 946d4f2d21080099daa2aa76589cda6a8ba0cd37..7bb2b494836e8d16eaafe99415966b09f6436339 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
index efa9c140484f6a2dc9a4f9b9fc1f4a811cfcbf33..370ff3816dbe0b4bcf2958cd18ff2673b08a39e3 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
@@ -44,7 +44,7 @@ enum ia_css_err ia_css_rmgr_init(void)
        return err;
 }
 
-/**
+/*
  * @brief Uninitialize resource pool (host)
  */
 void ia_css_rmgr_uninit(void)
index e56006c07ee863016f464b0bbd7d628a1d9da8ce..54239ac9d7c90a22c73e5781815fa1ae87bf8308 100644 (file)
 #include <memory_access.h>    /* mmmgr_malloc, mhmm_free */
 #include <ia_css_debug.h>
 
-/**
+/*
  * @brief VBUF resource handles
  */
 #define NUM_HANDLES 1000
 struct ia_css_rmgr_vbuf_handle handle_table[NUM_HANDLES];
 
-/**
+/*
  * @brief VBUF resource pool - refpool
  */
 struct ia_css_rmgr_vbuf_pool refpool = {
@@ -37,7 +37,7 @@ struct ia_css_rmgr_vbuf_pool refpool = {
        NULL,                   /* handles */
 };
 
-/**
+/*
  * @brief VBUF resource pool - writepool
  */
 struct ia_css_rmgr_vbuf_pool writepool = {
@@ -48,7 +48,7 @@ struct ia_css_rmgr_vbuf_pool writepool = {
        NULL,                   /* handles */
 };
 
-/**
+/*
  * @brief VBUF resource pool - hmmbufferpool
  */
 struct ia_css_rmgr_vbuf_pool hmmbufferpool = {
@@ -63,7 +63,7 @@ struct ia_css_rmgr_vbuf_pool *vbuf_ref = &refpool;
 struct ia_css_rmgr_vbuf_pool *vbuf_write = &writepool;
 struct ia_css_rmgr_vbuf_pool *hmm_buffer_pool = &hmmbufferpool;
 
-/**
+/*
  * @brief Initialize the reference count (host, vbuf)
  */
 static void rmgr_refcount_init_vbuf(void)
@@ -72,7 +72,7 @@ static void rmgr_refcount_init_vbuf(void)
        memset(&handle_table, 0, sizeof(handle_table));
 }
 
-/**
+/*
  * @brief Retain the reference count for a handle (host, vbuf)
  *
  * @param handle       The pointer to the handle
@@ -109,7 +109,7 @@ void ia_css_rmgr_refcount_retain_vbuf(struct ia_css_rmgr_vbuf_handle **handle)
        (*handle)->count++;
 }
 
-/**
+/*
  * @brief Release the reference count for a handle (host, vbuf)
  *
  * @param handle       The pointer to the handle
@@ -131,7 +131,7 @@ void ia_css_rmgr_refcount_release_vbuf(struct ia_css_rmgr_vbuf_handle **handle)
        }
 }
 
-/**
+/*
  * @brief Initialize the resource pool (host, vbuf)
  *
  * @param pool The pointer to the pool
@@ -163,7 +163,7 @@ enum ia_css_err ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
        return err;
 }
 
-/**
+/*
  * @brief Uninitialize the resource pool (host, vbuf)
  *
  * @param pool The pointer to the pool
@@ -197,7 +197,7 @@ void ia_css_rmgr_uninit_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
        }
 }
 
-/**
+/*
  * @brief Push a handle to the pool
  *
  * @param pool         The pointer to the pool
@@ -224,7 +224,7 @@ void rmgr_push_handle(struct ia_css_rmgr_vbuf_pool *pool,
        assert(succes);
 }
 
-/**
+/*
  * @brief Pop a handle from the pool
  *
  * @param pool         The pointer to the pool
@@ -254,7 +254,7 @@ void rmgr_pop_handle(struct ia_css_rmgr_vbuf_pool *pool,
        }
 }
 
-/**
+/*
  * @brief Acquire a handle from the pool (host, vbuf)
  *
  * @param pool         The pointer to the pool
@@ -302,7 +302,7 @@ void ia_css_rmgr_acq_vbuf(struct ia_css_rmgr_vbuf_pool *pool,
        ia_css_rmgr_refcount_retain_vbuf(handle);
 }
 
-/**
+/*
  * @brief Release a handle to the pool (host, vbuf)
  *
  * @param pool         The pointer to the pool
index 27e9eb1e2102b3f29bd4b8403587c5c9f10cf9d2..bc4b1723369ec71664ea0821a2b5605428e2c44b 100644 (file)
@@ -37,17 +37,17 @@ more details.
 
 
 typedef struct {
-       uint32_t        ddr_data_offset;       /**<  posistion of data in DDR */
-       uint32_t        dmem_data_addr;        /**< data segment address in dmem */
-       uint32_t        dmem_bss_addr;         /**< bss segment address in dmem  */
-       uint32_t        data_size;             /**< data segment size            */
-       uint32_t        bss_size;              /**< bss segment size             */
-       uint32_t        spctrl_config_dmem_addr; /** <location of dmem_cfg  in SP dmem */
-       uint32_t        spctrl_state_dmem_addr;  /** < location of state  in SP dmem */
-       unsigned int    sp_entry;                /** < entry function ptr on SP */
-       const void      *code;                   /**< location of firmware */
+       uint32_t        ddr_data_offset;       /** position of data in DDR */
+       uint32_t        dmem_data_addr;        /** data segment address in dmem */
+       uint32_t        dmem_bss_addr;         /** bss segment address in dmem  */
+       uint32_t        data_size;             /** data segment size            */
+       uint32_t        bss_size;              /** bss segment size             */
+       uint32_t        spctrl_config_dmem_addr; /* location of dmem_cfg in SP dmem */
+       uint32_t        spctrl_state_dmem_addr;  /* location of state in SP dmem */
+       unsigned int    sp_entry;                /* entry function ptr on SP */
+       const void      *code;                   /** location of firmware */
        uint32_t         code_size;
-       char      *program_name;    /**< not used on hardware, only for simulation */
+       char      *program_name;    /** not used on hardware, only for simulation */
 } ia_css_spctrl_cfg;
 
 /* Get the code addr in DDR of SP */
index 3af2891efca743bae47dbe55e208d33a90b3b377..2620d7514f79c63d55dfe1126e9a4687cba327dd 100644 (file)
@@ -41,16 +41,16 @@ typedef enum {
        IA_CSS_SP_SW_RUNNING
 } ia_css_spctrl_sp_sw_state;
 
-/** Structure to encapsulate required arguments for
+/* Structure to encapsulate required arguments for
  * initialization of SP DMEM using the SP itself
  */
 struct ia_css_sp_init_dmem_cfg {
-       ia_css_ptr      ddr_data_addr;  /**< data segment address in ddr  */
-       uint32_t        dmem_data_addr; /**< data segment address in dmem */
-       uint32_t        dmem_bss_addr;  /**< bss segment address in dmem  */
-       uint32_t        data_size;      /**< data segment size            */
-       uint32_t        bss_size;       /**< bss segment size             */
-       sp_ID_t         sp_id;          /** <sp Id */
+       ia_css_ptr      ddr_data_addr;  /** data segment address in ddr  */
+       uint32_t        dmem_data_addr; /** data segment address in dmem */
+       uint32_t        dmem_bss_addr;  /** bss segment address in dmem  */
+       uint32_t        data_size;      /** data segment size            */
+       uint32_t        bss_size;       /** bss segment size             */
+       sp_ID_t         sp_id;          /* sp Id */
 };
 
 #define SIZE_OF_IA_CSS_SP_INIT_DMEM_CFG_STRUCT \
index 6d9bceb60196dcb6fb613bf3cec3624e96265632..844e4d536cecc612c308bd756fd94ae44706a788 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
@@ -39,7 +39,7 @@ more details.
 
 struct spctrl_context_info {
        struct ia_css_sp_init_dmem_cfg dmem_config;
-       uint32_t        spctrl_config_dmem_addr; /** location of dmem_cfg  in SP dmem */
+       uint32_t        spctrl_config_dmem_addr; /* location of dmem_cfg  in SP dmem */
        uint32_t        spctrl_state_dmem_addr;
        unsigned int    sp_entry;           /* entry function ptr on SP */
        hrt_vaddress    code_addr;          /* sp firmware location in host mem-DDR*/
index 49c69e60ca5c5ed748bbab5c4bfad41a70c47d1b..b7dd18492a91b72f5d8bca9b28c8c3ae629c41ff 100644 (file)
@@ -13,7 +13,7 @@
  * more details.
  */
 #else
-/**
+/*
 Support for Intel Camera Imaging ISP subsystem.
 Copyright (c) 2010 - 2015, Intel Corporation.
 
index f92b6a9f77eb1faa5058d4326e8aa992193dac8c..322bb3de6098fd3e46342a4f0c50289ffe4d77f4 100644 (file)
@@ -176,7 +176,7 @@ static struct sh_css_hmm_buffer_record hmm_buffer_record[MAX_HMM_BUFFER_NUM];
 
 static bool fw_explicitly_loaded = false;
 
-/**
+/*
  * Local prototypes
  */
 
@@ -187,7 +187,7 @@ static enum ia_css_err
 sh_css_pipe_start(struct ia_css_stream *stream);
 
 #ifdef ISP2401
-/**
+/*
  * @brief Stop all "ia_css_pipe" instances in the target
  * "ia_css_stream" instance.
  *
@@ -207,7 +207,7 @@ sh_css_pipe_start(struct ia_css_stream *stream);
 static enum ia_css_err
 sh_css_pipes_stop(struct ia_css_stream *stream);
 
-/**
+/*
  * @brief Check if all "ia_css_pipe" instances in the target
  * "ia_css_stream" instance have stopped.
  *
@@ -1649,7 +1649,7 @@ ia_css_init(const struct ia_css_env *env,
        void (*flush_func)(struct ia_css_acc_fw *fw);
        hrt_data select, enable;
 
-       /**
+       /*
         * The C99 standard does not specify the exact object representation of structs;
         * the representation is compiler dependent.
         *
@@ -4617,23 +4617,23 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
  * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
  */
 static enum ia_css_event_type convert_event_sp_to_host_domain[] = {
-       IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE,    /**< Output frame ready. */
-       IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE,     /**< Second output frame ready. */
-       IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE, /**< Viewfinder Output frame ready. */
-       IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE,  /**< Second viewfinder Output frame ready. */
-       IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE,   /**< Indication that 3A statistics are available. */
-       IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE,  /**< Indication that DIS statistics are available. */
-       IA_CSS_EVENT_TYPE_PIPELINE_DONE,        /**< Pipeline Done event, sent after last pipeline stage. */
-       IA_CSS_EVENT_TYPE_FRAME_TAGGED,         /**< Frame tagged. */
-       IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE,     /**< Input frame ready. */
-       IA_CSS_EVENT_TYPE_METADATA_DONE,        /**< Metadata ready. */
-       IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE, /**< Indication that LACE statistics are available. */
-       IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE,   /**< Extension stage executed. */
-       IA_CSS_EVENT_TYPE_TIMER,                /**< Timing measurement data. */
-       IA_CSS_EVENT_TYPE_PORT_EOF,             /**< End Of Frame event, sent when in buffered sensor mode. */
-       IA_CSS_EVENT_TYPE_FW_WARNING,           /**< Performance warning encountered by FW */
-       IA_CSS_EVENT_TYPE_FW_ASSERT,            /**< Assertion hit by FW */
-       0,                                      /** error if sp passes  SH_CSS_SP_EVENT_NR_OF_TYPES as a valid event. */
+       IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE,    /** Output frame ready. */
+       IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE,     /** Second output frame ready. */
+       IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE, /** Viewfinder Output frame ready. */
+       IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE,  /** Second viewfinder Output frame ready. */
+       IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE,   /** Indication that 3A statistics are available. */
+       IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE,  /** Indication that DIS statistics are available. */
+       IA_CSS_EVENT_TYPE_PIPELINE_DONE,        /** Pipeline Done event, sent after last pipeline stage. */
+       IA_CSS_EVENT_TYPE_FRAME_TAGGED,         /** Frame tagged. */
+       IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE,     /** Input frame ready. */
+       IA_CSS_EVENT_TYPE_METADATA_DONE,        /** Metadata ready. */
+       IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE, /** Indication that LACE statistics are available. */
+       IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE,   /** Extension stage executed. */
+       IA_CSS_EVENT_TYPE_TIMER,                /** Timing measurement data. */
+       IA_CSS_EVENT_TYPE_PORT_EOF,             /** End Of Frame event, sent when in buffered sensor mode. */
+       IA_CSS_EVENT_TYPE_FW_WARNING,           /** Performance warning encountered by FW */
+       IA_CSS_EVENT_TYPE_FW_ASSERT,            /** Assertion hit by FW */
+       0,                                      /* error if sp passes  SH_CSS_SP_EVENT_NR_OF_TYPES as a valid event. */
 };
 
 enum ia_css_err
@@ -5028,7 +5028,7 @@ sh_css_enable_cont_capt(bool enable, bool stop_copy_preview)
 bool
 sh_css_continuous_is_enabled(uint8_t pipe_num)
 #else
-/**
+/*
  * @brief Stop all "ia_css_pipe" instances in the target
  * "ia_css_stream" instance.
  *
@@ -5107,7 +5107,7 @@ ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth)
        return IA_CSS_SUCCESS;
 }
 #else
-       /**
+       /*
         * Stop all "ia_css_pipe" instances in this target
         * "ia_css_stream" instance.
         */
@@ -5146,7 +5146,7 @@ ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth)
                }
        }
 
-       /**
+       /*
         * In the CSS firmware use scenario "Continuous Preview"
         * as well as "Continuous Video", the "ia_css_pipe" instance
         * "Copy Pipe" is activated. This "Copy Pipe" is private to
@@ -5183,7 +5183,7 @@ ERR:
        return err;
 }
 
-/**
+/*
  * @brief Check if all "ia_css_pipe" instances in the target
  * "ia_css_stream" instance have stopped.
  *
@@ -5218,7 +5218,7 @@ sh_css_pipes_have_stopped(struct ia_css_stream *stream)
        main_pipe_id = main_pipe->mode;
        IA_CSS_ENTER_PRIVATE("main_pipe_id=%d", main_pipe_id);
 
-       /**
+       /*
         * Check if every "ia_css_pipe" instance in this target
         * "ia_css_stream" instance has stopped.
         */
@@ -5229,7 +5229,7 @@ sh_css_pipes_have_stopped(struct ia_css_stream *stream)
                                rval);
        }
 
-       /**
+       /*
         * In the CSS firmware use scenario "Continuous Preview"
         * as well as "Continuous Video", the "ia_css_pipe" instance
         * "Copy Pipe" is activated. This "Copy Pipe" is private to
@@ -5474,7 +5474,7 @@ ERR:
 }
 
 #ifdef ISP2401
-/**
+/*
  * @brief Check if a format is supported by the pipe.
  *
  */
@@ -8626,7 +8626,7 @@ sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline,
        return err;
 }
 
-/**
+/*
  * @brief Tag a specific frame in continuous capture.
  * Refer to "sh_css_internal.h" for details.
  */
@@ -8666,7 +8666,7 @@ enum ia_css_err ia_css_stream_capture_frame(struct ia_css_stream *stream,
        return err;
 }
 
-/**
+/*
  * @brief Configure the continuous capture.
  * Refer to "sh_css_internal.h" for details.
  */
@@ -8822,7 +8822,7 @@ sh_css_init_host_sp_control_vars(void)
                "sh_css_init_host_sp_control_vars() leave: return_void\n");
 }
 
-/**
+/*
  * create the internal structures and fill in the configuration data
  */
 void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config)
@@ -10435,7 +10435,7 @@ ia_css_start_sp(void)
        return err;
 }
 
-/**
+/*
  *     Time to wait SP for termincate. Only condition when this can happen
  *     is a fatal hw failure, but we must be able to detect this and emit
  *     a proper error trace.
@@ -10713,7 +10713,7 @@ ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id)
        return ret;
 }
 
-/** @brief     Set the state (Enable or Disable) of the Extension stage in the
+/* @brief      Set the state (Enable or Disable) of the Extension stage in the
  *             given pipe.
  */
 enum ia_css_err
@@ -10758,7 +10758,7 @@ ia_css_pipe_set_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle, bool
        return err;
 }
 
-/**    @brief  Get the state (Enable or Disable) of the Extension stage in the
+/*     @brief  Get the state (Enable or Disable) of the Extension stage in the
  *     given pipe.
  */
 enum ia_css_err
index 0910021286a4113f6b6b47eb7ee86133f3e01618..161122e1bcbc6e216b1cf0981bf9104d8097ba2f 100644 (file)
@@ -188,7 +188,7 @@ enum host2sp_commands {
        N_host2sp_cmd
 };
 
-/** Enumeration used to indicate the events that are produced by
+/* Enumeration used to indicate the events that are produced by
  *  the SP and consumed by the Host.
  *
  * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
@@ -274,10 +274,10 @@ struct sh_css_ddr_address_map_compound {
 };
 
 struct ia_css_isp_parameter_set_info {
-       struct sh_css_ddr_address_map  mem_map;/**< pointers to Parameters in ISP format IMPT:
+       struct sh_css_ddr_address_map  mem_map;/** pointers to Parameters in ISP format IMPT:
                                                    This should be first member of this struct */
-       uint32_t                       isp_parameters_id;/**< Unique ID to track which config was actually applied to a particular frame */
-       ia_css_ptr                     output_frame_ptr;/**< Output frame to which this config has to be applied (optional) */
+       uint32_t                       isp_parameters_id;/** Unique ID to track which config was actually applied to a particular frame */
+       ia_css_ptr                     output_frame_ptr;/** Output frame to which this config has to be applied (optional) */
 };
 
 /* this struct contains all arguments that can be passed to
@@ -398,9 +398,9 @@ struct sh_css_sp_input_formatter_set {
 /* SP configuration information */
 struct sh_css_sp_config {
        uint8_t                 no_isp_sync; /* Signal host immediately after start */
-       uint8_t                 enable_raw_pool_locking; /**< Enable Raw Buffer Locking for HALv3 Support */
+       uint8_t                 enable_raw_pool_locking; /** Enable Raw Buffer Locking for HALv3 Support */
        uint8_t                 lock_all;
-       /**< If raw buffer locking is enabled, this flag indicates whether raw
+       /** If raw buffer locking is enabled, this flag indicates whether raw
             frames are locked when their EOF event is successfully sent to the
             host (true) or when they are passed to the preview/video pipe
             (false). */
@@ -458,13 +458,13 @@ struct sh_css_sp_pipeline_io {
        /*struct sh_css_sp_pipeline_terminal    output;*/
 };
 
-/** This struct tracks how many streams are registered per CSI port.
+/* This struct tracks how many streams are registered per CSI port.
  * This is used to track which streams have already been configured.
  * Only when all streams are configured, the CSI RX is started for that port.
  */
 struct sh_css_sp_pipeline_io_status {
-       uint32_t        active[N_INPUT_SYSTEM_CSI_PORT];        /**< registered streams */
-       uint32_t        running[N_INPUT_SYSTEM_CSI_PORT];       /**< configured streams */
+       uint32_t        active[N_INPUT_SYSTEM_CSI_PORT];        /** registered streams */
+       uint32_t        running[N_INPUT_SYSTEM_CSI_PORT];       /** configured streams */
 };
 
 #endif
@@ -500,7 +500,7 @@ enum sh_css_port_type {
 #define SH_CSS_METADATA_OFFLINE_MODE   0x04
 #define SH_CSS_METADATA_WAIT_INPUT     0x08
 
-/** @brief Free an array of metadata buffers.
+/* @brief Free an array of metadata buffers.
  *
  * @param[in]  num_bufs        Number of metadata buffers to be freed.
  * @param[in]  bufs            Pointer of array of metadata buffers.
@@ -764,7 +764,7 @@ struct sh_css_hmm_buffer {
                        hrt_vaddress    frame_data;
                        uint32_t        flashed;
                        uint32_t        exp_id;
-                       uint32_t        isp_parameters_id; /**< Unique ID to track which config was
+                       uint32_t        isp_parameters_id; /** Unique ID to track which config was
                                                                actually applied to a particular frame */
 #if CONFIG_ON_FRAME_ENQUEUE()
                        struct sh_css_config_on_frame_enqueue config_on_frame_enqueue;
index e12789236bb9abadae85a6e27fbbb8089858255d..4bcc35d219f83fa84ced52007616ab26e063b139 100644 (file)
@@ -22,7 +22,7 @@
 #include <ia_css_pipe_public.h>
 #include <ia_css_stream_public.h>
 
-/** The pipe id type, distinguishes the kind of pipes that
+/* The pipe id type, distinguishes the kind of pipes that
  *  can be run in parallel.
  */
 enum ia_css_pipe_id {
index 36aaa3019a15ec5a82d0d64ec1803a1d79b8cbcb..883474e90c8173912b9800a79f34a9d257ae50a5 100644 (file)
@@ -321,7 +321,7 @@ calculate_mipi_buff_size(
        height = stream_cfg->input_config.input_res.height;
        format = stream_cfg->input_config.format;
        pack_raw_pixels = stream_cfg->pack_raw_pixels;
-       /** end of NOTE */
+       /* end of NOTE */
 
        /**
 #ifndef ISP2401
@@ -341,7 +341,7 @@ calculate_mipi_buff_size(
         * in the non-continuous use scenario.
         */
        width_padded = width + (2 * ISP_VEC_NELEMS);
-       /** end of NOTE */
+       /* end of NOTE */
 
        IA_CSS_ENTER("padded_width=%d, height=%d, format=%d\n",
                     width_padded, height, format);
index a7ffe6d8331bd68a7d5e3d53f87f978bf73c99b0..270ec2b60a3ed365d5d8042adf53f59570e0e12b 100644 (file)
@@ -144,8 +144,8 @@ struct ia_css_isp_parameters {
        struct sh_css_ddr_address_map_size pipe_ddr_ptrs_size[IA_CSS_PIPE_ID_NUM];
        struct sh_css_ddr_address_map ddr_ptrs;
        struct sh_css_ddr_address_map_size ddr_ptrs_size;
-       struct ia_css_frame *output_frame; /**< Output frame the config is to be applied to (optional) */
-       uint32_t isp_parameters_id; /**< Unique ID to track which config was actually applied to a particular frame */
+       struct ia_css_frame *output_frame; /** Output frame the config is to be applied to (optional) */
+       uint32_t isp_parameters_id; /** Unique ID to track which config was actually applied to a particular frame */
 };
 
 void
index e6a345979ff14298c4a3ee385ab6f941e6907c3b..6fc00fc402b14991e0b3b267abc9320ca7dbd513 100644 (file)
@@ -261,7 +261,7 @@ sh_css_sp_start_raw_copy(struct ia_css_frame *out_frame,
        assert(out_frame != NULL);
 
        {
-               /**
+               /*
                 * Clear sh_css_sp_stage for easy debugging.
                 * program_input_circuit must be saved as it is set outside
                 * this function.
@@ -335,7 +335,7 @@ sh_css_sp_start_isys_copy(struct ia_css_frame *out_frame,
        assert(out_frame != NULL);
 
        {
-               /**
+               /*
                 * Clear sh_css_sp_stage for easy debugging.
                 * program_input_circuit must be saved as it is set outside
                 * this function.
@@ -909,7 +909,7 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
        xinfo = binary->info;
        info  = &xinfo->sp;
        {
-               /**
+               /*
                 * Clear sh_css_sp_stage for easy debugging.
                 * program_input_circuit must be saved as it is set outside
                 * this function.
@@ -980,7 +980,7 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
        sh_css_isp_stage.binary_name[SH_CSS_MAX_BINARY_NAME - 1] = 0;
        sh_css_isp_stage.mem_initializers = *isp_mem_if;
 
-       /**
+       /*
         * Even when a stage does not need uds and does not params,
         * ia_css_uds_sp_scale_params() seems to be called (needs
         * further investigation). This function can not deal with
@@ -1429,7 +1429,7 @@ sh_css_init_host2sp_frame_data(void)
 }
 
 
-/**
+/*
  * @brief Update the offline frame information in host_sp_communication.
  * Refer to "sh_css_sp.h" for more details.
  */
@@ -1461,7 +1461,7 @@ sh_css_update_host2sp_offline_frame(
 }
 
 #if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
-/**
+/*
  * @brief Update the mipi frame information in host_sp_communication.
  * Refer to "sh_css_sp.h" for more details.
  */
@@ -1488,7 +1488,7 @@ sh_css_update_host2sp_mipi_frame(
                                frame ? frame->data : 0);
 }
 
-/**
+/*
  * @brief Update the mipi metadata information in host_sp_communication.
  * Refer to "sh_css_sp.h" for more details.
  */
@@ -1735,7 +1735,7 @@ ia_css_isp_has_started(void)
 }
 
 
-/**
+/*
  * @brief Initialize the DMA software-mask in the debug mode.
  * Refer to "sh_css_sp.h" for more details.
  */
@@ -1761,7 +1761,7 @@ sh_css_sp_init_dma_sw_reg(int dma_id)
        return true;
 }
 
-/**
+/*
  * @brief Set the DMA software-mask in the debug mode.
  * Refer to "sh_css_sp.h" for more details.
  */
index e49e478ab3547e9b565315374a3954a4da2c60f3..0b8e3d8720691b5db6a0365cff74a254250aedb9 100644 (file)
@@ -61,7 +61,7 @@ struct sh_css {
 #endif
        hrt_vaddress                   sp_bin_addr;
        hrt_data                       page_table_base_index;
-       unsigned int                   size_mem_words; /** \deprecated{Use ia_css_mipi_buffer_config instead.}*/
+       unsigned int                   size_mem_words; /* \deprecated{Use ia_css_mipi_buffer_config instead.}*/
        enum ia_css_irq_type           irq_type;
        unsigned int                   pipe_counter;
        
index 0790b3d9e25560366a48633ddce37beeea21e762..143038c6c403f0085fd7a8327a6682a67e559858 100644 (file)
@@ -293,9 +293,9 @@ static irqreturn_t prp_nfb4eof_interrupt(int irq, void *dev_id)
  * EOF timeout timer function. This is an unrecoverable condition
  * without a stream restart.
  */
-static void prp_eof_timeout(unsigned long data)
+static void prp_eof_timeout(struct timer_list *t)
 {
-       struct prp_priv *priv = (struct prp_priv *)data;
+       struct prp_priv *priv = from_timer(priv, t, eof_timeout_timer);
        struct imx_media_video_dev *vdev = priv->vdev;
        struct imx_ic_priv *ic_priv = priv->ic_priv;
 
@@ -1292,8 +1292,7 @@ static int prp_init(struct imx_ic_priv *ic_priv)
        priv->ic_priv = ic_priv;
 
        spin_lock_init(&priv->irqlock);
-       setup_timer(&priv->eof_timeout_timer, prp_eof_timeout,
-                   (unsigned long)priv);
+       timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
 
        priv->vdev = imx_media_capture_device_init(&ic_priv->sd,
                                                   PRPENCVF_SRC_PAD);
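
The setup_timer() to timer_setup() conversion above recurs in many of the driver hunks that follow (imx CSI, most_usb, ieee80211 softmac, r8712u, the LED and speakup timers, visorbus/visornic, wilc1000): the callback now receives the struct timer_list pointer and recovers its container with from_timer(), so no opaque unsigned long data argument is needed. A minimal sketch of the pattern, using a hypothetical driver struct rather than any of the ones touched here:

#include <linux/timer.h>

struct my_priv {
        struct timer_list eof_timeout_timer;
        /* ... other driver state ... */
};

/* New-style callback: the expiring timer itself is the argument. */
static void my_eof_timeout(struct timer_list *t)
{
        struct my_priv *priv = from_timer(priv, t, eof_timeout_timer);

        /* act on priv, e.g. schedule recovery work */
}

static void my_init(struct my_priv *priv)
{
        /* Replaces setup_timer(&timer, callback, (unsigned long)priv). */
        timer_setup(&priv->eof_timeout_timer, my_eof_timeout, 0);
}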
index 6d856118c223285702913468b29200e26c448a7c..bb1d6dafca83473eea250ff24be74b1505c9b533 100644 (file)
@@ -254,9 +254,9 @@ static irqreturn_t csi_idmac_nfb4eof_interrupt(int irq, void *dev_id)
  * EOF timeout timer function. This is an unrecoverable condition
  * without a stream restart.
  */
-static void csi_idmac_eof_timeout(unsigned long data)
+static void csi_idmac_eof_timeout(struct timer_list *t)
 {
-       struct csi_priv *priv = (struct csi_priv *)data;
+       struct csi_priv *priv = from_timer(priv, t, eof_timeout_timer);
        struct imx_media_video_dev *vdev = priv->vdev;
 
        v4l2_err(&priv->sd, "EOF timeout\n");
@@ -1739,8 +1739,7 @@ static int imx_csi_probe(struct platform_device *pdev)
        priv->csi_id = pdata->csi;
        priv->smfc_id = (priv->csi_id == 0) ? 0 : 2;
 
-       setup_timer(&priv->eof_timeout_timer, csi_idmac_eof_timeout,
-                   (unsigned long)priv);
+       timer_setup(&priv->eof_timeout_timer, csi_idmac_eof_timeout, 0);
        spin_lock_init(&priv->irqlock);
 
        v4l2_subdev_init(&priv->sd, &csi_subdev_ops);
index 85775da293fb10d067d7956af1268eabd5727893..667dacac81f03ed7902807790f26f7316b08e34b 100644 (file)
@@ -744,9 +744,9 @@ static void hdm_request_netinfo(struct most_interface *iface, int channel,
  * The handler runs in interrupt context. That's why we need to defer the
  * tasks to a work queue.
  */
-static void link_stat_timer_handler(unsigned long data)
+static void link_stat_timer_handler(struct timer_list *t)
 {
-       struct most_dev *mdev = (struct most_dev *)data;
+       struct most_dev *mdev = from_timer(mdev, t, link_stat_timer);
 
        schedule_work(&mdev->poll_work_obj);
        mdev->link_stat_timer.expires = jiffies + (2 * HZ);
@@ -1138,8 +1138,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
        num_endpoints = usb_iface_desc->desc.bNumEndpoints;
        mutex_init(&mdev->io_mutex);
        INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
-       setup_timer(&mdev->link_stat_timer, link_stat_timer_handler,
-                   (unsigned long)mdev);
+       timer_setup(&mdev->link_stat_timer, link_stat_timer_handler, 0);
 
        mdev->usb_device = usb_dev;
        mdev->link_stat_timer.expires = jiffies + (2 * HZ);
index 068aece25d37f17414a3767b79fc8510cdf6bb3e..cded30f145aa2423b13cafdf2e149c607cb355d2 100644 (file)
@@ -394,7 +394,7 @@ struct octeon_hcd {
                                result = -1;                                \
                                break;                                      \
                        } else                                              \
-                               cvmx_wait(100);                             \
+                               __delay(100);                               \
                }                                                           \
        } while (0);                                                        \
        result; })
@@ -774,7 +774,7 @@ retry:
        usbn_clk_ctl.s.hclk_rst = 1;
        cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
        /* 2e.  Wait 64 core-clock cycles for HCLK to stabilize */
-       cvmx_wait(64);
+       __delay(64);
        /*
         * 3. Program the power-on reset field in the USBN clock-control
         *    register:
@@ -795,7 +795,7 @@ retry:
        cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
                            usbn_usbp_ctl_status.u64);
        /* 6. Wait 10 cycles */
-       cvmx_wait(10);
+       __delay(10);
        /*
         * 7. Clear ATE_RESET field in the USBN clock-control register:
         *    USBN_USBP_CTL_STATUS[ATE_RESET] = 0
index e69a2153c999c796602f2884137faff8bc1ef8ab..12c9df9cddde22bea46650ededda1ed4d3e057ae 100644 (file)
@@ -102,7 +102,7 @@ enum modulation rf69_get_modulation(struct spi_device *spi)
 
        currentValue = READ_REG(REG_DATAMODUL);
 
-       switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE >> 3) { // TODO improvement: change 3 to define
+       switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE) {
        case DATAMODUL_MODULATION_TYPE_OOK: return OOK;
        case DATAMODUL_MODULATION_TYPE_FSK: return FSK;
        default:                            return undefined;
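
For the rf69 hunk above, the underlying problem is C operator precedence: >> binds tighter than &, so the removed expression masked the register value with MASK_DATAMODUL_MODULATION_TYPE >> 3 instead of applying the mask as intended. A small standalone illustration; the mask value is assumed here purely for the example:

#include <stdio.h>

/* Assumed value for illustration only; the real one lives in rf69_registers.h. */
#define MASK_DATAMODUL_MODULATION_TYPE 0x18

int main(void)
{
        unsigned int reg = 0x08;  /* pretend REG_DATAMODUL readback */

        /* Parses as reg & (0x18 >> 3), i.e. reg & 0x03: the wrong bits. */
        unsigned int buggy = reg & MASK_DATAMODUL_MODULATION_TYPE >> 3;

        /* The fixed form tests the modulation-type bits in place. */
        unsigned int fixed = reg & MASK_DATAMODUL_MODULATION_TYPE;

        printf("buggy=0x%02x fixed=0x%02x\n", buggy, fixed);
        return 0;
}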
index c0664dc80bf24684184c83bad118d311d756b75c..446310775e9021bcc7e9f72cb94545e0b9c1b012 100644 (file)
@@ -1395,19 +1395,13 @@ static int rtw_wx_get_essid(struct net_device *dev,
        if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
            (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
                len = pcur_bss->Ssid.SsidLength;
-
-               wrqu->essid.length = len;
-
                memcpy(extra, pcur_bss->Ssid.Ssid, len);
-
-               wrqu->essid.flags = 1;
        } else {
-               ret = -1;
-               goto exit;
+               len = 0;
+               *extra = 0;
        }
-
-exit:
-
+       wrqu->essid.length = len;
+       wrqu->essid.flags = 1;
 
        return ret;
 }
index 4e7908322d77d1985989a0688096d975e0d41c6f..f56fdc7a4b614bd5732bfe9fac96017a43d49346 100644 (file)
@@ -391,10 +391,10 @@ static void ieee80211_send_beacon(struct ieee80211_device *ieee)
 }
 
 
-static void ieee80211_send_beacon_cb(unsigned long _ieee)
+static void ieee80211_send_beacon_cb(struct timer_list *t)
 {
        struct ieee80211_device *ieee =
-               (struct ieee80211_device *) _ieee;
+               from_timer(ieee, t, beacon_timer);
        unsigned long flags;
 
        spin_lock_irqsave(&ieee->beacon_lock, flags);
@@ -1251,9 +1251,11 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
        spin_unlock_irqrestore(&ieee->lock, flags);
 }
 
-static void ieee80211_associate_abort_cb(unsigned long dev)
+static void ieee80211_associate_abort_cb(struct timer_list *t)
 {
-       ieee80211_associate_abort((struct ieee80211_device *) dev);
+       struct ieee80211_device *dev = from_timer(dev, t, associate_timer);
+
+       ieee80211_associate_abort(dev);
 }
 
 
@@ -2718,11 +2720,9 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
        ieee->enable_rx_imm_BA = true;
        ieee->tx_pending.txb = NULL;
 
-       setup_timer(&ieee->associate_timer, ieee80211_associate_abort_cb,
-                   (unsigned long)ieee);
+       timer_setup(&ieee->associate_timer, ieee80211_associate_abort_cb, 0);
 
-       setup_timer(&ieee->beacon_timer, ieee80211_send_beacon_cb,
-                   (unsigned long)ieee);
+       timer_setup(&ieee->beacon_timer, ieee80211_send_beacon_cb, 0);
 
 
        INIT_DELAYED_WORK(&ieee->start_ibss_wq, ieee80211_start_ibss_wq);
index 576c15d25a0f80a53c0891f40331400d8f0b4c47..986a55bb9877e161d02209a04fb19e9eaea27dce 100644 (file)
@@ -138,17 +138,16 @@ _recv_indicatepkt_drop:
        precvpriv->rx_drop++;
 }
 
-static void _r8712_reordering_ctrl_timeout_handler (unsigned long data)
+static void _r8712_reordering_ctrl_timeout_handler (struct timer_list *t)
 {
        struct recv_reorder_ctrl *preorder_ctrl =
-                        (struct recv_reorder_ctrl *)data;
+                        from_timer(preorder_ctrl, t, reordering_ctrl_timer);
 
        r8712_reordering_ctrl_timeout_handler(preorder_ctrl);
 }
 
 void r8712_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
 {
-       setup_timer(&preorder_ctrl->reordering_ctrl_timer,
-                    _r8712_reordering_ctrl_timeout_handler,
-                    (unsigned long)preorder_ctrl);
+       timer_setup(&preorder_ctrl->reordering_ctrl_timer,
+                   _r8712_reordering_ctrl_timeout_handler, 0);
 }
index da1d4a641dcd2cd33935532c470dc6db6a373777..455fba721135a8bad731abd399712144b84307da 100644 (file)
@@ -74,7 +74,7 @@ enum _LED_STATE_871x {
  *     Prototype of protected function.
  *===========================================================================
  */
-static void BlinkTimerCallback(unsigned long data);
+static void BlinkTimerCallback(struct timer_list *t);
 
 static void BlinkWorkItemCallback(struct work_struct *work);
 /*===========================================================================
@@ -99,8 +99,7 @@ static void InitLed871x(struct _adapter *padapter, struct LED_871x *pLed,
        pLed->bLedBlinkInProgress = false;
        pLed->BlinkTimes = 0;
        pLed->BlinkingLedState = LED_UNKNOWN;
-       setup_timer(&pLed->BlinkTimer, BlinkTimerCallback,
-                   (unsigned long)pLed);
+       timer_setup(&pLed->BlinkTimer, BlinkTimerCallback, 0);
        INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback);
 }
 
@@ -825,9 +824,9 @@ static void SwLedBlink6(struct LED_871x *pLed)
  *             Callback function of LED BlinkTimer,
  *             it just schedules to corresponding BlinkWorkItem.
  */
-static void BlinkTimerCallback(unsigned long data)
+static void BlinkTimerCallback(struct timer_list *t)
 {
-       struct LED_871x  *pLed = (struct LED_871x *)data;
+       struct LED_871x  *pLed = from_timer(pLed, t, BlinkTimer);
 
        /* This fixed the crash problem on Fedora 12 when trying to do the
         * insmod;ifconfig up;rmmod commands.
index 16497202473fecde563f1da430e68b757a352a2f..aae868509e1302958bfcf13a029bd790f61692ec 100644 (file)
@@ -1164,7 +1164,7 @@ static void spkup_write(const u16 *in_buf, int count)
 static const int NUM_CTL_LABELS = (MSG_CTL_END - MSG_CTL_START + 1);
 
 static void read_all_doc(struct vc_data *vc);
-static void cursor_done(u_long data);
+static void cursor_done(struct timer_list *unused);
 static DEFINE_TIMER(cursor_timer, cursor_done);
 
 static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag)
@@ -1682,7 +1682,7 @@ static int speak_highlight(struct vc_data *vc)
        return 0;
 }
 
-static void cursor_done(u_long data)
+static void cursor_done(struct timer_list *unused)
 {
        struct vc_data *vc = vc_cons[cursor_con].d;
        unsigned long flags;
index 6ddd3fc3f08d15d5aff35e296ee13868eb1ecd74..aac29c816d09a54e58257abae1fb42624fb51d7b 100644 (file)
@@ -153,7 +153,7 @@ int spk_synth_is_alive_restart(struct spk_synth *synth)
 }
 EXPORT_SYMBOL_GPL(spk_synth_is_alive_restart);
 
-static void thread_wake_up(u_long data)
+static void thread_wake_up(struct timer_list *unused)
 {
        wake_up_interruptible_all(&speakup_event);
 }
index b604d0cccef12f2bf70b130c7f32d4d356fd3baf..6cb6eb0673c6da0a2f03473e02bde28196a74a9c 100644 (file)
@@ -493,9 +493,9 @@ static const struct file_operations bus_info_debugfs_fops = {
        .release = single_release,
 };
 
-static void dev_periodic_work(unsigned long __opaque)
+static void dev_periodic_work(struct timer_list *t)
 {
-       struct visor_device *dev = (struct visor_device *)__opaque;
+       struct visor_device *dev = from_timer(dev, t, timer);
        struct visor_driver *drv = to_visor_driver(dev->device.driver);
 
        drv->channel_interrupt(dev);
@@ -667,7 +667,7 @@ int create_visor_device(struct visor_device *dev)
        dev->device.release = visorbus_release_device;
        /* keep a reference just for us (now 2) */
        get_device(&dev->device);
-       setup_timer(&dev->timer, dev_periodic_work, (unsigned long)dev);
+       timer_setup(&dev->timer, dev_periodic_work, 0);
        /*
         * bus_id must be a unique name with respect to this bus TYPE (NOT bus
         * instance).  That's why we need to include the bus number within the
index 735d7e5fa86b0143a9d88e42fcb20f33048d49f1..6d8239163ba55452e0e7a4708b14bd7ebec5c785 100644 (file)
@@ -1766,9 +1766,10 @@ static int visornic_poll(struct napi_struct *napi, int budget)
  * Main function of the vnic_incoming thread. Periodically check the response
  * queue and drain it if needed.
  */
-static void poll_for_irq(unsigned long v)
+static void poll_for_irq(struct timer_list *t)
 {
-       struct visornic_devdata *devdata = (struct visornic_devdata *)v;
+       struct visornic_devdata *devdata = from_timer(devdata, t,
+                                                     irq_poll_timer);
 
        if (!visorchannel_signalempty(
                                   devdata->dev->visorchannel,
@@ -1899,8 +1900,7 @@ static int visornic_probe(struct visor_device *dev)
        /* Let's start our threads to get responses */
        netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
 
-       setup_timer(&devdata->irq_poll_timer, poll_for_irq,
-                   (unsigned long)devdata);
+       timer_setup(&devdata->irq_poll_timer, poll_for_irq, 0);
        /* Note: This time has to start running before the while
         * loop below because the napi routine is responsible for
         * setting enab_dis_acked
index 8a275996d4e63a6eddccc6df30a152d72aab2813..028da1dc1b818380bd36c71b4cf403cce5523b45 100644 (file)
@@ -267,7 +267,7 @@ static void update_scan_time(void)
                last_scanned_shadow[i].time_scan = jiffies;
 }
 
-static void remove_network_from_shadow(unsigned long unused)
+static void remove_network_from_shadow(struct timer_list *unused)
 {
        unsigned long now = jiffies;
        int i, j;
@@ -292,7 +292,7 @@ static void remove_network_from_shadow(unsigned long unused)
        }
 }
 
-static void clear_duringIP(unsigned long arg)
+static void clear_duringIP(struct timer_list *unused)
 {
        wilc_optaining_ip = false;
 }
@@ -2278,8 +2278,8 @@ int wilc_init_host_int(struct net_device *net)
 
        priv = wdev_priv(net->ieee80211_ptr);
        if (op_ifcs == 0) {
-               setup_timer(&hAgingTimer, remove_network_from_shadow, 0);
-               setup_timer(&wilc_during_ip_timer, clear_duringIP, 0);
+               timer_setup(&hAgingTimer, remove_network_from_shadow, 0);
+               timer_setup(&wilc_during_ip_timer, clear_duringIP, 0);
        }
        op_ifcs++;
 
index 90388698c222996d4d9db8901d876adb1cb0b700..417b9e66b0cd0b9017dfe67b3dd9bb60ae257e80 100644 (file)
@@ -165,6 +165,7 @@ enum cxgbit_csk_flags {
        CSK_LOGIN_PDU_DONE,
        CSK_LOGIN_DONE,
        CSK_DDP_ENABLE,
+       CSK_ABORT_RPL_WAIT,
 };
 
 struct cxgbit_sock_common {
@@ -321,6 +322,7 @@ int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
 int cxgbit_setup_conn_digest(struct cxgbit_sock *);
 int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
 void cxgbit_free_np(struct iscsi_np *);
+void cxgbit_abort_conn(struct cxgbit_sock *csk);
 void cxgbit_free_conn(struct iscsi_conn *);
 extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
 int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
index d4fa41be80f9a1719574af28c8981ef8e8d287ca..92eb57e2adaf555fbb4f1938eda4bc78366eb505 100644 (file)
@@ -665,6 +665,46 @@ static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
        return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
 }
 
+static void
+__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+       __kfree_skb(skb);
+
+       if (csk->com.state != CSK_STATE_ESTABLISHED)
+               goto no_abort;
+
+       set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
+       csk->com.state = CSK_STATE_ABORTING;
+
+       cxgbit_send_abort_req(csk);
+
+       return;
+
+no_abort:
+       cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
+       cxgbit_put_csk(csk);
+}
+
+void cxgbit_abort_conn(struct cxgbit_sock *csk)
+{
+       struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);
+
+       cxgbit_get_csk(csk);
+       cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+       spin_lock_bh(&csk->lock);
+       if (csk->lock_owner) {
+               cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
+               __skb_queue_tail(&csk->backlogq, skb);
+       } else {
+               __cxgbit_abort_conn(csk, skb);
+       }
+       spin_unlock_bh(&csk->lock);
+
+       cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
+                             csk->tid, 600, __func__);
+}
+
 void cxgbit_free_conn(struct iscsi_conn *conn)
 {
        struct cxgbit_sock *csk = conn->context;
@@ -1709,12 +1749,17 @@ rel_skb:
 
 static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
 {
+       struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);
 
        switch (csk->com.state) {
        case CSK_STATE_ABORTING:
                csk->com.state = CSK_STATE_DEAD;
+               if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
+                       cxgbit_wake_up(&csk->com.wr_wait, __func__,
+                                      rpl->status);
                cxgbit_put_csk(csk);
                break;
        default:
index 5fdb57cac96874c18a87a0ec9ecfb0449ecc6aae..768cce0ccb807518f32b3d72d875915e9391fdfd 100644 (file)
@@ -275,6 +275,14 @@ void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
                        struct cxgbit_device *cdev = csk->com.cdev;
                        struct cxgbi_ppm *ppm = cdev2ppm(cdev);
 
+                       /* Abort the TCP conn if DDP is not complete to
+                        * avoid any possibility of DDP after freeing
+                        * the cmd.
+                        */
+                       if (unlikely(cmd->write_data_done !=
+                                    cmd->se_cmd.data_length))
+                               cxgbit_abort_conn(csk);
+
                        cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
 
                        dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
index 4fd775ace541a978b258fc8f3b26f3c8108c23f3..f3f8856bfb68e8446ad19a0bf8156217f42cacab 100644 (file)
@@ -446,6 +446,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
        case CPL_RX_ISCSI_DDP:
        case CPL_FW4_ACK:
                lro_flush = false;
+               /* fall through */
        case CPL_ABORT_RPL_RSS:
        case CPL_PASS_ESTABLISH:
        case CPL_PEER_CLOSE:
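
The /* fall through */ comment added above marks the missing break as intentional, which is what compilers checking implicit fallthrough (e.g. GCC's -Wimplicit-fallthrough) look for. A generic sketch of the idiom with hypothetical opcodes and handlers, not tied to the cxgbit code:

static void prepare_special(void);      /* hypothetical */
static void handle_common(void);        /* hypothetical */

enum opcode { OPCODE_SPECIAL, OPCODE_NORMAL, OPCODE_OTHER };

static void handle(enum opcode op)
{
        switch (op) {
        case OPCODE_SPECIAL:
                prepare_special();      /* extra step for this opcode... */
                /* fall through */      /* ...then share the common path */
        case OPCODE_NORMAL:
                handle_common();
                break;
        default:
                break;
        }
}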
index 9e67c7678c86d2af0aa4617a40686103efa55fd2..9eb10d34682cfb23dc65a00092f519d2f46657e4 100644 (file)
@@ -502,7 +502,7 @@ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 EXPORT_SYMBOL(iscsit_aborted_task);
 
 static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
-                                     u32, u32, u8 *, u8 *);
+                                     u32, u32, const void *, void *);
 static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);
 
 static int
@@ -523,7 +523,7 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
                iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                          ISCSI_HDR_LEN, 0, NULL,
-                                         (u8 *)header_digest);
+                                         header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -550,9 +550,8 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                if (conn->conn_ops->DataDigest) {
                        iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
                                                  data_buf, data_buf_len,
-                                                 padding,
-                                                 (u8 *)&cmd->pad_bytes,
-                                                 (u8 *)&cmd->data_crc);
+                                                 padding, &cmd->pad_bytes,
+                                                 &cmd->data_crc);
 
                        iov[niov].iov_base = &cmd->data_crc;
                        iov[niov++].iov_len = ISCSI_CRC_LEN;
@@ -597,7 +596,7 @@ iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
                iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
                                          ISCSI_HDR_LEN, 0, NULL,
-                                         (u8 *)header_digest);
+                                         header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -836,6 +835,7 @@ static int iscsit_add_reject_from_cmd(
        unsigned char *buf)
 {
        struct iscsi_conn *conn;
+       const bool do_put = cmd->se_cmd.se_tfo != NULL;
 
        if (!cmd->conn) {
                pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -866,7 +866,7 @@ static int iscsit_add_reject_from_cmd(
         * Perform the kref_put now if se_cmd has already been setup by
         * scsit_setup_scsi_cmd()
         */
-       if (cmd->se_cmd.se_tfo != NULL) {
+       if (do_put) {
                pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
                target_put_sess_cmd(&cmd->se_cmd);
        }
@@ -1410,13 +1410,9 @@ static u32 iscsit_do_crypto_hash_sg(
        return data_crc;
 }
 
-static void iscsit_do_crypto_hash_buf(
-       struct ahash_request *hash,
-       const void *buf,
-       u32 payload_length,
-       u32 padding,
-       u8 *pad_bytes,
-       u8 *data_crc)
+static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
+       const void *buf, u32 payload_length, u32 padding,
+       const void *pad_bytes, void *data_crc)
 {
        struct scatterlist sg[2];
 
@@ -1462,9 +1458,9 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
        iscsit_mod_dataout_timer(cmd);
 
        if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
-               pr_err("DataOut Offset: %u, Length %u greater than"
-                       " iSCSI Command EDTL %u, protocol error.\n",
-                       hdr->offset, payload_length, cmd->se_cmd.data_length);
+               pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
+                      be32_to_cpu(hdr->offset), payload_length,
+                      cmd->se_cmd.data_length);
                return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
        }
 
@@ -1878,10 +1874,9 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                }
 
                if (conn->conn_ops->DataDigest) {
-                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
-                                       ping_data, payload_length,
-                                       padding, cmd->pad_bytes,
-                                       (u8 *)&data_crc);
+                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
+                                                 payload_length, padding,
+                                                 cmd->pad_bytes, &data_crc);
 
                        if (checksum != data_crc) {
                                pr_err("Ping data CRC32C DataDigest"
@@ -1962,7 +1957,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        struct iscsi_tmr_req *tmr_req;
        struct iscsi_tm *hdr;
        int out_of_order_cmdsn = 0, ret;
-       bool sess_ref = false;
        u8 function, tcm_function = TMR_UNKNOWN;
 
        hdr                     = (struct iscsi_tm *) buf;
@@ -1995,22 +1989,23 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
        cmd->data_direction = DMA_NONE;
        cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
-       if (!cmd->tmr_req)
+       if (!cmd->tmr_req) {
                return iscsit_add_reject_cmd(cmd,
                                             ISCSI_REASON_BOOKMARK_NO_RESOURCES,
                                             buf);
+       }
+
+       transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
+                             conn->sess->se_sess, 0, DMA_NONE,
+                             TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+
+       target_get_sess_cmd(&cmd->se_cmd, true);
 
        /*
         * TASK_REASSIGN for ERL=2 / connection stays inside of
         * LIO-Target $FABRIC_MOD
         */
        if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
-               transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
-                                     conn->sess->se_sess, 0, DMA_NONE,
-                                     TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
-
-               target_get_sess_cmd(&cmd->se_cmd, true);
-               sess_ref = true;
                tcm_function = iscsit_convert_tmf(function);
                if (tcm_function == TMR_UNKNOWN) {
                        pr_err("Unknown iSCSI TMR Function:"
@@ -2101,12 +2096,14 @@ attach:
 
        if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
                int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
-               if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+               if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
                        out_of_order_cmdsn = 1;
-               else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+               } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+                       target_put_sess_cmd(&cmd->se_cmd);
                        return 0;
-               else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+               } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
                        return -1;
+               }
        }
        iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
 
@@ -2126,12 +2123,8 @@ attach:
         * For connection recovery, this is also the default action for
         * TMR TASK_REASSIGN.
         */
-       if (sess_ref) {
-               pr_debug("Handle TMR, using sess_ref=true check\n");
-               target_put_sess_cmd(&cmd->se_cmd);
-       }
-
        iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+       target_put_sess_cmd(&cmd->se_cmd);
        return 0;
 }
 EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
@@ -2287,10 +2280,9 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                        goto reject;
 
                if (conn->conn_ops->DataDigest) {
-                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
-                                       text_in, payload_length,
-                                       padding, (u8 *)&pad_bytes,
-                                       (u8 *)&data_crc);
+                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash, text_in,
+                                                 payload_length, padding,
+                                                 &pad_bytes, &data_crc);
 
                        if (checksum != data_crc) {
                                pr_err("Text data CRC32C DataDigest"
@@ -3978,9 +3970,9 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                                return;
                        }
 
-                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
-                                       buffer, ISCSI_HDR_LEN,
-                                       0, NULL, (u8 *)&checksum);
+                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
+                                                 ISCSI_HDR_LEN, 0, NULL,
+                                                 &checksum);
 
                        if (digest != checksum) {
                                pr_err("HeaderDigest CRC32C failed,"
index 0dd4c45f7575a2795f3987dd874ac4891d93184f..0ebc4818e132ade606a77e8e46b46e183e111ddd 100644 (file)
@@ -1123,7 +1123,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
 
        ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
        if (ret < 0)
-               return NULL;
+               goto free_out;
 
        ret = iscsit_tpg_add_portal_group(tiqn, tpg);
        if (ret != 0)
@@ -1135,6 +1135,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
        return &tpg->tpg_se_tpg;
 out:
        core_tpg_deregister(&tpg->tpg_se_tpg);
+free_out:
        kfree(tpg);
        return NULL;
 }
index 76184094a0cf944efc26c0aa32626b71e56b07b8..5efa42b939a104052f4fe0ea8ec94d09961cd8c0 100644 (file)
@@ -34,7 +34,7 @@
 #include "iscsi_target_erl2.h"
 #include "iscsi_target.h"
 
-#define OFFLOAD_BUF_SIZE       32768
+#define OFFLOAD_BUF_SIZE       32768U
 
 /*
  *     Used to dump excess datain payload for certain error recovery
@@ -56,7 +56,7 @@ int iscsit_dump_data_payload(
        if (conn->sess->sess_ops->RDMAExtensions)
                return 0;
 
-       length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
+       length = min(buf_len, OFFLOAD_BUF_SIZE);
 
        buf = kzalloc(length, GFP_ATOMIC);
        if (!buf) {
@@ -67,8 +67,7 @@ int iscsit_dump_data_payload(
        memset(&iov, 0, sizeof(struct kvec));
 
        while (offset < buf_len) {
-               size = ((offset + length) > buf_len) ?
-                       (buf_len - offset) : length;
+               size = min(buf_len - offset, length);
 
                iov.iov_len = size;
                iov.iov_base = buf;
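
Two small things are going on in the hunk above: OFFLOAD_BUF_SIZE gains a U suffix and the open-coded ternaries become min() calls. The kernel's min() macro type-checks its arguments, so comparing an unsigned length against a plain signed constant would trip the distinct-types warning; the unsigned constant keeps both sides the same type. A compressed sketch, assuming a u32 length as in iscsit_dump_data_payload():

#include <linux/kernel.h>
#include <linux/types.h>

#define OFFLOAD_BUF_SIZE        32768U  /* unsigned, so it matches the u32s below */

static u32 dump_chunk_size(u32 buf_len, u32 offset)
{
        u32 length = min(buf_len, OFFLOAD_BUF_SIZE);

        /* the last chunk may be shorter than the staging buffer */
        return min(buf_len - offset, length);
}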
index caab1045742dfc659ac906abfffaa4c238f152e4..29a37b242d30a3f225f52ea9f34a00e412f02d95 100644 (file)
@@ -1380,10 +1380,8 @@ int iscsi_decode_text_input(
                char *key, *value;
                struct iscsi_param *param;
 
-               if (iscsi_extract_key_value(start, &key, &value) < 0) {
-                       kfree(tmpbuf);
-                       return -1;
-               }
+               if (iscsi_extract_key_value(start, &key, &value) < 0)
+                       goto free_buffer;
 
                pr_debug("Got key: %s=%s\n", key, value);
 
@@ -1396,38 +1394,37 @@ int iscsi_decode_text_input(
 
                param = iscsi_check_key(key, phase, sender, param_list);
                if (!param) {
-                       if (iscsi_add_notunderstood_response(key,
-                                       value, param_list) < 0) {
-                               kfree(tmpbuf);
-                               return -1;
-                       }
+                       if (iscsi_add_notunderstood_response(key, value,
+                                                            param_list) < 0)
+                               goto free_buffer;
+
                        start += strlen(key) + strlen(value) + 2;
                        continue;
                }
-               if (iscsi_check_value(param, value) < 0) {
-                       kfree(tmpbuf);
-                       return -1;
-               }
+               if (iscsi_check_value(param, value) < 0)
+                       goto free_buffer;
 
                start += strlen(key) + strlen(value) + 2;
 
                if (IS_PSTATE_PROPOSER(param)) {
-                       if (iscsi_check_proposer_state(param, value) < 0) {
-                               kfree(tmpbuf);
-                               return -1;
-                       }
+                       if (iscsi_check_proposer_state(param, value) < 0)
+                               goto free_buffer;
+
                        SET_PSTATE_RESPONSE_GOT(param);
                } else {
-                       if (iscsi_check_acceptor_state(param, value, conn) < 0) {
-                               kfree(tmpbuf);
-                               return -1;
-                       }
+                       if (iscsi_check_acceptor_state(param, value, conn) < 0)
+                               goto free_buffer;
+
                        SET_PSTATE_ACCEPTOR(param);
                }
        }
 
        kfree(tmpbuf);
        return 0;
+
+free_buffer:
+       kfree(tmpbuf);
+       return -1;
 }
 
 int iscsi_encode_text_output(
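
The rewrite of iscsi_decode_text_input() above is the usual kernel single-exit cleanup idiom: every early failure jumps to one label that frees tmpbuf instead of repeating kfree() and return at each site. A stripped-down sketch of the shape, with hypothetical helpers standing in for the key/value checks:

#include <linux/slab.h>
#include <linux/string.h>

static int extract_keys(char *buf);     /* hypothetical */
static int check_values(char *buf);     /* hypothetical */

static int decode_input(const char *in)
{
        char *tmpbuf = kstrdup(in, GFP_KERNEL);

        if (!tmpbuf)
                return -ENOMEM;

        if (extract_keys(tmpbuf) < 0)
                goto free_buffer;
        if (check_values(tmpbuf) < 0)
                goto free_buffer;

        kfree(tmpbuf);
        return 0;

free_buffer:
        kfree(tmpbuf);
        return -1;
}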
index e446a09c886b1a2ca1344d87f755806b237fe406..f65e5e584212faa2fbe360bb345d9c422acb4457 100644 (file)
@@ -25,8 +25,6 @@
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_seq_pdu_list.h"
 
-#define OFFLOAD_BUF_SIZE       32768
-
 #ifdef DEBUG
 static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
 {
index 594d07a1e995ec87d467f4286a1d8668f2e32e3d..4b34f71547c689e21a98530c8eb1417b0de20cc7 100644 (file)
@@ -90,10 +90,10 @@ int iscsit_load_discovery_tpg(void)
         */
        param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
        if (!param)
-               goto out;
+               goto free_pl_out;
 
        if (iscsi_update_param_value(param, "CHAP,None") < 0)
-               goto out;
+               goto free_pl_out;
 
        tpg->tpg_attrib.authentication = 0;
 
@@ -105,6 +105,8 @@ int iscsit_load_discovery_tpg(void)
        pr_debug("CORE[0] - Allocated Discovery TPG\n");
 
        return 0;
+free_pl_out:
+       iscsi_release_param_list(tpg->param_list);
 out:
        if (tpg->sid == 1)
                core_tpg_deregister(&tpg->tpg_se_tpg);
@@ -119,6 +121,7 @@ void iscsit_release_discovery_tpg(void)
        if (!tpg)
                return;
 
+       iscsi_release_param_list(tpg->param_list);
        core_tpg_deregister(&tpg->tpg_se_tpg);
 
        kfree(tpg);
index 54f20f184dd6b5c8422f8e72f81e173c70d5efd0..4435bf374d2d55fd79d9dc75dbd086a7ba6dbc28 100644 (file)
@@ -695,6 +695,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
        struct iscsi_session *sess;
        struct se_cmd *se_cmd = &cmd->se_cmd;
 
+       WARN_ON(!list_empty(&cmd->i_conn_node));
+
        if (cmd->conn)
                sess = cmd->conn->sess;
        else
@@ -717,6 +719,8 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
 {
        struct iscsi_conn *conn = cmd->conn;
 
+       WARN_ON(!list_empty(&cmd->i_conn_node));
+
        if (cmd->data_direction == DMA_TO_DEVICE) {
                iscsit_stop_dataout_timer(cmd);
                iscsit_free_r2ts_from_list(cmd);
index 928127642574b2d4b90592dfcd3688477a7d7a96..e46ca968009c06a2958e347104168cca32c37278 100644 (file)
@@ -918,7 +918,7 @@ static int core_alua_update_tpg_primary_metadata(
 {
        unsigned char *md_buf;
        struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
-       char path[ALUA_METADATA_PATH_LEN];
+       char *path;
        int len, rc;
 
        md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
@@ -927,8 +927,6 @@ static int core_alua_update_tpg_primary_metadata(
                return -ENOMEM;
        }
 
-       memset(path, 0, ALUA_METADATA_PATH_LEN);
-
        len = snprintf(md_buf, ALUA_MD_BUF_LEN,
                        "tg_pt_gp_id=%hu\n"
                        "alua_access_state=0x%02x\n"
@@ -937,11 +935,14 @@ static int core_alua_update_tpg_primary_metadata(
                        tg_pt_gp->tg_pt_gp_alua_access_state,
                        tg_pt_gp->tg_pt_gp_alua_access_status);
 
-       snprintf(path, ALUA_METADATA_PATH_LEN,
-               "%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0],
-               config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
-
-       rc = core_alua_write_tpg_metadata(path, md_buf, len);
+       rc = -ENOMEM;
+       path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
+                       &wwn->unit_serial[0],
+                       config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+       if (path) {
+               rc = core_alua_write_tpg_metadata(path, md_buf, len);
+               kfree(path);
+       }
        kfree(md_buf);
        return rc;
 }
@@ -1209,7 +1210,7 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
 {
        struct se_portal_group *se_tpg = lun->lun_tpg;
        unsigned char *md_buf;
-       char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+       char *path;
        int len, rc;
 
        mutex_lock(&lun->lun_tg_pt_md_mutex);
@@ -1221,28 +1222,32 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
                goto out_unlock;
        }
 
-       memset(path, 0, ALUA_METADATA_PATH_LEN);
-       memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
-
-       len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
-                       se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
-
-       if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
-               snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
-                               se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
-
        len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
                        "alua_tg_pt_status=0x%02x\n",
                        atomic_read(&lun->lun_tg_pt_secondary_offline),
                        lun->lun_tg_pt_secondary_stat);
 
-       snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
-                       db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
-                       lun->unpacked_lun);
+       if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
+               path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
+                               db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+                               se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+                               se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
+                               lun->unpacked_lun);
+       } else {
+               path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
+                               db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+                               se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+                               lun->unpacked_lun);
+       }
+       if (!path) {
+               rc = -ENOMEM;
+               goto out_free;
+       }
 
        rc = core_alua_write_tpg_metadata(path, md_buf, len);
+       kfree(path);
+out_free:
        kfree(md_buf);
-
 out_unlock:
        mutex_unlock(&lun->lun_tg_pt_md_mutex);
        return rc;
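
The ALUA metadata hunks above, like the APTPL one further down, replace a fixed char path[512] on the stack with a kasprintf() allocation, which drops the large stack footprint and the need to size the buffer against the WWN up front. The shape of the pattern, reduced to a sketch with a hypothetical write helper:

#include <linux/slab.h>
#include <linux/types.h>

static int write_file(const char *path, unsigned char *buf, u32 len);  /* hypothetical */

static int write_tpg_metadata(const char *db_root, const char *wwn,
                              unsigned char *md_buf, u32 len)
{
        char *path;
        int rc;

        path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s", db_root, wwn);
        if (!path)
                return -ENOMEM;

        rc = write_file(path, md_buf, len);
        kfree(path);
        return rc;
}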
index 1902cb5c3b52c32de28290199012e9c2ae500e4a..fc9637cce82564dfb2d8b83b160cf1cd8c8b5760 100644 (file)
  */
 #define ALUA_DEFAULT_IMPLICIT_TRANS_SECS                       0
 #define ALUA_MAX_IMPLICIT_TRANS_SECS                   255
-/*
- * Used by core_alua_update_tpg_primary_metadata() and
- * core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_METADATA_PATH_LEN                         512
-/*
- * Used by core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_SECONDARY_METADATA_WWN_LEN                        256
 
 /* Used by core_alua_update_tpg_(primary,secondary)_metadata */
 #define ALUA_MD_BUF_LEN                                        1024
index bd87cc26c6e500cdb813732c291f4d33bbc46964..72b1cd1bf9d9fdcfc64084f0177df66230e77450 100644 (file)
@@ -1611,12 +1611,12 @@ static match_table_t tokens = {
        {Opt_res_type, "res_type=%d"},
        {Opt_res_scope, "res_scope=%d"},
        {Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
-       {Opt_mapped_lun, "mapped_lun=%lld"},
+       {Opt_mapped_lun, "mapped_lun=%u"},
        {Opt_target_fabric, "target_fabric=%s"},
        {Opt_target_node, "target_node=%s"},
        {Opt_tpgt, "tpgt=%d"},
        {Opt_port_rtpi, "port_rtpi=%d"},
-       {Opt_target_lun, "target_lun=%lld"},
+       {Opt_target_lun, "target_lun=%u"},
        {Opt_err, NULL}
 };
 
@@ -1693,7 +1693,7 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
                        }
                        break;
                case Opt_sa_res_key:
-                       ret = kstrtoull(args->from, 0, &tmp_ll);
+                       ret = match_u64(args,  &tmp_ll);
                        if (ret < 0) {
                                pr_err("kstrtoull() failed for sa_res_key=\n");
                                goto out;
@@ -1727,10 +1727,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
                        all_tg_pt = (int)arg;
                        break;
                case Opt_mapped_lun:
-                       ret = match_int(args, &arg);
+                       ret = match_u64(args, &tmp_ll);
                        if (ret)
                                goto out;
-                       mapped_lun = (u64)arg;
+                       mapped_lun = (u64)tmp_ll;
                        break;
                /*
                 * PR APTPL Metadata for Target Port
@@ -1768,10 +1768,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
                                goto out;
                        break;
                case Opt_target_lun:
-                       ret = match_int(args, &arg);
+                       ret = match_u64(args, &tmp_ll);
                        if (ret)
                                goto out;
-                       target_lun = (u64)arg;
+                       target_lun = (u64)tmp_ll;
                        break;
                default:
                        break;
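
The parsing changes above follow from the type of the values: mapped_lun and target_lun are u64 in the target core, so reading them through match_int() and an int would truncate or reject large LUNs; match_u64() parses the matched substring straight into a u64 (the same helper the sa_res_key case now uses). A minimal sketch of one case, assuming args[] comes from match_token() as in the store handler:

#include <linux/parser.h>
#include <linux/types.h>

/* args[0] points at the matched number, as produced by match_token(). */
static int parse_mapped_lun(substring_t *args, u64 *mapped_lun)
{
        u64 tmp;
        int ret = match_u64(args, &tmp);

        if (ret)
                return ret;

        *mapped_lun = tmp;
        return 0;
}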
index e9e917cc6441913326316e75cc42dab99cf353c3..e1416b007aa43e0dcbd40d3b4d5e720c42e355c4 100644 (file)
@@ -623,8 +623,6 @@ static struct configfs_attribute *target_fabric_port_attrs[] = {
        NULL,
 };
 
-extern struct configfs_item_operations target_core_dev_item_ops;
-
 static int target_fabric_port_link(
        struct config_item *lun_ci,
        struct config_item *se_dev_ci)
index c629817a8854bea49a18c6b3f93f3f8923f99079..9b2c0c773022c0013de3bbce849ca78af117b2c5 100644 (file)
@@ -482,6 +482,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
        struct inode *inode = file->f_mapping->host;
        int ret;
 
+       if (!nolb) {
+               return 0;
+       }
+
        if (cmd->se_dev->dev_attrib.pi_prot_type) {
                ret = fd_do_prot_unmap(cmd, lba, nolb);
                if (ret)
index 18e3eb16e756735f7fe8028715c4f090979dcb2a..9384d19a7326c81274e589a58cd0a56b4f8bb98a 100644 (file)
@@ -89,6 +89,7 @@ int   target_for_each_device(int (*fn)(struct se_device *dev, void *data),
                               void *data);
 
 /* target_core_configfs.c */
+extern struct configfs_item_operations target_core_dev_item_ops;
 void   target_setup_backend_cits(struct target_backend *);
 
 /* target_core_fabric_configfs.c */
index dd2cd8048582ce7520661b5ec3477a8e431b0f86..b024613f921718a40c761ea0f8eb7befe28dd168 100644 (file)
@@ -58,8 +58,10 @@ void core_pr_dump_initiator_port(
        char *buf,
        u32 size)
 {
-       if (!pr_reg->isid_present_at_reg)
+       if (!pr_reg->isid_present_at_reg) {
                buf[0] = '\0';
+               return;
+       }
 
        snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
 }
@@ -351,6 +353,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
                break;
        case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
                we = 1;
+               /* fall through */
        case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
                /*
                 * Some commands are only allowed for registered I_T Nexuses.
@@ -359,6 +362,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
                break;
        case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
                we = 1;
+               /* fall through */
        case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
                /*
                 * Each registered I_T Nexus is a reservation holder.
@@ -1521,7 +1525,7 @@ core_scsi3_decode_spec_i_port(
        tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
        if (!tidh_new) {
                pr_err("Unable to allocate tidh_new\n");
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
        }
        INIT_LIST_HEAD(&tidh_new->dest_list);
        tidh_new->dest_tpg = tpg;
@@ -1533,7 +1537,7 @@ core_scsi3_decode_spec_i_port(
                                sa_res_key, all_tg_pt, aptpl);
        if (!local_pr_reg) {
                kfree(tidh_new);
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
        }
        tidh_new->dest_pr_reg = local_pr_reg;
        /*
@@ -1553,7 +1557,7 @@ core_scsi3_decode_spec_i_port(
 
        buf = transport_kmap_data_sg(cmd);
        if (!buf) {
-               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
                goto out;
        }
 
@@ -1767,7 +1771,7 @@ core_scsi3_decode_spec_i_port(
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
                        kfree(tidh_new);
-                       ret = TCM_INVALID_PARAMETER_LIST;
+                       ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
                        goto out_unmap;
                }
                tidh_new->dest_pr_reg = dest_pr_reg;
@@ -1971,24 +1975,21 @@ static int __core_scsi3_write_aptpl_to_file(
        struct t10_wwn *wwn = &dev->t10_wwn;
        struct file *file;
        int flags = O_RDWR | O_CREAT | O_TRUNC;
-       char path[512];
+       char *path;
        u32 pr_aptpl_buf_len;
        int ret;
        loff_t pos = 0;
 
-       memset(path, 0, 512);
-
-       if (strlen(&wwn->unit_serial[0]) >= 512) {
-               pr_err("WWN value for struct se_device does not fit"
-                       " into path buffer\n");
-               return -EMSGSIZE;
-       }
+       path = kasprintf(GFP_KERNEL, "%s/pr/aptpl_%s", db_root,
+                       &wwn->unit_serial[0]);
+       if (!path)
+               return -ENOMEM;
 
-       snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]);
        file = filp_open(path, flags, 0600);
        if (IS_ERR(file)) {
                pr_err("filp_open(%s) for APTPL metadata"
                        " failed\n", path);
+               kfree(path);
                return PTR_ERR(file);
        }
 
@@ -1999,6 +2000,7 @@ static int __core_scsi3_write_aptpl_to_file(
        if (ret < 0)
                pr_debug("Error writing APTPL metadata file: %s\n", path);
        fput(file);
+       kfree(path);
 
        return (ret < 0) ? -EIO : 0;
 }
@@ -2103,7 +2105,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
                                        register_type, 0)) {
                                pr_err("Unable to allocate"
                                        " struct t10_pr_registration\n");
-                               return TCM_INVALID_PARAMETER_LIST;
+                               return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
                        }
                } else {
                        /*
@@ -3215,7 +3217,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
         */
        buf = transport_kmap_data_sg(cmd);
        if (!buf) {
-               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
                goto out_put_pr_reg;
        }
 
@@ -3267,7 +3269,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
 
        buf = transport_kmap_data_sg(cmd);
        if (!buf) {
-               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
                goto out_put_pr_reg;
        }
        proto_ident = (buf[24] & 0x0f);
@@ -3466,7 +3468,7 @@ after_iport_check:
                if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
                                        dest_lun, dest_se_deve, dest_se_deve->mapped_lun,
                                        iport_ptr, sa_res_key, 0, aptpl, 2, 1)) {
-                       ret = TCM_INVALID_PARAMETER_LIST;
+                       ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
                        goto out;
                }
                spin_lock(&dev->dev_reservation_lock);
@@ -3528,8 +3530,6 @@ after_iport_check:
 
        core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);
 
-       transport_kunmap_data_sg(cmd);
-
        core_scsi3_put_pr_reg(dest_pr_reg);
        return 0;
 out:
@@ -4011,6 +4011,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
                 * Set the ADDITIONAL DESCRIPTOR LENGTH
                 */
                put_unaligned_be32(desc_len, &buf[off]);
+               off += 4;
                /*
                 * Size of full desctipor header minus TransportID
                 * containing $FABRIC_MOD specific) initiator device/port
index 7c69b4a9694d2016a8aac3b63a4b7d4399146688..0d99b242e82e3f84da25a47564f96db60be4b5f5 100644 (file)
@@ -920,7 +920,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                                        " %d i: %d bio: %p, allocating another"
                                        " bio\n", bio->bi_vcnt, i, bio);
 
-                               rc = blk_rq_append_bio(req, bio);
+                               rc = blk_rq_append_bio(req, &bio);
                                if (rc) {
                                        pr_err("pSCSI: failed to append bio\n");
                                        goto fail;
@@ -938,7 +938,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
        }
 
        if (bio) {
-               rc = blk_rq_append_bio(req, bio);
+               rc = blk_rq_append_bio(req, &bio);
                if (rc) {
                        pr_err("pSCSI: failed to append bio\n");
                        goto fail;
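
The pscsi hunks above track a block-layer interface change: blk_rq_append_bio() now takes struct bio ** so it can hand back a replacement (e.g. a bounce bio), and callers must keep using the pointer it leaves behind. A hedged sketch of just the call pattern, not the full scatterlist mapping loop:

#include <linux/blkdev.h>
#include <linux/printk.h>

static int append_one_bio(struct request *req, struct bio *bio)
{
        int rc = blk_rq_append_bio(req, &bio);

        if (rc) {
                pr_err("failed to append bio\n");
                return rc;
        }

        /* keep working with 'bio'; it may differ from the one passed in */
        return 0;
}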
index e22847bd79b95b9bfb76a12a6747a0d5b913e44d..9c7bc1ca341a6821582b79e0916901346b841515 100644 (file)
@@ -133,6 +133,15 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
                spin_unlock(&se_cmd->t_state_lock);
                return false;
        }
+       if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
+               if (se_cmd->scsi_status) {
+                       pr_debug("Attempted to abort io tag: %llu early failure"
+                                " status: 0x%02x\n", se_cmd->tag,
+                                se_cmd->scsi_status);
+                       spin_unlock(&se_cmd->t_state_lock);
+                       return false;
+               }
+       }
        if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
                pr_debug("Attempted to abort io tag: %llu already shutdown,"
                        " skipping\n", se_cmd->tag);
@@ -217,7 +226,8 @@ static void core_tmr_drain_tmr_list(
         * LUN_RESET tmr..
         */
        spin_lock_irqsave(&dev->se_tmr_lock, flags);
-       list_del_init(&tmr->tmr_list);
+       if (tmr)
+               list_del_init(&tmr->tmr_list);
        list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
                cmd = tmr_p->task_cmd;
                if (!cmd) {
index 836d552b0385e978bc1a0b98c59a3379c262fd61..58caacd54a3b2a650061d558097d1179e1031035 100644 (file)
@@ -67,7 +67,6 @@ static void transport_complete_task_attr(struct se_cmd *cmd);
 static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
 static void transport_handle_queue_full(struct se_cmd *cmd,
                struct se_device *dev, int err, bool write_pending);
-static int transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
@@ -668,7 +667,7 @@ int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
        if (transport_cmd_check_stop_to_fabric(cmd))
                return 1;
        if (remove && ack_kref)
-               ret = transport_put_cmd(cmd);
+               ret = target_put_sess_cmd(cmd);
 
        return ret;
 }
@@ -1730,9 +1729,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 {
        int ret = 0, post_ret = 0;
 
-       if (transport_check_aborted_status(cmd, 1))
-               return;
-
        pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
                 sense_reason);
        target_show_cmd("-----[ ", cmd);
@@ -1741,6 +1737,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
         * For SAM Task Attribute emulation for failed struct se_cmd
         */
        transport_complete_task_attr(cmd);
+
        /*
         * Handle special case for COMPARE_AND_WRITE failure, where the
         * callback is expected to drop the per device ->caw_sem.
@@ -1749,6 +1746,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
             cmd->transport_complete_callback)
                cmd->transport_complete_callback(cmd, false, &post_ret);
 
+       if (transport_check_aborted_status(cmd, 1))
+               return;
+
        switch (sense_reason) {
        case TCM_NON_EXISTENT_LUN:
        case TCM_UNSUPPORTED_SCSI_OPCODE:
@@ -1772,8 +1772,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
        case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
                break;
        case TCM_OUT_OF_RESOURCES:
-               sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               break;
+               cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+               goto queue_status;
        case TCM_RESERVATION_CONFLICT:
                /*
                 * No SENSE Data payload for this case, set SCSI Status
@@ -1795,11 +1795,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
                                               cmd->orig_fe_lun, 0x2C,
                                        ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
                }
-               trace_target_cmd_complete(cmd);
-               ret = cmd->se_tfo->queue_status(cmd);
-               if (ret)
-                       goto queue_full;
-               goto check_stop;
+
+               goto queue_status;
        default:
                pr_err("Unknown transport error for CDB 0x%02x: %d\n",
                        cmd->t_task_cdb[0], sense_reason);
@@ -1816,6 +1813,11 @@ check_stop:
        transport_cmd_check_stop_to_fabric(cmd);
        return;
 
+queue_status:
+       trace_target_cmd_complete(cmd);
+       ret = cmd->se_tfo->queue_status(cmd);
+       if (!ret)
+               goto check_stop;
 queue_full:
        transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
@@ -1973,6 +1975,7 @@ void target_execute_cmd(struct se_cmd *cmd)
        }
 
        cmd->t_state = TRANSPORT_PROCESSING;
+       cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
        cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
        spin_unlock_irq(&cmd->t_state_lock);
 
@@ -2010,6 +2013,8 @@ static void target_restart_delayed_cmds(struct se_device *dev)
                list_del(&cmd->se_delayed_node);
                spin_unlock(&dev->delayed_cmd_lock);
 
+               cmd->transport_state |= CMD_T_SENT;
+
                __target_execute_cmd(cmd, true);
 
                if (cmd->sam_task_attr == TCM_ORDERED_TAG)
@@ -2045,6 +2050,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
                         dev->dev_cur_ordered_id);
        }
+       cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
+
 restart:
        target_restart_delayed_cmds(dev);
 }
@@ -2090,7 +2097,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
                        ret = cmd->se_tfo->queue_data_in(cmd);
                        break;
                }
-               /* Fall through for DMA_TO_DEVICE */
+               /* fall through */
        case DMA_NONE:
 queue_status:
                trace_target_cmd_complete(cmd);
@@ -2268,7 +2275,7 @@ queue_rsp:
                                goto queue_full;
                        break;
                }
-               /* Fall through for DMA_TO_DEVICE */
+               /* fall through */
        case DMA_NONE:
 queue_status:
                trace_target_cmd_complete(cmd);
@@ -2352,22 +2359,6 @@ static inline void transport_free_pages(struct se_cmd *cmd)
        cmd->t_bidi_data_nents = 0;
 }
 
-/**
- * transport_put_cmd - release a reference to a command
- * @cmd:       command to release
- *
- * This routine releases our reference to the command and frees it if possible.
- */
-static int transport_put_cmd(struct se_cmd *cmd)
-{
-       BUG_ON(!cmd->se_tfo);
-       /*
-        * If this cmd has been setup with target_get_sess_cmd(), drop
-        * the kref and call ->release_cmd() in kref callback.
-        */
-       return target_put_sess_cmd(cmd);
-}
-
 void *transport_kmap_data_sg(struct se_cmd *cmd)
 {
        struct scatterlist *sg = cmd->t_data_sg;
@@ -2570,7 +2561,20 @@ EXPORT_SYMBOL(transport_generic_new_cmd);
 
 static void transport_write_pending_qf(struct se_cmd *cmd)
 {
+       unsigned long flags;
        int ret;
+       bool stop;
+
+       spin_lock_irqsave(&cmd->t_state_lock, flags);
+       stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
+       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+       if (stop) {
+               pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
+                       __func__, __LINE__, cmd->tag);
+               complete_all(&cmd->t_transport_stop_comp);
+               return;
+       }
 
        ret = cmd->se_tfo->write_pending(cmd);
        if (ret) {
@@ -2603,7 +2607,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
                        target_wait_free_cmd(cmd, &aborted, &tas);
 
                if (!aborted || tas)
-                       ret = transport_put_cmd(cmd);
+                       ret = target_put_sess_cmd(cmd);
        } else {
                if (wait_for_tasks)
                        target_wait_free_cmd(cmd, &aborted, &tas);
@@ -2619,7 +2623,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
                        transport_lun_remove_cmd(cmd);
 
                if (!aborted || tas)
-                       ret = transport_put_cmd(cmd);
+                       ret = target_put_sess_cmd(cmd);
        }
        /*
         * If the task has been internally aborted due to TMR ABORT_TASK
@@ -2664,6 +2668,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
                ret = -ESHUTDOWN;
                goto out;
        }
+       se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
        list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
 out:
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -3145,6 +3150,21 @@ static const struct sense_info sense_info_table[] = {
                .key = NOT_READY,
                .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
        },
+       [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
+               /*
+                * From spc4r22 sections 5.7.7 and 5.7.8:
+                * If a PERSISTENT RESERVE OUT command with a REGISTER service action
+                * or a REGISTER AND IGNORE EXISTING KEY service action or
+                * REGISTER AND MOVE service action is attempted,
+                * but there are insufficient device server resources to complete the
+                * operation, then the command shall be terminated with CHECK CONDITION
+                * status, with the sense key set to ILLEGAL REQUEST, and the additional
+                * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
+                */
+               .key = ILLEGAL_REQUEST,
+               .asc = 0x55,
+               .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
+       },
 };
 
 static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
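The new table entry follows the existing convention used by translate_sense_reason(): each sense_reason_t indexes a fixed key/ASC/ASCQ triple that is copied into the sense buffer. A reduced sketch of what such a row amounts to (field names mirror the designated initializers above; the real struct in target_core_transport.c may carry additional members):

#include <linux/types.h>
#include <scsi/scsi_proto.h>    /* ILLEGAL_REQUEST */

struct sense_triple {
        u8 key;         /* SCSI sense key */
        u8 asc;         /* additional sense code */
        u8 ascq;        /* additional sense code qualifier */
};

/* PERSISTENT RESERVE OUT registration failure, as added above. */
static const struct sense_triple insufficient_registration_resources = {
        .key  = ILLEGAL_REQUEST,
        .asc  = 0x55,
        .ascq = 0x04,   /* INSUFFICIENT REGISTRATION RESOURCES */
};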
index 9469695f5871aea064bea2e4b4f65e32742c3cb4..a415d87f22d24237f1ae67539cfbb91a33ddbc9d 100644 (file)
@@ -150,6 +150,8 @@ struct tcmu_dev {
        wait_queue_head_t nl_cmd_wq;
 
        char dev_config[TCMU_CONFIG_LEN];
+
+       int nl_reply_supported;
 };
 
 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
@@ -430,7 +432,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
        struct se_device *se_dev = se_cmd->se_dev;
        struct tcmu_dev *udev = TCMU_DEV(se_dev);
        struct tcmu_cmd *tcmu_cmd;
-       int cmd_id;
 
        tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
        if (!tcmu_cmd)
@@ -438,9 +439,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
 
        tcmu_cmd->se_cmd = se_cmd;
        tcmu_cmd->tcmu_dev = udev;
-       if (udev->cmd_time_out)
-               tcmu_cmd->deadline = jiffies +
-                                       msecs_to_jiffies(udev->cmd_time_out);
 
        tcmu_cmd_reset_dbi_cur(tcmu_cmd);
        tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
@@ -451,19 +449,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
                return NULL;
        }
 
-       idr_preload(GFP_KERNEL);
-       spin_lock_irq(&udev->commands_lock);
-       cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
-               USHRT_MAX, GFP_NOWAIT);
-       spin_unlock_irq(&udev->commands_lock);
-       idr_preload_end();
-
-       if (cmd_id < 0) {
-               tcmu_free_cmd(tcmu_cmd);
-               return NULL;
-       }
-       tcmu_cmd->cmd_id = cmd_id;
-
        return tcmu_cmd;
 }
 
@@ -746,6 +731,30 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
        return command_size;
 }
 
+static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
+{
+       struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+       unsigned long tmo = udev->cmd_time_out;
+       int cmd_id;
+
+       if (tcmu_cmd->cmd_id)
+               return 0;
+
+       cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+       if (cmd_id < 0) {
+               pr_err("tcmu: Could not allocate cmd id.\n");
+               return cmd_id;
+       }
+       tcmu_cmd->cmd_id = cmd_id;
+
+       if (!tmo)
+               return 0;
+
+       tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
+       mod_timer(&udev->timeout, tcmu_cmd->deadline);
+       return 0;
+}
+
 static sense_reason_t
 tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 {
@@ -839,7 +848,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        entry = (void *) mb + CMDR_OFF + cmd_head;
        memset(entry, 0, command_size);
        tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
-       entry->hdr.cmd_id = tcmu_cmd->cmd_id;
 
        /* Handle allocating space from the data area */
        tcmu_cmd_reset_dbi_cur(tcmu_cmd);
@@ -877,6 +885,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        }
        entry->req.iov_bidi_cnt = iov_cnt;
 
+       ret = tcmu_setup_cmd_timer(tcmu_cmd);
+       if (ret) {
+               tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+               mutex_unlock(&udev->cmdr_lock);
+               return TCM_OUT_OF_RESOURCES;
+       }
+       entry->hdr.cmd_id = tcmu_cmd->cmd_id;
+
        /*
         * Recalculate the command's base size and size according
         * to the actual needs
@@ -910,8 +926,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 static sense_reason_t
 tcmu_queue_cmd(struct se_cmd *se_cmd)
 {
-       struct se_device *se_dev = se_cmd->se_dev;
-       struct tcmu_dev *udev = TCMU_DEV(se_dev);
        struct tcmu_cmd *tcmu_cmd;
        sense_reason_t ret;
 
@@ -922,9 +936,6 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
        ret = tcmu_queue_cmd_ring(tcmu_cmd);
        if (ret != TCM_NO_SENSE) {
                pr_err("TCMU: Could not queue command\n");
-               spin_lock_irq(&udev->commands_lock);
-               idr_remove(&udev->commands, tcmu_cmd->cmd_id);
-               spin_unlock_irq(&udev->commands_lock);
 
                tcmu_free_cmd(tcmu_cmd);
        }
@@ -1044,9 +1055,9 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
        return 0;
 }
 
-static void tcmu_device_timedout(unsigned long data)
+static void tcmu_device_timedout(struct timer_list *t)
 {
-       struct tcmu_dev *udev = (struct tcmu_dev *)data;
+       struct tcmu_dev *udev = from_timer(udev, t, timeout);
        unsigned long flags;
 
        spin_lock_irqsave(&udev->commands_lock, flags);
@@ -1106,12 +1117,13 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
        idr_init(&udev->commands);
        spin_lock_init(&udev->commands_lock);
 
-       setup_timer(&udev->timeout, tcmu_device_timedout,
-               (unsigned long)udev);
+       timer_setup(&udev->timeout, tcmu_device_timedout, 0);
 
        init_waitqueue_head(&udev->nl_cmd_wq);
        spin_lock_init(&udev->nl_cmd_lock);
 
+       INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
+
        return &udev->se_dev;
 }
 
@@ -1280,10 +1292,54 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
        kfree(udev);
 }
 
+static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
+{
+       if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+               kmem_cache_free(tcmu_cmd_cache, cmd);
+               return 0;
+       }
+       return -EINVAL;
+}
+
+static void tcmu_blocks_release(struct tcmu_dev *udev)
+{
+       int i;
+       struct page *page;
+
+       /* Try to release all block pages */
+       mutex_lock(&udev->cmdr_lock);
+       for (i = 0; i <= udev->dbi_max; i++) {
+               page = radix_tree_delete(&udev->data_blocks, i);
+               if (page) {
+                       __free_page(page);
+                       atomic_dec(&global_db_count);
+               }
+       }
+       mutex_unlock(&udev->cmdr_lock);
+}
+
 static void tcmu_dev_kref_release(struct kref *kref)
 {
        struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
        struct se_device *dev = &udev->se_dev;
+       struct tcmu_cmd *cmd;
+       bool all_expired = true;
+       int i;
+
+       vfree(udev->mb_addr);
+       udev->mb_addr = NULL;
+
+       /* Upper layer should drain all requests before calling this */
+       spin_lock_irq(&udev->commands_lock);
+       idr_for_each_entry(&udev->commands, cmd, i) {
+               if (tcmu_check_and_free_pending_cmd(cmd) != 0)
+                       all_expired = false;
+       }
+       idr_destroy(&udev->commands);
+       spin_unlock_irq(&udev->commands_lock);
+       WARN_ON(!all_expired);
+
+       tcmu_blocks_release(udev);
 
        call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
 }
@@ -1306,6 +1362,10 @@ static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
 
        if (!tcmu_kern_cmd_reply_supported)
                return;
+
+       if (udev->nl_reply_supported <= 0)
+               return;
+
 relock:
        spin_lock(&udev->nl_cmd_lock);
 
@@ -1332,6 +1392,9 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
        if (!tcmu_kern_cmd_reply_supported)
                return 0;
 
+       if (udev->nl_reply_supported <= 0)
+               return 0;
+
        pr_debug("sleeping for nl reply\n");
        wait_for_completion(&nl_cmd->complete);
 
@@ -1476,8 +1539,6 @@ static int tcmu_configure_device(struct se_device *dev)
        WARN_ON(udev->data_size % PAGE_SIZE);
        WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
 
-       INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
-
        info->version = __stringify(TCMU_MAILBOX_VERSION);
 
        info->mem[0].name = "tcm-user command & data buffer";
@@ -1506,6 +1567,12 @@ static int tcmu_configure_device(struct se_device *dev)
                dev->dev_attrib.emulate_write_cache = 0;
        dev->dev_attrib.hw_queue_depth = 128;
 
+       /* If user didn't explicitly disable netlink reply support, use
+        * module scope setting.
+        */
+       if (udev->nl_reply_supported >= 0)
+               udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
+
        /*
         * Get a ref in case userspace does a close on the uio device before
         * LIO has initiated tcmu_free_device.
@@ -1527,6 +1594,7 @@ err_netlink:
        uio_unregister_device(&udev->uio_info);
 err_register:
        vfree(udev->mb_addr);
+       udev->mb_addr = NULL;
 err_vzalloc:
        kfree(info->name);
        info->name = NULL;
@@ -1534,37 +1602,11 @@ err_vzalloc:
        return ret;
 }
 
-static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
-{
-       if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
-               kmem_cache_free(tcmu_cmd_cache, cmd);
-               return 0;
-       }
-       return -EINVAL;
-}
-
 static bool tcmu_dev_configured(struct tcmu_dev *udev)
 {
        return udev->uio_info.uio_dev ? true : false;
 }
 
-static void tcmu_blocks_release(struct tcmu_dev *udev)
-{
-       int i;
-       struct page *page;
-
-       /* Try to release all block pages */
-       mutex_lock(&udev->cmdr_lock);
-       for (i = 0; i <= udev->dbi_max; i++) {
-               page = radix_tree_delete(&udev->data_blocks, i);
-               if (page) {
-                       __free_page(page);
-                       atomic_dec(&global_db_count);
-               }
-       }
-       mutex_unlock(&udev->cmdr_lock);
-}
-
 static void tcmu_free_device(struct se_device *dev)
 {
        struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1576,9 +1618,6 @@ static void tcmu_free_device(struct se_device *dev)
 static void tcmu_destroy_device(struct se_device *dev)
 {
        struct tcmu_dev *udev = TCMU_DEV(dev);
-       struct tcmu_cmd *cmd;
-       bool all_expired = true;
-       int i;
 
        del_timer_sync(&udev->timeout);
 
@@ -1586,20 +1625,6 @@ static void tcmu_destroy_device(struct se_device *dev)
        list_del(&udev->node);
        mutex_unlock(&root_udev_mutex);
 
-       vfree(udev->mb_addr);
-
-       /* Upper layer should drain all requests before calling this */
-       spin_lock_irq(&udev->commands_lock);
-       idr_for_each_entry(&udev->commands, cmd, i) {
-               if (tcmu_check_and_free_pending_cmd(cmd) != 0)
-                       all_expired = false;
-       }
-       idr_destroy(&udev->commands);
-       spin_unlock_irq(&udev->commands_lock);
-       WARN_ON(!all_expired);
-
-       tcmu_blocks_release(udev);
-
        tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
 
        uio_unregister_device(&udev->uio_info);
@@ -1610,7 +1635,7 @@ static void tcmu_destroy_device(struct se_device *dev)
 
 enum {
        Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
-       Opt_err,
+       Opt_nl_reply_supported, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -1618,6 +1643,7 @@ static match_table_t tokens = {
        {Opt_dev_size, "dev_size=%u"},
        {Opt_hw_block_size, "hw_block_size=%u"},
        {Opt_hw_max_sectors, "hw_max_sectors=%u"},
+       {Opt_nl_reply_supported, "nl_reply_supported=%d"},
        {Opt_err, NULL}
 };
 
@@ -1692,6 +1718,17 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
                        ret = tcmu_set_dev_attrib(&args[0],
                                        &(dev->dev_attrib.hw_max_sectors));
                        break;
+               case Opt_nl_reply_supported:
+                       arg_p = match_strdup(&args[0]);
+                       if (!arg_p) {
+                               ret = -ENOMEM;
+                               break;
+                       }
+                       ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
+                       kfree(arg_p);
+                       if (ret < 0)
+                               pr_err("kstrtoint() failed for nl_reply_supported=\n");
+                       break;
                default:
                        break;
                }
@@ -1734,8 +1771,7 @@ static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
 {
        struct se_dev_attrib *da = container_of(to_config_group(item),
                                        struct se_dev_attrib, da_group);
-       struct tcmu_dev *udev = container_of(da->da_dev,
-                                       struct tcmu_dev, se_dev);
+       struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
 
        return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
 }
@@ -1842,6 +1878,34 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
 }
 CONFIGFS_ATTR(tcmu_, dev_size);
 
+static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
+               char *page)
+{
+       struct se_dev_attrib *da = container_of(to_config_group(item),
+                                               struct se_dev_attrib, da_group);
+       struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+       return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
+}
+
+static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
+               const char *page, size_t count)
+{
+       struct se_dev_attrib *da = container_of(to_config_group(item),
+                                               struct se_dev_attrib, da_group);
+       struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+       s8 val;
+       int ret;
+
+       ret = kstrtos8(page, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       udev->nl_reply_supported = val;
+       return count;
+}
+CONFIGFS_ATTR(tcmu_, nl_reply_supported);
+
 static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
                                             char *page)
 {
@@ -1884,6 +1948,7 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = {
        &tcmu_attr_dev_config,
        &tcmu_attr_dev_size,
        &tcmu_attr_emulate_write_cache,
+       &tcmu_attr_nl_reply_supported,
        NULL,
 };
 
index 7952357df9c862c9d9558522c3664d3d5059a572..edb6e4e9ef3acb8d18fcf9de4280e10fcbda5f14 100644 (file)
@@ -590,7 +590,6 @@ static int __init optee_driver_init(void)
                return -ENODEV;
 
        np = of_find_matching_node(fw_np, optee_match);
-       of_node_put(fw_np);
        if (!np)
                return -ENODEV;
 
index 5d442469c95e94a1ea1e5c634eb6bb62a92d48b7..cf0bde3bb927439a126913935d89bcdbc5fb6941 100644 (file)
@@ -279,7 +279,7 @@ static unsigned detect_isa_irq(void __iomem *);
 #endif                         /* CONFIG_ISA */
 
 #ifndef CONFIG_CYZ_INTR
-static void cyz_poll(unsigned long);
+static void cyz_poll(struct timer_list *);
 
 /* The Cyclades-Z polling cycle is defined by this variable */
 static long cyz_polling_cycle = CZ_DEF_POLL;
@@ -1214,7 +1214,7 @@ static void cyz_rx_restart(struct timer_list *t)
 
 #else                          /* CONFIG_CYZ_INTR */
 
-static void cyz_poll(unsigned long arg)
+static void cyz_poll(struct timer_list *unused)
 {
        struct cyclades_card *cinfo;
        struct cyclades_port *info;
index a6b8240af6cdd6dcfb6bbb07e39e39bc13bf4f7b..b0baa4ce10f9897d5f2284c7aa6e50c6dd5bb7de 100644 (file)
@@ -33,7 +33,7 @@ static void handle_received_SETUP_packet(struct ipw_hardware *ipw,
                                         unsigned int address,
                                         const unsigned char *data, int len,
                                         int is_last);
-static void ipwireless_setup_timer(unsigned long data);
+static void ipwireless_setup_timer(struct timer_list *t);
 static void handle_received_CTRL_packet(struct ipw_hardware *hw,
                unsigned int channel_idx, const unsigned char *data, int len);
 
@@ -1635,8 +1635,7 @@ struct ipw_hardware *ipwireless_hardware_create(void)
        spin_lock_init(&hw->lock);
        tasklet_init(&hw->tasklet, ipwireless_do_tasklet, (unsigned long) hw);
        INIT_WORK(&hw->work_rx, ipw_receive_data_work);
-       setup_timer(&hw->setup_timer, ipwireless_setup_timer,
-                       (unsigned long) hw);
+       timer_setup(&hw->setup_timer, ipwireless_setup_timer, 0);
 
        return hw;
 }
@@ -1670,12 +1669,12 @@ void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw)
        hw->init_loops = 0;
        printk(KERN_INFO IPWIRELESS_PCCARD_NAME
               ": waiting for card to start up...\n");
-       ipwireless_setup_timer((unsigned long) hw);
+       ipwireless_setup_timer(&hw->setup_timer);
 }
 
-static void ipwireless_setup_timer(unsigned long data)
+static void ipwireless_setup_timer(struct timer_list *t)
 {
-       struct ipw_hardware *hw = (struct ipw_hardware *) data;
+       struct ipw_hardware *hw = from_timer(hw, t, setup_timer);
 
        hw->init_loops++;
 
index ee7958ab269f901fb6e5ae44d285a3e7f7a0bbaa..015686ff48255fedbf0705111f9abc2afd99e63b 100644 (file)
@@ -170,7 +170,7 @@ static struct pci_driver isicom_driver = {
 static int prev_card = 3;      /*      start servicing isi_card[0]     */
 static struct tty_driver *isicom_normal;
 
-static void isicom_tx(unsigned long _data);
+static void isicom_tx(struct timer_list *unused);
 static void isicom_start(struct tty_struct *tty);
 
 static DEFINE_TIMER(tx, isicom_tx);
@@ -394,7 +394,7 @@ static inline int __isicom_paranoia_check(struct isi_port const *port,
  *     will do the rest of the work for us.
  */
 
-static void isicom_tx(unsigned long _data)
+static void isicom_tx(struct timer_list *unused)
 {
        unsigned long flags, base;
        unsigned int retries;
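For statically allocated timers the same conversion shows up as the two-argument DEFINE_TIMER() used above: the callback is bound at definition time and no longer receives a data cookie. A sketch (poll_timer and poll_all_ports are illustrative names):

#include <linux/jiffies.h>
#include <linux/timer.h>

static void poll_all_ports(struct timer_list *unused);

/* The timer and its callback are now bound at definition time. */
static DEFINE_TIMER(poll_timer, poll_all_ports);

static void poll_all_ports(struct timer_list *unused)
{
        /* ... service the hardware ... */
        mod_timer(&poll_timer, jiffies + HZ / 100);     /* re-arm every ~10 ms */
}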
index 65a70f3c7cde2b2472e39d795fd65ef81f521648..68cbc03aab4b86e49a3f1f8904de52ef91f4b5c3 100644 (file)
@@ -198,7 +198,7 @@ static void moxa_hangup(struct tty_struct *);
 static int moxa_tiocmget(struct tty_struct *tty);
 static int moxa_tiocmset(struct tty_struct *tty,
                         unsigned int set, unsigned int clear);
-static void moxa_poll(unsigned long);
+static void moxa_poll(struct timer_list *);
 static void moxa_set_tty_param(struct tty_struct *, struct ktermios *);
 static void moxa_shutdown(struct tty_port *);
 static int moxa_carrier_raised(struct tty_port *);
@@ -1429,7 +1429,7 @@ put:
        return 0;
 }
 
-static void moxa_poll(unsigned long ignored)
+static void moxa_poll(struct timer_list *unused)
 {
        struct moxa_board_conf *brd;
        u16 __iomem *ip;
index 3a39eb685c693d4f3a85df528a0a3cded9be442b..5131bdc9e765037f882203f1b9b6afa8bf01656c 100644 (file)
@@ -1310,9 +1310,9 @@ static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
  *     gsm->pending_cmd will be NULL and we just let the timer expire.
  */
 
-static void gsm_control_retransmit(unsigned long data)
+static void gsm_control_retransmit(struct timer_list *t)
 {
-       struct gsm_mux *gsm = (struct gsm_mux *)data;
+       struct gsm_mux *gsm = from_timer(gsm, t, t2_timer);
        struct gsm_control *ctrl;
        unsigned long flags;
        spin_lock_irqsave(&gsm->control_lock, flags);
@@ -1453,9 +1453,9 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
  *     end will get a DM response)
  */
 
-static void gsm_dlci_t1(unsigned long data)
+static void gsm_dlci_t1(struct timer_list *t)
 {
-       struct gsm_dlci *dlci = (struct gsm_dlci *)data;
+       struct gsm_dlci *dlci = from_timer(dlci, t, t1);
        struct gsm_mux *gsm = dlci->gsm;
 
        switch (dlci->state) {
@@ -1634,7 +1634,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
        }
 
        skb_queue_head_init(&dlci->skb_list);
-       setup_timer(&dlci->t1, gsm_dlci_t1, (unsigned long)dlci);
+       timer_setup(&dlci->t1, gsm_dlci_t1, 0);
        tty_port_init(&dlci->port);
        dlci->port.ops = &gsm_port_ops;
        dlci->gsm = gsm;
@@ -2088,7 +2088,7 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
        struct gsm_dlci *dlci;
        int i = 0;
 
-       setup_timer(&gsm->t2_timer, gsm_control_retransmit, (unsigned long)gsm);
+       timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
        init_waitqueue_head(&gsm->event);
        spin_lock_init(&gsm->control_lock);
        spin_lock_init(&gsm->tx_lock);
index 9f246d4db3caa605c8811df37c27bf098fd17e6f..30bb0900cd2f5b5c83089202b62af6add9c45e25 100644 (file)
@@ -115,7 +115,7 @@ static void retry_transmit(struct r3964_info *pInfo);
 static void transmit_block(struct r3964_info *pInfo);
 static void receive_char(struct r3964_info *pInfo, const unsigned char c);
 static void receive_error(struct r3964_info *pInfo, const char flag);
-static void on_timeout(unsigned long priv);
+static void on_timeout(struct timer_list *t);
 static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg);
 static int read_telegram(struct r3964_info *pInfo, struct pid *pid,
                unsigned char __user * buf);
@@ -688,9 +688,9 @@ static void receive_error(struct r3964_info *pInfo, const char flag)
        }
 }
 
-static void on_timeout(unsigned long priv)
+static void on_timeout(struct timer_list *t)
 {
-       struct r3964_info *pInfo = (void *)priv;
+       struct r3964_info *pInfo = from_timer(pInfo, t, tmr);
 
        switch (pInfo->state) {
        case R3964_TX_REQUEST:
@@ -993,7 +993,7 @@ static int r3964_open(struct tty_struct *tty)
        tty->disc_data = pInfo;
        tty->receive_room = 65536;
 
-       setup_timer(&pInfo->tmr, on_timeout, (unsigned long)pInfo);
+       timer_setup(&pInfo->tmr, on_timeout, 0);
 
        return 0;
 }
index f7dc9b1ea806857cdc3083ed5d9b09aeef43a18c..bdd17d2aaafd957d81b382eb16e8fa1f814bb004 100644 (file)
@@ -86,7 +86,7 @@
 
 /****** RocketPort Local Variables ******/
 
-static void rp_do_poll(unsigned long dummy);
+static void rp_do_poll(struct timer_list *unused);
 
 static struct tty_driver *rocket_driver;
 
@@ -525,7 +525,7 @@ static void rp_handle_port(struct r_port *info)
 /*
  *  The top level polling routine.  Repeats every 1/100 HZ (10ms).
  */
-static void rp_do_poll(unsigned long dummy)
+static void rp_do_poll(struct timer_list *unused)
 {
        CONTROLLER_t *ctlp;
        int ctrl, aiop, ch, line;
index ce7ad0acee7aa784772c77339130a3d6289641cd..247788a16f0b62f74f86d0537d51ce0358bdb61d 100644 (file)
@@ -27,23 +27,41 @@ static int ttyport_receive_buf(struct tty_port *port, const unsigned char *cp,
 {
        struct serdev_controller *ctrl = port->client_data;
        struct serport *serport = serdev_controller_get_drvdata(ctrl);
+       int ret;
 
        if (!test_bit(SERPORT_ACTIVE, &serport->flags))
                return 0;
 
-       return serdev_controller_receive_buf(ctrl, cp, count);
+       ret = serdev_controller_receive_buf(ctrl, cp, count);
+
+       dev_WARN_ONCE(&ctrl->dev, ret < 0 || ret > count,
+                               "receive_buf returns %d (count = %zu)\n",
+                               ret, count);
+       if (ret < 0)
+               return 0;
+       else if (ret > count)
+               return count;
+
+       return ret;
 }
 
 static void ttyport_write_wakeup(struct tty_port *port)
 {
        struct serdev_controller *ctrl = port->client_data;
        struct serport *serport = serdev_controller_get_drvdata(ctrl);
+       struct tty_struct *tty;
+
+       tty = tty_port_tty_get(port);
+       if (!tty)
+               return;
 
-       if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &port->tty->flags) &&
+       if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) &&
            test_bit(SERPORT_ACTIVE, &serport->flags))
                serdev_controller_write_wakeup(ctrl);
 
-       wake_up_interruptible_poll(&port->tty->write_wait, POLLOUT);
+       wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
+
+       tty_kref_put(tty);
 }
 
 static const struct tty_port_client_operations client_ops = {
@@ -136,8 +154,10 @@ static void ttyport_close(struct serdev_controller *ctrl)
 
        clear_bit(SERPORT_ACTIVE, &serport->flags);
 
+       tty_lock(tty);
        if (tty->ops->close)
                tty->ops->close(tty, NULL);
+       tty_unlock(tty);
 
        tty_release_struct(tty, serport->tty_idx);
 }
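Two independent hardenings in the serdev hunks above: the value returned by serdev_controller_receive_buf() is clamped to the range [0, count] with a one-shot warning, so a buggy client cannot report consuming more bytes than it was offered, and ttyport_write_wakeup() takes its own reference with tty_port_tty_get()/tty_kref_put() instead of touching port->tty, which can be cleared concurrently. The clamping idea in isolation (a generic helper, not the driver's function):

#include <linux/bug.h>
#include <linux/kernel.h>

/* Sketch: never report consuming more bytes than were offered, never negative. */
static size_t clamp_consumed(int ret, size_t count)
{
        if (WARN_ONCE(ret < 0 || (size_t)ret > count,
                      "receive_buf returned %d (count = %zu)\n", ret, count))
                return ret < 0 ? 0 : count;
        return ret;
}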
index d64afdd93872c28580f55c4c120ce117dd530d69..9342fc2ee7dfe292bee10f607434055504401b28 100644 (file)
@@ -325,7 +325,7 @@ static int univ8250_setup_irq(struct uart_8250_port *up)
        if (up->bugs & UART_BUG_THRE) {
                pr_debug("ttyS%d - using backup timer\n", serial_index(port));
 
-               up->timer.function = (TIMER_FUNC_TYPE)serial8250_backup_timeout;
+               up->timer.function = serial8250_backup_timeout;
                mod_timer(&up->timer, jiffies +
                          uart_poll_timeout(port) + HZ / 5);
        }
@@ -348,7 +348,7 @@ static void univ8250_release_irq(struct uart_8250_port *up)
        struct uart_port *port = &up->port;
 
        del_timer_sync(&up->timer);
-       up->timer.function = (TIMER_FUNC_TYPE)serial8250_timeout;
+       up->timer.function = serial8250_timeout;
        if (port->irq)
                serial_unlink_irq_chain(up);
 }
index 362c25ff188a549f5d2e111a8c4ef99248a5b920..ae6a256524d8b618d4116fde8a66518e4e93cce4 100644 (file)
@@ -122,12 +122,14 @@ static void __init init_port(struct earlycon_device *device)
        serial8250_early_out(port, UART_FCR, 0);        /* no fifo */
        serial8250_early_out(port, UART_MCR, 0x3);      /* DTR + RTS */
 
-       divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud);
-       c = serial8250_early_in(port, UART_LCR);
-       serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB);
-       serial8250_early_out(port, UART_DLL, divisor & 0xff);
-       serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff);
-       serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB);
+       if (port->uartclk && device->baud) {
+               divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud);
+               c = serial8250_early_in(port, UART_LCR);
+               serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB);
+               serial8250_early_out(port, UART_DLL, divisor & 0xff);
+               serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff);
+               serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB);
+       }
 }
 
 int __init early_serial8250_setup(struct earlycon_device *device,
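The early-console change only programs the divisor latch when both the clock and the baud rate are known, avoiding a divide by zero when the earlycon string (or firmware) omits one of them; the divisor itself is the usual 16x-oversampling formula. A sketch:

#include <linux/kernel.h>       /* DIV_ROUND_CLOSEST */

/* Sketch: 8250 divisor for a 16x-oversampling UART; 0 means "leave it alone". */
static unsigned int uart8250_divisor(unsigned int uartclk, unsigned int baud)
{
        if (!uartclk || !baud)
                return 0;
        return DIV_ROUND_CLOSEST(uartclk, 16 * baud);
        /* e.g. uart8250_divisor(1843200, 115200) == 1 */
}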
index b7e0e34166414fafa3c6e705cce6bd6e952f1c31..54adf8d563501ab844cea41edf0427fdb83a1a8e 100644 (file)
@@ -5135,6 +5135,9 @@ static const struct pci_device_id serial_pci_tbl[] = {
        { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
        { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
 
+       /* Amazon PCI serial device */
+       { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 },
+
        /*
         * These entries match devices with class COMMUNICATION_SERIAL,
         * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
index 1421804975e0b08115232b3c0d7d8430fa46bf24..c9458a033e3cc0ca325e84e1c1fe4778086e9d09 100644 (file)
@@ -2059,7 +2059,7 @@ static void flush_timeout_function(unsigned long data)
 static struct timer_list flush_timer;
 
 static void
-timed_flush_handler(unsigned long ptr)
+timed_flush_handler(struct timer_list *unused)
 {
        struct e100_serial *info;
        int i;
@@ -4137,7 +4137,7 @@ static int __init rs_init(void)
        /* Setup the timed flush handler system */
 
 #if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
-       setup_timer(&flush_timer, timed_flush_handler, 0);
+       timer_setup(&flush_timer, timed_flush_handler, 0);
        mod_timer(&flush_timer, jiffies + 5);
 #endif
 
index c84e6f0db54e16662b0a258af90bdad3af280ee9..1c4d3f38713863f2ebc2e5e7f13ff5f38ac0715d 100644 (file)
@@ -966,9 +966,9 @@ static void lpuart_dma_rx_complete(void *arg)
        lpuart_copy_rx_to_tty(sport);
 }
 
-static void lpuart_timer_func(unsigned long data)
+static void lpuart_timer_func(struct timer_list *t)
 {
-       struct lpuart_port *sport = (struct lpuart_port *)data;
+       struct lpuart_port *sport = from_timer(sport, t, lpuart_timer);
 
        lpuart_copy_rx_to_tty(sport);
 }
@@ -1263,8 +1263,7 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
 
 static void rx_dma_timer_init(struct lpuart_port *sport)
 {
-               setup_timer(&sport->lpuart_timer, lpuart_timer_func,
-                               (unsigned long)sport);
+               timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0);
                sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
                add_timer(&sport->lpuart_timer);
 }
index 473f4f81d690c6ab21a768c0b0745347fc4d9aa5..ffefd218761e04c633171b3f1b14151e7f05b721 100644 (file)
@@ -263,9 +263,9 @@ static void mrdy_assert(struct ifx_spi_device *ifx_dev)
  *     The SPI has timed out: hang up the tty. Users will then see a hangup
  *     and error events.
  */
-static void ifx_spi_timeout(unsigned long arg)
+static void ifx_spi_timeout(struct timer_list *t)
 {
-       struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *)arg;
+       struct ifx_spi_device *ifx_dev = from_timer(ifx_dev, t, spi_timer);
 
        dev_warn(&ifx_dev->spi_dev->dev, "*** SPI Timeout ***");
        tty_port_tty_hangup(&ifx_dev->tty_port, false);
@@ -1016,8 +1016,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
        spin_lock_init(&ifx_dev->write_lock);
        spin_lock_init(&ifx_dev->power_lock);
        ifx_dev->power_status = 0;
-       setup_timer(&ifx_dev->spi_timer, ifx_spi_timeout,
-                   (unsigned long)ifx_dev);
+       timer_setup(&ifx_dev->spi_timer, ifx_spi_timeout, 0);
        ifx_dev->modem = pl_data->modem_type;
        ifx_dev->use_dma = pl_data->use_dma;
        ifx_dev->max_hz = pl_data->max_hz;
index a67a606c38eb0066565c9e1f8fb26ed9cb744980..e4b3d9123a03312b3985e8f1db7840e13e6a38e3 100644 (file)
@@ -906,9 +906,9 @@ static void imx_break_ctl(struct uart_port *port, int break_state)
  * This is our per-port timeout handler, for checking the
  * modem status signals.
  */
-static void imx_timeout(unsigned long data)
+static void imx_timeout(struct timer_list *t)
 {
-       struct imx_port *sport = (struct imx_port *)data;
+       struct imx_port *sport = from_timer(sport, t, timer);
        unsigned long flags;
 
        if (sport->port.state) {
@@ -2082,7 +2082,7 @@ static int serial_imx_probe(struct platform_device *pdev)
        sport->port.rs485_config = imx_rs485_config;
        sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
        sport->port.flags = UPF_BOOT_AUTOCONF;
-       setup_timer(&sport->timer, imx_timeout, (unsigned long)sport);
+       timer_setup(&sport->timer, imx_timeout, 0);
 
        sport->gpios = mctrl_gpio_init(&sport->port, 0);
        if (IS_ERR(sport->gpios))
index ed2b0305862727296d5d5f8109a4a43022cda761..4029272891f9d6e4385d01b03dbb73b53d7026e5 100644 (file)
@@ -188,9 +188,9 @@ bool kgdb_nmi_poll_knock(void)
  * The tasklet is cheap, it does not cause wakeups when it reschedules itself,
  * instead it waits for the next tick.
  */
-static void kgdb_nmi_tty_receiver(unsigned long data)
+static void kgdb_nmi_tty_receiver(struct timer_list *t)
 {
-       struct kgdb_nmi_tty_priv *priv = (void *)data;
+       struct kgdb_nmi_tty_priv *priv = from_timer(priv, t, timer);
        char ch;
 
        priv->timer.expires = jiffies + (HZ/100);
@@ -241,7 +241,7 @@ static int kgdb_nmi_tty_install(struct tty_driver *drv, struct tty_struct *tty)
                return -ENOMEM;
 
        INIT_KFIFO(priv->fifo);
-       setup_timer(&priv->timer, kgdb_nmi_tty_receiver, (unsigned long)priv);
+       timer_setup(&priv->timer, kgdb_nmi_tty_receiver, 0);
        tty_port_init(&priv->port);
        priv->port.ops = &kgdb_nmi_tty_port_ops;
        tty->driver_data = priv;
index 27d6049eb6a9a210b7317f40ea1a7d92118cab23..371569a0fd00a8161f76ac715a734368e2419e60 100644 (file)
@@ -178,9 +178,9 @@ static void max3100_dowork(struct max3100_port *s)
                queue_work(s->workqueue, &s->work);
 }
 
-static void max3100_timeout(unsigned long data)
+static void max3100_timeout(struct timer_list *t)
 {
-       struct max3100_port *s = (struct max3100_port *)data;
+       struct max3100_port *s = from_timer(s, t, timer);
 
        if (s->port.state) {
                max3100_dowork(s);
@@ -780,8 +780,7 @@ static int max3100_probe(struct spi_device *spi)
                max3100s[i]->poll_time = 1;
        max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend;
        max3100s[i]->minor = i;
-       setup_timer(&max3100s[i]->timer, max3100_timeout,
-                   (unsigned long)max3100s[i]);
+       timer_setup(&max3100s[i]->timer, max3100_timeout, 0);
 
        dev_dbg(&spi->dev, "%s: adding port %d\n", __func__, i);
        max3100s[i]->port.irq = max3100s[i]->irq;
index 3b74369c262f1d19829dc5aceec7bb8b0b76cbae..00ce31e8d19ad852ed025f2928ef22ae603e4ea8 100644 (file)
@@ -371,7 +371,7 @@ static int mux_verify_port(struct uart_port *port, struct serial_struct *ser)
  *
  * This function periodically polls the Serial MUX to check for new data.
  */
-static void mux_poll(unsigned long unused)
+static void mux_poll(struct timer_list *unused)
 {  
        int i;
 
@@ -572,7 +572,7 @@ static int __init mux_init(void)
 
        if(port_cnt > 0) {
                /* Start the Mux timer */
-               setup_timer(&mux_timer, mux_poll, 0UL);
+               timer_setup(&mux_timer, mux_poll, 0);
                mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
 
 #ifdef CONFIG_SERIAL_MUX_CONSOLE
index f8812389b8a8bbc7ab95b5d68c7b9a17b99f7596..223a9499104e2ffc49a928aa4a6caa26cd07db8c 100644 (file)
@@ -103,9 +103,9 @@ static void pnx8xxx_mctrl_check(struct pnx8xxx_port *sport)
  * This is our per-port timeout handler, for checking the
  * modem status signals.
  */
-static void pnx8xxx_timeout(unsigned long data)
+static void pnx8xxx_timeout(struct timer_list *t)
 {
-       struct pnx8xxx_port *sport = (struct pnx8xxx_port *)data;
+       struct pnx8xxx_port *sport = from_timer(sport, t, timer);
        unsigned long flags;
 
        if (sport->port.state) {
@@ -662,8 +662,7 @@ static void __init pnx8xxx_init_ports(void)
        first = 0;
 
        for (i = 0; i < NR_PORTS; i++) {
-               setup_timer(&pnx8xxx_ports[i].timer, pnx8xxx_timeout,
-                           (unsigned long)&pnx8xxx_ports[i]);
+               timer_setup(&pnx8xxx_ports[i].timer, pnx8xxx_timeout, 0);
                pnx8xxx_ports[i].port.ops = &pnx8xxx_pops;
        }
 }
index 4e3f169b30cffdee4513848d22e0008eef65d989..a399772be3fc5342de88d3d202a05b32758c1d0c 100644 (file)
@@ -110,9 +110,9 @@ static void sa1100_mctrl_check(struct sa1100_port *sport)
  * This is our per-port timeout handler, for checking the
  * modem status signals.
  */
-static void sa1100_timeout(unsigned long data)
+static void sa1100_timeout(struct timer_list *t)
 {
-       struct sa1100_port *sport = (struct sa1100_port *)data;
+       struct sa1100_port *sport = from_timer(sport, t, timer);
        unsigned long flags;
 
        if (sport->port.state) {
@@ -627,8 +627,7 @@ static void __init sa1100_init_ports(void)
                sa1100_ports[i].port.fifosize  = 8;
                sa1100_ports[i].port.line      = i;
                sa1100_ports[i].port.iotype    = UPIO_MEM;
-               setup_timer(&sa1100_ports[i].timer, sa1100_timeout,
-                           (unsigned long)&sa1100_ports[i]);
+               timer_setup(&sa1100_ports[i].timer, sa1100_timeout, 0);
        }
 
        /*
index 31fcc7072a90d43b7b96d9603ef5e40a680bf61d..d9f399c4e90c0dfa957bb8cbc80b99c8ef27f454 100644 (file)
@@ -1058,9 +1058,9 @@ static int scif_rtrg_enabled(struct uart_port *port)
                        (SCFCR_RTRG0 | SCFCR_RTRG1)) != 0;
 }
 
-static void rx_fifo_timer_fn(unsigned long arg)
+static void rx_fifo_timer_fn(struct timer_list *t)
 {
-       struct sci_port *s = (struct sci_port *)arg;
+       struct sci_port *s = from_timer(s, t, rx_fifo_timer);
        struct uart_port *port = &s->port;
 
        dev_dbg(port->dev, "Rx timed out\n");
@@ -1138,8 +1138,7 @@ static ssize_t rx_fifo_timeout_store(struct device *dev,
                sci->rx_fifo_timeout = r;
                scif_set_rtrg(port, 1);
                if (r > 0)
-                       setup_timer(&sci->rx_fifo_timer, rx_fifo_timer_fn,
-                                   (unsigned long)sci);
+                       timer_setup(&sci->rx_fifo_timer, rx_fifo_timer_fn, 0);
        }
 
        return count;
@@ -1392,9 +1391,9 @@ static void work_fn_tx(struct work_struct *work)
        dma_async_issue_pending(chan);
 }
 
-static void rx_timer_fn(unsigned long arg)
+static void rx_timer_fn(struct timer_list *t)
 {
-       struct sci_port *s = (struct sci_port *)arg;
+       struct sci_port *s = from_timer(s, t, rx_timer);
        struct dma_chan *chan = s->chan_rx;
        struct uart_port *port = &s->port;
        struct dma_tx_state state;
@@ -1572,7 +1571,7 @@ static void sci_request_dma(struct uart_port *port)
                        dma += s->buf_len_rx;
                }
 
-               setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
+               timer_setup(&s->rx_timer, rx_timer_fn, 0);
 
                if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
                        sci_submit_rx(s);
@@ -2238,8 +2237,7 @@ static void sci_reset(struct uart_port *port)
        if (s->rx_trigger > 1) {
                if (s->rx_fifo_timeout) {
                        scif_set_rtrg(port, 1);
-                       setup_timer(&s->rx_fifo_timer, rx_fifo_timer_fn,
-                                   (unsigned long)s);
+                       timer_setup(&s->rx_fifo_timer, rx_fifo_timer_fn, 0);
                } else {
                        if (port->type == PORT_SCIFA ||
                            port->type == PORT_SCIFB)
index ed78542c4c37a2b509831790a39e36569c71768a..42b9aded4eb1c6d47a9795d743a27af9627fa563 100644 (file)
@@ -612,9 +612,9 @@ static irqreturn_t sn_sal_interrupt(int irq, void *dev_id)
  * Obviously not used in interrupt mode
  *
  */
-static void sn_sal_timer_poll(unsigned long data)
+static void sn_sal_timer_poll(struct timer_list *t)
 {
-       struct sn_cons_port *port = (struct sn_cons_port *)data;
+       struct sn_cons_port *port = from_timer(port, t, sc_timer);
        unsigned long flags;
 
        if (!port)
@@ -668,7 +668,7 @@ static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
         * timer to poll for input and push data from the console
         * buffer.
         */
-       setup_timer(&port->sc_timer, sn_sal_timer_poll, (unsigned long)port);
+       timer_setup(&port->sc_timer, sn_sal_timer_poll, 0);
 
        if (IS_RUNNING_ON_SIMULATOR())
                port->sc_interrupt_timeout = 6;
index f2c34d65614462f395627dc981c19928ce77fb7f..3c4ad71f261d67144a489b4b138d8814c4b448ef 100644 (file)
@@ -700,7 +700,7 @@ static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
 
 static void usc_loopback_frame( struct mgsl_struct *info );
 
-static void mgsl_tx_timeout(unsigned long context);
+static void mgsl_tx_timeout(struct timer_list *t);
 
 
 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
@@ -1768,7 +1768,7 @@ static int startup(struct mgsl_struct * info)
        
        memset(&info->icount, 0, sizeof(info->icount));
 
-       setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
+       timer_setup(&info->tx_timer, mgsl_tx_timeout, 0);
        
        /* Allocate and claim adapter resources */
        retval = mgsl_claim_resources(info);
@@ -7517,9 +7517,9 @@ static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int coun
  * Arguments:  context         pointer to device instance data
  * Return Value:       None
  */
-static void mgsl_tx_timeout(unsigned long context)
+static void mgsl_tx_timeout(struct timer_list *t)
 {
-       struct mgsl_struct *info = (struct mgsl_struct*)context;
+       struct mgsl_struct *info = from_timer(info, t, tx_timer);
        unsigned long flags;
        
        if ( debug_level >= DEBUG_LEVEL_INFO )
index 06a03731bba70a59e437da22c262100dd635ce6f..255c496878778d060067bc7a2b8b79459af73819 100644 (file)
@@ -493,8 +493,8 @@ static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
 static int  alloc_tmp_rbuf(struct slgt_info *info);
 static void free_tmp_rbuf(struct slgt_info *info);
 
-static void tx_timeout(unsigned long context);
-static void rx_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
+static void rx_timeout(struct timer_list *t);
 
 /*
  * ioctl handlers
@@ -3597,8 +3597,8 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev
                info->adapter_num = adapter_num;
                info->port_num = port_num;
 
-               setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
-               setup_timer(&info->rx_timer, rx_timeout, (unsigned long)info);
+               timer_setup(&info->tx_timer, tx_timeout, 0);
+               timer_setup(&info->rx_timer, rx_timeout, 0);
 
                /* Copy configuration info to device instance data */
                info->pdev = pdev;
@@ -5112,9 +5112,9 @@ static int adapter_test(struct slgt_info *info)
 /*
  * transmit timeout handler
  */
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
 {
-       struct slgt_info *info = (struct slgt_info*)context;
+       struct slgt_info *info = from_timer(info, t, tx_timer);
        unsigned long flags;
 
        DBGINFO(("%s tx_timeout\n", info->device_name));
@@ -5136,9 +5136,9 @@ static void tx_timeout(unsigned long context)
 /*
  * receive buffer polling timer
  */
-static void rx_timeout(unsigned long context)
+static void rx_timeout(struct timer_list *t)
 {
-       struct slgt_info *info = (struct slgt_info*)context;
+       struct slgt_info *info = from_timer(info, t, rx_timer);
        unsigned long flags;
 
        DBGINFO(("%s rx_timeout\n", info->device_name));
index d45f234e1914b81205af50df7419e45f61cf9d51..75f11ce1f0a1ac5fe8754ae2948783c51e24481f 100644 (file)
@@ -615,8 +615,8 @@ static void free_tmp_rx_buf(SLMP_INFO *info);
 
 static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count);
 static void trace_block(SLMP_INFO *info, const char* data, int count, int xmit);
-static void tx_timeout(unsigned long context);
-static void status_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
+static void status_timeout(struct timer_list *t);
 
 static unsigned char read_reg(SLMP_INFO *info, unsigned char addr);
 static void write_reg(SLMP_INFO *info, unsigned char addr, unsigned char val);
@@ -3782,9 +3782,8 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
                info->bus_type = MGSL_BUS_TYPE_PCI;
                info->irq_flags = IRQF_SHARED;
 
-               setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
-               setup_timer(&info->status_timer, status_timeout,
-                               (unsigned long)info);
+               timer_setup(&info->tx_timer, tx_timeout, 0);
+               timer_setup(&info->status_timer, status_timeout, 0);
 
                /* Store the PCI9050 misc control register value because a flaw
                 * in the PCI9050 prevents LCR registers from being read if
@@ -5468,9 +5467,9 @@ static void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
 /* called when HDLC frame times out
  * update stats and do tx completion processing
  */
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
 {
-       SLMP_INFO *info = (SLMP_INFO*)context;
+       SLMP_INFO *info = from_timer(info, t, tx_timer);
        unsigned long flags;
 
        if ( debug_level >= DEBUG_LEVEL_INFO )
@@ -5495,10 +5494,10 @@ static void tx_timeout(unsigned long context)
 
 /* called to periodically check the DSR/RI modem signal input status
  */
-static void status_timeout(unsigned long context)
+static void status_timeout(struct timer_list *t)
 {
        u16 status = 0;
-       SLMP_INFO *info = (SLMP_INFO*)context;
+       SLMP_INFO *info = from_timer(info, t, status_timer);
        unsigned long flags;
        unsigned char delta;
 
index c8d90d7e7e3766802bbdccb8ea5c990a0a08343a..5d412df8e94372217726271c7f83e1f55444ae21 100644 (file)
@@ -244,7 +244,7 @@ static int kd_sound_helper(struct input_handle *handle, void *data)
        return 0;
 }
 
-static void kd_nosound(unsigned long ignored)
+static void kd_nosound(struct timer_list *unused)
 {
        static unsigned int zero;
 
index bce4c71cb33883fc67c230497cba9cfa2958e931..88b902c525d7455e3e0398623d5c54dc65ad0ab9 100644 (file)
@@ -158,7 +158,7 @@ static void set_cursor(struct vc_data *vc);
 static void hide_cursor(struct vc_data *vc);
 static void console_callback(struct work_struct *ignored);
 static void con_driver_unregister_callback(struct work_struct *ignored);
-static void blank_screen_t(unsigned long dummy);
+static void blank_screen_t(struct timer_list *unused);
 static void set_palette(struct vc_data *vc);
 
 #define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
@@ -3929,7 +3929,7 @@ void unblank_screen(void)
  * (console operations can still happen at irq time, but only from printk which
  * has the console mutex. Not perfect yet, but better than no locking
  */
-static void blank_screen_t(unsigned long dummy)
+static void blank_screen_t(struct timer_list *unused)
 {
        blank_timer_expired = 1;
        schedule_work(&console_work);
index 6470d259b7d8a2071052dc52a38b70626ff170db..8af797252af206c9ad70aef7d65ee8106df2107f 100644 (file)
@@ -547,21 +547,30 @@ static void cxacru_blocking_completion(struct urb *urb)
        complete(urb->context);
 }
 
-static void cxacru_timeout_kill(unsigned long data)
+struct cxacru_timer {
+       struct timer_list timer;
+       struct urb *urb;
+};
+
+static void cxacru_timeout_kill(struct timer_list *t)
 {
-       usb_unlink_urb((struct urb *) data);
+       struct cxacru_timer *timer = from_timer(timer, t, timer);
+
+       usb_unlink_urb(timer->urb);
 }
 
 static int cxacru_start_wait_urb(struct urb *urb, struct completion *done,
                                 int *actual_length)
 {
-       struct timer_list timer;
+       struct cxacru_timer timer = {
+               .urb = urb,
+       };
 
-       setup_timer(&timer, cxacru_timeout_kill, (unsigned long)urb);
-       timer.expires = jiffies + msecs_to_jiffies(CMD_TIMEOUT);
-       add_timer(&timer);
+       timer_setup_on_stack(&timer.timer, cxacru_timeout_kill, 0);
+       mod_timer(&timer.timer, jiffies + msecs_to_jiffies(CMD_TIMEOUT));
        wait_for_completion(done);
-       del_timer_sync(&timer);
+       del_timer_sync(&timer.timer);
+       destroy_timer_on_stack(&timer.timer);
 
        if (actual_length)
                *actual_length = urb->actual_length;
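Because the new-style callback only receives the timer_list pointer, the cxacru fix wraps the timer together with the URB it needs to kill in a small on-stack struct, so from_timer() can reach the URB; timer_setup_on_stack()/destroy_timer_on_stack() handle the debug-object bookkeeping for stack-allocated timers. The same wrapping trick with a generic payload:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct payload_timer {
        struct timer_list timer;
        void *payload;                  /* whatever the callback needs */
};

static void payload_timeout(struct timer_list *t)
{
        struct payload_timer *pt = from_timer(pt, t, timer);

        /* act on pt->payload, e.g. cancel the operation it tracks */
}

static void run_with_timeout(void *payload, unsigned long timeout_jiffies)
{
        struct payload_timer pt = { .payload = payload };

        timer_setup_on_stack(&pt.timer, payload_timeout, 0);
        mod_timer(&pt.timer, jiffies + timeout_jiffies);

        /* ... wait for the operation to complete ... */

        del_timer_sync(&pt.timer);
        destroy_timer_on_stack(&pt.timer);
}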
index 5a5e8c0aaa3994ad1a110928720a378d6b6cd9e2..973548b5c15ce11414cec038b4b47001c055c9f2 100644 (file)
@@ -557,9 +557,10 @@ static void speedtch_check_status(struct work_struct *work)
        }
 }
 
-static void speedtch_status_poll(unsigned long data)
+static void speedtch_status_poll(struct timer_list *t)
 {
-       struct speedtch_instance_data *instance = (void *)data;
+       struct speedtch_instance_data *instance = from_timer(instance, t,
+                                                            status_check_timer);
 
        schedule_work(&instance->status_check_work);
 
@@ -570,9 +571,10 @@ static void speedtch_status_poll(unsigned long data)
                atm_warn(instance->usbatm, "Too many failures - disabling line status polling\n");
 }
 
-static void speedtch_resubmit_int(unsigned long data)
+static void speedtch_resubmit_int(struct timer_list *t)
 {
-       struct speedtch_instance_data *instance = (void *)data;
+       struct speedtch_instance_data *instance = from_timer(instance, t,
+                                                            resubmit_timer);
        struct urb *int_urb = instance->int_urb;
        int ret;
 
@@ -860,13 +862,11 @@ static int speedtch_bind(struct usbatm_data *usbatm,
        usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
 
        INIT_WORK(&instance->status_check_work, speedtch_check_status);
-       setup_timer(&instance->status_check_timer, speedtch_status_poll,
-                   (unsigned long)instance);
+       timer_setup(&instance->status_check_timer, speedtch_status_poll, 0);
        instance->last_status = 0xff;
        instance->poll_delay = MIN_POLL_DELAY;
 
-       setup_timer(&instance->resubmit_timer, speedtch_resubmit_int,
-                   (unsigned long)instance);
+       timer_setup(&instance->resubmit_timer, speedtch_resubmit_int, 0);
 
        instance->int_urb = usb_alloc_urb(0, GFP_KERNEL);
 
index 044264aa1f965ade715e3df2fa3ba50a6d5b5543..dbea28495e1ddb49193c9acae8b15a8f08cfb0c6 100644 (file)
@@ -989,18 +989,18 @@ static int usbatm_heavy_init(struct usbatm_data *instance)
        return 0;
 }
 
-static void usbatm_tasklet_schedule(unsigned long data)
+static void usbatm_tasklet_schedule(struct timer_list *t)
 {
-       tasklet_schedule((struct tasklet_struct *) data);
+       struct usbatm_channel *channel = from_timer(channel, t, delay);
+
+       tasklet_schedule(&channel->tasklet);
 }
 
 static void usbatm_init_channel(struct usbatm_channel *channel)
 {
        spin_lock_init(&channel->lock);
        INIT_LIST_HEAD(&channel->list);
-       channel->delay.function = usbatm_tasklet_schedule;
-       channel->delay.data = (unsigned long) &channel->tasklet;
-       init_timer(&channel->delay);
+       timer_setup(&channel->delay, usbatm_tasklet_schedule, 0);
 }
 
 int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
index 8b351444cc40d015fefde2f40143a452232d32c9..9a2ab6751a23c504177fbac78cc7e95cdb21a1ae 100644 (file)
@@ -180,9 +180,9 @@ static int ulpi_of_register(struct ulpi *ulpi)
        /* Find a ulpi bus underneath the parent or the grandparent */
        parent = ulpi->dev.parent;
        if (parent->of_node)
-               np = of_find_node_by_name(parent->of_node, "ulpi");
+               np = of_get_child_by_name(parent->of_node, "ulpi");
        else if (parent->parent && parent->parent->of_node)
-               np = of_find_node_by_name(parent->parent->of_node, "ulpi");
+               np = of_get_child_by_name(parent->parent->of_node, "ulpi");
        if (!np)
                return 0;
 
index da8acd980fc68e1c67b302ab45a62580d99fb0eb..78e92d29f8d98777c1294292808b1a868dcfcb7f 100644 (file)
@@ -555,6 +555,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
        unsigned iad_num = 0;
 
        memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
+       nintf = nintf_orig = config->desc.bNumInterfaces;
+       config->desc.bNumInterfaces = 0;        // Adjusted later
+
        if (config->desc.bDescriptorType != USB_DT_CONFIG ||
            config->desc.bLength < USB_DT_CONFIG_SIZE ||
            config->desc.bLength > size) {
@@ -568,7 +571,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
        buffer += config->desc.bLength;
        size -= config->desc.bLength;
 
-       nintf = nintf_orig = config->desc.bNumInterfaces;
        if (nintf > USB_MAXINTERFACES) {
                dev_warn(ddev, "config %d has too many interfaces: %d, "
                    "using maximum allowed: %d\n",
@@ -905,14 +907,25 @@ void usb_release_bos_descriptor(struct usb_device *dev)
        }
 }
 
+static const __u8 bos_desc_len[256] = {
+       [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE,
+       [USB_CAP_TYPE_EXT]          = USB_DT_USB_EXT_CAP_SIZE,
+       [USB_SS_CAP_TYPE]           = USB_DT_USB_SS_CAP_SIZE,
+       [USB_SSP_CAP_TYPE]          = USB_DT_USB_SSP_CAP_SIZE(1),
+       [CONTAINER_ID_TYPE]         = USB_DT_USB_SS_CONTN_ID_SIZE,
+       [USB_PTM_CAP_TYPE]          = USB_DT_USB_PTM_ID_SIZE,
+};
+
 /* Get BOS descriptor set */
 int usb_get_bos_descriptor(struct usb_device *dev)
 {
        struct device *ddev = &dev->dev;
        struct usb_bos_descriptor *bos;
        struct usb_dev_cap_header *cap;
+       struct usb_ssp_cap_descriptor *ssp_cap;
        unsigned char *buffer;
-       int length, total_len, num, i;
+       int length, total_len, num, i, ssac;
+       __u8 cap_type;
        int ret;
 
        bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
@@ -965,7 +978,13 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                        dev->bos->desc->bNumDeviceCaps = i;
                        break;
                }
+               cap_type = cap->bDevCapabilityType;
                length = cap->bLength;
+               if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) {
+                       dev->bos->desc->bNumDeviceCaps = i;
+                       break;
+               }
+
                total_len -= length;
 
                if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
@@ -973,7 +992,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                        continue;
                }
 
-               switch (cap->bDevCapabilityType) {
+               switch (cap_type) {
                case USB_CAP_TYPE_WIRELESS_USB:
                        /* Wireless USB cap descriptor is handled by wusb */
                        break;
@@ -986,8 +1005,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                                (struct usb_ss_cap_descriptor *)buffer;
                        break;
                case USB_SSP_CAP_TYPE:
-                       dev->bos->ssp_cap =
-                               (struct usb_ssp_cap_descriptor *)buffer;
+                       ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
+                       ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
+                               USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
+                       if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
+                               dev->bos->ssp_cap = ssp_cap;
                        break;
                case CONTAINER_ID_TYPE:
                        dev->bos->ss_id =
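The bos_desc_len[] table introduced above is a sparse lookup built with designated initializers and keyed by capability type: entries that are not listed stay 0, which the subsequent check treats as "no minimum length enforced". A self-contained sketch of the same idiom, with made-up capability numbers and names:

#include <linux/types.h>

/* Sparse minimum-length table: unlisted types default to 0 = no minimum. */
static const __u8 cap_min_len[256] = {
        [0x02] = 7,     /* hypothetical capability type -> minimum bLength */
        [0x03] = 10,
};

static bool cap_length_ok(__u8 type, __u8 length)
{
        /* accept if no minimum is known, or the descriptor is long enough */
        return !cap_min_len[type] || length >= cap_min_len[type];
}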
index 705c573d0257e28b5142070caaca17e06e5a25fd..a3fad4ec9870d21e602fa80d8cf564fff5dd6d62 100644 (file)
@@ -1442,14 +1442,18 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        int number_of_packets = 0;
        unsigned int stream_id = 0;
        void *buf;
-
-       if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
-                               USBDEVFS_URB_SHORT_NOT_OK |
+       unsigned long mask =    USBDEVFS_URB_SHORT_NOT_OK |
                                USBDEVFS_URB_BULK_CONTINUATION |
                                USBDEVFS_URB_NO_FSBR |
                                USBDEVFS_URB_ZERO_PACKET |
-                               USBDEVFS_URB_NO_INTERRUPT))
-               return -EINVAL;
+                               USBDEVFS_URB_NO_INTERRUPT;
+       /* USBDEVFS_URB_ISO_ASAP is a special case */
+       if (uurb->type == USBDEVFS_URB_TYPE_ISO)
+               mask |= USBDEVFS_URB_ISO_ASAP;
+
+       if (uurb->flags & ~mask)
+               return -EINVAL;
+
        if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
                return -EINVAL;
        if (uurb->buffer_length > 0 && !uurb->buffer)
index 19b5c4afeef205931226f0350680977de25d5f47..fc32391a34d5db0c4951d2f1a50c4be8ccb97f94 100644 (file)
@@ -788,9 +788,11 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
 EXPORT_SYMBOL_GPL(usb_hcd_poll_rh_status);
 
 /* timer callback */
-static void rh_timer_func (unsigned long _hcd)
+static void rh_timer_func (struct timer_list *t)
 {
-       usb_hcd_poll_rh_status((struct usb_hcd *) _hcd);
+       struct usb_hcd *_hcd = from_timer(_hcd, t, rh_timer);
+
+       usb_hcd_poll_rh_status(_hcd);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -2545,7 +2547,7 @@ struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
        hcd->self.bus_name = bus_name;
        hcd->self.uses_dma = (sysdev->dma_mask != NULL);
 
-       setup_timer(&hcd->rh_timer, rh_timer_func, (unsigned long)hcd);
+       timer_setup(&hcd->rh_timer, rh_timer_func, 0);
 #ifdef CONFIG_PM
        INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
 #endif
index 7ccdd3d4db84c9d4675084d4a575f458fd1e1d44..cf7bbcb9a63cc9acaa4dfe225bba01167ced2051 100644 (file)
@@ -4948,6 +4948,15 @@ loop:
                usb_put_dev(udev);
                if ((status == -ENOTCONN) || (status == -ENOTSUPP))
                        break;
+
+               /* When halfway through our retry count, power-cycle the port */
+               if (i == (SET_CONFIG_TRIES / 2) - 1) {
+                       dev_info(&port_dev->dev, "attempt power cycle\n");
+                       usb_hub_set_port_power(hdev, hub, port1, false);
+                       msleep(2 * hub_power_on_good_delay(hub));
+                       usb_hub_set_port_power(hdev, hub, port1, true);
+                       msleep(hub_power_on_good_delay(hub));
+               }
        }
        if (hub->hdev->parent ||
                        !hcd->driver->port_handed_over ||
index f1dbab6f798fdc75dbde8039b8d0c5339871fcde..a10b346b9777dba58abe8346cb4926e42d8bb7aa 100644 (file)
@@ -146,6 +146,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* appletouch */
        { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
+       { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
+
        /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
        { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
 
index f66c94130cac0b49a02d7ca081568bd88367589c..31749c79045f3a95ed27b6b338f9b94baf7b8d03 100644 (file)
@@ -537,6 +537,7 @@ struct dwc2_core_params {
  *                       2 - Internal DMA
  * @power_optimized     Are power optimizations enabled?
  * @num_dev_ep          Number of device endpoints available
+ * @num_dev_in_eps      Number of device IN endpoints available
  * @num_dev_perio_in_ep Number of device periodic IN endpoints
  *                      available
  * @dev_token_q_depth   Device Mode IN Token Sequence Learning Queue
@@ -565,6 +566,7 @@ struct dwc2_core_params {
  *                       2 - 8 or 16 bits
  * @snpsid:             Value from SNPSID register
  * @dev_ep_dirs:        Direction of device endpoints (GHWCFG1)
+ * @g_tx_fifo_size[]   Power-on values of TxFIFO sizes
  */
 struct dwc2_hw_params {
        unsigned op_mode:3;
@@ -586,12 +588,14 @@ struct dwc2_hw_params {
        unsigned fs_phy_type:2;
        unsigned i2c_enable:1;
        unsigned num_dev_ep:4;
+       unsigned num_dev_in_eps:4;
        unsigned num_dev_perio_in_ep:4;
        unsigned total_fifo_size:16;
        unsigned power_optimized:1;
        unsigned utmi_phy_data_width:2;
        u32 snpsid;
        u32 dev_ep_dirs;
+       u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
 };
 
 /* Size of control and EP0 buffers */
index 88529d0925039f53112458e59c208c057a32a608..e4c3ce0de5de11ba5532fb34c8b8d72e1fd98511 100644 (file)
@@ -195,55 +195,18 @@ int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
 {
        if (hsotg->hw_params.en_multiple_tx_fifo)
                /* In dedicated FIFO mode we need count of IN EPs */
-               return (dwc2_readl(hsotg->regs + GHWCFG4)  &
-                       GHWCFG4_NUM_IN_EPS_MASK) >> GHWCFG4_NUM_IN_EPS_SHIFT;
+               return hsotg->hw_params.num_dev_in_eps;
        else
                /* In shared FIFO mode we need count of Periodic IN EPs */
                return hsotg->hw_params.num_dev_perio_in_ep;
 }
 
-/**
- * dwc2_hsotg_ep_info_size - return Endpoint Info Control block size in DWORDs
- */
-static int dwc2_hsotg_ep_info_size(struct dwc2_hsotg *hsotg)
-{
-       int val = 0;
-       int i;
-       u32 ep_dirs;
-
-       /*
-        * Don't need additional space for ep info control registers in
-        * slave mode.
-        */
-       if (!using_dma(hsotg)) {
-               dev_dbg(hsotg->dev, "Buffer DMA ep info size 0\n");
-               return 0;
-       }
-
-       /*
-        * Buffer DMA mode - 1 location per endpoit
-        * Descriptor DMA mode - 4 locations per endpoint
-        */
-       ep_dirs = hsotg->hw_params.dev_ep_dirs;
-
-       for (i = 0; i <= hsotg->hw_params.num_dev_ep; i++) {
-               val += ep_dirs & 3 ? 1 : 2;
-               ep_dirs >>= 2;
-       }
-
-       if (using_desc_dma(hsotg))
-               val = val * 4;
-
-       return val;
-}
-
 /**
  * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
  * device mode TX FIFOs
  */
 int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
 {
-       int ep_info_size;
        int addr;
        int tx_addr_max;
        u32 np_tx_fifo_size;
@@ -252,8 +215,7 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
                                hsotg->params.g_np_tx_fifo_size);
 
        /* Get Endpoint Info Control block size in DWORDs. */
-       ep_info_size = dwc2_hsotg_ep_info_size(hsotg);
-       tx_addr_max = hsotg->hw_params.total_fifo_size - ep_info_size;
+       tx_addr_max = hsotg->hw_params.total_fifo_size;
 
        addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
        if (tx_addr_max <= addr)
index 69eb40cd1b47ec195cec2f7a195b8a5051ed4f6a..7b6eb0ad513b26b33610c9b08531f8accac4a89f 100644 (file)
@@ -3314,9 +3314,9 @@ host:
        }
 }
 
-static void dwc2_wakeup_detected(unsigned long data)
+static void dwc2_wakeup_detected(struct timer_list *t)
 {
-       struct dwc2_hsotg *hsotg = (struct dwc2_hsotg *)data;
+       struct dwc2_hsotg *hsotg = from_timer(hsotg, t, wkp_timer);
        u32 hprt0;
 
        dev_dbg(hsotg->dev, "%s()\n", __func__);
@@ -5155,8 +5155,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
        }
        INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change);
 
-       setup_timer(&hsotg->wkp_timer, dwc2_wakeup_detected,
-                   (unsigned long)hsotg);
+       timer_setup(&hsotg->wkp_timer, dwc2_wakeup_detected, 0);
 
        /* Initialize the non-periodic schedule */
        INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
index f472de238ac26ad9824f0322f0f80fc78c89f3c4..fcd1676c7f0b7eae8b942aa9a50f6bcfb8cdc813 100644 (file)
@@ -1275,9 +1275,9 @@ static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
  *
  * @work: Pointer to a qh unreserve_work.
  */
-static void dwc2_unreserve_timer_fn(unsigned long data)
+static void dwc2_unreserve_timer_fn(struct timer_list *t)
 {
-       struct dwc2_qh *qh = (struct dwc2_qh *)data;
+       struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
        struct dwc2_hsotg *hsotg = qh->hsotg;
        unsigned long flags;
 
@@ -1467,8 +1467,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
 
        /* Initialize QH */
        qh->hsotg = hsotg;
-       setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
-                   (unsigned long)qh);
+       timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
        qh->ep_type = ep_type;
        qh->ep_is_in = ep_is_in;
 
index ef73af6e03a98828b7fadb46bd32ee291fd6302f..03fd20f0b49613aaffba14aa117e48a7781391a5 100644 (file)
@@ -484,8 +484,7 @@ static void dwc2_check_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
        }
 
        for (fifo = 1; fifo <= fifo_count; fifo++) {
-               dptxfszn = (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) &
-                       FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
+               dptxfszn = hsotg->hw_params.g_tx_fifo_size[fifo];
 
                if (hsotg->params.g_tx_fifo_size[fifo] < min ||
                    hsotg->params.g_tx_fifo_size[fifo] >  dptxfszn) {
@@ -609,6 +608,7 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
        struct dwc2_hw_params *hw = &hsotg->hw_params;
        bool forced;
        u32 gnptxfsiz;
+       int fifo, fifo_count;
 
        if (hsotg->dr_mode == USB_DR_MODE_HOST)
                return;
@@ -617,6 +617,14 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
 
        gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
 
+       fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
+
+       for (fifo = 1; fifo <= fifo_count; fifo++) {
+               hw->g_tx_fifo_size[fifo] =
+                       (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) &
+                        FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
+       }
+
        if (forced)
                dwc2_clear_force_mode(hsotg);
 
@@ -661,14 +669,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
        hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
        grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
 
-       /*
-        * Host specific hardware parameters. Reading these parameters
-        * requires the controller to be in host mode. The mode will
-        * be forced, if necessary, to read these values.
-        */
-       dwc2_get_host_hwparams(hsotg);
-       dwc2_get_dev_hwparams(hsotg);
-
        /* hwcfg1 */
        hw->dev_ep_dirs = hwcfg1;
 
@@ -711,6 +711,8 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
        hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
        hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
                                  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
+       hw->num_dev_in_eps = (hwcfg4 & GHWCFG4_NUM_IN_EPS_MASK) >>
+                            GHWCFG4_NUM_IN_EPS_SHIFT;
        hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
        hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
        hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
@@ -719,6 +721,13 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
        /* fifo sizes */
        hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
                                GRXFSIZ_DEPTH_SHIFT;
+       /*
+        * Host specific hardware parameters. Reading these parameters
+        * requires the controller to be in host mode. The mode will
+        * be forced, if necessary, to read these values.
+        */
+       dwc2_get_host_hwparams(hsotg);
+       dwc2_get_dev_hwparams(hsotg);
 
        return 0;
 }
index c4a4d7bd27660225442e732e6be994c333b9b91b..7ae0eefc7cc7daf0382c7aeaa56c14659f025ffb 100644 (file)
@@ -51,8 +51,10 @@ static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
 
                clk = of_clk_get(np, i);
                if (IS_ERR(clk)) {
-                       while (--i >= 0)
+                       while (--i >= 0) {
+                               clk_disable_unprepare(simple->clks[i]);
                                clk_put(simple->clks[i]);
+                       }
                        return PTR_ERR(clk);
                }
 
@@ -203,6 +205,7 @@ static struct platform_driver dwc3_of_simple_driver = {
        .driver         = {
                .name   = "dwc3-of-simple",
                .of_match_table = of_dwc3_simple_match,
+               .pm     = &dwc3_of_simple_dev_pm_ops,
        },
 };
 
index 981fd986cf824804b752e64018289c79f191d646..639dd1b163a0e19e502ff2274c73101e3efaa777 100644 (file)
@@ -259,7 +259,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
 {
        const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
        struct dwc3             *dwc = dep->dwc;
-       u32                     timeout = 500;
+       u32                     timeout = 1000;
        u32                     reg;
 
        int                     cmd_status = 0;
@@ -912,7 +912,7 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
                         */
                        if (speed == USB_SPEED_HIGH) {
                                struct usb_ep *ep = &dep->endpoint;
-                               unsigned int mult = ep->mult - 1;
+                               unsigned int mult = 2;
                                unsigned int maxp = usb_endpoint_maxp(ep->desc);
 
                                if (length <= (2 * maxp))
index eec14e6ed20be0a43f414233fccd38efe51f7f10..77c7ecca816aa026677ec869e087f0dc747f55a7 100644 (file)
@@ -146,7 +146,6 @@ int config_ep_by_speed(struct usb_gadget *g,
                        struct usb_function *f,
                        struct usb_ep *_ep)
 {
-       struct usb_composite_dev        *cdev = get_gadget_data(g);
        struct usb_endpoint_descriptor *chosen_desc = NULL;
        struct usb_descriptor_header **speed_desc = NULL;
 
@@ -226,8 +225,12 @@ ep_found:
                        _ep->maxburst = comp_desc->bMaxBurst + 1;
                        break;
                default:
-                       if (comp_desc->bMaxBurst != 0)
+                       if (comp_desc->bMaxBurst != 0) {
+                               struct usb_composite_dev *cdev;
+
+                               cdev = get_gadget_data(g);
                                ERROR(cdev, "ep0 bMaxBurst must be 0\n");
+                       }
                        _ep->maxburst = 1;
                        break;
                }
index 97ea059a7aa471192710be177276c8b969619e2b..b6cf5ab5a0a135bb36e09de7b96e13645caa1abe 100644 (file)
@@ -1012,7 +1012,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                else
                        ret = ep->status;
                goto error_mutex;
-       } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
+       } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
                ret = -ENOMEM;
        } else {
                req->buf      = data;
@@ -2282,9 +2282,18 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
                int i;
 
                if (len < sizeof(*d) ||
-                   d->bFirstInterfaceNumber >= ffs->interfaces_count ||
-                   !d->Reserved1)
+                   d->bFirstInterfaceNumber >= ffs->interfaces_count)
                        return -EINVAL;
+               if (d->Reserved1 != 1) {
+                       /*
+                        * According to the spec, Reserved1 must be set to 1
+                        * but older kernels incorrectly rejected non-zero
+                        * values.  We fix it here to avoid returning EINVAL
+                        * in response to values we used to accept.
+                        */
+                       pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
+                       d->Reserved1 = 1;
+               }
                for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
                        if (d->Reserved2[i])
                                return -EINVAL;
index a12fb459dbd9f6b8fccb06ce4d5fd18084f0b38b..784bf86dad4fcb0c8f9d10f2bd8652ee6a8d38d6 100644 (file)
@@ -479,7 +479,7 @@ endif
 # or video class gadget drivers), or specific hardware, here.
 config USB_G_WEBCAM
        tristate "USB Webcam Gadget"
-       depends on VIDEO_DEV
+       depends on VIDEO_V4L2
        select USB_LIBCOMPOSITE
        select VIDEOBUF2_VMALLOC
        select USB_F_UVC
index bfe278294e889058aa642077b78dd0b97263f7ff..ad743a8493be2eceabb2031a48a834c702b823b9 100644 (file)
@@ -1550,9 +1550,9 @@ static void at91_vbus_timer_work(struct work_struct *work)
                mod_timer(&udc->vbus_timer, jiffies + VBUS_POLL_TIMEOUT);
 }
 
-static void at91_vbus_timer(unsigned long data)
+static void at91_vbus_timer(struct timer_list *t)
 {
-       struct at91_udc *udc = (struct at91_udc *)data;
+       struct at91_udc *udc = from_timer(udc, t, vbus_timer);
 
        /*
         * If we are polling vbus it is likely that the gpio is on an
@@ -1918,8 +1918,7 @@ static int at91udc_probe(struct platform_device *pdev)
 
                if (udc->board.vbus_polled) {
                        INIT_WORK(&udc->vbus_timer_work, at91_vbus_timer_work);
-                       setup_timer(&udc->vbus_timer, at91_vbus_timer,
-                                   (unsigned long)udc);
+                       timer_setup(&udc->vbus_timer, at91_vbus_timer, 0);
                        mod_timer(&udc->vbus_timer,
                                  jiffies + VBUS_POLL_TIMEOUT);
                } else {
index d39f070acbd705573b49a1b8df1b1f0077470940..01b44e15962378d02e260ee37872ded457bc763d 100644 (file)
@@ -642,7 +642,6 @@ static const struct of_device_id bdc_of_match[] = {
 static struct platform_driver bdc_driver = {
        .driver         = {
                .name   = BRCM_BDC_NAME,
-               .owner  = THIS_MODULE,
                .pm = &bdc_pm_ops,
                .of_match_table = bdc_of_match,
        },
index 61422d624ad090f1dff586d59a9a31e2ce2ea0fb..93eff7dec2f5e9d7d9449b1189f8e5d2c91728e6 100644 (file)
@@ -1069,8 +1069,12 @@ static inline void usb_gadget_udc_stop(struct usb_udc *udc)
 static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
                                            enum usb_device_speed speed)
 {
-       if (udc->gadget->ops->udc_set_speed)
-               udc->gadget->ops->udc_set_speed(udc->gadget, speed);
+       if (udc->gadget->ops->udc_set_speed) {
+               enum usb_device_speed s;
+
+               s = min(speed, udc->gadget->max_speed);
+               udc->gadget->ops->udc_set_speed(udc->gadget, s);
+       }
 }
 
 /**
index 4f1b1809472c41446b34904fb857a965f33c1afd..d0128f92ec5af312af880d7822a316587992b1a8 100644 (file)
@@ -1771,9 +1771,9 @@ static int handle_control_request(struct dummy_hcd *dum_hcd, struct urb *urb,
 /* drive both sides of the transfers; looks like irq handlers to
  * both drivers except the callbacks aren't in_irq().
  */
-static void dummy_timer(unsigned long _dum_hcd)
+static void dummy_timer(struct timer_list *t)
 {
-       struct dummy_hcd        *dum_hcd = (struct dummy_hcd *) _dum_hcd;
+       struct dummy_hcd        *dum_hcd = from_timer(dum_hcd, t, timer);
        struct dummy            *dum = dum_hcd->dum;
        struct urbp             *urbp, *tmp;
        unsigned long           flags;
@@ -2445,7 +2445,7 @@ static DEVICE_ATTR_RO(urbs);
 
 static int dummy_start_ss(struct dummy_hcd *dum_hcd)
 {
-       setup_timer(&dum_hcd->timer, dummy_timer, (unsigned long)dum_hcd);
+       timer_setup(&dum_hcd->timer, dummy_timer, 0);
        dum_hcd->rh_state = DUMMY_RH_RUNNING;
        dum_hcd->stream_en_ep = 0;
        INIT_LIST_HEAD(&dum_hcd->urbp_list);
@@ -2474,7 +2474,7 @@ static int dummy_start(struct usb_hcd *hcd)
                return dummy_start_ss(dum_hcd);
 
        spin_lock_init(&dum_hcd->dum->lock);
-       setup_timer(&dum_hcd->timer, dummy_timer, (unsigned long)dum_hcd);
+       timer_setup(&dum_hcd->timer, dummy_timer, 0);
        dum_hcd->rh_state = DUMMY_RH_RUNNING;
 
        INIT_LIST_HEAD(&dum_hcd->urbp_list);
index f19e6282a688d030e3304fcb17e227efbd30207a..a8288df6aadf09c5523d1922d6a71f0c646940fb 100644 (file)
@@ -1259,9 +1259,9 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
        return IRQ_HANDLED;
 }
 
-static void m66592_timer(unsigned long _m66592)
+static void m66592_timer(struct timer_list *t)
 {
-       struct m66592 *m66592 = (struct m66592 *)_m66592;
+       struct m66592 *m66592 = from_timer(m66592, t, timer);
        unsigned long flags;
        u16 tmp;
 
@@ -1589,7 +1589,7 @@ static int m66592_probe(struct platform_device *pdev)
        m66592->gadget.max_speed = USB_SPEED_HIGH;
        m66592->gadget.name = udc_name;
 
-       setup_timer(&m66592->timer, m66592_timer, (unsigned long)m66592);
+       timer_setup(&m66592->timer, m66592_timer, 0);
        m66592->reg = reg;
 
        ret = request_irq(ires->start, m66592_irq, IRQF_SHARED,
index fc7f810baef79e8a107dd8105656b4283b22f0d0..dc35a54bad9088589339539f88c736bb9f63bf7c 100644 (file)
@@ -1854,9 +1854,9 @@ static irqreturn_t omap_udc_irq(int irq, void *_udc)
 #define PIO_OUT_TIMEOUT        (jiffies + HZ/3)
 #define HALF_FULL(f)   (!((f)&(UDC_NON_ISO_FIFO_FULL|UDC_NON_ISO_FIFO_EMPTY)))
 
-static void pio_out_timer(unsigned long _ep)
+static void pio_out_timer(struct timer_list *t)
 {
-       struct omap_ep  *ep = (void *) _ep;
+       struct omap_ep  *ep = from_timer(ep, t, timer);
        unsigned long   flags;
        u16             stat_flg;
 
@@ -2542,9 +2542,7 @@ omap_ep_setup(char *name, u8 addr, u8 type,
                }
                if (dbuf && addr)
                        epn_rxtx |= UDC_EPN_RX_DB;
-               init_timer(&ep->timer);
-               ep->timer.function = pio_out_timer;
-               ep->timer.data = (unsigned long) ep;
+               timer_setup(&ep->timer, pio_out_timer, 0);
        }
        if (addr)
                epn_rxtx |= UDC_EPN_RX_VALID;
index 8f135d9fa245984e5f2a40108eb5bc159cb7d8fc..0e3f5faa000e93cca3349930111ea0072ed7989b 100644 (file)
@@ -1624,9 +1624,9 @@ static inline void clear_ep_state (struct pxa25x_udc *dev)
                nuke(&dev->ep[i], -ECONNABORTED);
 }
 
-static void udc_watchdog(unsigned long _dev)
+static void udc_watchdog(struct timer_list *t)
 {
-       struct pxa25x_udc       *dev = (void *)_dev;
+       struct pxa25x_udc       *dev = from_timer(dev, t, timer);
 
        local_irq_disable();
        if (dev->ep0state == EP0_STALL
@@ -2413,7 +2413,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
                gpio_direction_output(dev->mach->gpio_pullup, 0);
        }
 
-       setup_timer(&dev->timer, udc_watchdog, (unsigned long)dev);
+       timer_setup(&dev->timer, udc_watchdog, 0);
 
        the_controller = dev;
        platform_set_drvdata(pdev, dev);
index 143122ed3c6646fdefb439daf1288cf3d4b4297a..a3ecce62662ba6cdc1e0f7ee65e90b2445537b56 100644 (file)
@@ -1514,9 +1514,9 @@ static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
        return IRQ_HANDLED;
 }
 
-static void r8a66597_timer(unsigned long _r8a66597)
+static void r8a66597_timer(struct timer_list *t)
 {
-       struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+       struct r8a66597 *r8a66597 = from_timer(r8a66597, t, timer);
        unsigned long flags;
        u16 tmp;
 
@@ -1874,7 +1874,7 @@ static int r8a66597_probe(struct platform_device *pdev)
        r8a66597->gadget.max_speed = USB_SPEED_HIGH;
        r8a66597->gadget.name = udc_name;
 
-       setup_timer(&r8a66597->timer, r8a66597_timer, (unsigned long)r8a66597);
+       timer_setup(&r8a66597->timer, r8a66597_timer, 0);
        r8a66597->reg = reg;
 
        if (r8a66597->pdata->on_chip) {
index bc37f40baacf2b54fc2528c59f1b6d80ebae3a5b..6e87af2483679aac59f23e942dde9367a30d35e4 100644 (file)
 #define USB3_EP0_SS_MAX_PACKET_SIZE    512
 #define USB3_EP0_HSFS_MAX_PACKET_SIZE  64
 #define USB3_EP0_BUF_SIZE              8
-#define USB3_MAX_NUM_PIPES             30
+#define USB3_MAX_NUM_PIPES             6       /* This includes PIPE 0 */
 #define USB3_WAIT_US                   3
 #define USB3_DMA_NUM_SETTING_AREA      4
 /*
index 19f00424f53ed3b6246788dbec370e198a7b86a9..3ed75aaa09d9d37b786c7865af231274fa8d4542 100644 (file)
@@ -827,7 +827,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
                        default:                /* unknown */
                                break;
                        }
-                       temp = (cap >> 8) & 0xff;
+                       offset = (cap >> 8) & 0xff;
                }
        }
 #endif
index 10887e09e9bc0ce916df1eee3fe217421e8199b0..ee96763493332458da2397bc85a24e8dfa5b70e6 100644 (file)
@@ -80,7 +80,7 @@ static const char     hcd_name [] = "ohci_hcd";
 
 static void ohci_dump(struct ohci_hcd *ohci);
 static void ohci_stop(struct usb_hcd *hcd);
-static void io_watchdog_func(unsigned long _ohci);
+static void io_watchdog_func(struct timer_list *t);
 
 #include "ohci-hub.c"
 #include "ohci-dbg.c"
@@ -500,8 +500,7 @@ static int ohci_init (struct ohci_hcd *ohci)
        if (ohci->hcca)
                return 0;
 
-       setup_timer(&ohci->io_watchdog, io_watchdog_func,
-                       (unsigned long) ohci);
+       timer_setup(&ohci->io_watchdog, io_watchdog_func, 0);
 
        ohci->hcca = dma_alloc_coherent (hcd->self.controller,
                        sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
@@ -723,9 +722,9 @@ static int ohci_start(struct usb_hcd *hcd)
  * the unlink list.  As a result, URBs could never be dequeued and
  * endpoints could never be released.
  */
-static void io_watchdog_func(unsigned long _ohci)
+static void io_watchdog_func(struct timer_list *t)
 {
-       struct ohci_hcd *ohci = (struct ohci_hcd *) _ohci;
+       struct ohci_hcd *ohci = from_timer(ohci, t, io_watchdog);
        bool            takeback_all_pending = false;
        u32             status;
        u32             head;
index 0bf7759aae789e9705cc52e90fbb5dca41d15701..c5e6e8d0b5ef5fe6428c7838df74e015262ddbe0 100644 (file)
@@ -2539,9 +2539,9 @@ static irqreturn_t oxu_irq(struct usb_hcd *hcd)
        return ret;
 }
 
-static void oxu_watchdog(unsigned long param)
+static void oxu_watchdog(struct timer_list *t)
 {
-       struct oxu_hcd  *oxu = (struct oxu_hcd *) param;
+       struct oxu_hcd  *oxu = from_timer(oxu, t, watchdog);
        unsigned long flags;
 
        spin_lock_irqsave(&oxu->lock, flags);
@@ -2577,7 +2577,7 @@ static int oxu_hcd_init(struct usb_hcd *hcd)
 
        spin_lock_init(&oxu->lock);
 
-       setup_timer(&oxu->watchdog, oxu_watchdog, (unsigned long)oxu);
+       timer_setup(&oxu->watchdog, oxu_watchdog, 0);
 
        /*
         * hw default: 1K periodic list heads, one per frame.
index f3d9ba420a97b5a3d932fa51a0952df40d5480be..984892dd72f550a2de7dafac8a2d64ca8a0ab215 100644 (file)
@@ -1798,9 +1798,9 @@ static void r8a66597_td_timer(struct timer_list *t)
        spin_unlock_irqrestore(&r8a66597->lock, flags);
 }
 
-static void r8a66597_timer(unsigned long _r8a66597)
+static void r8a66597_timer(struct timer_list *t)
 {
-       struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+       struct r8a66597 *r8a66597 = from_timer(r8a66597, t, rh_timer);
        unsigned long flags;
        int port;
 
@@ -2472,8 +2472,7 @@ static int r8a66597_probe(struct platform_device *pdev)
                r8a66597->max_root_hub = 2;
 
        spin_lock_init(&r8a66597->lock);
-       setup_timer(&r8a66597->rh_timer, r8a66597_timer,
-                   (unsigned long)r8a66597);
+       timer_setup(&r8a66597->rh_timer, r8a66597_timer, 0);
        r8a66597->reg = reg;
 
        /* make sure no interrupts are pending */
index 601fb00603cc1d4737f3cd0c3d238544447a4830..fa88a903fa2ea886e8f08e52e825049ae1b3864f 100644 (file)
@@ -1119,9 +1119,9 @@ sl811h_hub_descriptor (
 }
 
 static void
-sl811h_timer(unsigned long _sl811)
+sl811h_timer(struct timer_list *t)
 {
-       struct sl811    *sl811 = (void *) _sl811;
+       struct sl811    *sl811 = from_timer(sl811, t, timer);
        unsigned long   flags;
        u8              irqstat;
        u8              signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE;
@@ -1692,7 +1692,7 @@ sl811h_probe(struct platform_device *dev)
        spin_lock_init(&sl811->lock);
        INIT_LIST_HEAD(&sl811->async);
        sl811->board = dev_get_platdata(&dev->dev);
-       setup_timer(&sl811->timer, sl811h_timer, (unsigned long)sl811);
+       timer_setup(&sl811->timer, sl811h_timer, 0);
        sl811->addr_reg = addr_reg;
        sl811->data_reg = data_reg;
 
index babeefd84ffd06318f724abb835f3e537649d195..f5c90217777acb69aeb7a0623bffadeb264d582a 100644 (file)
@@ -585,8 +585,7 @@ static int uhci_start(struct usb_hcd *hcd)
                hcd->self.sg_tablesize = ~0;
 
        spin_lock_init(&uhci->lock);
-       setup_timer(&uhci->fsbr_timer, uhci_fsbr_timeout,
-                       (unsigned long) uhci);
+       timer_setup(&uhci->fsbr_timer, uhci_fsbr_timeout, 0);
        INIT_LIST_HEAD(&uhci->idle_qh_list);
        init_waitqueue_head(&uhci->waitqh);
 
index 49d4edc03cc28bafef6ed4e20a0427c75148b4e7..d40438238938c0ecc04b58e72493ed426799b53d 100644 (file)
@@ -90,9 +90,9 @@ static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
        }
 }
 
-static void uhci_fsbr_timeout(unsigned long _uhci)
+static void uhci_fsbr_timeout(struct timer_list *t)
 {
-       struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
+       struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer);
        unsigned long flags;
 
        spin_lock_irqsave(&uhci->lock, flags);
index e1fba4688509df32d0aee83be2f53abbf65f37b5..3a29b32a3bd06c43376bb493ae6f08a48e6f3174 100644 (file)
@@ -934,6 +934,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
        if (!vdev)
                return;
 
+       if (vdev->real_port == 0 ||
+                       vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
+               xhci_dbg(xhci, "Bad vdev->real_port.\n");
+               goto out;
+       }
+
        tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
        list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
                /* is this a hub device that added a tt_info to the tts list */
@@ -947,6 +953,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
                        }
                }
        }
+out:
        /* we are now at a leaf device */
        xhci_debugfs_remove_slot(xhci, slot_id);
        xhci_free_virt_device(xhci, slot_id);
@@ -964,10 +971,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
                return 0;
        }
 
-       xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
-       if (!xhci->devs[slot_id])
+       dev = kzalloc(sizeof(*dev), flags);
+       if (!dev)
                return 0;
-       dev = xhci->devs[slot_id];
 
        /* Allocate the (output) device context that will be used in the HC. */
        dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
@@ -1008,9 +1014,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 
        trace_xhci_alloc_virt_device(dev);
 
+       xhci->devs[slot_id] = dev;
+
        return 1;
 fail:
-       xhci_free_virt_device(xhci, slot_id);
+
+       if (dev->in_ctx)
+               xhci_free_container_ctx(xhci, dev->in_ctx);
+       if (dev->out_ctx)
+               xhci_free_container_ctx(xhci, dev->out_ctx);
+       kfree(dev);
+
        return 0;
 }
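The xhci_alloc_virt_device() rework above is the publish-last allocation pattern: the device is assembled in a local pointer, partially-initialized pieces are released explicitly on error, and the globally visible slot (xhci->devs[slot_id]) is written only once setup has fully succeeded, so no other path can ever see a half-built entry. A generic sketch of that pattern with made-up names (struct widget, widget_create(), WIDGET_BUF_SIZE):

#include <linux/slab.h>

#define WIDGET_BUF_SIZE 64      /* hypothetical payload size */

struct widget {
        void *buf;
};

static struct widget *widget_create(struct widget **slot, gfp_t flags)
{
        struct widget *w = kzalloc(sizeof(*w), flags);

        if (!w)
                return NULL;

        w->buf = kmalloc(WIDGET_BUF_SIZE, flags);
        if (!w->buf)
                goto fail;

        *slot = w;              /* publish only after full initialization */
        return w;

fail:
        kfree(w->buf);          /* NULL here, and kfree(NULL) is a no-op */
        kfree(w);
        return NULL;
}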
 
index c239c688076cf924060eccdb9698b2a0a86ab75d..c5cbc685c6915ce9e5b6f884ec5cdd7ea0306dc4 100644 (file)
@@ -2477,12 +2477,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                 */
                if (list_empty(&ep_ring->td_list)) {
                        /*
-                        * A stopped endpoint may generate an extra completion
-                        * event if the device was suspended.  Don't print
-                        * warnings.
+                        * Don't print warnings if the event is due to a stopped
+                        * endpoint generating an extra completion while the
+                        * device was suspended, or if it is an event for the last
+                        * TRB of a short TD for which we already received a short
+                        * event. The short TD is already removed from the TD list.
+
                        if (!(trb_comp_code == COMP_STOPPED ||
-                               trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
+                             trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+                             ep_ring->last_td_was_short)) {
                                xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
                                                TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
                                                ep_index);
@@ -3108,7 +3112,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
 {
        u32 maxp, total_packet_count;
 
-       /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
+       /* MTK xHCI 0.96 contains some features from 1.0 */
        if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
                return ((td_total_len - transferred) >> 10);
 
@@ -3117,8 +3121,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
            trb_buff_len == td_total_len)
                return 0;
 
-       /* for MTK xHCI, TD size doesn't include this TRB */
-       if (xhci->quirks & XHCI_MTK_HOST)
+       /* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
+       if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
                trb_buff_len = 0;
 
        maxp = usb_endpoint_maxp(&urb->ep->desc);
index 327ba8b8a98b8db77252b419d13ff74fcebd7df5..2424d3020ca364b22792376e36c21462af3b2f62 100644 (file)
@@ -395,14 +395,14 @@ static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
 
 #endif
 
-static void compliance_mode_recovery(unsigned long arg)
+static void compliance_mode_recovery(struct timer_list *t)
 {
        struct xhci_hcd *xhci;
        struct usb_hcd *hcd;
        u32 temp;
        int i;
 
-       xhci = (struct xhci_hcd *)arg;
+       xhci = from_timer(xhci, t, comp_mode_recovery_timer);
 
        for (i = 0; i < xhci->num_usb3_ports; i++) {
                temp = readl(xhci->usb3_ports[i]);
@@ -443,8 +443,8 @@ static void compliance_mode_recovery(unsigned long arg)
 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
 {
        xhci->port_status_u0 = 0;
-       setup_timer(&xhci->comp_mode_recovery_timer,
-                   compliance_mode_recovery, (unsigned long)xhci);
+       timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
+                   0);
        xhci->comp_mode_recovery_timer.expires = jiffies +
                        msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
 
index 0397606a211b2a62ab58067334784753a4e10668..6c036de63272b432b8b88255a2fd97db587187e6 100644 (file)
@@ -284,7 +284,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
                        musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
                        portstate(musb->port1_status |= USB_PORT_STAT_POWER);
                        del_timer(&musb->dev_timer);
-               } else {
+               } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) {
+                       /*
+                        * When a babble condition occurs, a drvvbus interrupt
+                        * is also generated. Ignore this drvvbus interrupt
+                        * and let the babble interrupt handler recover the
+                        * controller; otherwise, the host-mode flag is lost
+                        * due to the MUSB_DEV_MODE() call below and the babble
+                        * recovery logic will not be called.
+                        */
                        musb->is_active = 0;
                        MUSB_DEV_MODE(musb);
                        otg->default_a = 0;
index a859c2d33c29137fd9c604f75d3f0b03c10d7cc5..fdceb46d9fc61a0c5eea2f113abd494dc4cc693b 100644 (file)
@@ -555,9 +555,9 @@ static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg,
                        val, reg, NULL, 0, MOS_WDR_TIMEOUT);
 }
 
-static void mos7840_led_off(unsigned long arg)
+static void mos7840_led_off(struct timer_list *t)
 {
-       struct moschip_port *mcs = (struct moschip_port *) arg;
+       struct moschip_port *mcs = from_timer(mcs, t, led_timer1);
 
        /* Turn off LED */
        mos7840_set_led_async(mcs, 0x0300, MODEM_CONTROL_REGISTER);
@@ -565,9 +565,9 @@ static void mos7840_led_off(unsigned long arg)
                                jiffies + msecs_to_jiffies(LED_OFF_MS));
 }
 
-static void mos7840_led_flag_off(unsigned long arg)
+static void mos7840_led_flag_off(struct timer_list *t)
 {
-       struct moschip_port *mcs = (struct moschip_port *) arg;
+       struct moschip_port *mcs = from_timer(mcs, t, led_timer2);
 
        clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
 }
@@ -2289,12 +2289,11 @@ static int mos7840_port_probe(struct usb_serial_port *port)
                        goto error;
                }
 
-               setup_timer(&mos7840_port->led_timer1, mos7840_led_off,
-                           (unsigned long)mos7840_port);
+               timer_setup(&mos7840_port->led_timer1, mos7840_led_off, 0);
                mos7840_port->led_timer1.expires =
                        jiffies + msecs_to_jiffies(LED_ON_MS);
-               setup_timer(&mos7840_port->led_timer2, mos7840_led_flag_off,
-                           (unsigned long)mos7840_port);
+               timer_setup(&mos7840_port->led_timer2, mos7840_led_flag_off,
+                           0);
                mos7840_port->led_timer2.expires =
                        jiffies + msecs_to_jiffies(LED_OFF_MS);
 
index aaa7d901a06de68a7902bbba02707ec29d6bcbd5..3b3513874cfd1e75a5380ee208f02c1144919cd1 100644 (file)
@@ -238,6 +238,7 @@ static void option_instat_callback(struct urb *urb);
 /* These Quectel products use Quectel's vendor ID */
 #define QUECTEL_PRODUCT_EC21                   0x0121
 #define QUECTEL_PRODUCT_EC25                   0x0125
+#define QUECTEL_PRODUCT_BG96                   0x0296
 
 #define CMOTECH_VENDOR_ID                      0x16d8
 #define CMOTECH_PRODUCT_6001                   0x6001
@@ -1182,6 +1183,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
index ab5a2ac4993ab234f55ecdb0c4669277e6700f2a..aaf4813e4971eeb98ccae06aa37b7047404baac2 100644 (file)
@@ -31,12 +31,14 @@ static const struct usb_device_id id_table[] = {
 };
 
 static const struct usb_device_id dbc_id_table[] = {
+       { USB_DEVICE(0x1d6b, 0x0010) },
        { USB_DEVICE(0x1d6b, 0x0011) },
        { },
 };
 
 static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(0x0525, 0x127a) },
+       { USB_DEVICE(0x1d6b, 0x0010) },
        { USB_DEVICE(0x1d6b, 0x0011) },
        { },
 };
index 48e2e32c97e8a3d912ddb594b8c4c45b6b371749..31b0244419387c52ec5dcd13138b4d6a0bd063f7 100644 (file)
@@ -751,9 +751,9 @@ static void rts51x_modi_suspend_timer(struct rts51x_chip *chip)
        mod_timer(&chip->rts51x_suspend_timer, chip->timer_expires);
 }
 
-static void rts51x_suspend_timer_fn(unsigned long data)
+static void rts51x_suspend_timer_fn(struct timer_list *t)
 {
-       struct rts51x_chip *chip = (struct rts51x_chip *)data;
+       struct rts51x_chip *chip = from_timer(chip, t, rts51x_suspend_timer);
        struct us_data *us = chip->us;
 
        switch (rts51x_get_stat(chip)) {
@@ -917,8 +917,7 @@ static int realtek_cr_autosuspend_setup(struct us_data *us)
        us->proto_handler = rts51x_invoke_transport;
 
        chip->timer_expires = 0;
-       setup_timer(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn,
-                       (unsigned long)chip);
+       timer_setup(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn, 0);
        fw5895_init(us);
 
        /* enable autosuspend function of the usb device */
index 1fcd758a961f5e192082efef9fb69c8efe3bd962..3734a25e09e539f05f16e9f16b5dfb371d0e3799 100644 (file)
@@ -112,6 +112,10 @@ static int uas_use_uas_driver(struct usb_interface *intf,
                }
        }
 
+       /* All Seagate disk enclosures have broken ATA pass-through support */
+       if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
+               flags |= US_FL_NO_ATA_1X;
+
        usb_stor_adjust_quirks(udev, &flags);
 
        if (flags & US_FL_IGNORE_UAS) {
index 2968046e7c059229cd8cec3611e150ccc3e70f23..f72d045ee9ef11940c056921d2760c649bc34d17 100644 (file)
@@ -2100,6 +2100,13 @@ UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0116,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_BROKEN_FUA ),
 
+/* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+               "JMicron",
+               "JMS567",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BROKEN_FUA),
+
 /*
  * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
  * JMicron responds to USN and several other SCSI ioctls with a
index d520374a824e19796862280dcc17832a0d66321c..e6127fb21c123f397099dcae0ba5947ca7772061 100644 (file)
@@ -129,6 +129,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
 
+/* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+               "JMicron",
+               "JMS567",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BROKEN_FUA),
+
 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
 UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
                "VIA",
index 465d7da849c3415dc0d7fa25068db3ca65e37b9b..bcb2744c59772ce6453bbf280afae83b948163d9 100644 (file)
@@ -1,13 +1,53 @@
 
-menu "USB Power Delivery and Type-C drivers"
+menuconfig TYPEC
+       tristate "USB Type-C Support"
+       help
+         USB Type-C Specification defines a cable and connector for USB where
+         only one type of plug is supported on both ends, i.e. there will not
+         only one type of plug is supported on both ends, i.e. there will not
+         be a Type-A plug on one end of the cable and a Type-B plug on the other.
+         specific Configuration Channel (CC) which goes through the USB Type-C
+         cable. The Configuration Channel may also be used to detect optional
+         Accessory Modes - Analog Audio and Debug - and if USB Power Delivery
+         is supported, the Alternate Modes, where the connector is used for
+         something other than USB communication.
+
+         USB Power Delivery Specification defines a protocol that can be used
+         to negotiate the voltage and current levels with the connected
+         partners. USB Power Delivery allows higher voltages than the normal
+         5V, up to 20V, and current up to 5A over the cable. The USB Power
+         Delivery protocol is also used to negotiate the optional Alternate
+         Modes when they are supported. USB Power Delivery does not depend on
+         the USB Type-C connector; however, it is mostly used together with USB
+         Type-C connectors.
+
+         USB Type-C and USB Power Delivery Specifications define a set of state
+         machines that need to be implemented in either software or firmware.
+         Simple USB Type-C PHYs, for example USB Type-C Port Controller
+         Interface Specification compliant "Port Controllers", need the state
+         machines to be handled in the OS, but stand-alone USB Type-C and Power
+         Delivery controllers handle the state machines inside their firmware.
+         The USB Type-C and Power Delivery controllers usually function
+         autonomously, and do not necessarily require drivers.
+
+         Enable this configuration option if you have USB Type-C connectors on
+         your system and 1) you know your USB Type-C hardware requires OS
+         control (a driver) to function, 2) you need to be able to read
+         the status of the USB Type-C ports in your system, or 3) you need
+         to be able to swap the power role (decide whether you are supplying or
+         consuming power over the cable) or data role (host or device) when
+         both roles are supported.
+
+         For more information, see the kernel documentation for USB Type-C
+         Connector Class API (Documentation/driver-api/usb/typec.rst)
+         <https://www.kernel.org/doc/html/latest/driver-api/usb/typec.html>
+         and ABI (Documentation/ABI/testing/sysfs-class-typec).
 
-config TYPEC
-       tristate
+if TYPEC
 
 config TYPEC_TCPM
        tristate "USB Type-C Port Controller Manager"
        depends on USB
-       select TYPEC
        help
          The Type-C Port Controller Manager provides a USB PD and USB Type-C
          state machine for use with Type-C Port Controllers.
@@ -22,7 +62,6 @@ config TYPEC_WCOVE
        depends on INTEL_SOC_PMIC
        depends on INTEL_PMC_IPC
        depends on BXT_WC_PMIC_OPREGION
-       select TYPEC
        help
          This driver adds support for USB Type-C detection on Intel Broxton
          platforms that have Intel Whiskey Cove PMIC. The driver can detect the
@@ -31,14 +70,13 @@ config TYPEC_WCOVE
          To compile this driver as module, choose M here: the module will be
          called typec_wcove
 
-endif
+endif # TYPEC_TCPM
 
 source "drivers/usb/typec/ucsi/Kconfig"
 
 config TYPEC_TPS6598X
        tristate "TI TPS6598x USB Power Delivery controller driver"
        depends on I2C
-       select TYPEC
        help
          Say Y or M here if your system has TI TPS65982 or TPS65983 USB Power
          Delivery controller.
@@ -46,4 +84,4 @@ config TYPEC_TPS6598X
          If you choose to build this driver as a dynamically linked module, the
          module will be called tps6598x.ko.
 
-endmenu
+endif # TYPEC
index d0c31cee472099598eb1f727cb4f9164dade2a84..e36d6c73c4a4184c6246b14ab27b8e0ecb393be8 100644 (file)
@@ -1,7 +1,6 @@
 config TYPEC_UCSI
        tristate "USB Type-C Connector System Software Interface driver"
        depends on !CPU_BIG_ENDIAN
-       select TYPEC
        help
          USB Type-C Connector System Software Interface (UCSI) is a
          specification for an interface that allows the operating system to
index 536e037f541faa01f16188e45e1afe130abc57ea..493ac2928391accc4984e3ceddbfdadd2690a26c 100644 (file)
@@ -322,23 +322,34 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
        return priv;
 }
 
-static int get_pipe(struct stub_device *sdev, int epnum, int dir)
+static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
 {
        struct usb_device *udev = sdev->udev;
        struct usb_host_endpoint *ep;
        struct usb_endpoint_descriptor *epd = NULL;
+       int epnum = pdu->base.ep;
+       int dir = pdu->base.direction;
+
+       if (epnum < 0 || epnum > 15)
+               goto err_ret;
 
        if (dir == USBIP_DIR_IN)
                ep = udev->ep_in[epnum & 0x7f];
        else
                ep = udev->ep_out[epnum & 0x7f];
-       if (!ep) {
-               dev_err(&sdev->udev->dev, "no such endpoint?, %d\n",
-                       epnum);
-               BUG();
-       }
+       if (!ep)
+               goto err_ret;
 
        epd = &ep->desc;
+
+       /* validate transfer_buffer_length */
+       if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) {
+               dev_err(&sdev->udev->dev,
+                       "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n",
+                       pdu->u.cmd_submit.transfer_buffer_length);
+               return -1;
+       }
+
        if (usb_endpoint_xfer_control(epd)) {
                if (dir == USBIP_DIR_OUT)
                        return usb_sndctrlpipe(udev, epnum);
@@ -361,15 +372,31 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
        }
 
        if (usb_endpoint_xfer_isoc(epd)) {
+               /* validate packet size and number of packets */
+               unsigned int maxp, packets, bytes;
+
+               maxp = usb_endpoint_maxp(epd);
+               maxp *= usb_endpoint_maxp_mult(epd);
+               bytes = pdu->u.cmd_submit.transfer_buffer_length;
+               packets = DIV_ROUND_UP(bytes, maxp);
+
+               if (pdu->u.cmd_submit.number_of_packets < 0 ||
+                   pdu->u.cmd_submit.number_of_packets > packets) {
+                       dev_err(&sdev->udev->dev,
+                               "CMD_SUBMIT: isoc invalid num packets %d\n",
+                               pdu->u.cmd_submit.number_of_packets);
+                       return -1;
+               }
                if (dir == USBIP_DIR_OUT)
                        return usb_sndisocpipe(udev, epnum);
                else
                        return usb_rcvisocpipe(udev, epnum);
        }
 
+err_ret:
        /* NOT REACHED */
-       dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum);
-       return 0;
+       dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
+       return -1;
 }
 
 static void masking_bogus_flags(struct urb *urb)
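The isochronous validation added to get_pipe() above bounds number_of_packets by the largest count the submitted buffer could legitimately require: the transfer length divided by wMaxPacketSize times the high-bandwidth multiplier, rounded up. For example, 10000 bytes on an endpoint with maxp 1024 and mult 3 needs at most DIV_ROUND_UP(10000, 3072) = 4 packets, so a CMD_SUBMIT claiming more is refused. A standalone sketch of that bound; isoc_packets_valid() is a hypothetical helper, not part of usbip:

#include <linux/kernel.h>       /* DIV_ROUND_UP() */
#include <linux/types.h>

static bool isoc_packets_valid(unsigned int buf_len, unsigned int maxp,
                               unsigned int mult, int number_of_packets)
{
        unsigned int max_packets = DIV_ROUND_UP(buf_len, maxp * mult);

        return number_of_packets >= 0 && number_of_packets <= max_packets;
}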
@@ -433,7 +460,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
        struct stub_priv *priv;
        struct usbip_device *ud = &sdev->ud;
        struct usb_device *udev = sdev->udev;
-       int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
+       int pipe = get_pipe(sdev, pdu);
+
+       if (pipe == -1)
+               return;
 
        priv = stub_priv_alloc(sdev, pdu);
        if (!priv)
@@ -452,7 +482,8 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
        }
 
        /* allocate urb transfer buffer, if needed */
-       if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
+       if (pdu->u.cmd_submit.transfer_buffer_length > 0 &&
+           pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) {
                priv->urb->transfer_buffer =
                        kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
                                GFP_KERNEL);
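
The get_pipe() and stub_recv_cmd_submit() changes above reject CMD_SUBMIT requests whose endpoint number, transfer length or isochronous packet count fall outside what the endpoint can actually carry. A standalone sketch of the same bounds check, using hypothetical names and a userspace DIV_ROUND_UP, shown only to illustrate the arithmetic:

#include <limits.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Returns 0 if the request looks sane, -1 otherwise. */
static int validate_isoc_submit(long long transfer_buffer_length,
				int number_of_packets,
				unsigned int maxp, unsigned int mult)
{
	unsigned int max_packets;

	if (transfer_buffer_length < 0 || transfer_buffer_length > INT_MAX)
		return -1;		/* oversized or negative length */

	if (maxp == 0 || mult == 0)
		return -1;		/* nonsense endpoint descriptor */

	max_packets = DIV_ROUND_UP((unsigned int)transfer_buffer_length,
				   maxp * mult);
	if (number_of_packets < 0 ||
	    (unsigned int)number_of_packets > max_packets)
		return -1;		/* more packets than the buffer holds */

	return 0;
}

int main(void)
{
	/* 3 KiB buffer, 1024-byte packets: at most 3 packets are valid. */
	printf("%d\n", validate_isoc_submit(3072, 3, 1024, 1));	/* 0  */
	printf("%d\n", validate_isoc_submit(3072, 100, 1024, 1));	/* -1 */
	return 0;
}
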
index b18bce96c212b35384d18ba108cbe98f27803cab..53172b1f6257cf9f8d72dac57212c0ec939a8dc4 100644 (file)
@@ -167,6 +167,13 @@ static int stub_send_ret_submit(struct stub_device *sdev)
                memset(&pdu_header, 0, sizeof(pdu_header));
                memset(&msg, 0, sizeof(msg));
 
+               if (urb->actual_length > 0 && !urb->transfer_buffer) {
+                       dev_err(&sdev->udev->dev,
+                               "urb: actual_length %d transfer_buffer null\n",
+                               urb->actual_length);
+                       return -1;
+               }
+
                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
                        iovnum = 2 + urb->number_of_packets;
                else
index e5de35c8c5056b7ab9c38c715cbf57b67a263a92..473fb8a872893caa3494fe9a4a85b60600bdfd28 100644 (file)
@@ -256,6 +256,7 @@ struct usbip_device {
        /* lock for status */
        spinlock_t lock;
 
+       int sockfd;
        struct socket *tcp_socket;
 
        struct task_struct *tcp_rx;
index 713e941709632f61664d5e97ed081163883835ac..6b3278c4b72a0d745a724b3ef93cce0dc3dccd7d 100644 (file)
@@ -1098,7 +1098,6 @@ static int hcd_name_to_id(const char *name)
 static int vhci_setup(struct usb_hcd *hcd)
 {
        struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller));
-       hcd->self.sg_tablesize = ~0;
        if (usb_hcd_is_primary_hcd(hcd)) {
                vhci->vhci_hcd_hs = hcd_to_vhci_hcd(hcd);
                vhci->vhci_hcd_hs->vhci = vhci;
index e78f7472cac496d4cfa2a5fcf0e4111568c242ab..091f76b7196d21da57d4adf1cca91fae29377fdd 100644 (file)
 
 /*
  * output example:
- * hub port sta spd dev      socket           local_busid
- * hs  0000 004 000 00000000         c5a7bb80 1-2.3
+ * hub port sta spd dev       sockfd    local_busid
+ * hs  0000 004 000 00000000  3         1-2.3
  * ................................................
- * ss  0008 004 000 00000000         d8cee980 2-3.4
+ * ss  0008 004 000 00000000  4         2-3.4
  * ................................................
  *
- * IP address can be retrieved from a socket pointer address by looking
- * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
- * port number and its peer IP address.
+ * Output includes socket fd instead of socket pointer address to avoid
+ * leaking kernel memory address in:
+ *     /sys/devices/platform/vhci_hcd.0/status and in debug output.
+ * The socket pointer address is not used at the moment and it was made
+ * visible as a convenient way to find IP address from socket pointer
+ * address by looking up /proc/net/{tcp,tcp6}. As this opens a security
+ * hole, the change is made to use sockfd instead.
+ *
  */
 static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev)
 {
@@ -39,8 +44,8 @@ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vd
        if (vdev->ud.status == VDEV_ST_USED) {
                *out += sprintf(*out, "%03u %08x ",
                                      vdev->speed, vdev->devid);
-               *out += sprintf(*out, "%16p %s",
-                                     vdev->ud.tcp_socket,
+               *out += sprintf(*out, "%u %s",
+                                     vdev->ud.sockfd,
                                      dev_name(&vdev->udev->dev));
 
        } else {
@@ -160,7 +165,8 @@ static ssize_t nports_show(struct device *dev, struct device_attribute *attr,
        char *s = out;
 
        /*
-        * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, thus the * 2.
+        * Half the ports are for SPEED_HIGH and half for SPEED_SUPER,
+        * thus the * 2.
         */
        out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers);
        return out - s;
@@ -366,6 +372,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
 
        vdev->devid         = devid;
        vdev->speed         = speed;
+       vdev->ud.sockfd     = sockfd;
        vdev->ud.tcp_socket = socket;
        vdev->ud.status     = VDEV_ST_NOTASSIGNED;
 
index 38d0504a1bbc50a29a3d639a772648ef385e5692..625f706b8160c27067b106f50704d137c361cf78 100644 (file)
@@ -603,9 +603,9 @@ static void uwb_cnflt_update_work(struct work_struct *work)
        mutex_unlock(&rc->rsvs_mutex);
 }
 
-static void uwb_cnflt_timer(unsigned long arg)
+static void uwb_cnflt_timer(struct timer_list *t)
 {
-       struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;
+       struct uwb_cnflt_alien *cnflt = from_timer(cnflt, t, timer);
 
        queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
 }
@@ -642,7 +642,7 @@ static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_i
        }
 
        INIT_LIST_HEAD(&cnflt->rc_node);
-       setup_timer(&cnflt->timer, uwb_cnflt_timer, (unsigned long)cnflt);
+       timer_setup(&cnflt->timer, uwb_cnflt_timer, 0);
 
        cnflt->rc = rc;
        INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);
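
This hunk, and the uwb and watchdog hunks that follow, apply the same mechanical conversion from the old setup_timer()/unsigned-long-cookie style to timer_setup() with a struct timer_list * callback that recovers its container via from_timer(). A minimal sketch of the pattern (kernel-internal API; the structure name is made up):

#include <linux/timer.h>
#include <linux/workqueue.h>

struct my_ctx {				/* hypothetical structure */
	struct timer_list timer;
	struct work_struct work;
};

/* New-style callback: it gets the timer_list pointer and recovers the
 * containing structure with from_timer(), a container_of() wrapper. */
static void my_timer_fn(struct timer_list *t)
{
	struct my_ctx *ctx = from_timer(ctx, t, timer);

	schedule_work(&ctx->work);
}

static void my_ctx_init(struct my_ctx *ctx)
{
	/* Old style: setup_timer(&ctx->timer, my_timer_fn, (unsigned long)ctx); */
	timer_setup(&ctx->timer, my_timer_fn, 0);
}
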
index 36b5cb62c15dba7b734ffa61460fdeb3a31e79cc..fbdca728bd9f8b947fa9af19103232a0850655c7 100644 (file)
@@ -115,7 +115,7 @@ struct uwb_rc_neh {
        struct list_head list_node;
 };
 
-static void uwb_rc_neh_timer(unsigned long arg);
+static void uwb_rc_neh_timer(struct timer_list *t);
 
 static void uwb_rc_neh_release(struct kref *kref)
 {
@@ -223,7 +223,7 @@ struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
 
        kref_init(&neh->kref);
        INIT_LIST_HEAD(&neh->list_node);
-       setup_timer(&neh->timer, uwb_rc_neh_timer, (unsigned long)neh);
+       timer_setup(&neh->timer, uwb_rc_neh_timer, 0);
 
        neh->rc = rc;
        neh->evt_type = expected_type;
@@ -565,9 +565,9 @@ void uwb_rc_neh_error(struct uwb_rc *rc, int error)
 EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
 
 
-static void uwb_rc_neh_timer(unsigned long arg)
+static void uwb_rc_neh_timer(struct timer_list *t)
 {
-       struct uwb_rc_neh *neh = (struct uwb_rc_neh *)arg;
+       struct uwb_rc_neh *neh = from_timer(neh, t, timer);
        struct uwb_rc *rc = neh->rc;
        unsigned long flags;
 
index f5e27247a38feac0a0294a57fa49ad0a260be637..fe25a8cc6fa10e5ce35ec218c29a16a8f5433658 100644 (file)
@@ -23,7 +23,7 @@
 
 #include "uwb-internal.h"
 
-static void uwb_rsv_timer(unsigned long arg);
+static void uwb_rsv_timer(struct timer_list *t);
 
 static const char *rsv_states[] = {
        [UWB_RSV_STATE_NONE]                 = "none            ",
@@ -198,9 +198,9 @@ static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
        dev_dbg(dev, "put stream %d\n", rsv->stream);
 }
 
-void uwb_rsv_backoff_win_timer(unsigned long arg)
+void uwb_rsv_backoff_win_timer(struct timer_list *t)
 {
-       struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg;
+       struct uwb_drp_backoff_win *bow = from_timer(bow, t, timer);
        struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
        struct device *dev = &rc->uwb_dev.dev;
 
@@ -470,7 +470,7 @@ static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
        INIT_LIST_HEAD(&rsv->rc_node);
        INIT_LIST_HEAD(&rsv->pal_node);
        kref_init(&rsv->kref);
-       setup_timer(&rsv->timer, uwb_rsv_timer, (unsigned long)rsv);
+       timer_setup(&rsv->timer, uwb_rsv_timer, 0);
 
        rsv->rc = rc;
        INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);
@@ -939,9 +939,9 @@ static void uwb_rsv_alien_bp_work(struct work_struct *work)
        mutex_unlock(&rc->rsvs_mutex);
 }
 
-static void uwb_rsv_timer(unsigned long arg)
+static void uwb_rsv_timer(struct timer_list *t)
 {
-       struct uwb_rsv *rsv = (struct uwb_rsv *)arg;
+       struct uwb_rsv *rsv = from_timer(rsv, t, timer);
 
        queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
 }
@@ -987,8 +987,7 @@ void uwb_rsv_init(struct uwb_rc *rc)
        rc->bow.can_reserve_extra_mases = true;
        rc->bow.total_expired = 0;
        rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
-       setup_timer(&rc->bow.timer, uwb_rsv_backoff_win_timer,
-                       (unsigned long)&rc->bow);
+       timer_setup(&rc->bow.timer, uwb_rsv_backoff_win_timer, 0);
 
        bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
 }
index 353c0555a1f5dbd6d7e884ca24544ff4226550f4..91326ce093a78fd43572f6c6ad39581c24bb241c 100644 (file)
@@ -329,7 +329,7 @@ void uwb_rsv_put(struct uwb_rsv *rsv);
 bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv);
 void uwb_rsv_dump(char *text, struct uwb_rsv *rsv);
 int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available);
-void uwb_rsv_backoff_win_timer(unsigned long arg);
+void uwb_rsv_backoff_win_timer(struct timer_list *t);
 void uwb_rsv_backoff_win_increment(struct uwb_rc *rc);
 int uwb_rsv_status(struct uwb_rsv *rsv);
 int uwb_rsv_companion_status(struct uwb_rsv *rsv);
index 8d626d7c2e7e79db8d243278e805c96ad563bb3d..c7bdeb6556469efb93e2a6a7e742da3a37ad7e69 100644 (file)
@@ -778,16 +778,6 @@ static void handle_rx(struct vhost_net *net)
                /* On error, stop handling until the next kick. */
                if (unlikely(headcount < 0))
                        goto out;
-               if (nvq->rx_array)
-                       msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
-               /* On overrun, truncate and discard */
-               if (unlikely(headcount > UIO_MAXIOV)) {
-                       iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
-                       err = sock->ops->recvmsg(sock, &msg,
-                                                1, MSG_DONTWAIT | MSG_TRUNC);
-                       pr_debug("Discarded rx packet: len %zd\n", sock_len);
-                       continue;
-               }
                /* OK, now we need to know about added descriptors. */
                if (!headcount) {
                        if (unlikely(vhost_enable_notify(&net->dev, vq))) {
@@ -800,6 +790,16 @@ static void handle_rx(struct vhost_net *net)
                         * they refilled. */
                        goto out;
                }
+               if (nvq->rx_array)
+                       msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
+               /* On overrun, truncate and discard */
+               if (unlikely(headcount > UIO_MAXIOV)) {
+                       iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
+                       err = sock->ops->recvmsg(sock, &msg,
+                                                1, MSG_DONTWAIT | MSG_TRUNC);
+                       pr_debug("Discarded rx packet: len %zd\n", sock_len);
+                       continue;
+               }
                /* We don't need to be notified again. */
                iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
                fixup = msg.msg_iter;
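
The two handle_rx() hunks above move the rx_array buffer consumption and the overrun discard below the headcount checks, so bailing out on an error or on an empty ring no longer consumes a buffer that is never used. A rough ordering sketch; all names below are made up, not the vhost API:

#define MAX_IOV 1024

struct ring;
int  get_rx_bufs(struct ring *r);	/* <0 error, 0 empty, >0 descriptors */
void *consume_buffer(struct ring *r);	/* takes one pre-posted buffer */
void discard_packet(struct ring *r);
void receive_packet(struct ring *r, void *buf, int headcount);

static void rx_loop(struct ring *r)
{
	for (;;) {
		int headcount = get_rx_bufs(r);

		if (headcount < 0)
			break;		/* error: stop until the next kick */
		if (headcount == 0)
			break;		/* ring empty: re-enable notify, stop */

		/*
		 * Only now is it safe to take a buffer.  Before the fix the
		 * buffer was consumed ahead of these checks, so every
		 * error/empty iteration ate one buffer for nothing.
		 */
		void *buf = consume_buffer(r);

		if (headcount > MAX_IOV) {
			discard_packet(r);	/* overrun: truncate and drop */
			continue;
		}

		receive_packet(r, buf, headcount);
	}
}
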
index 48230a5e12f262b67d28d87adc713f462e8ec5fc..bf7ff3934d7fff5169e5252cd8fc0a29ea25a133 100644 (file)
@@ -333,6 +333,8 @@ int register_virtio_device(struct virtio_device *dev)
        /* device_register() causes the bus infrastructure to look for a
         * matching driver. */
        err = device_register(&dev->dev);
+       if (err)
+               ida_simple_remove(&virtio_index_ida, dev->index);
 out:
        if (err)
                virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
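
register_virtio_device() takes an index from virtio_index_ida before registering the device, and the added line gives that index back when device_register() fails. The same release-what-you-took error-path pattern in a generic sketch (hypothetical names, not the virtio code):

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);		/* hypothetical IDA */

static int example_register(struct device *dev)
{
	int index, err;

	index = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
	if (index < 0)
		return index;

	err = device_register(dev);
	if (err) {
		/* Without this the index leaks on every failed registration. */
		ida_simple_remove(&example_ida, index);
		/* device_register() failures still need put_device(). */
		put_device(dev);
		return err;
	}

	return 0;
}
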
index 7960746f759788d545b9e85e384a56cbf99a7606..a1fb52cb3f0ab5c0f066d3d773a82c73665f54da 100644 (file)
@@ -174,13 +174,12 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
        while ((page = balloon_page_pop(&pages))) {
                balloon_page_enqueue(&vb->vb_dev_info, page);
 
-               vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
-
                set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
                vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
                if (!virtio_has_feature(vb->vdev,
                                        VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
                        adjust_managed_page_count(page, -1);
+               vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
        }
 
        num_allocated_pages = vb->num_pfns;
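
The reordering above makes set_page_pfns() fill the pfn entries at the pre-increment offset; bumping vb->num_pfns first made it write one page's worth of entries past the slot that was actually reserved. A userspace illustration of the same fill-then-publish ordering, with made-up names:

#include <stdio.h>

#define ENTRIES_PER_ITEM 4
#define MAX_ENTRIES      16

static unsigned int entries[MAX_ENTRIES];
static unsigned int num_entries;

static void add_item(unsigned int base)
{
	unsigned int i;

	/* Fill the slots for this item at the current offset... */
	for (i = 0; i < ENTRIES_PER_ITEM; i++)
		entries[num_entries + i] = base + i;

	/* ...and only then advance the count.  Incrementing first would make
	 * the loop above write past this item's slots and leave holes. */
	num_entries += ENTRIES_PER_ITEM;
}

int main(void)
{
	add_item(100);
	add_item(200);
	printf("first entry of second item: %u\n", entries[ENTRIES_PER_ITEM]);
	return 0;
}
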
index 74dc7170fd351e02d1732357b0705f66c507f7ce..c92131edfabaad76355036f2afe13adbadf0b63d 100644 (file)
@@ -493,7 +493,16 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
 };
 
 
-static void virtio_mmio_release_dev_empty(struct device *_d) {}
+static void virtio_mmio_release_dev(struct device *_d)
+{
+       struct virtio_device *vdev =
+                       container_of(_d, struct virtio_device, dev);
+       struct virtio_mmio_device *vm_dev =
+                       container_of(vdev, struct virtio_mmio_device, vdev);
+       struct platform_device *pdev = vm_dev->pdev;
+
+       devm_kfree(&pdev->dev, vm_dev);
+}
 
 /* Platform device */
 
@@ -514,10 +523,10 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 
        vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
        if (!vm_dev)
-               return  -ENOMEM;
+               return -ENOMEM;
 
        vm_dev->vdev.dev.parent = &pdev->dev;
-       vm_dev->vdev.dev.release = virtio_mmio_release_dev_empty;
+       vm_dev->vdev.dev.release = virtio_mmio_release_dev;
        vm_dev->vdev.config = &virtio_mmio_config_ops;
        vm_dev->pdev = pdev;
        INIT_LIST_HEAD(&vm_dev->virtqueues);
@@ -573,13 +582,16 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, vm_dev);
 
-       return register_virtio_device(&vm_dev->vdev);
+       rc = register_virtio_device(&vm_dev->vdev);
+       if (rc)
+               put_device(&vm_dev->vdev.dev);
+
+       return rc;
 }
 
 static int virtio_mmio_remove(struct platform_device *pdev)
 {
        struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
-
        unregister_virtio_device(&vm_dev->vdev);
 
        return 0;
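
Two related fixes here: the empty release callback is replaced by one that really frees the vm_dev allocation, and a failed register_virtio_device() now drops the reference with put_device() so that release path runs. A generic sketch of the release/put_device pattern (hypothetical wrapper, not the virtio-mmio code):

#include <linux/device.h>
#include <linux/slab.h>

struct my_wrapper {			/* hypothetical wrapper */
	struct device dev;
};

static void my_release(struct device *dev)
{
	struct my_wrapper *w = container_of(dev, struct my_wrapper, dev);

	kfree(w);			/* last reference gone: free the memory */
}

static int my_create(void)
{
	struct my_wrapper *w = kzalloc(sizeof(*w), GFP_KERNEL);
	int err;

	if (!w)
		return -ENOMEM;

	device_initialize(&w->dev);
	w->dev.release = my_release;	/* never leave this as an empty stub */
	dev_set_name(&w->dev, "my-wrapper");

	err = device_add(&w->dev);
	if (err) {
		/* Drop the reference; my_release() frees the allocation. */
		put_device(&w->dev);
		return err;
	}
	return 0;
}
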
index 18e896eeca62352f3c5e2c73479aab506c064958..12f7ea62dddd85eee3bca19e5335e3981c23e0d7 100644 (file)
@@ -70,7 +70,7 @@ module_param(use_gpio, int, 0);
 MODULE_PARM_DESC(use_gpio,
                "Use the gpio watchdog (required by old cobalt boards).");
 
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
 static DEFINE_TIMER(timer, wdt_timer_ping);
 static unsigned long next_heartbeat;
 static unsigned long wdt_is_open;
@@ -87,7 +87,7 @@ MODULE_PARM_DESC(nowayout,
  *     Whack the dog
  */
 
-static void wdt_timer_ping(unsigned long unused)
+static void wdt_timer_ping(struct timer_list *unused)
 {
        /* If we got a heartbeat pulse within the WDT_US_INTERVAL
         * we agree to ping the WDT
index 7e6acaf3ece495ac9056bcd74532c994feb8ba0f..88c05d0448b2f937494b1c1bd42be6fc3a998192 100644 (file)
@@ -120,9 +120,9 @@ static inline void at91_wdt_reset(struct at91wdt *wdt)
 /*
  * Timer tick
  */
-static void at91_ping(unsigned long data)
+static void at91_ping(struct timer_list *t)
 {
-       struct at91wdt *wdt = (struct at91wdt *)data;
+       struct at91wdt *wdt = from_timer(wdt, t, timer);
        if (time_before(jiffies, wdt->next_heartbeat) ||
            !watchdog_active(&wdt->wdd)) {
                at91_wdt_reset(wdt);
@@ -222,7 +222,7 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
                         "watchdog already configured differently (mr = %x expecting %x)\n",
                         tmp & wdt->mr_mask, wdt->mr & wdt->mr_mask);
 
-       setup_timer(&wdt->timer, at91_ping, (unsigned long)wdt);
+       timer_setup(&wdt->timer, at91_ping, 0);
 
        /*
         * Use min_heartbeat the first time to avoid spurious watchdog reset:
index 236582809336bf7e50eb9269331ec457b41a9e9d..f41b756d6dd552722d0643ab9bd9336da97fd054 100644 (file)
@@ -106,9 +106,9 @@ static const struct watchdog_ops bcm47xx_wdt_hard_ops = {
        .restart        = bcm47xx_wdt_restart,
 };
 
-static void bcm47xx_wdt_soft_timer_tick(unsigned long data)
+static void bcm47xx_wdt_soft_timer_tick(struct timer_list *t)
 {
-       struct bcm47xx_wdt *wdt = (struct bcm47xx_wdt *)data;
+       struct bcm47xx_wdt *wdt = from_timer(wdt, t, soft_timer);
        u32 next_tick = min(wdt->wdd.timeout * 1000, wdt->max_timer_ms);
 
        if (!atomic_dec_and_test(&wdt->soft_ticks)) {
@@ -133,7 +133,7 @@ static int bcm47xx_wdt_soft_start(struct watchdog_device *wdd)
        struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
 
        bcm47xx_wdt_soft_keepalive(wdd);
-       bcm47xx_wdt_soft_timer_tick((unsigned long)wdt);
+       bcm47xx_wdt_soft_timer_tick(&wdt->soft_timer);
 
        return 0;
 }
@@ -190,8 +190,7 @@ static int bcm47xx_wdt_probe(struct platform_device *pdev)
 
        if (soft) {
                wdt->wdd.ops = &bcm47xx_wdt_soft_ops;
-               setup_timer(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick,
-                           (long unsigned int)wdt);
+               timer_setup(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick, 0);
        } else {
                wdt->wdd.ops = &bcm47xx_wdt_hard_ops;
        }
index ab26fd90729ec664a4fb2caf8a79582ee0108769..8555afc70f9bf37552e8e1c76575edb7f97e93ad 100644 (file)
@@ -77,7 +77,7 @@ static void bcm63xx_wdt_isr(void *data)
        die(PFX " fire", regs);
 }
 
-static void bcm63xx_timer_tick(unsigned long unused)
+static void bcm63xx_timer_tick(struct timer_list *unused)
 {
        if (!atomic_dec_and_test(&bcm63xx_wdt_device.ticks)) {
                bcm63xx_wdt_hw_start();
@@ -240,7 +240,7 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev)
        int ret;
        struct resource *r;
 
-       setup_timer(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0L);
+       timer_setup(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
index 6c3f78e45c265da4b6f9cfefdb8af16a96b30874..6cfb102c397c9454697d7e6b301b779e4afdcd37 100644 (file)
@@ -69,7 +69,7 @@ static struct {
 
 /* generic helper functions */
 
-static void cpu5wdt_trigger(unsigned long unused)
+static void cpu5wdt_trigger(struct timer_list *unused)
 {
        if (verbose > 2)
                pr_debug("trigger at %i ticks\n", ticks);
@@ -224,7 +224,7 @@ static int cpu5wdt_init(void)
 
        init_completion(&cpu5wdt_device.stop);
        cpu5wdt_device.queue = 0;
-       setup_timer(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
+       timer_setup(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
        cpu5wdt_device.default_ticks = ticks;
 
        if (!request_region(port, CPU5WDT_EXTENT, PFX)) {
index 8a616a57bb90441cc946c2d4d25c71dd5ae8f544..88d823d87a4b3895e4a139f9579ae6b08359f8dd 100644 (file)
@@ -121,7 +121,7 @@ module_param(action, int, 0);
 MODULE_PARM_DESC(action, "after watchdog resets, generate: "
                                "0 = RESET(*)  1 = SMI  2 = NMI  3 = SCI");
 
-static void zf_ping(unsigned long data);
+static void zf_ping(struct timer_list *unused);
 
 static int zf_action = GEN_RESET;
 static unsigned long zf_is_open;
@@ -237,7 +237,7 @@ static void zf_timer_on(void)
 }
 
 
-static void zf_ping(unsigned long data)
+static void zf_ping(struct timer_list *unused)
 {
        unsigned int ctrl_reg = 0;
        unsigned long flags;
index c9e38096ea91fff0bcfed82f8d8058ff0bb7be17..3cc07447c6558b3c311c6295249fa4eaab447583 100644 (file)
@@ -99,7 +99,7 @@ static struct {
        {0x0000, 0},
 };
 
-static void mixcomwd_timerfun(unsigned long d);
+static void mixcomwd_timerfun(struct timer_list *unused);
 
 static unsigned long mixcomwd_opened; /* long req'd for setbit --RR */
 
@@ -120,7 +120,7 @@ static void mixcomwd_ping(void)
        return;
 }
 
-static void mixcomwd_timerfun(unsigned long d)
+static void mixcomwd_timerfun(struct timer_list *unused)
 {
        mixcomwd_ping();
        mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
index 366e5c7e650bfd4b3da3fddcebd9bc9ab86a23ac..6610e9217dbc237a7e5352df3cb9587a6770be67 100644 (file)
@@ -80,9 +80,9 @@ static void mpc8xxx_wdt_keepalive(struct mpc8xxx_wdt_ddata *ddata)
        spin_unlock(&ddata->lock);
 }
 
-static void mpc8xxx_wdt_timer_ping(unsigned long arg)
+static void mpc8xxx_wdt_timer_ping(struct timer_list *t)
 {
-       struct mpc8xxx_wdt_ddata *ddata = (void *)arg;
+       struct mpc8xxx_wdt_ddata *ddata = from_timer(ddata, t, timer);
 
        mpc8xxx_wdt_keepalive(ddata);
        /* We're pinging it twice faster than needed, just to be sure. */
@@ -173,8 +173,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
        }
 
        spin_lock_init(&ddata->lock);
-       setup_timer(&ddata->timer, mpc8xxx_wdt_timer_ping,
-                   (unsigned long)ddata);
+       timer_setup(&ddata->timer, mpc8xxx_wdt_timer_ping, 0);
 
        ddata->wdd.info = &mpc8xxx_wdt_info,
        ddata->wdd.ops = &mpc8xxx_wdt_ops,
index ff27c4ac96e442dadec4129ecb98931ddc2ca150..ca360d204548fbd1f13a8750a9bbe6d091f9eaf4 100644 (file)
@@ -68,7 +68,7 @@ static struct {
        unsigned int gstate;
 } mtx1_wdt_device;
 
-static void mtx1_wdt_trigger(unsigned long unused)
+static void mtx1_wdt_trigger(struct timer_list *unused)
 {
        spin_lock(&mtx1_wdt_device.lock);
        if (mtx1_wdt_device.running)
@@ -219,7 +219,7 @@ static int mtx1_wdt_probe(struct platform_device *pdev)
        init_completion(&mtx1_wdt_device.stop);
        mtx1_wdt_device.queue = 0;
        clear_bit(0, &mtx1_wdt_device.inuse);
-       setup_timer(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0L);
+       timer_setup(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0);
        mtx1_wdt_device.default_ticks = ticks;
 
        ret = misc_register(&mtx1_wdt_misc);
index d5bed78c4d9fbbec03ec2268e391676b1996d8ca..830bd04ff911f4f6ebd00bdadd9ff727405a5eeb 100644 (file)
@@ -216,7 +216,7 @@ static ssize_t nuc900_wdt_write(struct file *file, const char __user *data,
        return len;
 }
 
-static void nuc900_wdt_timer_ping(unsigned long data)
+static void nuc900_wdt_timer_ping(struct timer_list *unused)
 {
        if (time_before(jiffies, nuc900_wdt->next_heartbeat)) {
                nuc900_wdt_keepalive();
@@ -267,7 +267,7 @@ static int nuc900wdt_probe(struct platform_device *pdev)
 
        clk_enable(nuc900_wdt->wdt_clock);
 
-       setup_timer(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
+       timer_setup(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
 
        ret = misc_register(&nuc900wdt_miscdev);
        if (ret) {
index 3ad5206d79357e8862078e16426895811aa3d87c..b72ce68eacd3daa577c4a5fe2156e19e52545290 100644 (file)
@@ -367,7 +367,7 @@ static void pcwd_show_card_info(void)
                pr_info("No previous trip detected - Cold boot or reset\n");
 }
 
-static void pcwd_timer_ping(unsigned long data)
+static void pcwd_timer_ping(struct timer_list *unused)
 {
        int wdrst_stat;
 
@@ -893,7 +893,7 @@ static int pcwd_isa_probe(struct device *dev, unsigned int id)
        /* clear the "card caused reboot" flag */
        pcwd_clear_status();
 
-       setup_timer(&pcwd_private.timer, pcwd_timer_ping, 0);
+       timer_setup(&pcwd_private.timer, pcwd_timer_ping, 0);
 
        /*  Disable the board  */
        pcwd_stop();
index e35cf5e87907c3f98520ab0440d6088e6976f803..e0a6f8c0f03cde84a32e5744a1f3ee24fd7cbd00 100644 (file)
@@ -85,7 +85,7 @@ static inline void pikawdt_reset(void)
 /*
  * Timer tick
  */
-static void pikawdt_ping(unsigned long data)
+static void pikawdt_ping(struct timer_list *unused)
 {
        if (time_before(jiffies, pikawdt_private.next_heartbeat) ||
                        (!nowayout && !pikawdt_private.open)) {
@@ -269,7 +269,7 @@ static int __init pikawdt_init(void)
 
        iounmap(fpga);
 
-       setup_timer(&pikawdt_private.timer, pikawdt_ping, 0);
+       timer_setup(&pikawdt_private.timer, pikawdt_ping, 0);
 
        ret = misc_register(&pikawdt_miscdev);
        if (ret) {
index 47a8f1b1087d4f5a310ee9f01b67bce406251666..a281aa84bfb1402ff2f7567289902a2589ad223f 100644 (file)
@@ -67,7 +67,7 @@ static struct {
 
 /* generic helper functions */
 
-static void rdc321x_wdt_trigger(unsigned long unused)
+static void rdc321x_wdt_trigger(struct timer_list *unused)
 {
        unsigned long flags;
        u32 val;
@@ -262,7 +262,7 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
 
        clear_bit(0, &rdc321x_wdt_device.inuse);
 
-       setup_timer(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
+       timer_setup(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
 
        rdc321x_wdt_device.default_ticks = ticks;
 
index 8d589939bc8447b7cb116ba8b3ff70bdb8f207b6..87333a41f75384ca7bd4ff3b5c66bad5602e5dd8 100644 (file)
@@ -112,7 +112,7 @@ MODULE_PARM_DESC(nowayout,
        "Watchdog cannot be stopped once started (default="
                                __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
 static DEFINE_TIMER(timer, wdt_timer_ping);
 static unsigned long next_heartbeat;
 static unsigned long wdt_is_open;
@@ -122,7 +122,7 @@ static char wdt_expect_close;
  *     Whack the dog
  */
 
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
 {
        /* If we got a heartbeat pulse within the WDT_US_INTERVAL
         * we agree to ping the WDT
index 3e9bbaa37bf46ac872b51da65fc4ed31ee1b9f2e..6aadb56e7faaa7161b8f32d8fd45a573e989e4a2 100644 (file)
@@ -123,7 +123,7 @@ MODULE_PARM_DESC(nowayout,
 
 static __u16 __iomem *wdtmrctl;
 
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
 static DEFINE_TIMER(timer, wdt_timer_ping);
 static unsigned long next_heartbeat;
 static unsigned long wdt_is_open;
@@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(wdt_spinlock);
  *     Whack the dog
  */
 
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
 {
        /* If we got a heartbeat pulse within the WDT_US_INTERVAL
         * we agree to ping the WDT
index 517a733175ef84c8f8a7be3f79d8b97a3b6eed9e..a7d6425db807ff9d95c91aa24be73bbdfa15a9bd 100644 (file)
@@ -175,9 +175,9 @@ static int sh_wdt_set_heartbeat(struct watchdog_device *wdt_dev, unsigned t)
        return 0;
 }
 
-static void sh_wdt_ping(unsigned long data)
+static void sh_wdt_ping(struct timer_list *t)
 {
-       struct sh_wdt *wdt = (struct sh_wdt *)data;
+       struct sh_wdt *wdt = from_timer(wdt, t, timer);
        unsigned long flags;
 
        spin_lock_irqsave(&wdt->lock, flags);
@@ -275,7 +275,7 @@ static int sh_wdt_probe(struct platform_device *pdev)
                return rc;
        }
 
-       setup_timer(&wdt->timer, sh_wdt_ping, (unsigned long)wdt);
+       timer_setup(&wdt->timer, sh_wdt_ping, 0);
        wdt->timer.expires      = next_ping_period(clock_division_ratio);
 
        dev_info(&pdev->dev, "initialized.\n");
index ad3c3be13b40981d5a4ddecbccfbbfb262071e71..b085ef1084ec4f99d9d4fd2364fe4f35c0b05230 100644 (file)
@@ -67,7 +67,7 @@ static struct watchdog_device wdt_dev;
 static struct resource wdt_res;
 static void __iomem *wdt_mem;
 static unsigned int mmio;
-static void wdt_timer_tick(unsigned long data);
+static void wdt_timer_tick(struct timer_list *unused);
 static DEFINE_TIMER(timer, wdt_timer_tick);
                                        /* The timer that pings the watchdog */
 static unsigned long next_heartbeat;   /* the next_heartbeat for the timer */
@@ -88,7 +88,7 @@ static inline void wdt_reset(void)
  *     then the external/userspace heartbeat).
  *  2) the watchdog timer has been stopped by userspace.
  */
-static void wdt_timer_tick(unsigned long data)
+static void wdt_timer_tick(struct timer_list *unused)
 {
        if (time_before(jiffies, next_heartbeat) ||
           (!watchdog_active(&wdt_dev))) {
index ba6b680af1000ebe795c0522691675494b28795a..05658ecc0aa4cdc87875ec05aba6eed23b4f1551 100644 (file)
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(nowayout,
                "Watchdog cannot be stopped once started (default="
                                __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
 static DEFINE_TIMER(timer, wdt_timer_ping);
 static unsigned long next_heartbeat;
 static unsigned long wdt_is_open;
@@ -108,7 +108,7 @@ static DEFINE_SPINLOCK(wdt_spinlock);
  *     Whack the dog
  */
 
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
 {
        /* If we got a heartbeat pulse within the WDT_US_INTERVAL
         * we agree to ping the WDT
index d8dd54678ab7100d32435fd68368531d98317375..e5d0c28372ea178a3177114694c7327f34089d24 100644 (file)
@@ -269,7 +269,7 @@ config XEN_ACPI_HOTPLUG_CPU
 
 config XEN_ACPI_PROCESSOR
        tristate "Xen ACPI processor"
-       depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
+       depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
        default m
        help
           This ACPI processor uploads Power Management information to the Xen
index 139e018a82b06079231fae25abc9d6dcc58da300..f45114fd8e1e76cf1359e3446acd5a9426791db1 100644 (file)
@@ -358,10 +358,10 @@ struct deferred_entry {
        struct page *page;
 };
 static LIST_HEAD(deferred_list);
-static void gnttab_handle_deferred(unsigned long);
+static void gnttab_handle_deferred(struct timer_list *);
 static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
 
-static void gnttab_handle_deferred(unsigned long unused)
+static void gnttab_handle_deferred(struct timer_list *unused)
 {
        unsigned int nr = 10;
        struct deferred_entry *first = NULL;
index 40caa92bff33deaebf1bf01aac44571453660c0f..d1e1d8d2b9d545b00eb87085b6f941f04900cd74 100644 (file)
@@ -1103,7 +1103,7 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
                        kfree(map);
                }
        }
-       if (bedata->ref >= 0)
+       if (bedata->ref != -1)
                gnttab_end_foreign_access(bedata->ref, 0, 0);
        kfree(bedata->ring.sring);
        kfree(bedata);
@@ -1128,6 +1128,8 @@ static int pvcalls_front_probe(struct xenbus_device *dev,
        }
 
        versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
+       if (IS_ERR(versions))
+               return PTR_ERR(versions);
        if (!len)
                return -EINVAL;
        if (strcmp(versions, "1")) {
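
xenbus_read() returns an ERR_PTR() on failure, and the old code only looked at len, so a failed read could reach strcmp(). A small sketch of the ERR_PTR convention the added check relies on (the read_prop callback is a stand-in, made up for illustration):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/* read_prop stands in for an allocating read that may fail and return
 * ERR_PTR(-errno) instead of a valid string. */
static int check_version_string(char *(*read_prop)(const char *name))
{
	char *versions = read_prop("versions");
	int ret = 0;

	if (IS_ERR(versions))
		return PTR_ERR(versions);	/* never strcmp() an error pointer */

	if (strcmp(versions, "1") != 0)
		ret = -EINVAL;

	kfree(versions);
	return ret;
}
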
index 168094a3fae7ebb109c1ead9fe3b8c3e38201444..29641383e136024ec9f2993b1a29a7fc9c2fa94b 100644 (file)
@@ -59,6 +59,3 @@ endif
 
 targets := $(patsubst $(obj)/%,%, \
                                 $(shell find $(obj) -name \*.gen.S 2>/dev/null))
-# Without this, built-in.o won't be created when it's empty, and the
-# final vmlinux link will fail.
-obj- := dummy
index 8b75463cb2116895b4a44b28bf1faab7f1db8f71..af03c2a901eb4fe0518d0fbbd60359f48357ab64 100644 (file)
@@ -94,13 +94,13 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
        if (v9ses->cache)
                sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE;
 
-       sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
+       sb->s_flags |= SB_ACTIVE | SB_DIRSYNC | SB_NOATIME;
        if (!v9ses->cache)
-               sb->s_flags |= MS_SYNCHRONOUS;
+               sb->s_flags |= SB_SYNCHRONOUS;
 
 #ifdef CONFIG_9P_FS_POSIX_ACL
        if ((v9ses->flags & V9FS_ACL_MASK) == V9FS_POSIX_ACL)
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
 #endif
 
        return 0;
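
This hunk and the adfs/affs hunks that follow are a mechanical rename from the userspace-facing MS_* mount flags to the kernel-internal SB_* superblock flags on sb->s_flags. A tiny sketch of the post-rename usage (illustrative only):

#include <linux/fs.h>

/* Mark a superblock read-only and no-diratime with the renamed flags. */
static bool example_mark_readonly(struct super_block *sb)
{
	sb->s_flags |= SB_RDONLY | SB_NODIRATIME;

	return sb_rdonly(sb);		/* helper that tests SB_RDONLY */
}
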
index c9fdfb11293357f1d79c0d729bc4d5db1118509d..cfda2c7caedcec8b53d738f7c93924a723515370 100644 (file)
@@ -213,7 +213,7 @@ static int parse_options(struct super_block *sb, char *options)
 static int adfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
+       *flags |= SB_NODIRATIME;
        return parse_options(sb, data);
 }
 
@@ -372,7 +372,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
        struct inode *root;
        int ret = -EINVAL;
 
-       sb->s_flags |= MS_NODIRATIME;
+       sb->s_flags |= SB_NODIRATIME;
 
        asb = kzalloc(sizeof(*asb), GFP_KERNEL);
        if (!asb)
index 185d5ab7e986af489612d795185d91e86a22ace3..0f0e6925e97dd123cc03f991f33f998599e57e5c 100644 (file)
@@ -453,7 +453,7 @@ affs_error(struct super_block *sb, const char *function, const char *fmt, ...)
        pr_crit("error (device %s): %s(): %pV\n", sb->s_id, function, &vaf);
        if (!sb_rdonly(sb))
                pr_warn("Remounting filesystem read-only\n");
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        va_end(args);
 }
 
index 2b1399611d9e6595151a578724c4bc6d17996ff2..5ba9ef2742f6ee5e903b3df04c5f627fc4fc00ae 100644 (file)
@@ -250,12 +250,12 @@ int affs_init_bitmap(struct super_block *sb, int *flags)
        int i, res = 0;
        struct affs_sb_info *sbi = AFFS_SB(sb);
 
-       if (*flags & MS_RDONLY)
+       if (*flags & SB_RDONLY)
                return 0;
 
        if (!AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->bm_flag) {
                pr_notice("Bitmap invalid - mounting %s read only\n", sb->s_id);
-               *flags |= MS_RDONLY;
+               *flags |= SB_RDONLY;
                return 0;
        }
 
@@ -288,7 +288,7 @@ int affs_init_bitmap(struct super_block *sb, int *flags)
                if (affs_checksum_block(sb, bh)) {
                        pr_warn("Bitmap %u invalid - mounting %s read only.\n",
                                bm->bm_key, sb->s_id);
-                       *flags |= MS_RDONLY;
+                       *flags |= SB_RDONLY;
                        goto out;
                }
                pr_debug("read bitmap block %d: %d\n", blk, bm->bm_key);
index 884bedab7266a528b60884c7ab3d91e9a703724d..1117e36134cc82e5127de496240f6f99b541b0a4 100644 (file)
@@ -356,7 +356,7 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_magic             = AFFS_SUPER_MAGIC;
        sb->s_op                = &affs_sops;
-       sb->s_flags |= MS_NODIRATIME;
+       sb->s_flags |= SB_NODIRATIME;
 
        sbi = kzalloc(sizeof(struct affs_sb_info), GFP_KERNEL);
        if (!sbi)
@@ -466,7 +466,7 @@ got_root:
        if ((chksum == FS_DCFFS || chksum == MUFS_DCFFS || chksum == FS_DCOFS
             || chksum == MUFS_DCOFS) && !sb_rdonly(sb)) {
                pr_notice("Dircache FS - mounting %s read only\n", sb->s_id);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        switch (chksum) {
        case MUFS_FS:
@@ -488,7 +488,7 @@ got_root:
                /* fall thru */
        case FS_OFS:
                affs_set_opt(sbi->s_flags, SF_OFS);
-               sb->s_flags |= MS_NOEXEC;
+               sb->s_flags |= SB_NOEXEC;
                break;
        case MUFS_DCOFS:
        case MUFS_INTLOFS:
@@ -497,7 +497,7 @@ got_root:
        case FS_INTLOFS:
                affs_set_opt(sbi->s_flags, SF_INTL);
                affs_set_opt(sbi->s_flags, SF_OFS);
-               sb->s_flags |= MS_NOEXEC;
+               sb->s_flags |= SB_NOEXEC;
                break;
        default:
                pr_err("Unknown filesystem on device %s: %08X\n",
@@ -513,7 +513,7 @@ got_root:
                        sig, sig[3] + '0', blocksize);
        }
 
-       sb->s_flags |= MS_NODEV | MS_NOSUID;
+       sb->s_flags |= SB_NODEV | SB_NOSUID;
 
        sbi->s_data_blksize = sb->s_blocksize;
        if (affs_test_opt(sbi->s_flags, SF_OFS))
@@ -570,7 +570,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
        pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
 
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
+       *flags |= SB_NODIRATIME;
 
        memcpy(volume, sbi->s_volume, 32);
        if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
@@ -596,10 +596,10 @@ affs_remount(struct super_block *sb, int *flags, char *data)
        memcpy(sbi->s_volume, volume, 32);
        spin_unlock(&sbi->symlink_lock);
 
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                return 0;
 
-       if (*flags & MS_RDONLY)
+       if (*flags & SB_RDONLY)
                affs_free_bitmap(sb);
        else
                res = affs_init_bitmap(sb, flags);
index 1858c91169e4fc213e77b548628386041790b0c0..9bb921d120d0f8b42bd1e68b508af1f285bbcdbc 100644 (file)
@@ -207,13 +207,8 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
                rcu_read_lock();
                cell = afs_lookup_cell_rcu(net, name, namesz);
                rcu_read_unlock();
-               if (!IS_ERR(cell)) {
-                       if (excl) {
-                               afs_put_cell(net, cell);
-                               return ERR_PTR(-EEXIST);
-                       }
+               if (!IS_ERR(cell))
                        goto wait_for_cell;
-               }
        }
 
        /* Assume we're probably going to create a cell and preallocate and
index ab618d32554c648848b6001a3964b279deb794dc..ff8d5bf4354f306227a297ca542078580ca57e34 100644 (file)
@@ -765,6 +765,8 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
        if (fc->ac.error < 0)
                return;
 
+       d_drop(new_dentry);
+
        inode = afs_iget(fc->vnode->vfs_inode.i_sb, fc->key,
                         newfid, newstatus, newcb, fc->cbi);
        if (IS_ERR(inode)) {
@@ -775,9 +777,7 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
                return;
        }
 
-       d_instantiate(new_dentry, inode);
-       if (d_unhashed(new_dentry))
-               d_rehash(new_dentry);
+       d_add(new_dentry, inode);
 }
 
 /*
@@ -818,6 +818,8 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
                ret = afs_end_vnode_operation(&fc);
                if (ret < 0)
                        goto error_key;
+       } else {
+               goto error_key;
        }
 
        key_put(key);
@@ -972,7 +974,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        struct afs_fs_cursor fc;
        struct afs_file_status newstatus;
        struct afs_callback newcb;
-       struct afs_vnode *dvnode = dvnode = AFS_FS_I(dir);
+       struct afs_vnode *dvnode = AFS_FS_I(dir);
        struct afs_fid newfid;
        struct key *key;
        int ret;
@@ -1006,6 +1008,8 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
                ret = afs_end_vnode_operation(&fc);
                if (ret < 0)
                        goto error_key;
+       } else {
+               goto error_key;
        }
 
        key_put(key);
@@ -1053,7 +1057,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
        if (afs_begin_vnode_operation(&fc, dvnode, key)) {
                if (mutex_lock_interruptible_nested(&vnode->io_lock, 1) < 0) {
                        afs_end_vnode_operation(&fc);
-                       return -ERESTARTSYS;
+                       goto error_key;
                }
 
                while (afs_select_fileserver(&fc)) {
@@ -1071,6 +1075,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
                ret = afs_end_vnode_operation(&fc);
                if (ret < 0)
                        goto error_key;
+       } else {
+               goto error_key;
        }
 
        key_put(key);
@@ -1130,6 +1136,8 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
                ret = afs_end_vnode_operation(&fc);
                if (ret < 0)
                        goto error_key;
+       } else {
+               goto error_key;
        }
 
        key_put(key);
@@ -1180,7 +1188,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
                if (orig_dvnode != new_dvnode) {
                        if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
                                afs_end_vnode_operation(&fc);
-                               return -ERESTARTSYS;
+                               goto error_key;
                        }
                }
                while (afs_select_fileserver(&fc)) {
@@ -1199,14 +1207,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        goto error_key;
        }
 
-       key_put(key);
-       _leave(" = 0");
-       return 0;
-
 error_key:
        key_put(key);
 error:
-       d_drop(new_dentry);
        _leave(" = %d", ret);
        return ret;
 }
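
The dir.c changes drop the dentry before the new inode is looked up and only d_add() it on success, and they route every failure, including a failed afs_begin_vnode_operation(), through error_key so the key reference is always put. A minimal sketch of the d_drop()/d_add() part (hypothetical helper; IS_ERR-style error passing assumed):

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>

static void example_finish_create(struct dentry *dentry, struct inode *inode)
{
	/* Unhash first, so a concurrent lookup can't see a half-built entry
	 * and nothing dangles if the operation fails. */
	d_drop(dentry);

	if (IS_ERR_OR_NULL(inode))
		return;			/* leave the dentry dropped on failure */

	/* d_add() = d_instantiate() + d_rehash() in one step. */
	d_add(dentry, inode);
}
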
index 7571a5dfd5a35cbd674ddf24671029a8b6b522d1..c40ba2fe3cbeee50d1b529d5e1e9d2960633f995 100644 (file)
@@ -170,7 +170,7 @@ void afs_lock_work(struct work_struct *work)
 {
        struct afs_vnode *vnode =
                container_of(work, struct afs_vnode, lock_work.work);
-       struct file_lock *fl;
+       struct file_lock *fl, *next;
        afs_lock_type_t type;
        struct key *key;
        int ret;
@@ -179,117 +179,136 @@ void afs_lock_work(struct work_struct *work)
 
        spin_lock(&vnode->lock);
 
-       if (test_bit(AFS_VNODE_UNLOCKING, &vnode->flags)) {
+again:
+       _debug("wstate %u for %p", vnode->lock_state, vnode);
+       switch (vnode->lock_state) {
+       case AFS_VNODE_LOCK_NEED_UNLOCK:
                _debug("unlock");
+               vnode->lock_state = AFS_VNODE_LOCK_UNLOCKING;
                spin_unlock(&vnode->lock);
 
                /* attempt to release the server lock; if it fails, we just
-                * wait 5 minutes and it'll time out anyway */
-               ret = afs_release_lock(vnode, vnode->unlock_key);
+                * wait 5 minutes and it'll expire anyway */
+               ret = afs_release_lock(vnode, vnode->lock_key);
                if (ret < 0)
                        printk(KERN_WARNING "AFS:"
                               " Failed to release lock on {%x:%x} error %d\n",
                               vnode->fid.vid, vnode->fid.vnode, ret);
 
                spin_lock(&vnode->lock);
-               key_put(vnode->unlock_key);
-               vnode->unlock_key = NULL;
-               clear_bit(AFS_VNODE_UNLOCKING, &vnode->flags);
-       }
+               key_put(vnode->lock_key);
+               vnode->lock_key = NULL;
+               vnode->lock_state = AFS_VNODE_LOCK_NONE;
+
+               if (list_empty(&vnode->pending_locks)) {
+                       spin_unlock(&vnode->lock);
+                       return;
+               }
+
+               /* The new front of the queue now owns the state variables. */
+               next = list_entry(vnode->pending_locks.next,
+                                 struct file_lock, fl_u.afs.link);
+               vnode->lock_key = afs_file_key(next->fl_file);
+               vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+               vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
+               goto again;
 
-       /* if we've got a lock, then it must be time to extend that lock as AFS
-        * locks time out after 5 minutes */
-       if (!list_empty(&vnode->granted_locks)) {
+       /* If we've already got a lock, then it must be time to extend that
+        * lock as AFS locks time out after 5 minutes.
+        */
+       case AFS_VNODE_LOCK_GRANTED:
                _debug("extend");
 
-               if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
-                       BUG();
-               fl = list_entry(vnode->granted_locks.next,
-                               struct file_lock, fl_u.afs.link);
-               key = key_get(afs_file_key(fl->fl_file));
+               ASSERT(!list_empty(&vnode->granted_locks));
+
+               key = key_get(vnode->lock_key);
+               vnode->lock_state = AFS_VNODE_LOCK_EXTENDING;
                spin_unlock(&vnode->lock);
 
-               ret = afs_extend_lock(vnode, key);
-               clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
+               ret = afs_extend_lock(vnode, key); /* RPC */
                key_put(key);
-               switch (ret) {
-               case 0:
+
+               if (ret < 0)
+                       pr_warning("AFS: Failed to extend lock on {%x:%x} error %d\n",
+                                  vnode->fid.vid, vnode->fid.vnode, ret);
+
+               spin_lock(&vnode->lock);
+
+               if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
+                       goto again;
+               vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
+
+               if (ret == 0)
                        afs_schedule_lock_extension(vnode);
-                       break;
-               default:
-                       /* ummm... we failed to extend the lock - retry
-                        * extension shortly */
-                       printk(KERN_WARNING "AFS:"
-                              " Failed to extend lock on {%x:%x} error %d\n",
-                              vnode->fid.vid, vnode->fid.vnode, ret);
+               else
                        queue_delayed_work(afs_lock_manager, &vnode->lock_work,
                                           HZ * 10);
-                       break;
-               }
-               _leave(" [extend]");
+               spin_unlock(&vnode->lock);
+               _leave(" [ext]");
                return;
-       }
 
-       /* if we don't have a granted lock, then we must've been called back by
-        * the server, and so if might be possible to get a lock we're
-        * currently waiting for */
-       if (!list_empty(&vnode->pending_locks)) {
+               /* If we don't have a granted lock, then we must've been called
+                * back by the server, and so it might be possible to get a
+                * lock we're currently waiting for.
+                */
+       case AFS_VNODE_LOCK_WAITING_FOR_CB:
                _debug("get");
 
-               if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
-                       BUG();
-               fl = list_entry(vnode->pending_locks.next,
-                               struct file_lock, fl_u.afs.link);
-               key = key_get(afs_file_key(fl->fl_file));
-               type = (fl->fl_type == F_RDLCK) ?
-                       AFS_LOCK_READ : AFS_LOCK_WRITE;
+               key = key_get(vnode->lock_key);
+               type = vnode->lock_type;
+               vnode->lock_state = AFS_VNODE_LOCK_SETTING;
                spin_unlock(&vnode->lock);
 
-               ret = afs_set_lock(vnode, key, type);
-               clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
+               ret = afs_set_lock(vnode, key, type); /* RPC */
+               key_put(key);
+
+               spin_lock(&vnode->lock);
                switch (ret) {
                case -EWOULDBLOCK:
                        _debug("blocked");
                        break;
                case 0:
                        _debug("acquired");
-                       if (type == AFS_LOCK_READ)
-                               set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
-                       else
-                               set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
-                       ret = AFS_LOCK_GRANTED;
+                       vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
+                       /* Fall through */
                default:
-                       spin_lock(&vnode->lock);
-                       /* the pending lock may have been withdrawn due to a
-                        * signal */
-                       if (list_entry(vnode->pending_locks.next,
-                                      struct file_lock, fl_u.afs.link) == fl) {
-                               fl->fl_u.afs.state = ret;
-                               if (ret == AFS_LOCK_GRANTED)
-                                       afs_grant_locks(vnode, fl);
-                               else
-                                       list_del_init(&fl->fl_u.afs.link);
-                               wake_up(&fl->fl_wait);
-                               spin_unlock(&vnode->lock);
-                       } else {
+                       /* Pass the lock or the error onto the first locker in
+                        * the list - if they're looking for this type of lock.
+                        * If they're not, we assume that whoever asked for it
+                        * took a signal.
+                        */
+                       if (list_empty(&vnode->pending_locks)) {
                                _debug("withdrawn");
-                               clear_bit(AFS_VNODE_READLOCKED, &vnode->flags);
-                               clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
-                               spin_unlock(&vnode->lock);
-                               afs_release_lock(vnode, key);
-                               if (!list_empty(&vnode->pending_locks))
-                                       afs_lock_may_be_available(vnode);
+                               vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+                               goto again;
                        }
-                       break;
+
+                       fl = list_entry(vnode->pending_locks.next,
+                                       struct file_lock, fl_u.afs.link);
+                       type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+                       if (vnode->lock_type != type) {
+                               _debug("changed");
+                               vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+                               goto again;
+                       }
+
+                       fl->fl_u.afs.state = ret;
+                       if (ret == 0)
+                               afs_grant_locks(vnode, fl);
+                       else
+                               list_del_init(&fl->fl_u.afs.link);
+                       wake_up(&fl->fl_wait);
+                       spin_unlock(&vnode->lock);
+                       _leave(" [granted]");
+                       return;
                }
-               key_put(key);
-               _leave(" [pend]");
+
+       default:
+               /* Looks like a lock request was withdrawn. */
+               spin_unlock(&vnode->lock);
+               _leave(" [no]");
                return;
        }
-
-       /* looks like the lock request was withdrawn on a signal */
-       spin_unlock(&vnode->lock);
-       _leave(" [no locks]");
 }
 
 /*
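
The rewritten afs_lock_work() replaces the old AFS_VNODE_{LOCKING,READLOCKED,WRITELOCKED,UNLOCKING} bits with a single vnode->lock_state, with vnode->lock_key and vnode->lock_type owned by the lock at the front of the queue. The states visible in this hunk, summarised as a sketch (example names; the real enum lives in the AFS headers, which are not part of this diff):

enum example_lock_state {
	EXAMPLE_LOCK_NONE,		/* nothing held, nothing pending */
	EXAMPLE_LOCK_SETTING,		/* afs_set_lock() RPC in flight */
	EXAMPLE_LOCK_WAITING_FOR_CB,	/* blocked; waiting for a callback break */
	EXAMPLE_LOCK_GRANTED,		/* server lock held */
	EXAMPLE_LOCK_EXTENDING,		/* afs_extend_lock() RPC in flight */
	EXAMPLE_LOCK_NEED_UNLOCK,	/* last local holder gone; must release */
	EXAMPLE_LOCK_UNLOCKING,		/* afs_release_lock() RPC in flight */
};
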
@@ -298,15 +317,105 @@ void afs_lock_work(struct work_struct *work)
  * AF_RXRPC
  * - the caller must hold the vnode lock
  */
-static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key)
+static void afs_defer_unlock(struct afs_vnode *vnode)
 {
-       cancel_delayed_work(&vnode->lock_work);
-       if (!test_and_clear_bit(AFS_VNODE_READLOCKED, &vnode->flags) &&
-           !test_and_clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags))
-               BUG();
-       if (test_and_set_bit(AFS_VNODE_UNLOCKING, &vnode->flags))
-               BUG();
-       vnode->unlock_key = key_get(key);
+       _enter("");
+
+       if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
+           vnode->lock_state == AFS_VNODE_LOCK_EXTENDING) {
+               cancel_delayed_work(&vnode->lock_work);
+
+               vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+               afs_lock_may_be_available(vnode);
+       }
+}
+
+/*
+ * Check that our view of the file metadata is up to date and check to see
+ * whether we think that we have a locking permit.
+ */
+static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
+                             afs_lock_type_t type, bool can_sleep)
+{
+       afs_access_t access;
+       int ret;
+
+       /* Make sure we've got a callback on this file and that our view of the
+        * data version is up to date.
+        */
+       ret = afs_validate(vnode, key);
+       if (ret < 0)
+               return ret;
+
+       /* Check the permission set to see if we're actually going to be
+        * allowed to get a lock on this file.
+        */
+       ret = afs_check_permit(vnode, key, &access);
+       if (ret < 0)
+               return ret;
+
+       /* At a rough estimation, you need LOCK, WRITE or INSERT perm to
+        * read-lock a file and WRITE or INSERT perm to write-lock a file.
+        *
+        * We can't rely on the server to do this for us since if we want to
+        * share a read lock that we already have, we won't go to the server.
+        */
+       if (type == AFS_LOCK_READ) {
+               if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE | AFS_ACE_LOCK)))
+                       return -EACCES;
+               if (vnode->status.lock_count == -1 && !can_sleep)
+                       return -EAGAIN; /* Write locked */
+       } else {
+               if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE)))
+                       return -EACCES;
+               if (vnode->status.lock_count != 0 && !can_sleep)
+                       return -EAGAIN; /* Locked */
+       }
+
+       return 0;
+}
+
+/*
+ * Remove the front runner from the pending queue.
+ * - The caller must hold vnode->lock.
+ */
+static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
+{
+       struct file_lock *next;
+
+       _enter("");
+
+       /* ->lock_type, ->lock_key and ->lock_state only belong to this
+        * file_lock if we're at the front of the pending queue or if we have
+        * the lock granted or if the lock_state is NEED_UNLOCK or UNLOCKING.
+        */
+       if (vnode->granted_locks.next == &fl->fl_u.afs.link &&
+           vnode->granted_locks.prev == &fl->fl_u.afs.link) {
+               list_del_init(&fl->fl_u.afs.link);
+               afs_defer_unlock(vnode);
+               return;
+       }
+
+       if (!list_empty(&vnode->granted_locks) ||
+           vnode->pending_locks.next != &fl->fl_u.afs.link) {
+               list_del_init(&fl->fl_u.afs.link);
+               return;
+       }
+
+       list_del_init(&fl->fl_u.afs.link);
+       key_put(vnode->lock_key);
+       vnode->lock_key = NULL;
+       vnode->lock_state = AFS_VNODE_LOCK_NONE;
+
+       if (list_empty(&vnode->pending_locks))
+               return;
+
+       /* The new front of the queue now owns the state variables. */
+       next = list_entry(vnode->pending_locks.next,
+                         struct file_lock, fl_u.afs.link);
+       vnode->lock_key = afs_file_key(next->fl_file);
+       vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+       vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
        afs_lock_may_be_available(vnode);
 }
 
@@ -315,7 +424,7 @@ static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key)
  */
 static int afs_do_setlk(struct file *file, struct file_lock *fl)
 {
-       struct inode *inode = file_inode(file);
+       struct inode *inode = locks_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);
        afs_lock_type_t type;
        struct key *key = afs_file_key(file);
@@ -333,165 +442,136 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
 
        type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
 
-       spin_lock(&inode->i_lock);
-
-       /* make sure we've got a callback on this file and that our view of the
-        * data version is up to date */
-       ret = afs_validate(vnode, key);
+       ret = afs_do_setlk_check(vnode, key, type, fl->fl_flags & FL_SLEEP);
        if (ret < 0)
-               goto error;
-
-       if (vnode->status.lock_count != 0 && !(fl->fl_flags & FL_SLEEP)) {
-               ret = -EAGAIN;
-               goto error;
-       }
+               return ret;
 
        spin_lock(&vnode->lock);
 
-       /* if we've already got a readlock on the server then we can instantly
+       /* If we've already got a readlock on the server then we instantly
         * grant another readlock, irrespective of whether there are any
-        * pending writelocks */
+        * pending writelocks.
+        */
        if (type == AFS_LOCK_READ &&
-           vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
+           vnode->lock_state == AFS_VNODE_LOCK_GRANTED &&
+           vnode->lock_type == AFS_LOCK_READ) {
                _debug("instant readlock");
-               ASSERTCMP(vnode->flags &
-                         ((1 << AFS_VNODE_LOCKING) |
-                          (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
                ASSERT(!list_empty(&vnode->granted_locks));
-               goto sharing_existing_lock;
+               goto share_existing_lock;
        }
 
-       /* if there's no-one else with a lock on this vnode, then we need to
-        * ask the server for a lock */
-       if (list_empty(&vnode->pending_locks) &&
-           list_empty(&vnode->granted_locks)) {
-               _debug("not locked");
-               ASSERTCMP(vnode->flags &
-                         ((1 << AFS_VNODE_LOCKING) |
-                          (1 << AFS_VNODE_READLOCKED) |
-                          (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
-               list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
-               set_bit(AFS_VNODE_LOCKING, &vnode->flags);
-               spin_unlock(&vnode->lock);
+       list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
 
-               ret = afs_set_lock(vnode, key, type);
-               clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
-               switch (ret) {
-               case 0:
-                       _debug("acquired");
-                       goto acquired_server_lock;
-               case -EWOULDBLOCK:
-                       _debug("would block");
-                       spin_lock(&vnode->lock);
-                       ASSERT(list_empty(&vnode->granted_locks));
-                       ASSERTCMP(vnode->pending_locks.next, ==,
-                                 &fl->fl_u.afs.link);
-                       goto wait;
-               default:
-                       spin_lock(&vnode->lock);
-                       list_del_init(&fl->fl_u.afs.link);
-                       spin_unlock(&vnode->lock);
-                       goto error;
-               }
-       }
+       if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
+               goto need_to_wait;
 
-       /* otherwise, we need to wait for a local lock to become available */
-       _debug("wait local");
-       list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
-wait:
-       if (!(fl->fl_flags & FL_SLEEP)) {
-               _debug("noblock");
-               ret = -EAGAIN;
-               goto abort_attempt;
-       }
+       /* We don't have a lock on this vnode and we aren't currently waiting
+        * for one either, so ask the server for a lock.
+        *
+        * Note that we need to be careful if we get interrupted by a signal
+        * after dispatching the request as we may still get the lock, even
+        * though we don't wait for the reply (it's not too bad a problem - the
+        * lock will expire in 10 mins anyway).
+        */
+       _debug("not locked");
+       vnode->lock_key = key_get(key);
+       vnode->lock_type = type;
+       vnode->lock_state = AFS_VNODE_LOCK_SETTING;
        spin_unlock(&vnode->lock);
 
-       /* now we need to sleep and wait for the lock manager thread to get the
-        * lock from the server */
-       _debug("sleep");
-       ret = wait_event_interruptible(fl->fl_wait,
-                                      fl->fl_u.afs.state <= AFS_LOCK_GRANTED);
-       if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
-               ret = fl->fl_u.afs.state;
-               if (ret < 0)
-                       goto error;
-               spin_lock(&vnode->lock);
-               goto given_lock;
-       }
-
-       /* we were interrupted, but someone may still be in the throes of
-        * giving us the lock */
-       _debug("intr");
-       ASSERTCMP(ret, ==, -ERESTARTSYS);
+       ret = afs_set_lock(vnode, key, type); /* RPC */
 
        spin_lock(&vnode->lock);
-       if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
-               ret = fl->fl_u.afs.state;
-               if (ret < 0) {
-                       spin_unlock(&vnode->lock);
-                       goto error;
-               }
-               goto given_lock;
-       }
+       switch (ret) {
+       default:
+               goto abort_attempt;
 
-abort_attempt:
-       /* we aren't going to get the lock, either because we're unwilling to
-        * wait, or because some signal happened */
-       _debug("abort");
-       if (list_empty(&vnode->granted_locks) &&
-           vnode->pending_locks.next == &fl->fl_u.afs.link) {
-               if (vnode->pending_locks.prev != &fl->fl_u.afs.link) {
-                       /* kick the next pending lock into having a go */
-                       list_del_init(&fl->fl_u.afs.link);
-                       afs_lock_may_be_available(vnode);
-               }
-       } else {
-               list_del_init(&fl->fl_u.afs.link);
+       case -EWOULDBLOCK:
+               /* The server doesn't have a lock-waiting queue, so the client
+                * will have to retry.  The server will break the outstanding
+                * callbacks on a file when a lock is released.
+                */
+               _debug("would block");
+               ASSERT(list_empty(&vnode->granted_locks));
+               ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
+               vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
+               goto need_to_wait;
+
+       case 0:
+               _debug("acquired");
+               break;
        }
-       spin_unlock(&vnode->lock);
-       goto error;
 
-acquired_server_lock:
        /* we've acquired a server lock, but it needs to be renewed after 5
         * mins */
-       spin_lock(&vnode->lock);
+       vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
        afs_schedule_lock_extension(vnode);
-       if (type == AFS_LOCK_READ)
-               set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
-       else
-               set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
-sharing_existing_lock:
+
+share_existing_lock:
        /* the lock has been granted as far as we're concerned... */
        fl->fl_u.afs.state = AFS_LOCK_GRANTED;
        list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
+
 given_lock:
        /* ... but we do still need to get the VFS's blessing */
-       ASSERT(!(vnode->flags & (1 << AFS_VNODE_LOCKING)));
-       ASSERT((vnode->flags & ((1 << AFS_VNODE_READLOCKED) |
-                               (1 << AFS_VNODE_WRITELOCKED))) != 0);
+       spin_unlock(&vnode->lock);
+
        ret = posix_lock_file(file, fl, NULL);
        if (ret < 0)
                goto vfs_rejected_lock;
-       spin_unlock(&vnode->lock);
 
-       /* again, make sure we've got a callback on this file and, again, make
+       /* Again, make sure we've got a callback on this file and, again, make
         * sure that our view of the data version is up to date (we ignore
-        * errors incurred here and deal with the consequences elsewhere) */
+        * errors incurred here and deal with the consequences elsewhere).
+        */
        afs_validate(vnode, key);
+       _leave(" = 0");
+       return 0;
 
-error:
-       spin_unlock(&inode->i_lock);
+need_to_wait:
+       /* We're going to have to wait.  Either this client doesn't have a lock
+        * on the server yet and we need to wait for a callback to occur, or
+        * the client does have a lock on the server, but it belongs to some
+        * other process(es) and is incompatible with the lock we want.
+        */
+       ret = -EAGAIN;
+       if (fl->fl_flags & FL_SLEEP) {
+               spin_unlock(&vnode->lock);
+
+               _debug("sleep");
+               ret = wait_event_interruptible(fl->fl_wait,
+                                              fl->fl_u.afs.state != AFS_LOCK_PENDING);
+
+               spin_lock(&vnode->lock);
+       }
+
+       if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
+               goto given_lock;
+       if (fl->fl_u.afs.state < 0)
+               ret = fl->fl_u.afs.state;
+
+abort_attempt:
+       /* we aren't going to get the lock, either because we're unwilling to
+        * wait, or because some signal happened */
+       _debug("abort");
+       afs_dequeue_lock(vnode, fl);
+
+error_unlock:
+       spin_unlock(&vnode->lock);
        _leave(" = %d", ret);
        return ret;
 
 vfs_rejected_lock:
-       /* the VFS rejected the lock we just obtained, so we have to discard
-        * what we just got */
+       /* The VFS rejected the lock we just obtained, so we have to discard
+        * what we just got.  We defer this to the lock manager work item to
+        * deal with.
+        */
        _debug("vfs refused %d", ret);
+       spin_lock(&vnode->lock);
        list_del_init(&fl->fl_u.afs.link);
        if (list_empty(&vnode->granted_locks))
-               afs_defer_unlock(vnode, key);
-       goto abort_attempt;
+               afs_defer_unlock(vnode);
+       goto error_unlock;
 }
 
 /*
@@ -499,34 +579,21 @@ vfs_rejected_lock:
  */
 static int afs_do_unlk(struct file *file, struct file_lock *fl)
 {
-       struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
-       struct key *key = afs_file_key(file);
+       struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
        int ret;
 
        _enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
 
+       /* Flush all pending writes before doing anything with locks. */
+       vfs_fsync(file, 0);
+
        /* only whole-file unlocks are supported */
        if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
                return -EINVAL;
 
-       fl->fl_ops = &afs_lock_ops;
-       INIT_LIST_HEAD(&fl->fl_u.afs.link);
-       fl->fl_u.afs.state = AFS_LOCK_PENDING;
-
-       spin_lock(&vnode->lock);
        ret = posix_lock_file(file, fl, NULL);
-       if (ret < 0) {
-               spin_unlock(&vnode->lock);
-               _leave(" = %d [vfs]", ret);
-               return ret;
-       }
-
-       /* discard the server lock only if all granted locks are gone */
-       if (list_empty(&vnode->granted_locks))
-               afs_defer_unlock(vnode, key);
-       spin_unlock(&vnode->lock);
-       _leave(" = 0");
-       return 0;
+       _leave(" = %d [%u]", ret, vnode->lock_state);
+       return ret;
 }
 
 /*
@@ -534,7 +601,7 @@ static int afs_do_unlk(struct file *file, struct file_lock *fl)
  */
 static int afs_do_getlk(struct file *file, struct file_lock *fl)
 {
-       struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
+       struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
        struct key *key = afs_file_key(file);
        int ret, lock_count;
 
@@ -542,29 +609,25 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl)
 
        fl->fl_type = F_UNLCK;
 
-       inode_lock(&vnode->vfs_inode);
-
        /* check local lock records first */
-       ret = 0;
        posix_test_lock(file, fl);
        if (fl->fl_type == F_UNLCK) {
                /* no local locks; consult the server */
                ret = afs_fetch_status(vnode, key);
                if (ret < 0)
                        goto error;
-               lock_count = vnode->status.lock_count;
-               if (lock_count) {
-                       if (lock_count > 0)
-                               fl->fl_type = F_RDLCK;
-                       else
-                               fl->fl_type = F_WRLCK;
-                       fl->fl_start = 0;
-                       fl->fl_end = OFFSET_MAX;
-               }
+
+               lock_count = READ_ONCE(vnode->status.lock_count);
+               if (lock_count > 0)
+                       fl->fl_type = F_RDLCK;
+               else
+                       fl->fl_type = F_WRLCK;
+               fl->fl_start = 0;
+               fl->fl_end = OFFSET_MAX;
        }
 
+       ret = 0;
 error:
-       inode_unlock(&vnode->vfs_inode);
        _leave(" = %d [%hd]", ret, fl->fl_type);
        return ret;
 }
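For F_GETLK with no local lock held, the new afs_do_getlk() consults the server's lock_count and reports a read lock for positive counts and a write lock otherwise. A tiny sketch of that mapping, assuming the same convention; the helper name is made up:

/* Illustrative sketch, not kernel code: server lock_count to F_GETLK type. */
#include <fcntl.h>
#include <stdio.h>

/* Positive counts mean the file is read-locked that many times on the
 * server; anything else is treated as write-locked by the code above. */
static short lock_type_from_count(int lock_count)
{
        return lock_count > 0 ? F_RDLCK : F_WRLCK;
}

int main(void)
{
        printf("%d %d\n", lock_type_from_count(3), lock_type_from_count(-1));
        return 0;
}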
@@ -574,7 +637,7 @@ error:
  */
 int afs_lock(struct file *file, int cmd, struct file_lock *fl)
 {
-       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+       struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
 
        _enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
               vnode->fid.vid, vnode->fid.vnode, cmd,
@@ -597,7 +660,7 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl)
  */
 int afs_flock(struct file *file, int cmd, struct file_lock *fl)
 {
-       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+       struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
 
        _enter("{%x:%u},%d,{t=%x,fl=%x}",
               vnode->fid.vid, vnode->fid.vnode, cmd,
@@ -627,9 +690,13 @@ int afs_flock(struct file *file, int cmd, struct file_lock *fl)
  */
 static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
 {
+       struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));
+
        _enter("");
 
+       spin_lock(&vnode->lock);
        list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
+       spin_unlock(&vnode->lock);
 }
 
 /*
@@ -638,7 +705,12 @@ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
  */
 static void afs_fl_release_private(struct file_lock *fl)
 {
+       struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));
+
        _enter("");
 
-       list_del_init(&fl->fl_u.afs.link);
+       spin_lock(&vnode->lock);
+       afs_dequeue_lock(vnode, fl);
+       _debug("state %u for %p", vnode->lock_state, vnode);
+       spin_unlock(&vnode->lock);
 }
index bd8dcee7e066719439d5decb0d970e6142d167eb..804d1f905622075ab27feecf7f2c959be9c1ac01 100644 (file)
@@ -430,8 +430,21 @@ struct afs_volume {
        u8                      name[AFS_MAXVOLNAME + 1]; /* NUL-padded volume name */
 };
 
+enum afs_lock_state {
+       AFS_VNODE_LOCK_NONE,            /* The vnode has no lock on the server */
+       AFS_VNODE_LOCK_WAITING_FOR_CB,  /* We're waiting for the server to break the callback */
+       AFS_VNODE_LOCK_SETTING,         /* We're asking the server for a lock */
+       AFS_VNODE_LOCK_GRANTED,         /* We have a lock on the server */
+       AFS_VNODE_LOCK_EXTENDING,       /* We're extending a lock on the server */
+       AFS_VNODE_LOCK_NEED_UNLOCK,     /* We need to unlock on the server */
+       AFS_VNODE_LOCK_UNLOCKING,       /* We're telling the server to unlock */
+};
+
 /*
- * AFS inode private data
+ * AFS inode private data.
+ *
+ * Note that afs_alloc_inode() *must* reset anything that could incorrectly
+ * leak from one inode to another.
  */
 struct afs_vnode {
        struct inode            vfs_inode;      /* the VFS's inode record */
@@ -454,18 +467,16 @@ struct afs_vnode {
 #define AFS_VNODE_ZAP_DATA     3               /* set if vnode's data should be invalidated */
 #define AFS_VNODE_DELETED      4               /* set if vnode deleted on server */
 #define AFS_VNODE_MOUNTPOINT   5               /* set if vnode is a mountpoint symlink */
-#define AFS_VNODE_LOCKING      6               /* set if waiting for lock on vnode */
-#define AFS_VNODE_READLOCKED   7               /* set if vnode is read-locked on the server */
-#define AFS_VNODE_WRITELOCKED  8               /* set if vnode is write-locked on the server */
-#define AFS_VNODE_UNLOCKING    9               /* set if vnode is being unlocked on the server */
-#define AFS_VNODE_AUTOCELL     10              /* set if Vnode is an auto mount point */
-#define AFS_VNODE_PSEUDODIR    11              /* set if Vnode is a pseudo directory */
+#define AFS_VNODE_AUTOCELL     6               /* set if Vnode is an auto mount point */
+#define AFS_VNODE_PSEUDODIR    7               /* set if Vnode is a pseudo directory */
 
        struct list_head        wb_keys;        /* List of keys available for writeback */
        struct list_head        pending_locks;  /* locks waiting to be granted */
        struct list_head        granted_locks;  /* locks granted on this file */
        struct delayed_work     lock_work;      /* work to be done in locking */
-       struct key              *unlock_key;    /* key to be used in unlocking */
+       struct key              *lock_key;      /* Key to be used in lock ops */
+       enum afs_lock_state     lock_state : 8;
+       afs_lock_type_t         lock_type : 8;
 
        /* outstanding callback notification on this file */
        struct afs_cb_interest  *cb_interest;   /* Server on which this resides */
@@ -843,6 +854,7 @@ extern void afs_clear_permits(struct afs_vnode *);
 extern void afs_cache_permit(struct afs_vnode *, struct key *, unsigned int);
 extern void afs_zap_permits(struct rcu_head *);
 extern struct key *afs_request_key(struct afs_cell *);
+extern int afs_check_permit(struct afs_vnode *, struct key *, afs_access_t *);
 extern int afs_permission(struct inode *, int);
 extern void __exit afs_clean_up_permit_cache(void);
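The struct afs_vnode changes above store the new lock_state enum and the lock_type as 8-bit bitfields, which keeps them from widening the structure. A small stand-alone sketch of the same packing trick, with stand-in names; enum bitfields are implementation-defined in standard C but accepted by GCC and Clang, which is what the kernel relies on:

/* Illustrative sketch, not kernel code: packing a small enum into 8 bits. */
#include <stdio.h>

enum demo_lock_state { DEMO_NONE, DEMO_WAITING, DEMO_SETTING, DEMO_GRANTED };

struct demo_vnode {
        enum demo_lock_state    lock_state : 8;  /* fits in one byte */
        unsigned int            lock_type : 8;
};

int main(void)
{
        struct demo_vnode v = { .lock_state = DEMO_GRANTED, .lock_type = 1 };

        printf("state=%u type=%u size=%zu\n",
               (unsigned int)v.lock_state, v.lock_type, sizeof(v));
        return 0;
}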
 
index e728ca1776c9b36640dd8ad69178e5c0d07a0a29..d04511fb3879748cf7e6e98c5c53a807f90007ba 100644 (file)
@@ -46,8 +46,7 @@ bool afs_begin_vnode_operation(struct afs_fs_cursor *fc, struct afs_vnode *vnode
                return false;
        }
 
-       if (test_bit(AFS_VNODE_READLOCKED, &vnode->flags) ||
-           test_bit(AFS_VNODE_WRITELOCKED, &vnode->flags))
+       if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
                fc->flags |= AFS_FS_CURSOR_CUR_ONLY;
        return true;
 }
@@ -117,7 +116,7 @@ static void afs_busy(struct afs_volume *volume, u32 abort_code)
        case VSALVAGING:        m = "being salvaged";   break;
        default:                m = "busy";             break;
        }
-       
+
        pr_notice("kAFS: Volume %u '%s' is %s\n", volume->vid, volume->name, m);
 }
 
@@ -438,24 +437,67 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
 
        _enter("");
 
-       if (!cbi) {
-               fc->ac.error = -ESTALE;
+       switch (fc->ac.error) {
+       case SHRT_MAX:
+               if (!cbi) {
+                       fc->ac.error = -ESTALE;
+                       fc->flags |= AFS_FS_CURSOR_STOP;
+                       return false;
+               }
+
+               fc->cbi = afs_get_cb_interest(vnode->cb_interest);
+
+               read_lock(&cbi->server->fs_lock);
+               alist = rcu_dereference_protected(cbi->server->addresses,
+                                                 lockdep_is_held(&cbi->server->fs_lock));
+               afs_get_addrlist(alist);
+               read_unlock(&cbi->server->fs_lock);
+               if (!alist) {
+                       fc->ac.error = -ESTALE;
+                       fc->flags |= AFS_FS_CURSOR_STOP;
+                       return false;
+               }
+
+               fc->ac.alist = alist;
+               fc->ac.addr  = NULL;
+               fc->ac.start = READ_ONCE(alist->index);
+               fc->ac.index = fc->ac.start;
+               fc->ac.error = 0;
+               fc->ac.begun = false;
+               goto iterate_address;
+
+       case 0:
+       default:
+               /* Success or local failure.  Stop. */
                fc->flags |= AFS_FS_CURSOR_STOP;
+               _leave(" = f [okay/local %d]", fc->ac.error);
                return false;
-       }
 
-       read_lock(&cbi->server->fs_lock);
-       alist = afs_get_addrlist(cbi->server->addresses);
-       read_unlock(&cbi->server->fs_lock);
-       if (!alist) {
-               fc->ac.error = -ESTALE;
+       case -ECONNABORTED:
                fc->flags |= AFS_FS_CURSOR_STOP;
+               _leave(" = f [abort]");
                return false;
+
+       case -ENETUNREACH:
+       case -EHOSTUNREACH:
+       case -ECONNREFUSED:
+       case -ETIMEDOUT:
+       case -ETIME:
+               _debug("no conn");
+               goto iterate_address;
        }
 
-       fc->ac.alist = alist;
-       fc->ac.error = 0;
-       return true;
+iterate_address:
+       /* Iterate over the current server's address list to try and find an
+        * address on which it will respond to us.
+        */
+       if (afs_iterate_addresses(&fc->ac)) {
+               _leave(" = t");
+               return true;
+       }
+
+       afs_end_cursor(&fc->ac);
+       return false;
 }
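The rewritten afs_select_current_fileserver() classifies fc->ac.error into three outcomes: start iterating addresses (the SHRT_MAX "no call attempted yet" sentinel), rotate to the next address on connection-level failures, or stop on success, an abort, or any other local failure. A compact sketch of that classification, with an illustrative enum standing in for the cursor flags:

/* Illustrative sketch, not kernel code: retry/stop classification. */
#include <errno.h>
#include <limits.h>
#include <stdio.h>

enum next_step { START_ITERATION, TRY_NEXT_ADDRESS, STOP };

static enum next_step classify(int error)
{
        switch (error) {
        case SHRT_MAX:                  /* sentinel: no call attempted yet */
                return START_ITERATION; /* pick up the address list first */
        case -ENETUNREACH:
        case -EHOSTUNREACH:
        case -ECONNREFUSED:
        case -ETIMEDOUT:
        case -ETIME:
                return TRY_NEXT_ADDRESS; /* no contact; rotate the address */
        case -ECONNABORTED:             /* the server sent an abort */
        case 0:                         /* success */
        default:                        /* any other local failure */
                return STOP;
        }
}

int main(void)
{
        printf("%d %d %d\n", classify(SHRT_MAX), classify(-ETIMEDOUT),
               classify(0));
        return 0;
}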
 
 /*
index 46a881a4d08f6a48a2cb463ea332a14709dbeb47..b88b7d45fdaa029dc1239be8b295fe60dfba1f1c 100644 (file)
@@ -120,7 +120,7 @@ static void afs_hash_permits(struct afs_permits *permits)
 void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
                      unsigned int cb_break)
 {
-       struct afs_permits *permits, *xpermits, *replacement, *new = NULL;
+       struct afs_permits *permits, *xpermits, *replacement, *zap, *new = NULL;
        afs_access_t caller_access = READ_ONCE(vnode->status.caller_access);
        size_t size = 0;
        bool changed = false;
@@ -204,7 +204,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
        new = kzalloc(sizeof(struct afs_permits) +
                      sizeof(struct afs_permit) * size, GFP_NOFS);
        if (!new)
-               return;
+               goto out_put;
 
        refcount_set(&new->usage, 1);
        new->nr_permits = size;
@@ -229,8 +229,6 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
 
        afs_hash_permits(new);
 
-       afs_put_permits(permits);
-
        /* Now see if the permit list we want is actually already available */
        spin_lock(&afs_permits_lock);
 
@@ -262,11 +260,15 @@ found:
        kfree(new);
 
        spin_lock(&vnode->lock);
-       if (cb_break != (vnode->cb_break + vnode->cb_interest->server->cb_s_break) ||
-           permits != rcu_access_pointer(vnode->permit_cache))
-               goto someone_else_changed_it_unlock;
-       rcu_assign_pointer(vnode->permit_cache, replacement);
+       zap = rcu_access_pointer(vnode->permit_cache);
+       if (cb_break == (vnode->cb_break + vnode->cb_interest->server->cb_s_break) &&
+           zap == permits)
+               rcu_assign_pointer(vnode->permit_cache, replacement);
+       else
+               zap = replacement;
        spin_unlock(&vnode->lock);
+       afs_put_permits(zap);
+out_put:
        afs_put_permits(permits);
        return;
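The afs_cache_permit() change above installs the freshly built permit list only if neither the callback break counter nor the cached pointer moved while the list was being built; whichever object loses the race is the one that gets released. A sketch of that optimistic-replace pattern with made-up names and no RCU or reference counting:

/* Illustrative sketch, not kernel code: install-if-unchanged replacement. */
#include <stdio.h>

struct cache {
        int generation;
        char *data;
};

/* Returns whichever object lost the race and must now be released: the old
 * cache if the swap happened, or the unused replacement if it did not. */
static char *install_if_unchanged(struct cache *c, int sampled_gen,
                                  char *sampled, char *replacement)
{
        char *zap;

        /* Imagine vnode->lock held around this block. */
        if (c->generation == sampled_gen && c->data == sampled) {
                zap = c->data;          /* won the race: retire the old list */
                c->data = replacement;
        } else {
                zap = replacement;      /* lost the race: discard our build */
        }
        return zap;
}

int main(void)
{
        char old_list[] = "old", new_list[] = "new";
        struct cache c = { .generation = 1, .data = old_list };
        char *zap = install_if_unchanged(&c, 1, old_list, new_list);

        printf("installed=%s dropped=%s\n", c.data, zap);
        return 0;
}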
 
@@ -284,8 +286,8 @@ someone_else_changed_it:
  * permitted to be accessed with this authorisation, and if so, what access it
  * is granted
  */
-static int afs_check_permit(struct afs_vnode *vnode, struct key *key,
-                           afs_access_t *_access)
+int afs_check_permit(struct afs_vnode *vnode, struct key *key,
+                    afs_access_t *_access)
 {
        struct afs_permits *permits;
        bool valid = false;
index 26bad7032bbaec8878941e3f9c492b44e7c58e24..0ab3f84578390e1edebb13c7748ef0c9dc9369c5 100644 (file)
@@ -17,7 +17,7 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
 {
        int i;
 
-       if (refcount_dec_and_test(&slist->usage)) {
+       if (slist && refcount_dec_and_test(&slist->usage)) {
                for (i = 0; i < slist->nr_servers; i++) {
                        afs_put_cb_interest(net, slist->servers[i].cb_interest);
                        afs_put_server(net, slist->servers[i].server);
index 875b5eb02242a0ae29b008b149770f4f6e1d3985..1037dd41a62210a3568c5a5144ffdc97273c20c3 100644 (file)
@@ -496,10 +496,10 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
                if (ret < 0)
                        goto error_sb;
                as = NULL;
-               sb->s_flags |= MS_ACTIVE;
+               sb->s_flags |= SB_ACTIVE;
        } else {
                _debug("reuse");
-               ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
+               ASSERTCMP(sb->s_flags, &, SB_ACTIVE);
                afs_destroy_sbi(as);
                as = NULL;
        }
@@ -536,7 +536,9 @@ static void afs_kill_super(struct super_block *sb)
 }
 
 /*
- * initialise an inode cache slab element prior to any use
+ * Initialise an inode cache slab element prior to any use.  Note that
+ * afs_alloc_inode() *must* reset anything that could incorrectly leak from one
+ * inode to another.
  */
 static void afs_i_init_once(void *_vnode)
 {
@@ -568,11 +570,21 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
 
        atomic_inc(&afs_count_active_inodes);
 
+       /* Reset anything that shouldn't leak from one inode to the next. */
        memset(&vnode->fid, 0, sizeof(vnode->fid));
        memset(&vnode->status, 0, sizeof(vnode->status));
 
        vnode->volume           = NULL;
+       vnode->lock_key         = NULL;
+       vnode->permit_cache     = NULL;
+       vnode->cb_interest      = NULL;
+#ifdef CONFIG_AFS_FSCACHE
+       vnode->cache            = NULL;
+#endif
+
        vnode->flags            = 1 << AFS_VNODE_UNSET;
+       vnode->cb_type          = 0;
+       vnode->lock_state       = AFS_VNODE_LOCK_NONE;
 
        _leave(" = %p", &vnode->vfs_inode);
        return &vnode->vfs_inode;
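afs_alloc_inode() now re-initialises everything that could leak between users of a recycled slab object, because the slab constructor only runs when the object is first created, not on every allocation. A userspace sketch of the same discipline, with a hypothetical pooled object standing in for the inode cache:

/* Illustrative sketch, not kernel code: once-only ctor vs per-alloc reset. */
#include <stdio.h>
#include <string.h>

struct obj {
        int stable_cfg;                 /* set up once by the constructor */
        void *lock_key;                 /* must not leak between users */
        int lock_state;
};

static void ctor(struct obj *o)         /* runs once per pooled object */
{
        memset(o, 0, sizeof(*o));
        o->stable_cfg = 42;
}

static struct obj *alloc_from_pool(struct obj *recycled)
{
        /* Reset anything that shouldn't leak from one user to the next. */
        recycled->lock_key = NULL;
        recycled->lock_state = 0;       /* AFS_VNODE_LOCK_NONE analogue */
        return recycled;
}

int main(void)
{
        struct obj slot;
        struct obj *o;

        ctor(&slot);
        slot.lock_state = 3;            /* first user dirties the object */
        o = alloc_from_pool(&slot);     /* second user must see clean state */
        printf("cfg=%d state=%d\n", o->stable_cfg, o->lock_state);
        return 0;
}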
index 18e46e31523ccc0295a35c058baa88d0ebf960ed..cb5f8a3df5773cba37c292e65ac6e985b0af11bc 100644 (file)
@@ -119,6 +119,11 @@ try_again:
        }
 
        if (f != t) {
+               if (PageWriteback(page)) {
+                       trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
+                                            page->index, priv);
+                       goto flush_conflicting_write;
+               }
                if (to < f || from > t)
                        goto flush_conflicting_write;
                if (from < f)
index d79ced9258614010128dd8f1ed6a0cff2b525f21..82e8f6edfb48d0e8670dd58e3fbdcfb4b5ceb85d 100644 (file)
@@ -281,8 +281,8 @@ static int autofs4_mount_wait(const struct path *path, bool rcu_walk)
                pr_debug("waiting for mount name=%pd\n", path->dentry);
                status = autofs4_wait(sbi, path, NFY_MOUNT);
                pr_debug("mount wait done status=%d\n", status);
-               ino->last_used = jiffies;
        }
+       ino->last_used = jiffies;
        return status;
 }
 
@@ -321,21 +321,16 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path)
         */
        if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
                struct dentry *parent = dentry->d_parent;
+               struct autofs_info *ino;
                struct dentry *new;
 
                new = d_lookup(parent, &dentry->d_name);
                if (!new)
                        return NULL;
-               if (new == dentry)
-                       dput(new);
-               else {
-                       struct autofs_info *ino;
-
-                       ino = autofs4_dentry_ino(new);
-                       ino->last_used = jiffies;
-                       dput(path->dentry);
-                       path->dentry = new;
-               }
+               ino = autofs4_dentry_ino(new);
+               ino->last_used = jiffies;
+               dput(path->dentry);
+               path->dentry = new;
        }
        return path->dentry;
 }
index 8fc41705c7cd50af4c53f71851d8a4136673411b..961a12dc6dc81f369a71c36e62fbd604886ebf2c 100644 (file)
@@ -170,7 +170,6 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 
        mutex_unlock(&sbi->wq_mutex);
 
-       if (autofs4_write(sbi, pipe, &pkt, pktsz))
        switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
        case 0:
                break;
index 75a461cfaca620656b0d03c0e01b2eb4a8792ad6..16f2dfe8c2f742e4264bf60bc96562d5a6e8292d 100644 (file)
@@ -365,7 +365,7 @@ Version 0.4 (2001-10-28)
        (fs/befs/super.c)
 
 * Tell the kernel to only mount befs read-only. 
-       By setting the MS_RDONLY flag in befs_read_super().
+       By setting the SB_RDONLY flag in befs_read_super().
        Not that it was possible to write before. But now the kernel won't even try.
        (fs/befs/super.c)
 
index a92355cc453bf6cb09016e7f30fdba677c44bb48..ee236231cafac001ff6db87f5a94f515540e2adc 100644 (file)
@@ -841,7 +841,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
        if (!sb_rdonly(sb)) {
                befs_warning(sb,
                             "No write support. Marking filesystem read-only");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 
        /*
@@ -948,7 +948,7 @@ static int
 befs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       if (!(*flags & MS_RDONLY))
+       if (!(*flags & SB_RDONLY))
                return -EINVAL;
        return 0;
 }
index b35ce16b3df3c6550a69289077eb922bcd5d76cb..5982c8a71f02fde26b84fc54067dd075a993f8e6 100644 (file)
@@ -295,7 +295,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                 unsigned long len, u64 disk_start,
                                 unsigned long compressed_len,
                                 struct page **compressed_pages,
-                                unsigned long nr_pages)
+                                unsigned long nr_pages,
+                                unsigned int write_flags)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct bio *bio = NULL;
@@ -327,7 +328,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
        bdev = fs_info->fs_devices->latest_bdev;
 
        bio = btrfs_bio_alloc(bdev, first_byte);
-       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+       bio->bi_opf = REQ_OP_WRITE | write_flags;
        bio->bi_private = cb;
        bio->bi_end_io = end_compressed_bio_write;
        refcount_set(&cb->pending_bios, 1);
@@ -374,7 +375,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                        bio_put(bio);
 
                        bio = btrfs_bio_alloc(bdev, first_byte);
-                       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+                       bio->bi_opf = REQ_OP_WRITE | write_flags;
                        bio->bi_private = cb;
                        bio->bi_end_io = end_compressed_bio_write;
                        bio_add_page(bio, page, PAGE_SIZE, 0);
@@ -1528,5 +1529,5 @@ unsigned int btrfs_compress_str2level(const char *str)
        if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
                return str[5] - '0';
 
-       return 0;
+       return BTRFS_ZLIB_DEFAULT_LEVEL;
 }
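btrfs_compress_str2level() now falls back to BTRFS_ZLIB_DEFAULT_LEVEL (3) instead of 0 when a "zlib" mount option carries no valid explicit level. A standalone sketch of the same parsing rule, assuming levels 1-9 and the new default:

/* Illustrative sketch, not kernel code: "zlib:N" parsing with a default. */
#include <stdio.h>
#include <string.h>

#define DEFAULT_ZLIB_LEVEL 3

static unsigned int str2level(const char *str)
{
        if (strncmp(str, "zlib", 4) != 0)
                return 0;
        /* Accept "zlib:1" .. "zlib:9"; anything else uses the default. */
        if (str[4] == ':' && str[5] >= '1' && str[5] <= '9' && str[6] == '\0')
                return str[5] - '0';
        return DEFAULT_ZLIB_LEVEL;
}

int main(void)
{
        printf("%u %u %u\n", str2level("zlib:7"), str2level("zlib"),
               str2level("lzo"));
        return 0;
}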
index da20755ebf2183f1800dce91feab4d10b84793d8..0868cc554f145a7e20b9a0bf281e0990af6ee9e2 100644 (file)
@@ -34,6 +34,8 @@
 /* Maximum size of data before compression */
 #define BTRFS_MAX_UNCOMPRESSED         (SZ_128K)
 
+#define        BTRFS_ZLIB_DEFAULT_LEVEL                3
+
 struct compressed_bio {
        /* number of bios pending for this compressed extent */
        refcount_t pending_bios;
@@ -91,7 +93,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                  unsigned long len, u64 disk_start,
                                  unsigned long compressed_len,
                                  struct page **compressed_pages,
-                                 unsigned long nr_pages);
+                                 unsigned long nr_pages,
+                                 unsigned int write_flags);
 blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags);
 
index 531e0a8645b08a1554b495c0852cc7e53e528770..1e74cf82653271e7d5c7b6cbf930aea09751bc49 100644 (file)
@@ -1032,14 +1032,17 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
                    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
                        ret = btrfs_inc_ref(trans, root, buf, 1);
-                       BUG_ON(ret); /* -ENOMEM */
+                       if (ret)
+                               return ret;
 
                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID) {
                                ret = btrfs_dec_ref(trans, root, buf, 0);
-                               BUG_ON(ret); /* -ENOMEM */
+                               if (ret)
+                                       return ret;
                                ret = btrfs_inc_ref(trans, root, cow, 1);
-                               BUG_ON(ret); /* -ENOMEM */
+                               if (ret)
+                                       return ret;
                        }
                        new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
                } else {
@@ -1049,7 +1052,8 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                        else
                                ret = btrfs_inc_ref(trans, root, cow, 0);
-                       BUG_ON(ret); /* -ENOMEM */
+                       if (ret)
+                               return ret;
                }
                if (new_flags != 0) {
                        int level = btrfs_header_level(buf);
@@ -1068,9 +1072,11 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                        else
                                ret = btrfs_inc_ref(trans, root, cow, 0);
-                       BUG_ON(ret); /* -ENOMEM */
+                       if (ret)
+                               return ret;
                        ret = btrfs_dec_ref(trans, root, buf, 1);
-                       BUG_ON(ret); /* -ENOMEM */
+                       if (ret)
+                               return ret;
                }
                clean_tree_block(fs_info, buf);
                *last_ref = 1;
index f7df5536ab61e1f6de0512328341c5a637d040d3..13c260b525a1282aded755e5bb0141458ec52452 100644 (file)
@@ -2957,7 +2957,7 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
  */
 static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
 {
-       return fs_info->sb->s_flags & MS_RDONLY || btrfs_fs_closing(fs_info);
+       return fs_info->sb->s_flags & SB_RDONLY || btrfs_fs_closing(fs_info);
 }
 
 static inline void free_fs_info(struct btrfs_fs_info *fs_info)
@@ -3180,6 +3180,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
                               int nr);
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+                             unsigned int extra_bits,
                              struct extent_state **cached_state, int dedupe);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
                             struct btrfs_root *new_root,
index efce9a2fa9be09e47f29095b58ff8d2cd355eb12..a8ecccfc36ded1d9470deb0d78f77bcdacb6d0cf 100644 (file)
@@ -610,7 +610,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
         * that we don't try and read the other copies of this block, just
         * return -EIO.
         */
-       if (found_level == 0 && btrfs_check_leaf(root, eb)) {
+       if (found_level == 0 && btrfs_check_leaf_full(root, eb)) {
                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = -EIO;
        }
@@ -3231,6 +3231,7 @@ static int write_dev_supers(struct btrfs_device *device,
        int errors = 0;
        u32 crc;
        u64 bytenr;
+       int op_flags;
 
        if (max_mirrors == 0)
                max_mirrors = BTRFS_SUPER_MIRROR_MAX;
@@ -3273,13 +3274,10 @@ static int write_dev_supers(struct btrfs_device *device,
                 * we fua the first super.  The others we allow
                 * to go down lazy.
                 */
-               if (i == 0) {
-                       ret = btrfsic_submit_bh(REQ_OP_WRITE,
-                               REQ_SYNC | REQ_FUA | REQ_META | REQ_PRIO, bh);
-               } else {
-                       ret = btrfsic_submit_bh(REQ_OP_WRITE,
-                               REQ_SYNC | REQ_META | REQ_PRIO, bh);
-               }
+               op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
+               if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
+                       op_flags |= REQ_FUA;
+               ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
                if (ret)
                        errors++;
        }
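write_dev_supers() now builds the request flags in one place: every superblock copy is written with REQ_SYNC | REQ_META | REQ_PRIO, and only the first copy gets REQ_FUA, and only when barriers are not disabled. A sketch of that computation; the REQ_* values below are invented stand-ins, not the kernel's definitions:

/* Illustrative sketch, not kernel code: FUA only for the primary copy. */
#include <stdbool.h>
#include <stdio.h>

#define REQ_SYNC  0x1
#define REQ_META  0x2
#define REQ_PRIO  0x4
#define REQ_FUA   0x8

static int super_write_flags(int copy_index, bool nobarrier)
{
        int op_flags = REQ_SYNC | REQ_META | REQ_PRIO;

        if (copy_index == 0 && !nobarrier)
                op_flags |= REQ_FUA;    /* force unit access for the primary */
        return op_flags;
}

int main(void)
{
        printf("%#x %#x %#x\n", super_write_flags(0, false),
               super_write_flags(1, false), super_write_flags(0, true));
        return 0;
}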
@@ -3848,7 +3846,13 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
                                         buf->len,
                                         fs_info->dirty_metadata_batch);
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
-       if (btrfs_header_level(buf) == 0 && btrfs_check_leaf(root, buf)) {
+       /*
+        * btrfs_mark_buffer_dirty() can be called with the item pointer set
+        * but the item data not yet updated, so here we should only check the
+        * item pointers, not the item data.
+        */
+       if (btrfs_header_level(buf) == 0 &&
+           btrfs_check_leaf_relaxed(root, buf)) {
                btrfs_print_leaf(buf);
                ASSERT(0);
        }
index 7208ecef70889833ac2caa7d3d5d8b4b634a4ee0..2f4328511ac84e11b8148952b09b25f3b4bb4e9d 100644 (file)
@@ -3502,13 +3502,6 @@ again:
                goto again;
        }
 
-       /* We've already setup this transaction, go ahead and exit */
-       if (block_group->cache_generation == trans->transid &&
-           i_size_read(inode)) {
-               dcs = BTRFS_DC_SETUP;
-               goto out_put;
-       }
-
        /*
         * We want to set the generation to 0, that way if anything goes wrong
         * from here on out we know not to trust this cache when we load up next
@@ -3532,6 +3525,13 @@ again:
        }
        WARN_ON(ret);
 
+       /* We've already setup this transaction, go ahead and exit */
+       if (block_group->cache_generation == trans->transid &&
+           i_size_read(inode)) {
+               dcs = BTRFS_DC_SETUP;
+               goto out_put;
+       }
+
        if (i_size_read(inode) > 0) {
                ret = btrfs_check_trunc_cache_free_space(fs_info,
                                        &fs_info->global_block_rsv);
@@ -9206,6 +9206,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
        ret = btrfs_del_root(trans, fs_info, &root->root_key);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
+               err = ret;
                goto out_end_trans;
        }
 
index 16045ea86fc13ef6858289c4db8cfb8ed5469d7c..012d63870b99acfc180ef0cd05fb337e39730959 100644 (file)
@@ -1984,7 +1984,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
        struct btrfs_bio *bbio = NULL;
        int ret;
 
-       ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
+       ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
        BUG_ON(!mirror_num);
 
        bio = btrfs_io_bio_alloc(1);
@@ -3253,7 +3253,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
                                               delalloc_start,
                                               delalloc_end,
                                               &page_started,
-                                              nr_written);
+                                              nr_written, wbc);
                /* File system has been set read-only */
                if (ret) {
                        SetPageError(page);
index 4a8861379d3ef23ef49dfc1b418bbd22243402de..93dcae0c3183009c3668dfa34b449051c7576719 100644 (file)
@@ -116,7 +116,8 @@ struct extent_io_ops {
         */
        int (*fill_delalloc)(void *private_data, struct page *locked_page,
                             u64 start, u64 end, int *page_started,
-                            unsigned long *nr_written);
+                            unsigned long *nr_written,
+                            struct writeback_control *wbc);
 
        int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
        void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
@@ -365,10 +366,11 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                       struct extent_state **cached_state);
 
 static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
-               u64 end, struct extent_state **cached_state)
+                                     u64 end, unsigned int extra_bits,
+                                     struct extent_state **cached_state)
 {
        return set_extent_bit(tree, start, end,
-                             EXTENT_DELALLOC | EXTENT_UPTODATE,
+                             EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
                              NULL, cached_state, GFP_NOFS);
 }
 
index f80254d82f409bedc91bbef14364726beeea174c..eb1bac7c8553c7a4172735027765bf16619e9d00 100644 (file)
@@ -477,6 +477,47 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
        }
 }
 
+static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
+                                        const u64 start,
+                                        const u64 len,
+                                        struct extent_state **cached_state)
+{
+       u64 search_start = start;
+       const u64 end = start + len - 1;
+
+       while (search_start < end) {
+               const u64 search_len = end - search_start + 1;
+               struct extent_map *em;
+               u64 em_len;
+               int ret = 0;
+
+               em = btrfs_get_extent(inode, NULL, 0, search_start,
+                                     search_len, 0);
+               if (IS_ERR(em))
+                       return PTR_ERR(em);
+
+               if (em->block_start != EXTENT_MAP_HOLE)
+                       goto next;
+
+               em_len = em->len;
+               if (em->start < search_start)
+                       em_len -= search_start - em->start;
+               if (em_len > search_len)
+                       em_len = search_len;
+
+               ret = set_extent_bit(&inode->io_tree, search_start,
+                                    search_start + em_len - 1,
+                                    EXTENT_DELALLOC_NEW,
+                                    NULL, cached_state, GFP_NOFS);
+next:
+               search_start = extent_map_end(em);
+               free_extent_map(em);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
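btrfs_find_new_delalloc_bytes(), moved above its first caller here, walks the extent maps covering the written range and tags only the hole portions, clamped to the search window, with EXTENT_DELALLOC_NEW. A userspace sketch of that clamping walk over a toy mapping table, which stands in for btrfs_get_extent():

/* Illustrative sketch, not kernel code: mark only the hole sub-ranges. */
#include <stdint.h>
#include <stdio.h>

struct mapping {
        uint64_t start;
        uint64_t len;
        int is_hole;
};

static void mark_new_delalloc(const struct mapping *maps, int nr,
                              uint64_t start, uint64_t len)
{
        uint64_t search_start = start;
        const uint64_t end = start + len - 1;

        for (int i = 0; i < nr && search_start < end; i++) {
                const struct mapping *em = &maps[i];
                uint64_t em_len = em->len;

                if (em->start + em->len <= search_start)
                        continue;               /* entirely before the window */
                if (em->start < search_start)
                        em_len -= search_start - em->start;
                if (em_len > end - search_start + 1)
                        em_len = end - search_start + 1;
                if (em->is_hole)
                        printf("mark DELALLOC_NEW on [%llu, %llu]\n",
                               (unsigned long long)search_start,
                               (unsigned long long)(search_start + em_len - 1));
                search_start += em_len;
        }
}

int main(void)
{
        const struct mapping maps[] = {
                { 0, 4096, 0 }, { 4096, 8192, 1 }, { 12288, 4096, 0 },
        };

        mark_new_delalloc(maps, 3, 2048, 12288);
        return 0;
}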
+
 /*
  * after copy_from_user, pages need to be dirtied and we need to make
  * sure holes are created between the current EOF and the start of
@@ -497,14 +538,34 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);
+       unsigned int extra_bits = 0;
 
        start_pos = pos & ~((u64) fs_info->sectorsize - 1);
        num_bytes = round_up(write_bytes + pos - start_pos,
                             fs_info->sectorsize);
 
        end_of_last_block = start_pos + num_bytes - 1;
+
+       if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
+               if (start_pos >= isize &&
+                   !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
+                       /*
+                        * There can't be any extents following eof in this case
+                        * so just set the delalloc new bit for the range
+                        * directly.
+                        */
+                       extra_bits |= EXTENT_DELALLOC_NEW;
+               } else {
+                       err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
+                                                           start_pos,
+                                                           num_bytes, cached);
+                       if (err)
+                               return err;
+               }
+       }
+
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
-                                       cached, 0);
+                                       extra_bits, cached, 0);
        if (err)
                return err;
 
@@ -1404,47 +1465,6 @@ fail:
 
 }
 
-static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
-                                        const u64 start,
-                                        const u64 len,
-                                        struct extent_state **cached_state)
-{
-       u64 search_start = start;
-       const u64 end = start + len - 1;
-
-       while (search_start < end) {
-               const u64 search_len = end - search_start + 1;
-               struct extent_map *em;
-               u64 em_len;
-               int ret = 0;
-
-               em = btrfs_get_extent(inode, NULL, 0, search_start,
-                                     search_len, 0);
-               if (IS_ERR(em))
-                       return PTR_ERR(em);
-
-               if (em->block_start != EXTENT_MAP_HOLE)
-                       goto next;
-
-               em_len = em->len;
-               if (em->start < search_start)
-                       em_len -= search_start - em->start;
-               if (em_len > search_len)
-                       em_len = search_len;
-
-               ret = set_extent_bit(&inode->io_tree, search_start,
-                                    search_start + em_len - 1,
-                                    EXTENT_DELALLOC_NEW,
-                                    NULL, cached_state, GFP_NOFS);
-next:
-               search_start = extent_map_end(em);
-               free_extent_map(em);
-               if (ret)
-                       return ret;
-       }
-       return 0;
-}
-
 /*
  * This function locks the extent and properly waits for data=ordered extents
  * to finish before allowing the pages to be modified if need.
@@ -1473,10 +1493,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
                + round_up(pos + write_bytes - start_pos,
                           fs_info->sectorsize) - 1;
 
-       if (start_pos < inode->vfs_inode.i_size ||
-           (inode->flags & BTRFS_INODE_PREALLOC)) {
+       if (start_pos < inode->vfs_inode.i_size) {
                struct btrfs_ordered_extent *ordered;
-               unsigned int clear_bits;
 
                lock_extent_bits(&inode->io_tree, start_pos, last_pos,
                                cached_state);
@@ -1498,19 +1516,10 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);
-               ret = btrfs_find_new_delalloc_bytes(inode, start_pos,
-                                                   last_pos - start_pos + 1,
-                                                   cached_state);
-               clear_bits = EXTENT_DIRTY | EXTENT_DELALLOC |
-                       EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG;
-               if (ret)
-                       clear_bits |= EXTENT_DELALLOC_NEW | EXTENT_LOCKED;
-               clear_extent_bit(&inode->io_tree, start_pos,
-                                last_pos, clear_bits,
-                                (clear_bits & EXTENT_LOCKED) ? 1 : 0,
-                                0, cached_state, GFP_NOFS);
-               if (ret)
-                       return ret;
+               clear_extent_bit(&inode->io_tree, start_pos, last_pos,
+                                EXTENT_DIRTY | EXTENT_DELALLOC |
+                                EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+                                0, 0, cached_state, GFP_NOFS);
                *lockstart = start_pos;
                *lockend = last_pos;
                ret = 1;
@@ -2048,6 +2057,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        len = (u64)end - (u64)start + 1;
        trace_btrfs_sync_file(file, datasync);
 
+       btrfs_init_log_ctx(&ctx, inode);
+
        /*
         * We write the dirty pages in the range and wait until they complete
         * out of the ->i_mutex. If so, we can flush the dirty pages by
@@ -2194,8 +2205,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        }
        trans->sync = true;
 
-       btrfs_init_log_ctx(&ctx, inode);
-
        ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
        if (ret < 0) {
                /* Fallthrough and commit/free transaction. */
@@ -2253,6 +2262,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                ret = btrfs_end_transaction(trans);
        }
 out:
+       ASSERT(list_empty(&ctx.list));
        err = file_check_and_advance_wb_err(file);
        if (!ret)
                ret = err;
index cdc9f4015ec36c08688a81bb1ba2bf657a9845ef..4426d1c73e50f1d1b1105a182d9c982a28b8ff08 100644 (file)
@@ -1264,7 +1264,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
        /* Lock all pages first so we can lock the extent safely. */
        ret = io_ctl_prepare_pages(io_ctl, inode, 0);
        if (ret)
-               goto out;
+               goto out_unlock;
 
        lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
                         &cached_state);
@@ -1358,6 +1358,7 @@ out_nospc_locked:
 out_nospc:
        cleanup_write_cache_enospc(inode, io_ctl, &cached_state);
 
+out_unlock:
        if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
                up_write(&block_group->data_rwsem);
 
index b93fe05a39c7643247298c4b7d12eccae39b9790..e1a7f3cb5be940561af3ec855b6657c3fce7c850 100644 (file)
@@ -378,6 +378,7 @@ struct async_cow {
        struct page *locked_page;
        u64 start;
        u64 end;
+       unsigned int write_flags;
        struct list_head extents;
        struct btrfs_work work;
 };
@@ -857,7 +858,8 @@ retry:
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
-                                   async_extent->nr_pages)) {
+                                   async_extent->nr_pages,
+                                   async_cow->write_flags)) {
                        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
                        struct page *p = async_extent->pages[0];
                        const u64 start = async_extent->start;
@@ -1191,7 +1193,8 @@ static noinline void async_cow_free(struct btrfs_work *work)
 
 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                                u64 start, u64 end, int *page_started,
-                               unsigned long *nr_written)
+                               unsigned long *nr_written,
+                               unsigned int write_flags)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct async_cow *async_cow;
@@ -1208,6 +1211,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;
+               async_cow->write_flags = write_flags;
 
                if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
                    !btrfs_test_opt(fs_info, FORCE_COMPRESS))
@@ -1577,11 +1581,13 @@ static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
  */
 static int run_delalloc_range(void *private_data, struct page *locked_page,
                              u64 start, u64 end, int *page_started,
-                             unsigned long *nr_written)
+                             unsigned long *nr_written,
+                             struct writeback_control *wbc)
 {
        struct inode *inode = private_data;
        int ret;
        int force_cow = need_force_cow(inode, start, end);
+       unsigned int write_flags = wbc_to_write_flags(wbc);
 
        if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
                ret = run_delalloc_nocow(inode, locked_page, start, end,
@@ -1596,7 +1602,8 @@ static int run_delalloc_range(void *private_data, struct page *locked_page,
                set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
                        &BTRFS_I(inode)->runtime_flags);
                ret = cow_file_range_async(inode, locked_page, start, end,
-                                          page_started, nr_written);
+                                          page_started, nr_written,
+                                          write_flags);
        }
        if (ret)
                btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
@@ -2025,11 +2032,12 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+                             unsigned int extra_bits,
                              struct extent_state **cached_state, int dedupe)
 {
        WARN_ON((end & (PAGE_SIZE - 1)) == 0);
        return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
-                                  cached_state);
+                                  extra_bits, cached_state);
 }
 
 /* see btrfs_writepage_start_hook for details on why this is required */
@@ -2090,7 +2098,7 @@ again:
                goto out;
         }
 
-       btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state,
+       btrfs_set_extent_delalloc(inode, page_start, page_end, 0, &cached_state,
                                  0);
        ClearPageChecked(page);
        set_page_dirty(page);
@@ -2997,6 +3005,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
                compress_type = ordered_extent->compress_type;
        if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
                BUG_ON(compress_type);
+               btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
+                                      ordered_extent->len);
                ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
                                                ordered_extent->file_offset,
                                                ordered_extent->file_offset +
@@ -4790,7 +4800,7 @@ again:
                          EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                          0, 0, &cached_state, GFP_NOFS);
 
-       ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
+       ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
                                        &cached_state, 0);
        if (ret) {
                unlock_extent_cached(io_tree, block_start, block_end,
@@ -5438,6 +5448,14 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
                goto out_err;
 
        btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
+       if (location->type != BTRFS_INODE_ITEM_KEY &&
+           location->type != BTRFS_ROOT_ITEM_KEY) {
+               btrfs_warn(root->fs_info,
+"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
+                          __func__, name, btrfs_ino(BTRFS_I(dir)),
+                          location->objectid, location->type, location->offset);
+               goto out_err;
+       }
 out:
        btrfs_free_path(path);
        return ret;
@@ -5754,8 +5772,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
                return inode;
        }
 
-       BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
-
        index = srcu_read_lock(&fs_info->subvol_srcu);
        ret = fixup_tree_root_location(fs_info, dir, dentry,
                                       &location, &sub_root);
@@ -9150,7 +9166,7 @@ again:
                          EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                          0, 0, &cached_state, GFP_NOFS);
 
-       ret = btrfs_set_extent_delalloc(inode, page_start, end,
+       ret = btrfs_set_extent_delalloc(inode, page_start, end, 0,
                                        &cached_state, 0);
        if (ret) {
                unlock_extent_cached(io_tree, page_start, page_end,
index fd172a93d11a9bb531c43d7c5188631f3d44aefa..2ef8acaac68846ea1d29452a0b0f1d95d85cc6e5 100644 (file)
@@ -1172,7 +1172,7 @@ again:
        if (!i_done || ret)
                goto out;
 
-       if (!(inode->i_sb->s_flags & MS_ACTIVE))
+       if (!(inode->i_sb->s_flags & SB_ACTIVE))
                goto out;
 
        /*
@@ -1333,7 +1333,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                 * make sure we stop running if someone unmounts
                 * the FS
                 */
-               if (!(inode->i_sb->s_flags & MS_ACTIVE))
+               if (!(inode->i_sb->s_flags & SB_ACTIVE))
                        break;
 
                if (btrfs_defrag_cancelled(fs_info)) {
@@ -2206,7 +2206,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
        if (!path)
                return -ENOMEM;
 
-       ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX];
+       ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];
 
        key.objectid = tree_id;
        key.type = BTRFS_ROOT_ITEM_KEY;
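
The btrfs_search_path_in_tree() hunk above is an off-by-one fix: &name[BTRFS_INO_LOOKUP_PATH_MAX] points one element past the end of the buffer, and since the path is assembled backwards the cursor has to start at the last valid byte instead. A small userspace sketch of that backwards-assembly pattern; it assumes the components fit, and the explicit NUL placement is this sketch's choice, not a claim about the btrfs code.

#include <stdio.h>
#include <string.h>

#define PATH_BUF_MAX 64

/* Build "a/b/c" backwards into a fixed buffer. Starting the cursor at
 * &buf[size - 1] keeps every write inside the array; starting it at
 * &buf[size] would touch one byte past the end. No bounds checking:
 * the caller must know the joined components fit. */
static char *join_reverse(char *buf, size_t size,
                          const char *const names[], size_t n)
{
        char *ptr = &buf[size - 1];     /* last valid byte, not one past it */

        *ptr = '\0';
        while (n--) {
                size_t len = strlen(names[n]);

                ptr -= len;
                memcpy(ptr, names[n], len);
                if (n)
                        *--ptr = '/';
        }
        return ptr;
}

int main(void)
{
        const char *parts[] = { "subvol", "dir", "file" };
        char buf[PATH_BUF_MAX];

        printf("%s\n", join_reverse(buf, sizeof(buf), parts, 3));
        return 0;
}
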
index 4cf2eb67eba6ceceeae466c69bf4b5dc188a7339..f0c3f00e97cbe76e1fa8484efc8933842856c8d5 100644 (file)
@@ -3268,7 +3268,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
                        nr++;
                }
 
-               btrfs_set_extent_delalloc(inode, page_start, page_end, NULL, 0);
+               btrfs_set_extent_delalloc(inode, page_start, page_end, 0, NULL,
+                                         0);
                set_page_dirty(page);
 
                unlock_extent(&BTRFS_I(inode)->io_tree,
index c10e4c70f02d15b2bbaba994f406d7a0a27fbd70..20d3300bd26896a905502b0ba5b5a0372a59e849 100644 (file)
@@ -3521,7 +3521,40 @@ out:
 }
 
 /*
- * Check if ino ino1 is an ancestor of inode ino2 in the given root.
+ * Check if inode ino2, or any of its ancestors, is inode ino1.
+ * Return 1 if true, 0 if false and < 0 on error.
+ */
+static int check_ino_in_path(struct btrfs_root *root,
+                            const u64 ino1,
+                            const u64 ino1_gen,
+                            const u64 ino2,
+                            const u64 ino2_gen,
+                            struct fs_path *fs_path)
+{
+       u64 ino = ino2;
+
+       if (ino1 == ino2)
+               return ino1_gen == ino2_gen;
+
+       while (ino > BTRFS_FIRST_FREE_OBJECTID) {
+               u64 parent;
+               u64 parent_gen;
+               int ret;
+
+               fs_path_reset(fs_path);
+               ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
+               if (ret < 0)
+                       return ret;
+               if (parent == ino1)
+                       return parent_gen == ino1_gen;
+               ino = parent;
+       }
+       return 0;
+}
+
+/*
+ * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
+ * possible path (in case ino2 is not a directory and has multiple hard links).
  * Return 1 if true, 0 if false and < 0 on error.
  */
 static int is_ancestor(struct btrfs_root *root,
@@ -3530,36 +3563,91 @@ static int is_ancestor(struct btrfs_root *root,
                       const u64 ino2,
                       struct fs_path *fs_path)
 {
-       u64 ino = ino2;
-       bool free_path = false;
+       bool free_fs_path = false;
        int ret = 0;
+       struct btrfs_path *path = NULL;
+       struct btrfs_key key;
 
        if (!fs_path) {
                fs_path = fs_path_alloc();
                if (!fs_path)
                        return -ENOMEM;
-               free_path = true;
+               free_fs_path = true;
        }
 
-       while (ino > BTRFS_FIRST_FREE_OBJECTID) {
-               u64 parent;
-               u64 parent_gen;
+       path = alloc_path_for_send();
+       if (!path) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
-               fs_path_reset(fs_path);
-               ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
-               if (ret < 0) {
-                       if (ret == -ENOENT && ino == ino2)
-                               ret = 0;
-                       goto out;
+       key.objectid = ino2;
+       key.type = BTRFS_INODE_REF_KEY;
+       key.offset = 0;
+
+       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       if (ret < 0)
+               goto out;
+
+       while (true) {
+               struct extent_buffer *leaf = path->nodes[0];
+               int slot = path->slots[0];
+               u32 cur_offset = 0;
+               u32 item_size;
+
+               if (slot >= btrfs_header_nritems(leaf)) {
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret < 0)
+                               goto out;
+                       if (ret > 0)
+                               break;
+                       continue;
                }
-               if (parent == ino1) {
-                       ret = parent_gen == ino1_gen ? 1 : 0;
-                       goto out;
+
+               btrfs_item_key_to_cpu(leaf, &key, slot);
+               if (key.objectid != ino2)
+                       break;
+               if (key.type != BTRFS_INODE_REF_KEY &&
+                   key.type != BTRFS_INODE_EXTREF_KEY)
+                       break;
+
+               item_size = btrfs_item_size_nr(leaf, slot);
+               while (cur_offset < item_size) {
+                       u64 parent;
+                       u64 parent_gen;
+
+                       if (key.type == BTRFS_INODE_EXTREF_KEY) {
+                               unsigned long ptr;
+                               struct btrfs_inode_extref *extref;
+
+                               ptr = btrfs_item_ptr_offset(leaf, slot);
+                               extref = (struct btrfs_inode_extref *)
+                                       (ptr + cur_offset);
+                               parent = btrfs_inode_extref_parent(leaf,
+                                                                  extref);
+                               cur_offset += sizeof(*extref);
+                               cur_offset += btrfs_inode_extref_name_len(leaf,
+                                                                 extref);
+                       } else {
+                               parent = key.offset;
+                               cur_offset = item_size;
+                       }
+
+                       ret = get_inode_info(root, parent, NULL, &parent_gen,
+                                            NULL, NULL, NULL, NULL);
+                       if (ret < 0)
+                               goto out;
+                       ret = check_ino_in_path(root, ino1, ino1_gen,
+                                               parent, parent_gen, fs_path);
+                       if (ret)
+                               goto out;
                }
-               ino = parent;
+               path->slots[0]++;
        }
+       ret = 0;
  out:
-       if (free_path)
+       btrfs_free_path(path);
+       if (free_fs_path)
                fs_path_free(fs_path);
        return ret;
 }
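
The rewritten is_ancestor() above no longer follows only the first inode reference: a non-directory can have several hard links, so every recorded parent of ino2 has to be walked upwards (that is what the new check_ino_in_path() does per parent) before concluding that ino1 is not an ancestor. A toy userspace sketch of the idea, recursive instead of the iterative ref-item walk and without the generation checks the real code also performs.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Each toy inode may have several parents (hard links). */
struct toy_inode {
        unsigned long ino;
        unsigned long parents[4];
        size_t nr_parents;
};

static const struct toy_inode *lookup(const struct toy_inode *tbl, size_t n,
                                      unsigned long ino)
{
        for (size_t i = 0; i < n; i++)
                if (tbl[i].ino == ino)
                        return &tbl[i];
        return NULL;
}

/* True if @anc is @ino or lies on any upward path starting at @ino. */
static bool toy_is_ancestor(const struct toy_inode *tbl, size_t n,
                            unsigned long anc, unsigned long ino)
{
        const struct toy_inode *cur = lookup(tbl, n, ino);

        if (anc == ino)
                return true;
        if (!cur)
                return false;
        for (size_t i = 0; i < cur->nr_parents; i++)
                if (toy_is_ancestor(tbl, n, anc, cur->parents[i]))
                        return true;
        return false;
}

int main(void)
{
        /* 256 is the tree root; 300 is hard-linked into both 257 and 258. */
        const struct toy_inode tbl[] = {
                { 257, { 256 }, 1 },
                { 258, { 256 }, 1 },
                { 300, { 257, 258 }, 2 },
        };

        printf("257 ancestor of 300: %d\n", toy_is_ancestor(tbl, 3, 257, 300));
        printf("258 ancestor of 300: %d\n", toy_is_ancestor(tbl, 3, 258, 300));
        return 0;
}
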
index 65af029559b58a793961623412010eee71f80d83..3a4dce1536455416a4f609f99ec416b61811f592 100644 (file)
@@ -107,7 +107,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
                return;
 
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                btrfs_info(fs_info, "forced readonly");
                /*
                 * Note that a running device replace operation is not
@@ -137,7 +137,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
 
        /*
         * Special case: if the error is EROFS, and we're already
-        * under MS_RDONLY, then it is safe here.
+        * under SB_RDONLY, then it is safe here.
         */
        if (errno == -EROFS && sb_rdonly(sb))
                return;
@@ -168,7 +168,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
        set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
 
        /* Don't go through full error handling during mount */
-       if (sb->s_flags & MS_BORN)
+       if (sb->s_flags & SB_BORN)
                btrfs_handle_error(fs_info);
 }
 
@@ -507,9 +507,18 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                            token == Opt_compress_force ||
                            strncmp(args[0].from, "zlib", 4) == 0) {
                                compress_type = "zlib";
+
                                info->compress_type = BTRFS_COMPRESS_ZLIB;
-                               info->compress_level =
-                                       btrfs_compress_str2level(args[0].from);
+                               info->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
+                               /*
+                                * args[0] contains uninitialized data since
+                                * for these tokens we don't expect any
+                                * parameter.
+                                */
+                               if (token != Opt_compress &&
+                                   token != Opt_compress_force)
+                                       info->compress_level =
+                                         btrfs_compress_str2level(args[0].from);
                                btrfs_set_opt(info->mount_opt, COMPRESS);
                                btrfs_clear_opt(info->mount_opt, NODATACOW);
                                btrfs_clear_opt(info->mount_opt, NODATASUM);
@@ -625,7 +634,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                        break;
                case Opt_acl:
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
-                       info->sb->s_flags |= MS_POSIXACL;
+                       info->sb->s_flags |= SB_POSIXACL;
                        break;
 #else
                        btrfs_err(info, "support for ACL not compiled in!");
@@ -633,7 +642,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                        goto out;
 #endif
                case Opt_noacl:
-                       info->sb->s_flags &= ~MS_POSIXACL;
+                       info->sb->s_flags &= ~SB_POSIXACL;
                        break;
                case Opt_notreelog:
                        btrfs_set_and_info(info, NOTREELOG,
@@ -851,7 +860,7 @@ check:
        /*
         * Extra check for current option against current flag
         */
-       if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & MS_RDONLY)) {
+       if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & SB_RDONLY)) {
                btrfs_err(info,
                          "nologreplay must be used with ro mount option");
                ret = -EINVAL;
@@ -1147,7 +1156,7 @@ static int btrfs_fill_super(struct super_block *sb,
        sb->s_xattr = btrfs_xattr_handlers;
        sb->s_time_gran = 1;
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 #endif
        sb->s_flags |= SB_I_VERSION;
        sb->s_iflags |= SB_I_CGROUPWB;
@@ -1180,7 +1189,7 @@ static int btrfs_fill_super(struct super_block *sb,
        }
 
        cleancache_init_fs(sb);
-       sb->s_flags |= MS_ACTIVE;
+       sb->s_flags |= SB_ACTIVE;
        return 0;
 
 fail_close:
@@ -1277,7 +1286,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
                seq_puts(seq, ",flushoncommit");
        if (btrfs_test_opt(info, DISCARD))
                seq_puts(seq, ",discard");
-       if (!(info->sb->s_flags & MS_POSIXACL))
+       if (!(info->sb->s_flags & SB_POSIXACL))
                seq_puts(seq, ",noacl");
        if (btrfs_test_opt(info, SPACE_CACHE))
                seq_puts(seq, ",space_cache");
@@ -1409,11 +1418,11 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
 
        mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name, newargs);
        if (PTR_ERR_OR_ZERO(mnt) == -EBUSY) {
-               if (flags & MS_RDONLY) {
-                       mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~MS_RDONLY,
+               if (flags & SB_RDONLY) {
+                       mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~SB_RDONLY,
                                             device_name, newargs);
                } else {
-                       mnt = vfs_kern_mount(&btrfs_fs_type, flags | MS_RDONLY,
+                       mnt = vfs_kern_mount(&btrfs_fs_type, flags | SB_RDONLY,
                                             device_name, newargs);
                        if (IS_ERR(mnt)) {
                                root = ERR_CAST(mnt);
@@ -1565,7 +1574,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        u64 subvol_objectid = 0;
        int error = 0;
 
-       if (!(flags & MS_RDONLY))
+       if (!(flags & SB_RDONLY))
                mode |= FMODE_WRITE;
 
        error = btrfs_parse_early_options(data, mode, fs_type,
@@ -1619,13 +1628,13 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        if (error)
                goto error_fs_info;
 
-       if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
+       if (!(flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
                error = -EACCES;
                goto error_close_devices;
        }
 
        bdev = fs_devices->latest_bdev;
-       s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | MS_NOSEC,
+       s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | SB_NOSEC,
                 fs_info);
        if (IS_ERR(s)) {
                error = PTR_ERR(s);
@@ -1635,7 +1644,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        if (s->s_root) {
                btrfs_close_devices(fs_devices);
                free_fs_info(fs_info);
-               if ((flags ^ s->s_flags) & MS_RDONLY)
+               if ((flags ^ s->s_flags) & SB_RDONLY)
                        error = -EBUSY;
        } else {
                snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
@@ -1702,11 +1711,11 @@ static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
 {
        if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
            (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
-            (flags & MS_RDONLY))) {
+            (flags & SB_RDONLY))) {
                /* wait for any defraggers to finish */
                wait_event(fs_info->transaction_wait,
                           (atomic_read(&fs_info->defrag_running) == 0));
-               if (flags & MS_RDONLY)
+               if (flags & SB_RDONLY)
                        sync_filesystem(fs_info->sb);
        }
 }
@@ -1766,10 +1775,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
        btrfs_resize_thread_pool(fs_info,
                fs_info->thread_pool_size, old_thread_pool_size);
 
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                goto out;
 
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                /*
                 * this also happens on 'umount -rf' or on shutdown, when
                 * the filesystem is busy.
@@ -1781,10 +1790,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                /* avoid complains from lockdep et al. */
                up(&fs_info->uuid_tree_rescan_sem);
 
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
 
                /*
-                * Setting MS_RDONLY will put the cleaner thread to
+                * Setting SB_RDONLY will put the cleaner thread to
                 * sleep at the next loop if it's already active.
                 * If it's already asleep, we'll leave unused block
                 * groups on disk until we're mounted read-write again
@@ -1856,7 +1865,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                                goto restore;
                        }
                }
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
 
                set_bit(BTRFS_FS_OPEN, &fs_info->flags);
        }
@@ -1866,9 +1875,9 @@ out:
        return 0;
 
 restore:
-       /* We've hit an error - don't reset MS_RDONLY */
+       /* We've hit an error - don't reset SB_RDONLY */
        if (sb_rdonly(sb))
-               old_flags |= MS_RDONLY;
+               old_flags |= SB_RDONLY;
        sb->s_flags = old_flags;
        fs_info->mount_opt = old_opts;
        fs_info->compress_type = old_compress_type;
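
The option-parsing hunk above fixes a use of uninitialized data: a bare "compress" or "compress-force" token carries no argument, so args[0] must not be handed to btrfs_compress_str2level() and the level falls back to a default instead. A rough userspace sketch of that only-parse-what-was-given rule; single-digit levels, no validation, and none of these names are the btrfs parser.

#include <stdio.h>
#include <string.h>

#define DEFAULT_ZLIB_LEVEL 3    /* stand-in for the real default */

/* Accepts "compress", "compress=zlib" or "compress=zlib:N". The level is
 * parsed only when the option actually carried an argument. */
static int parse_compress(const char *opt, int *level)
{
        const char *arg = strchr(opt, '=');

        *level = DEFAULT_ZLIB_LEVEL;
        if (!arg)                       /* bare token: nothing to parse */
                return 0;
        arg++;
        if (strncmp(arg, "zlib", 4) != 0)
                return -1;
        if (arg[4] == ':')              /* explicit "zlib:N" */
                *level = arg[5] - '0';
        return 0;
}

int main(void)
{
        int level;

        parse_compress("compress", &level);
        printf("bare token : level %d\n", level);       /* default */
        parse_compress("compress=zlib:9", &level);
        printf("with level : level %d\n", level);       /* 9 */
        return 0;
}
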
index d06b1c931d05b8ef2b06c8dbc26d7657c92010c7..2e7f64a3b22b7d55d0bc6abfccd09171ef943351 100644 (file)
@@ -114,7 +114,7 @@ static int test_find_delalloc(u32 sectorsize)
         * |--- delalloc ---|
         * |---  search  ---|
         */
-       set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL);
+       set_extent_delalloc(&tmp, 0, sectorsize - 1, 0, NULL);
        start = 0;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -145,7 +145,7 @@ static int test_find_delalloc(u32 sectorsize)
                test_msg("Couldn't find the locked page\n");
                goto out_bits;
        }
-       set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL);
+       set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, 0, NULL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -200,7 +200,7 @@ static int test_find_delalloc(u32 sectorsize)
         *
         * We are re-using our test_start from above since it works out well.
         */
-       set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL);
+       set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, 0, NULL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
index f797642c013dadc24f5391fd148bcdc6d320c563..30affb60da514848ef8fb7621a48e629e893feb3 100644 (file)
@@ -968,7 +968,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        btrfs_test_inode_set_ops(inode);
 
        /* [BTRFS_MAX_EXTENT_SIZE] */
-       ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1,
+       ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1, 0,
                                        NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
@@ -984,7 +984,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
        ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
                                        BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
-                                       NULL, 0);
+                                       0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1018,7 +1018,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
                                        (BTRFS_MAX_EXTENT_SIZE >> 1)
                                        + sectorsize - 1,
-                                       NULL, 0);
+                                       0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1036,7 +1036,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        ret = btrfs_set_extent_delalloc(inode,
                        BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
                        (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
-                       NULL, 0);
+                       0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1053,7 +1053,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        */
        ret = btrfs_set_extent_delalloc(inode,
                        BTRFS_MAX_EXTENT_SIZE + sectorsize,
-                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1089,7 +1089,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
         */
        ret = btrfs_set_extent_delalloc(inode,
                        BTRFS_MAX_EXTENT_SIZE + sectorsize,
-                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
index 114fc5f0ecc5efb30e2c16c6e1501ed1b6c3bca2..ce4ed6ec8f39276c7c7a5d3551b043a9fd8c08b4 100644 (file)
@@ -242,7 +242,8 @@ static int check_leaf_item(struct btrfs_root *root,
        return ret;
 }
 
-int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf)
+static int check_leaf(struct btrfs_root *root, struct extent_buffer *leaf,
+                     bool check_item_data)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
        /* No valid key type is 0, so all key should be larger than this key */
@@ -361,10 +362,15 @@ int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf)
                        return -EUCLEAN;
                }
 
-               /* Check if the item size and content meet other criteria */
-               ret = check_leaf_item(root, leaf, &key, slot);
-               if (ret < 0)
-                       return ret;
+               if (check_item_data) {
+                       /*
+                        * Check if the item size and content meet other
+                        * criteria
+                        */
+                       ret = check_leaf_item(root, leaf, &key, slot);
+                       if (ret < 0)
+                               return ret;
+               }
 
                prev_key.objectid = key.objectid;
                prev_key.type = key.type;
@@ -374,6 +380,17 @@ int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf)
        return 0;
 }
 
+int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf)
+{
+       return check_leaf(root, leaf, true);
+}
+
+int btrfs_check_leaf_relaxed(struct btrfs_root *root,
+                            struct extent_buffer *leaf)
+{
+       return check_leaf(root, leaf, false);
+}
+
 int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node)
 {
        unsigned long nr = btrfs_header_nritems(node);
index 96c486e95d7042eaaa6476eeb98ada600cf2afe6..3d53e8d6fda0ca8312dd2477d9dd36fc3ba245b8 100644 (file)
 #include "ctree.h"
 #include "extent_io.h"
 
-int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf);
+/*
+ * Comprehensive leaf checker.
+ * Will check not only the item pointers, but also every possible member
+ * in item data.
+ */
+int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf);
+
+/*
+ * Less strict leaf checker.
+ * Will only check item pointers, not reading item data.
+ */
+int btrfs_check_leaf_relaxed(struct btrfs_root *root,
+                            struct extent_buffer *leaf);
 int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node);
 
 #endif
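
The tree-checker hunks above turn the single checker into a static check_leaf() worker with a check_item_data switch, exported through two thin wrappers so call sites state by name whether they want the full or the relaxed pass. The shape of that pattern in a self-contained sketch; the leaf structure is a toy, not the extent buffer API.

#include <stdbool.h>
#include <stdio.h>

struct toy_leaf {
        int nritems;
        bool bad_item_data;
};

static int check_leaf(const struct toy_leaf *leaf, bool check_item_data)
{
        if (leaf->nritems < 0)
                return -1;              /* structural damage: always fatal */
        if (check_item_data && leaf->bad_item_data)
                return -1;              /* content checks only in full mode */
        return 0;
}

int check_leaf_full(const struct toy_leaf *leaf)
{
        return check_leaf(leaf, true);
}

int check_leaf_relaxed(const struct toy_leaf *leaf)
{
        return check_leaf(leaf, false);
}

int main(void)
{
        struct toy_leaf leaf = { .nritems = 1, .bad_item_data = true };

        printf("full: %d, relaxed: %d\n",
               check_leaf_full(&leaf), check_leaf_relaxed(&leaf));
        return 0;
}
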
index aa7c71cff575a5a3e1d73b8194239936cc509877..7bf9b31561db14ec7159fd0b7479e6bdee149735 100644 (file)
@@ -4102,7 +4102,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
 
        if (ordered_io_err) {
                ctx->io_err = -EIO;
-               return 0;
+               return ctx->io_err;
        }
 
        btrfs_init_map_token(&token);
index f1ecb938ba4d71b4a83c1be50bf5880bc86add38..49810b70afd3941721246497d94c754ec2120619 100644 (file)
@@ -189,6 +189,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
                rcu_string_free(device->name);
+               bio_put(device->flush_bio);
                kfree(device);
        }
        kfree(fs_devices);
@@ -578,6 +579,7 @@ static void btrfs_free_stale_device(struct btrfs_device *cur_dev)
                                fs_devs->num_devices--;
                                list_del(&dev->dev_list);
                                rcu_string_free(dev->name);
+                               bio_put(dev->flush_bio);
                                kfree(dev);
                        }
                        break;
@@ -630,6 +632,7 @@ static noinline int device_list_add(const char *path,
 
                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name) {
+                       bio_put(device->flush_bio);
                        kfree(device);
                        return -ENOMEM;
                }
@@ -742,6 +745,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
                        name = rcu_string_strdup(orig_dev->name->str,
                                        GFP_KERNEL);
                        if (!name) {
+                               bio_put(device->flush_bio);
                                kfree(device);
                                goto error;
                        }
@@ -807,6 +811,7 @@ again:
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
                rcu_string_free(device->name);
+               bio_put(device->flush_bio);
                kfree(device);
        }
 
@@ -1750,20 +1755,24 @@ static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
        key.offset = device->devid;
 
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
-       if (ret < 0)
-               goto out;
-
-       if (ret > 0) {
-               ret = -ENOENT;
+       if (ret) {
+               if (ret > 0)
+                       ret = -ENOENT;
+               btrfs_abort_transaction(trans, ret);
+               btrfs_end_transaction(trans);
                goto out;
        }
 
        ret = btrfs_del_item(trans, root, path);
-       if (ret)
-               goto out;
+       if (ret) {
+               btrfs_abort_transaction(trans, ret);
+               btrfs_end_transaction(trans);
+       }
+
 out:
        btrfs_free_path(path);
-       btrfs_commit_transaction(trans);
+       if (!ret)
+               ret = btrfs_commit_transaction(trans);
        return ret;
 }
 
@@ -1993,7 +2002,7 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
        fs_devices = srcdev->fs_devices;
 
        list_del_rcu(&srcdev->dev_list);
-       list_del_rcu(&srcdev->dev_alloc_list);
+       list_del(&srcdev->dev_alloc_list);
        fs_devices->num_devices--;
        if (srcdev->missing)
                fs_devices->missing_devices--;
@@ -2349,6 +2358,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
 
        name = rcu_string_strdup(device_path, GFP_KERNEL);
        if (!name) {
+               bio_put(device->flush_bio);
                kfree(device);
                ret = -ENOMEM;
                goto error;
@@ -2358,6 +2368,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                rcu_string_free(device->name);
+               bio_put(device->flush_bio);
                kfree(device);
                ret = PTR_ERR(trans);
                goto error;
@@ -2384,7 +2395,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
        set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
 
        if (seeding_dev) {
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
                ret = btrfs_prepare_sprout(fs_info);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
@@ -2497,10 +2508,11 @@ error_sysfs:
        btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
 error_trans:
        if (seeding_dev)
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        if (trans)
                btrfs_end_transaction(trans);
        rcu_string_free(device->name);
+       bio_put(device->flush_bio);
        kfree(device);
 error:
        blkdev_put(bdev, FMODE_EXCL);
@@ -2567,6 +2579,7 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
 
        name = rcu_string_strdup(device_path, GFP_KERNEL);
        if (!name) {
+               bio_put(device->flush_bio);
                kfree(device);
                ret = -ENOMEM;
                goto error;
@@ -6284,6 +6297,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
 
                ret = find_next_devid(fs_info, &tmp);
                if (ret) {
+                       bio_put(dev->flush_bio);
                        kfree(dev);
                        return ERR_PTR(ret);
                }
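
The volumes.c hunks above all add the same missing step: the device structure owns a separately allocated flush_bio, so every early-exit path that frees the structure has to put the bio first or it leaks. A compact userspace sketch of that goto-ladder cleanup style, with plain malloc/free standing in for the kernel allocators.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_bio { char data[64]; };

struct toy_device {
        struct toy_bio *flush_bio;
        char *name;
};

static struct toy_device *device_create(const char *path)
{
        struct toy_device *dev = calloc(1, sizeof(*dev));

        if (!dev)
                return NULL;
        dev->flush_bio = calloc(1, sizeof(*dev->flush_bio));
        if (!dev->flush_bio)
                goto err_free_dev;
        dev->name = strdup(path);
        if (!dev->name)
                goto err_put_bio;       /* the step the fix adds */
        return dev;

err_put_bio:
        free(dev->flush_bio);
err_free_dev:
        free(dev);
        return NULL;
}

int main(void)
{
        struct toy_device *dev = device_create("/dev/sdz");

        if (!dev)
                return 1;
        puts(dev->name);
        free(dev->name);
        free(dev->flush_bio);
        free(dev);
        return 0;
}
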
index ab69dcb70e8ae342733f589338c02dc226f95356..1b468250e94752e6eedf63283cba95a236fb0380 100644 (file)
@@ -1440,6 +1440,29 @@ static int __close_session(struct ceph_mds_client *mdsc,
        return request_close_session(mdsc, session);
 }
 
+static bool drop_negative_children(struct dentry *dentry)
+{
+       struct dentry *child;
+       bool all_negative = true;
+
+       if (!d_is_dir(dentry))
+               goto out;
+
+       spin_lock(&dentry->d_lock);
+       list_for_each_entry(child, &dentry->d_subdirs, d_child) {
+               if (d_really_is_positive(child)) {
+                       all_negative = false;
+                       break;
+               }
+       }
+       spin_unlock(&dentry->d_lock);
+
+       if (all_negative)
+               shrink_dcache_parent(dentry);
+out:
+       return all_negative;
+}
+
 /*
  * Trim old(er) caps.
  *
@@ -1490,16 +1513,27 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
        if ((used | wanted) & ~oissued & mine)
                goto out;   /* we need these caps */
 
-       session->s_trim_caps--;
        if (oissued) {
                /* we aren't the only cap.. just remove us */
                __ceph_remove_cap(cap, true);
+               session->s_trim_caps--;
        } else {
+               struct dentry *dentry;
                /* try dropping referring dentries */
                spin_unlock(&ci->i_ceph_lock);
-               d_prune_aliases(inode);
-               dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
-                    inode, cap, atomic_read(&inode->i_count));
+               dentry = d_find_any_alias(inode);
+               if (dentry && drop_negative_children(dentry)) {
+                       int count;
+                       dput(dentry);
+                       d_prune_aliases(inode);
+                       count = atomic_read(&inode->i_count);
+                       if (count == 1)
+                               session->s_trim_caps--;
+                       dout("trim_caps_cb %p cap %p pruned, count now %d\n",
+                            inode, cap, count);
+               } else {
+                       dput(dentry);
+               }
                return 0;
        }
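
The cap-trimming hunk above only prunes a dentry after confirming that every child is negative, and only decrements s_trim_caps when the inode was actually released. A toy sketch of the all-children-negative test itself; no locking, and the types are illustrative rather than the ceph/dcache API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_dentry {
        const char *name;
        bool positive;          /* still has an inode attached */
};

static bool all_children_negative(const struct toy_dentry *children, size_t n)
{
        for (size_t i = 0; i < n; i++)
                if (children[i].positive)
                        return false;   /* one live child blocks the prune */
        return true;
}

int main(void)
{
        struct toy_dentry kids[] = {
                { "a", false },
                { "b", true },
        };

        printf("safe to prune: %d\n", all_children_negative(kids, 2));
        kids[1].positive = false;
        printf("safe to prune: %d\n", all_children_negative(kids, 2));
        return 0;
}
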
 
index fe9fbb3f13f7c7128c00e103888cbbd8d1136717..a62d2a9841dc2b0487181155373c03eac60f8a02 100644 (file)
@@ -331,11 +331,11 @@ static int parse_fsopt_token(char *c, void *private)
                break;
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
        case Opt_acl:
-               fsopt->sb_flags |= MS_POSIXACL;
+               fsopt->sb_flags |= SB_POSIXACL;
                break;
 #endif
        case Opt_noacl:
-               fsopt->sb_flags &= ~MS_POSIXACL;
+               fsopt->sb_flags &= ~SB_POSIXACL;
                break;
        default:
                BUG_ON(token);
@@ -520,7 +520,7 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
                seq_puts(m, ",nopoolperm");
 
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
-       if (fsopt->sb_flags & MS_POSIXACL)
+       if (fsopt->sb_flags & SB_POSIXACL)
                seq_puts(m, ",acl");
        else
                seq_puts(m, ",noacl");
@@ -988,7 +988,7 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
        dout("ceph_mount\n");
 
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
-       flags |= MS_POSIXACL;
+       flags |= SB_POSIXACL;
 #endif
        err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
        if (err < 0) {
index cbd216b572390ca76e481aabf9a311e4b7749d7c..350fa55a1bf79878f9f390a3883998ce7cf198a4 100644 (file)
@@ -42,7 +42,7 @@
 #define CIFS_MOUNT_MULTIUSER   0x20000 /* multiuser mount */
 #define CIFS_MOUNT_STRICT_IO   0x40000 /* strict cache mode */
 #define CIFS_MOUNT_RWPIDFORWARD        0x80000 /* use pid forwarding for rw */
-#define CIFS_MOUNT_POSIXACL    0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
+#define CIFS_MOUNT_POSIXACL    0x100000 /* mirror of SB_POSIXACL in mnt_cifs_flags */
 #define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
 #define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
 #define CIFS_MOUNT_MAP_SFM_CHR 0x800000 /* SFM/MAC mapping for illegal chars */
index 8c8b75d33f310ce5e258042ff489f942379cdd27..31b7565b161756e01e9b0f10cf358e5bfccab3e4 100644 (file)
@@ -125,7 +125,7 @@ cifs_read_super(struct super_block *sb)
        tcon = cifs_sb_master_tcon(cifs_sb);
 
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
 
        if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
                sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -497,7 +497,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                seq_puts(s, ",cifsacl");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
                seq_puts(s, ",dynperm");
-       if (root->d_sb->s_flags & MS_POSIXACL)
+       if (root->d_sb->s_flags & SB_POSIXACL)
                seq_puts(s, ",acl");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
                seq_puts(s, ",mfsymlinks");
@@ -573,7 +573,7 @@ static int cifs_show_stats(struct seq_file *s, struct dentry *root)
 static int cifs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
+       *flags |= SB_NODIRATIME;
        return 0;
 }
 
@@ -708,7 +708,7 @@ cifs_do_mount(struct file_system_type *fs_type,
 
        rc = cifs_mount(cifs_sb, volume_info);
        if (rc) {
-               if (!(flags & MS_SILENT))
+               if (!(flags & SB_SILENT))
                        cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
                                 rc);
                root = ERR_PTR(rc);
@@ -720,7 +720,7 @@ cifs_do_mount(struct file_system_type *fs_type,
        mnt_data.flags = flags;
 
        /* BB should we make this contingent on mount parm? */
-       flags |= MS_NODIRATIME | MS_NOATIME;
+       flags |= SB_NODIRATIME | SB_NOATIME;
 
        sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
        if (IS_ERR(sb)) {
@@ -739,7 +739,7 @@ cifs_do_mount(struct file_system_type *fs_type,
                        goto out_super;
                }
 
-               sb->s_flags |= MS_ACTIVE;
+               sb->s_flags |= SB_ACTIVE;
        }
 
        root = cifs_get_root(volume_info, sb);
index e185b2853eab7b1116dafc7ca8aeeb6d09b10687..b16583594d1ad481d694036828cc7a718a94f37a 100644 (file)
@@ -559,8 +559,8 @@ struct smb_vol {
                         CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO | \
                         CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID)
 
-#define CIFS_MS_MASK (MS_RDONLY | MS_MANDLOCK | MS_NOEXEC | MS_NOSUID | \
-                     MS_NODEV | MS_SYNCHRONOUS)
+#define CIFS_MS_MASK (SB_RDONLY | SB_MANDLOCK | SB_NOEXEC | SB_NOSUID | \
+                     SB_NODEV | SB_SYNCHRONOUS)
 
 struct cifs_mnt_data {
        struct cifs_sb_info *cifs_sb;
index 7c732cb4416411e597f2e1a4af96fd8bf7e49beb..ecb99079363ab7a85c0cdf7496f76061fa43a6e4 100644 (file)
@@ -985,7 +985,7 @@ retry_iget5_locked:
                }
 
                cifs_fattr_to_inode(inode, fattr);
-               if (sb->s_flags & MS_NOATIME)
+               if (sb->s_flags & SB_NOATIME)
                        inode->i_flags |= S_NOATIME | S_NOCMTIME;
                if (inode->i_state & I_NEW) {
                        inode->i_ino = hash;
index e06740436b92080b915bd6c414fec38b0e868cd8..ed88ab8a477434b78aa94ace68c92794a6c4e082 100644 (file)
@@ -1406,7 +1406,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
        } while (rc == -EAGAIN);
 
        if (rc) {
-               cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc);
+               if (rc != -ENOENT)
+                       cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc);
                goto out;
        }
 
index 5331631386a23bd4a7458ecb5fb96efe1773cf71..01346b8b6edb38498c1b48c37e1c9210f4d5fe09 100644 (file)
@@ -2678,27 +2678,27 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
        cifs_small_buf_release(req);
 
        rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
-       shdr = get_sync_hdr(rsp);
 
-       if (shdr->Status == STATUS_END_OF_FILE) {
+       if (rc) {
+               if (rc != -ENODATA) {
+                       cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
+                       cifs_dbg(VFS, "Send error in read = %d\n", rc);
+               }
                free_rsp_buf(resp_buftype, rsp_iov.iov_base);
-               return 0;
+               return rc == -ENODATA ? 0 : rc;
        }
 
-       if (rc) {
-               cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
-               cifs_dbg(VFS, "Send error in read = %d\n", rc);
-       } else {
-               *nbytes = le32_to_cpu(rsp->DataLength);
-               if ((*nbytes > CIFS_MAX_MSGSIZE) ||
-                   (*nbytes > io_parms->length)) {
-                       cifs_dbg(FYI, "bad length %d for count %d\n",
-                                *nbytes, io_parms->length);
-                       rc = -EIO;
-                       *nbytes = 0;
-               }
+       *nbytes = le32_to_cpu(rsp->DataLength);
+       if ((*nbytes > CIFS_MAX_MSGSIZE) ||
+           (*nbytes > io_parms->length)) {
+               cifs_dbg(FYI, "bad length %d for count %d\n",
+                        *nbytes, io_parms->length);
+               rc = -EIO;
+               *nbytes = 0;
        }
 
+       shdr = get_sync_hdr(rsp);
+
        if (*buf) {
                memcpy(*buf, (char *)shdr + rsp->DataOffset, *nbytes);
                free_rsp_buf(resp_buftype, rsp_iov.iov_base);
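
The SMB2_read() hunk above reorders completion handling: the failure path is taken first, with the no-data case mapped to a successful zero-byte read, and only then is the server-reported length sanity-checked before the payload is touched. A condensed userspace sketch of that ordering; toy signature, assumes a platform that defines ENODATA, and is not the cifs code.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define TOY_MAX_MSGSIZE 128

static int finish_read(int rc, const char *payload, unsigned int data_len,
                       unsigned int requested, char *buf, unsigned int *nbytes)
{
        *nbytes = 0;
        if (rc)                                 /* failure path first */
                return rc == -ENODATA ? 0 : rc; /* "no data" is not an error */

        if (data_len > TOY_MAX_MSGSIZE || data_len > requested)
                return -EIO;                    /* bogus length from the server */

        *nbytes = data_len;
        memcpy(buf, payload, data_len);
        return 0;
}

int main(void)
{
        char buf[TOY_MAX_MSGSIZE];
        unsigned int nbytes;

        printf("eof : %d\n", finish_read(-ENODATA, NULL, 0, 16, buf, &nbytes));
        printf("ok  : %d (%u bytes)\n",
               finish_read(0, "hello", 5, 16, buf, &nbytes), nbytes);
        return 0;
}
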
index 52f975d848a076e6873d55b6429c0c62588ecb51..316af84674f110764a6e1245b099b71c47aa5854 100644 (file)
@@ -117,7 +117,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
 #ifdef CONFIG_CIFS_POSIX
                if (!value)
                        goto out;
-               if (sb->s_flags & MS_POSIXACL)
+               if (sb->s_flags & SB_POSIXACL)
                        rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
                                value, (const int)size,
                                ACL_TYPE_ACCESS, cifs_sb->local_nls,
@@ -129,7 +129,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
 #ifdef CONFIG_CIFS_POSIX
                if (!value)
                        goto out;
-               if (sb->s_flags & MS_POSIXACL)
+               if (sb->s_flags & SB_POSIXACL)
                        rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
                                value, (const int)size,
                                ACL_TYPE_DEFAULT, cifs_sb->local_nls,
@@ -266,7 +266,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
 
        case XATTR_ACL_ACCESS:
 #ifdef CONFIG_CIFS_POSIX
-               if (sb->s_flags & MS_POSIXACL)
+               if (sb->s_flags & SB_POSIXACL)
                        rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
                                value, size, ACL_TYPE_ACCESS,
                                cifs_sb->local_nls,
@@ -276,7 +276,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
 
        case XATTR_ACL_DEFAULT:
 #ifdef CONFIG_CIFS_POSIX
-               if (sb->s_flags & MS_POSIXACL)
+               if (sb->s_flags & SB_POSIXACL)
                        rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
                                value, size, ACL_TYPE_DEFAULT,
                                cifs_sb->local_nls,
index 6f0a6a4d5faa95aff7b7f0e948ba7b55bed99eb7..97424cf206c08af0519d74f82057180e5f8d6248 100644 (file)
@@ -96,7 +96,7 @@ void coda_destroy_inodecache(void)
 static int coda_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NOATIME;
+       *flags |= SB_NOATIME;
        return 0;
 }
 
@@ -188,7 +188,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
        mutex_unlock(&vc->vc_mutex);
 
        sb->s_fs_info = vc;
-       sb->s_flags |= MS_NOATIME;
+       sb->s_flags |= SB_NOATIME;
        sb->s_blocksize = 4096; /* XXXXX  what do we put here?? */
        sb->s_blocksize_bits = 12;
        sb->s_magic = CODA_SUPER_MAGIC;
index f937082f32449a9f3316e581675c47e51f7f6d30..58e2fe40b2a04423de26729613bae16c233c8920 100644 (file)
@@ -34,6 +34,7 @@ config CRAMFS_BLOCKDEV
 config CRAMFS_MTD
        bool "Support CramFs image directly mapped in physical memory"
        depends on CRAMFS && MTD
+       depends on CRAMFS=m || MTD=y
        default y if !CRAMFS_BLOCKDEV
        help
          This option allows the CramFs driver to load data directly from
index 9a2ab419ba624bbb86a5f978313e35f984627440..017b0ab19bc4d98625349ce65109f7f48f5551c8 100644 (file)
@@ -505,7 +505,7 @@ static void cramfs_kill_sb(struct super_block *sb)
 static int cramfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -592,7 +592,7 @@ static int cramfs_finalize_super(struct super_block *sb,
        struct inode *root;
 
        /* Set it all up.. */
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        sb->s_op = &cramfs_ops;
        root = get_cramfs_inode(sb, cramfs_root, 0);
        if (IS_ERR(root))
index f2677c90d96e1ea140bb908c14dcab3aac49bd7e..025d66a705db6bf41ba52ada490806780d101083 100644 (file)
@@ -560,8 +560,8 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
         * Set the POSIX ACL flag based on whether they're enabled in the lower
         * mount.
         */
-       s->s_flags = flags & ~MS_POSIXACL;
-       s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL;
+       s->s_flags = flags & ~SB_POSIXACL;
+       s->s_flags |= path.dentry->d_sb->s_flags & SB_POSIXACL;
 
        /**
         * Force a read-only eCryptfs mount when:
@@ -569,7 +569,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
         *   2) The ecryptfs_encrypted_view mount option is specified
         */
        if (sb_rdonly(path.dentry->d_sb) || mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
-               s->s_flags |= MS_RDONLY;
+               s->s_flags |= SB_RDONLY;
 
        s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
        s->s_blocksize = path.dentry->d_sb->s_blocksize;
@@ -602,7 +602,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
        ecryptfs_set_dentry_private(s->s_root, root_info);
        root_info->lower_path = path;
 
-       s->s_flags |= MS_ACTIVE;
+       s->s_flags |= SB_ACTIVE;
        return dget(s->s_root);
 
 out_free:
index 65b59009555b6e3c8cc8d6f647ced0de09de3319..6ffb7ba1547a66508d3179fe3dab86362f70419d 100644 (file)
@@ -116,7 +116,7 @@ static void destroy_inodecache(void)
 static int efs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -311,7 +311,7 @@ static int efs_fill_super(struct super_block *s, void *d, int silent)
 #ifdef DEBUG
                pr_info("forcing read-only mode\n");
 #endif
-               s->s_flags |= MS_RDONLY;
+               s->s_flags |= SB_RDONLY;
        }
        s->s_op   = &efs_superblock_operations;
        s->s_export_op = &efs_export_ops;
index 1d6243d9f2b653e679165099be9332776805b8bd..5688b5e1b9378107597a6117c8c3732889f951d2 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1216,15 +1216,14 @@ killed:
        return -EAGAIN;
 }
 
-char *get_task_comm(char *buf, struct task_struct *tsk)
+char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
 {
-       /* buf must be at least sizeof(tsk->comm) in size */
        task_lock(tsk);
-       strncpy(buf, tsk->comm, sizeof(tsk->comm));
+       strncpy(buf, tsk->comm, buf_size);
        task_unlock(tsk);
        return buf;
 }
-EXPORT_SYMBOL_GPL(get_task_comm);
+EXPORT_SYMBOL_GPL(__get_task_comm);
 
 /*
  * These functions flushes out all traces of the currently running executable
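
The exec.c hunk above replaces the "buf must be at least sizeof(tsk->comm)" convention with an explicit buf_size parameter. A userspace sketch of the same interface change, where a macro wrapper supplies sizeof(buf) so call sites stay terse; the explicit NUL termination is this sketch's addition, not a claim about the kernel helper.

#include <stdio.h>
#include <string.h>

#define TOY_COMM_LEN 16

struct toy_task {
        char comm[TOY_COMM_LEN];
};

static char *__get_comm(char *buf, size_t buf_size, const struct toy_task *tsk)
{
        strncpy(buf, tsk->comm, buf_size);
        buf[buf_size - 1] = '\0';       /* strncpy may not terminate */
        return buf;
}

/* Callers pass an array; the macro forwards its real size. */
#define get_comm(buf, tsk) __get_comm(buf, sizeof(buf), tsk)

int main(void)
{
        struct toy_task t = { .comm = "kworker/0:1" };
        char small[8];                  /* smaller than TOY_COMM_LEN */

        puts(get_comm(small, &t));      /* safely truncated to "kworker" */
        return 0;
}
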
index e1b3724bebf23bb78622f81aa3a085f4d304e532..33db13365c5eb8c52265218a327f302dbac2fed5 100644 (file)
@@ -548,7 +548,7 @@ do_more:
        }
 
        mark_buffer_dirty(bitmap_bh);
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                sync_dirty_buffer(bitmap_bh);
 
        group_adjust_blocks(sb, block_group, desc, bh2, group_freed);
@@ -1424,7 +1424,7 @@ allocated:
        percpu_counter_sub(&sbi->s_freeblocks_counter, num);
 
        mark_buffer_dirty(bitmap_bh);
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                sync_dirty_buffer(bitmap_bh);
 
        *errp = 0;
index a1fc3dabca41b979db18ec1688d2cda0307a5680..6484199b35d1ec1bb63879593e5bb20f0af2bb77 100644 (file)
@@ -145,7 +145,7 @@ void ext2_free_inode (struct inode * inode)
        else
                ext2_release_inode(sb, block_group, is_directory);
        mark_buffer_dirty(bitmap_bh);
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                sync_dirty_buffer(bitmap_bh);
 
        brelse(bitmap_bh);
@@ -517,7 +517,7 @@ repeat_in_this_group:
        goto fail;
 got:
        mark_buffer_dirty(bitmap_bh);
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                sync_dirty_buffer(bitmap_bh);
        brelse(bitmap_bh);
 
index e2b6be03e69b5aee2987c13bdfac9055e66d9064..7646818ab266ff81b86003b28f661fdbd7d9ba2f 100644 (file)
@@ -75,7 +75,7 @@ void ext2_error(struct super_block *sb, const char *function,
        if (test_opt(sb, ERRORS_RO)) {
                ext2_msg(sb, KERN_CRIT,
                             "error: remounting filesystem read-only");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 }
 
@@ -656,7 +656,7 @@ static int ext2_setup_super (struct super_block * sb,
                ext2_msg(sb, KERN_ERR,
                        "error: revision level too high, "
                        "forcing read-only mode");
-               res = MS_RDONLY;
+               res = SB_RDONLY;
        }
        if (read_only)
                return res;
@@ -924,9 +924,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_resuid = opts.s_resuid;
        sbi->s_resgid = opts.s_resgid;
 
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
                ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
-                MS_POSIXACL : 0);
+                SB_POSIXACL : 0);
        sb->s_iflags |= SB_I_CGROUPWB;
 
        if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
@@ -1178,7 +1178,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
                ext2_msg(sb, KERN_WARNING,
                        "warning: mounting ext3 filesystem as ext2");
        if (ext2_setup_super (sb, es, sb_rdonly(sb)))
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        ext2_write_super(sb);
        return 0;
 
@@ -1341,9 +1341,9 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
                         "dax flag with busy inodes while remounting");
                new_opts.s_mount_opt ^= EXT2_MOUNT_DAX;
        }
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                goto out_set;
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
                    !(sbi->s_mount_state & EXT2_VALID_FS))
                        goto out_set;
@@ -1379,7 +1379,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
                 */
                sbi->s_mount_state = le16_to_cpu(es->s_state);
                if (!ext2_setup_super (sb, es, 0))
-                       sb->s_flags &= ~MS_RDONLY;
+                       sb->s_flags &= ~SB_RDONLY;
                spin_unlock(&sbi->s_lock);
 
                ext2_write_super(sb);
@@ -1392,8 +1392,8 @@ out_set:
        sbi->s_mount_opt = new_opts.s_mount_opt;
        sbi->s_resuid = new_opts.s_resuid;
        sbi->s_resgid = new_opts.s_resgid;
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? SB_POSIXACL : 0);
        spin_unlock(&sbi->s_lock);
 
        return 0;
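
Several of the remount hunks in this pull compare "(bool)(*flags & SB_RDONLY)" against sb_rdonly(sb). The cast guards against the mismatch sketched below: a masked flag evaluates to zero or the bit's value, while helpers like sb_rdonly() return zero or one, so both sides are normalized to booleans before comparing. SB_EXAMPLE is an illustrative bit, not a real superblock flag.

#include <stdbool.h>
#include <stdio.h>

#define SB_EXAMPLE 0x08         /* illustrative flag bit */

static bool flag_set(unsigned long s_flags)
{
        return s_flags & SB_EXAMPLE;    /* conversion to bool yields 0 or 1 */
}

int main(void)
{
        unsigned long requested = SB_EXAMPLE;
        unsigned long s_flags = SB_EXAMPLE;

        /* Raw compare: 0x08 == 1 is false although both sides mean "set". */
        printf("raw  compare: %d\n",
               (requested & SB_EXAMPLE) == (unsigned long)flag_set(s_flags));
        /* Normalized compare: true, as intended. */
        printf("bool compare: %d\n",
               (bool)(requested & SB_EXAMPLE) == flag_set(s_flags));
        return 0;
}
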
index 07bca11749d406fd4e3130e31026f1db9ac0d879..c941251ac0c008587b1aaee10fb52e27a50e684a 100644 (file)
@@ -4722,6 +4722,7 @@ retry:
                                                    EXT4_INODE_EOFBLOCKS);
                }
                ext4_mark_inode_dirty(handle, inode);
+               ext4_update_inode_fsync_trans(handle, inode, 1);
                ret2 = ext4_journal_stop(handle);
                if (ret2)
                        break;
index b4267d72f24955c314d78f350c4247e6c7373cc2..b32cf263750d1d3b2024847e78bf1c6181e8a44f 100644 (file)
@@ -816,6 +816,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
                struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT);
 
+               if (IS_ERR(p))
+                       return ERR_CAST(p);
                if (p) {
                        int acl_size = p->a_count * sizeof(ext4_acl_entry);
 
index 0992d76f7ab15b94b12014d312d164666f226870..534a9130f62578931a24477f317c17b42c71ffc3 100644 (file)
@@ -149,6 +149,15 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
  */
 int ext4_inode_is_fast_symlink(struct inode *inode)
 {
+       if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+               int ea_blocks = EXT4_I(inode)->i_file_acl ?
+                               EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
+
+               if (ext4_has_inline_data(inode))
+                       return 0;
+
+               return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
+       }
        return S_ISLNK(inode->i_mode) && inode->i_size &&
               (inode->i_size < EXT4_N_BLOCKS * 4);
 }
@@ -2742,7 +2751,7 @@ static int ext4_writepages(struct address_space *mapping,
         * If the filesystem has aborted, it is read-only, so return
         * right away instead of dumping stack traces later on that
         * will obscure the real source of the problem.  We test
-        * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
+        * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because
         * the latter could be true if the filesystem is mounted
         * read-only, and in that case, ext4_writepages should
         * *never* be called, so if that ever happens, we would want
@@ -5183,7 +5192,7 @@ static int ext4_do_update_inode(handle_t *handle,
 
        ext4_inode_csum_set(inode, raw_inode, ei);
        spin_unlock(&ei->i_raw_lock);
-       if (inode->i_sb->s_flags & MS_LAZYTIME)
+       if (inode->i_sb->s_flags & SB_LAZYTIME)
                ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
                                              bh->b_data);
 
index 798b3ac680db1b4f8c4510a0bd66d0510d11216a..e750d68fbcb50c0447e13556905da8401f5f6b03 100644 (file)
@@ -1399,6 +1399,10 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
                               "falling back\n"));
        }
        nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+       if (!nblocks) {
+               ret = NULL;
+               goto cleanup_and_exit;
+       }
        start = EXT4_I(dir)->i_dir_start_lookup;
        if (start >= nblocks)
                start = 0;
index 0556cd036b69ebc4c06e8497216182b8144198c1..7c46693a14d763d53b84eec9602f4a854918bb4d 100644 (file)
@@ -422,7 +422,7 @@ static void ext4_handle_error(struct super_block *sb)
                 * before ->s_flags update
                 */
                smp_wmb();
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        if (test_opt(sb, ERRORS_PANIC)) {
                if (EXT4_SB(sb)->s_journal &&
@@ -635,7 +635,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
                 * before ->s_flags update
                 */
                smp_wmb();
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                if (EXT4_SB(sb)->s_journal)
                        jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
                save_error_info(sb, function, line);
@@ -1682,10 +1682,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                sb->s_flags |= SB_I_VERSION;
                return 1;
        case Opt_lazytime:
-               sb->s_flags |= MS_LAZYTIME;
+               sb->s_flags |= SB_LAZYTIME;
                return 1;
        case Opt_nolazytime:
-               sb->s_flags &= ~MS_LAZYTIME;
+               sb->s_flags &= ~SB_LAZYTIME;
                return 1;
        }
 
@@ -2116,7 +2116,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
        if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
                ext4_msg(sb, KERN_ERR, "revision level too high, "
                         "forcing read-only mode");
-               res = MS_RDONLY;
+               res = SB_RDONLY;
        }
        if (read_only)
                goto done;
@@ -2429,7 +2429,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
 
        if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
                /* don't clear list on RO mount w/ errors */
-               if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
+               if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
                        ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
                                  "clearing orphan list.\n");
                        es->s_last_orphan = 0;
@@ -2438,19 +2438,19 @@ static void ext4_orphan_cleanup(struct super_block *sb,
                return;
        }
 
-       if (s_flags & MS_RDONLY) {
+       if (s_flags & SB_RDONLY) {
                ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
        }
 #ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
-       sb->s_flags |= MS_ACTIVE;
+       sb->s_flags |= SB_ACTIVE;
 
        /*
         * Turn on quotas which were not enabled for read-only mounts if
         * filesystem has quota feature, so that they are updated correctly.
         */
-       if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
+       if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
                int ret = ext4_enable_quotas(sb);
 
                if (!ret)
@@ -2539,7 +2539,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
                }
        }
 #endif
-       sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+       sb->s_flags = s_flags; /* Restore SB_RDONLY status */
 }
 
 /*
@@ -2741,7 +2741,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
 
        if (ext4_has_feature_readonly(sb)) {
                ext4_msg(sb, KERN_INFO, "filesystem is read-only");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                return 1;
        }
 
@@ -3623,8 +3623,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                sb->s_iflags |= SB_I_CGROUPWB;
        }
 
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
 
        if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
            (ext4_has_compat_features(sb) ||
@@ -4199,7 +4199,7 @@ no_journal:
        }
 
        if (ext4_setup_super(sb, es, sb_rdonly(sb)))
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
 
        /* determine the minimum size of new large inodes, if present */
        if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
@@ -4693,7 +4693,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
         * the clock is set in the future, and this will cause e2fsck
         * to complain and force a full file system check.
         */
-       if (!(sb->s_flags & MS_RDONLY))
+       if (!(sb->s_flags & SB_RDONLY))
                es->s_wtime = cpu_to_le32(get_seconds());
        if (sb->s_bdev->bd_part)
                es->s_kbytes_written =
@@ -5047,8 +5047,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
                ext4_abort(sb, "Abort forced by user");
 
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
 
        es = sbi->s_es;
 
@@ -5057,16 +5057,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
        }
 
-       if (*flags & MS_LAZYTIME)
-               sb->s_flags |= MS_LAZYTIME;
+       if (*flags & SB_LAZYTIME)
+               sb->s_flags |= SB_LAZYTIME;
 
-       if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
+       if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
                if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
                        err = -EROFS;
                        goto restore_opts;
                }
 
-               if (*flags & MS_RDONLY) {
+               if (*flags & SB_RDONLY) {
                        err = sync_filesystem(sb);
                        if (err < 0)
                                goto restore_opts;
@@ -5078,7 +5078,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                         * First of all, the unconditional stuff we have to do
                         * to disable replay of the journal when we next remount
                         */
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
 
                        /*
                         * OK, test if we are remounting a valid rw partition
@@ -5140,7 +5140,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                                ext4_clear_journal_err(sb, es);
                        sbi->s_mount_state = le16_to_cpu(es->s_state);
                        if (!ext4_setup_super(sb, es, 0))
-                               sb->s_flags &= ~MS_RDONLY;
+                               sb->s_flags &= ~SB_RDONLY;
                        if (ext4_has_feature_mmp(sb))
                                if (ext4_multi_mount_protect(sb,
                                                le64_to_cpu(es->s_mmp_block))) {
@@ -5164,7 +5164,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        }
 
        ext4_setup_system_zone(sb);
-       if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
+       if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY))
                ext4_commit_super(sb, 1);
 
 #ifdef CONFIG_QUOTA
@@ -5182,7 +5182,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        }
 #endif
 
-       *flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME);
+       *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
        ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
        kfree(orig_data);
        return 0;
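
The ext4 hunks above belong to the tree-wide rename of the superblock flag constants from MS_* to SB_* (the same conversion repeats for the other filesystems below); the bit operations on sb->s_flags are otherwise unchanged. A minimal sketch of the idiom being converted — the helper name is illustrative, only the SB_* constants, sb_rdonly() and struct super_block come from <linux/fs.h>:

#include <linux/fs.h>
#include <linux/printk.h>

/* Mirror a mount option into the superblock flags, SB_* style. */
static inline void example_sync_posixacl_flag(struct super_block *sb, bool posix_acl)
{
	/* clear the bit, then set it again only if the option asks for it */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		      (posix_acl ? SB_POSIXACL : 0);

	/* the read-only bit is queried through the accessor, not open-coded */
	if (sb_rdonly(sb))
		pr_info("%s: read-only, ACL flag updated anyway\n", sb->s_id);
}
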
index dd2e73e10857a33428c6bfd35eba738d4d232e45..4aa69bc1c70af31fbad8fe0f37ef5a007ada5893 100644 (file)
@@ -617,17 +617,17 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
        if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
                return 0;
 
-       if (s_flags & MS_RDONLY) {
+       if (s_flags & SB_RDONLY) {
                f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
-               sbi->sb->s_flags &= ~MS_RDONLY;
+               sbi->sb->s_flags &= ~SB_RDONLY;
        }
 
 #ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
-       sbi->sb->s_flags |= MS_ACTIVE;
+       sbi->sb->s_flags |= SB_ACTIVE;
 
        /* Turn on quotas so that they are updated correctly */
-       quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
+       quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
 #endif
 
        start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
@@ -658,7 +658,7 @@ out:
        if (quota_enabled)
                f2fs_quota_off_umount(sbi->sb);
 #endif
-       sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+       sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
 
        return err;
 }
index f4e094e816c63df79bd40f62b097147826037dae..6abf26c31d01885bc61abaa5625720a38a25cf6d 100644 (file)
@@ -2378,7 +2378,7 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
 
 static inline int f2fs_readonly(struct super_block *sb)
 {
-       return sb->s_flags & MS_RDONLY;
+       return sb->s_flags & SB_RDONLY;
 }
 
 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
index 5d5bba462f26390512a50c4359ebc99b3b3481dc..d844dcb805703ef721a443cfa23194a298c5de23 100644 (file)
@@ -1005,7 +1005,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
 
        cpc.reason = __get_cp_reason(sbi);
 gc_more:
-       if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
+       if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
                ret = -EINVAL;
                goto stop;
        }
index 92c57ace1939b0a5d086cee4366f3f2168926c36..b3a14b0429f23c65afcd3185f138ca3e49e71760 100644 (file)
@@ -598,16 +598,16 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
        int quota_enabled;
 #endif
 
-       if (s_flags & MS_RDONLY) {
+       if (s_flags & SB_RDONLY) {
                f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
-               sbi->sb->s_flags &= ~MS_RDONLY;
+               sbi->sb->s_flags &= ~SB_RDONLY;
        }
 
 #ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
-       sbi->sb->s_flags |= MS_ACTIVE;
+       sbi->sb->s_flags |= SB_ACTIVE;
        /* Turn on quotas so that they are updated correctly */
-       quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
+       quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
 #endif
 
        fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
@@ -671,7 +671,7 @@ out:
        if (quota_enabled)
                f2fs_quota_off_umount(sbi->sb);
 #endif
-       sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+       sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
 
        return ret ? ret: err;
 }
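
The orphan and fsync recovery hunks above (ext4_orphan_cleanup() and the f2fs recovery paths) keep the existing save/override/restore dance on sb->s_flags and only re-spell it with SB_* names. A condensed, hypothetical sketch of that pattern:

#include <linux/fs.h>

/* Hypothetical wrapper showing the flag handling around a recovery pass. */
static int example_run_recovery(struct super_block *sb,
				int (*do_recovery)(struct super_block *sb))
{
	unsigned long s_flags = sb->s_flags;	/* remember the original flags */
	int err;

	/* temporarily lift the read-only bit so recovery may write */
	if (s_flags & SB_RDONLY)
		sb->s_flags &= ~SB_RDONLY;

	/* needed for iput() to really evict inodes and not trash data */
	sb->s_flags |= SB_ACTIVE;

	err = do_recovery(sb);

	sb->s_flags = s_flags;	/* restore SB_RDONLY status */
	return err;
}

Both filesystems restore the saved flags unconditionally at the end, so a mount that started read-only goes back to read-only even though recovery had to write.
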
index a6c5dd450002daa7d1c43f414d0ffbaa17ff1b04..708155d9c2e42810ed05ae7ecd953ce5ad7069de 100644 (file)
@@ -534,10 +534,10 @@ static int parse_options(struct super_block *sb, char *options)
 #endif
                        break;
                case Opt_lazytime:
-                       sb->s_flags |= MS_LAZYTIME;
+                       sb->s_flags |= SB_LAZYTIME;
                        break;
                case Opt_nolazytime:
-                       sb->s_flags &= ~MS_LAZYTIME;
+                       sb->s_flags &= ~SB_LAZYTIME;
                        break;
 #ifdef CONFIG_QUOTA
                case Opt_quota:
@@ -1168,7 +1168,7 @@ static void default_options(struct f2fs_sb_info *sbi)
        set_opt(sbi, INLINE_DENTRY);
        set_opt(sbi, EXTENT_CACHE);
        set_opt(sbi, NOHEAP);
-       sbi->sb->s_flags |= MS_LAZYTIME;
+       sbi->sb->s_flags |= SB_LAZYTIME;
        set_opt(sbi, FLUSH_MERGE);
        if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
                set_opt_mode(sbi, F2FS_MOUNT_LFS);
@@ -1236,7 +1236,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 #endif
 
        /* recover superblocks we couldn't write due to previous RO mount */
-       if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
+       if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
                err = f2fs_commit_super(sbi, false);
                f2fs_msg(sb, KERN_INFO,
                        "Try to recover all the superblocks, ret: %d", err);
@@ -1255,17 +1255,17 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
         * Previous and new state of filesystem is RO,
         * so skip checking GC and FLUSH_MERGE conditions.
         */
-       if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
+       if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
                goto skip;
 
 #ifdef CONFIG_QUOTA
-       if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
+       if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
                err = dquot_suspend(sb, -1);
                if (err < 0)
                        goto restore_opts;
        } else {
                /* dquot_resume needs RW */
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
                if (sb_any_quota_suspended(sb)) {
                        dquot_resume(sb, -1);
                } else if (f2fs_sb_has_quota_ino(sb)) {
@@ -1288,7 +1288,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
         * or if background_gc = off is passed in mount
         * option. Also sync the filesystem.
         */
-       if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
+       if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
                if (sbi->gc_thread) {
                        stop_gc_thread(sbi);
                        need_restart_gc = true;
@@ -1300,7 +1300,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
                need_stop_gc = true;
        }
 
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                writeback_inodes_sb(sb, WB_REASON_SYNC);
                sync_inodes_sb(sb);
 
@@ -1314,7 +1314,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
         * We stop issue flush thread if FS is mounted as RO
         * or if flush_merge is not passed in mount option.
         */
-       if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
+       if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
                clear_opt(sbi, FLUSH_MERGE);
                destroy_flush_cmd_control(sbi, false);
        } else {
@@ -1329,8 +1329,8 @@ skip:
                kfree(s_qf_names[i]);
 #endif
        /* Update the POSIXACL Flag */
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
 
        return 0;
 restore_gc:
@@ -2472,8 +2472,8 @@ try_onemore:
        sb->s_export_op = &f2fs_export_ops;
        sb->s_magic = F2FS_SUPER_MAGIC;
        sb->s_time_gran = 1;
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
        memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
 
        /* init f2fs-specific super block info */
index 48b2336692f9f70a3d8c230ee3a9169af5e634be..bac10de678cc9645af9eccd2584d0ffc548b9c93 100644 (file)
@@ -392,7 +392,7 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
                        memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
                        set_buffer_uptodate(c_bh);
                        mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
-                       if (sb->s_flags & MS_SYNCHRONOUS)
+                       if (sb->s_flags & SB_SYNCHRONOUS)
                                err = sync_dirty_buffer(c_bh);
                        brelse(c_bh);
                        if (err)
@@ -597,7 +597,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
                }
 
                if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
-                       if (sb->s_flags & MS_SYNCHRONOUS) {
+                       if (sb->s_flags & SB_SYNCHRONOUS) {
                                err = fat_sync_bhs(bhs, nr_bhs);
                                if (err)
                                        goto error;
@@ -612,7 +612,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
                fat_collect_bhs(bhs, &nr_bhs, &fatent);
        } while (cluster != FAT_ENT_EOF);
 
-       if (sb->s_flags & MS_SYNCHRONOUS) {
+       if (sb->s_flags & SB_SYNCHRONOUS) {
                err = fat_sync_bhs(bhs, nr_bhs);
                if (err)
                        goto error;
index 30c52394a7adbc4041c5331d1c10dd9b52ee4bec..20a0a89eaca589de58d70d89c9625ca9a30d0143 100644 (file)
@@ -779,14 +779,14 @@ static void __exit fat_destroy_inodecache(void)
 
 static int fat_remount(struct super_block *sb, int *flags, char *data)
 {
-       int new_rdonly;
+       bool new_rdonly;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
-       *flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME);
+       *flags |= SB_NODIRATIME | (sbi->options.isvfat ? 0 : SB_NOATIME);
 
        sync_filesystem(sb);
 
        /* make sure we update state on remount. */
-       new_rdonly = *flags & MS_RDONLY;
+       new_rdonly = *flags & SB_RDONLY;
        if (new_rdonly != sb_rdonly(sb)) {
                if (new_rdonly)
                        fat_set_state(sb, 0, 0);
@@ -1352,7 +1352,7 @@ out:
        if (opts->unicode_xlate)
                opts->utf8 = 0;
        if (opts->nfs == FAT_NFS_NOSTALE_RO) {
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                sb->s_export_op = &fat_export_ops_nostale;
        }
 
@@ -1608,7 +1608,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
                return -ENOMEM;
        sb->s_fs_info = sbi;
 
-       sb->s_flags |= MS_NODIRATIME;
+       sb->s_flags |= SB_NODIRATIME;
        sb->s_magic = MSDOS_SUPER_MAGIC;
        sb->s_op = &fat_sops;
        sb->s_export_op = &fat_export_ops;
index acc3aa30ee54988bd99e172e3db2b8ae83067c31..f9bdc1e01c98e7969d0e49d21bb468bef2eaaddb 100644 (file)
@@ -33,7 +33,7 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
        if (opts->errors == FAT_ERRORS_PANIC)
                panic("FAT-fs (%s): fs panic from previous error\n", sb->s_id);
        else if (opts->errors == FAT_ERRORS_RO && !sb_rdonly(sb)) {
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                fat_msg(sb, KERN_ERR, "Filesystem has been set read-only");
        }
 }
index 7d6a105d601b52d3d5b356e94c0c7ab87a818263..d24d2758a36327d35c8eb635f835851ee8de3c78 100644 (file)
@@ -646,7 +646,7 @@ static void setup(struct super_block *sb)
 {
        MSDOS_SB(sb)->dir_ops = &msdos_dir_inode_operations;
        sb->s_d_op = &msdos_dentry_operations;
-       sb->s_flags |= MS_NOATIME;
+       sb->s_flags |= SB_NOATIME;
 }
 
 static int msdos_fill_super(struct super_block *sb, void *data, int silent)
index 455ce5b77e9bf9eea279dccdf4a31a2a2154cd74..f989efa051a0d52423ede82a2b049c5f2f04d03d 100644 (file)
@@ -116,7 +116,7 @@ vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
 static int vxfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -220,7 +220,7 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
        int ret = -EINVAL;
        u32 j;
 
-       sbp->s_flags |= MS_RDONLY;
+       sbp->s_flags |= SB_RDONLY;
 
        infp = kzalloc(sizeof(*infp), GFP_KERNEL);
        if (!infp) {
index 08f5debd07d10135b7a472b6e93394b681aa6228..cea4836385b72ee16f1a6d0f8b347f4b4698512d 100644 (file)
@@ -490,7 +490,7 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 
        /* while holding I_WB_SWITCH, no one else can update the association */
        spin_lock(&inode->i_lock);
-       if (!(inode->i_sb->s_flags & MS_ACTIVE) ||
+       if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
            inode->i_state & (I_WB_SWITCH | I_FREEING) ||
            inode_to_wb(inode) == isw->new_wb) {
                spin_unlock(&inode->i_lock);
index 2f504d615d9236663bfcc7c38eca354ce472897d..624f18bbfd2b3430e4a5d67c54cf84af4312a440 100644 (file)
@@ -130,7 +130,7 @@ static void fuse_evict_inode(struct inode *inode)
 {
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
-       if (inode->i_sb->s_flags & MS_ACTIVE) {
+       if (inode->i_sb->s_flags & SB_ACTIVE) {
                struct fuse_conn *fc = get_fuse_conn(inode);
                struct fuse_inode *fi = get_fuse_inode(inode);
                fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
@@ -141,7 +141,7 @@ static void fuse_evict_inode(struct inode *inode)
 static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       if (*flags & MS_MANDLOCK)
+       if (*flags & SB_MANDLOCK)
                return -EINVAL;
 
        return 0;
@@ -1056,10 +1056,10 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
        int is_bdev = sb->s_bdev != NULL;
 
        err = -EINVAL;
-       if (sb->s_flags & MS_MANDLOCK)
+       if (sb->s_flags & SB_MANDLOCK)
                goto err;
 
-       sb->s_flags &= ~(MS_NOSEC | SB_I_VERSION);
+       sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
 
        if (!parse_fuse_opt(data, &d, is_bdev))
                goto err;
@@ -1109,9 +1109,9 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
                goto err_dev_free;
 
        /* Handle umasking inside the fuse code */
-       if (sb->s_flags & MS_POSIXACL)
+       if (sb->s_flags & SB_POSIXACL)
                fc->dont_mask = 1;
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 
        fc->default_permissions = d.default_permissions;
        fc->allow_other = d.allow_other;
index a3711f543405218ddc6f88c45d60505955f11740..ad55eb86a2504bd048ddd6c674b78ec7c5e9df27 100644 (file)
@@ -1065,15 +1065,15 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
        sdp->sd_args = *args;
 
        if (sdp->sd_args.ar_spectator) {
-                sb->s_flags |= MS_RDONLY;
+                sb->s_flags |= SB_RDONLY;
                set_bit(SDF_RORECOVERY, &sdp->sd_flags);
        }
        if (sdp->sd_args.ar_posix_acl)
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
        if (sdp->sd_args.ar_nobarrier)
                set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
 
-       sb->s_flags |= MS_NOSEC;
+       sb->s_flags |= SB_NOSEC;
        sb->s_magic = GFS2_MAGIC;
        sb->s_op = &gfs2_super_ops;
        sb->s_d_op = &gfs2_dops;
@@ -1257,7 +1257,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
        struct gfs2_args args;
        struct gfs2_sbd *sdp;
 
-       if (!(flags & MS_RDONLY))
+       if (!(flags & SB_RDONLY))
                mode |= FMODE_WRITE;
 
        bdev = blkdev_get_by_path(dev_name, mode, fs_type);
@@ -1313,15 +1313,15 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
 
        if (s->s_root) {
                error = -EBUSY;
-               if ((flags ^ s->s_flags) & MS_RDONLY)
+               if ((flags ^ s->s_flags) & SB_RDONLY)
                        goto error_super;
        } else {
                snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
                sb_set_blocksize(s, block_size(bdev));
-               error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
+               error = fill_super(s, &args, flags & SB_SILENT ? 1 : 0);
                if (error)
                        goto error_super;
-               s->s_flags |= MS_ACTIVE;
+               s->s_flags |= SB_ACTIVE;
                bdev->bd_super = s;
        }
 
@@ -1365,7 +1365,7 @@ static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
                pr_warn("gfs2 mount does not exist\n");
                return ERR_CAST(s);
        }
-       if ((flags ^ s->s_flags) & MS_RDONLY) {
+       if ((flags ^ s->s_flags) & SB_RDONLY) {
                deactivate_locked_super(s);
                return ERR_PTR(-EBUSY);
        }
index 9cb5c9a97d69d04cf564ca69e48bfd15f701ea7c..d81d46e19726445801a60feb65a0513cb190b59f 100644 (file)
@@ -1256,10 +1256,10 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
                return -EINVAL;
 
        if (sdp->sd_args.ar_spectator)
-               *flags |= MS_RDONLY;
+               *flags |= SB_RDONLY;
 
-       if ((sb->s_flags ^ *flags) & MS_RDONLY) {
-               if (*flags & MS_RDONLY)
+       if ((sb->s_flags ^ *flags) & SB_RDONLY) {
+               if (*flags & SB_RDONLY)
                        error = gfs2_make_fs_ro(sdp);
                else
                        error = gfs2_make_fs_rw(sdp);
@@ -1269,9 +1269,9 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
 
        sdp->sd_args = args;
        if (sdp->sd_args.ar_posix_acl)
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
        else
-               sb->s_flags &= ~MS_POSIXACL;
+               sb->s_flags &= ~SB_POSIXACL;
        if (sdp->sd_args.ar_nobarrier)
                set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
        else
index a85ca8b2c9ba4aa43439e2f350acbab9dda73b91..ca8b72d0a8315384161403e814983877b823f502 100644 (file)
@@ -117,7 +117,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
                kfree(tr);
        up_read(&sdp->sd_log_flush_lock);
 
-       if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS)
+       if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
                gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
        if (alloced)
                sb_end_intwrite(sdp->sd_vfs);
index 894994d2c88501cfb27fc1f46561fcc21e4e0d1e..460281b1299eb1eff15b5a4644a130e3e0f441ef 100644 (file)
@@ -204,11 +204,11 @@ int hfs_mdb_get(struct super_block *sb)
        attrib = mdb->drAtrb;
        if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
                pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended.  mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) {
                pr_warn("filesystem is marked locked, mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        if (!sb_rdonly(sb)) {
                /* Mark the volume uncleanly unmounted in case we crash */
index 7e0d65e9586c7dae01d929bfe2e5b6ba373d20c5..173876782f73fd33838bec0e5508cbe79fa5c868 100644 (file)
@@ -114,18 +114,18 @@ static int hfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int hfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       *flags |= SB_NODIRATIME;
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                return 0;
-       if (!(*flags & MS_RDONLY)) {
+       if (!(*flags & SB_RDONLY)) {
                if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
                        pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended.  leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                } else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) {
                        pr_warn("filesystem is marked locked, leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                }
        }
        return 0;
@@ -407,7 +407,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_op = &hfs_super_operations;
        sb->s_xattr = hfs_xattr_handlers;
-       sb->s_flags |= MS_NODIRATIME;
+       sb->s_flags |= SB_NODIRATIME;
        mutex_init(&sbi->bitmap_lock);
 
        res = hfs_mdb_get(sb);
index e5bb2de2262ae68c64a061673f93220ae8872383..1d458b7169572c60628ccf6391846d8b2c1adea6 100644 (file)
@@ -329,9 +329,9 @@ static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                return 0;
-       if (!(*flags & MS_RDONLY)) {
+       if (!(*flags & SB_RDONLY)) {
                struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
                int force = 0;
 
@@ -340,20 +340,20 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
 
                if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
                        pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended.  leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                } else if (force) {
                        /* nothing */
                } else if (vhdr->attributes &
                                cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
                        pr_warn("filesystem is marked locked, leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                } else if (vhdr->attributes &
                                cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
                        pr_warn("filesystem is marked journaled, leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                }
        }
        return 0;
@@ -455,16 +455,16 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 
        if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
                pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended.  mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        } else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
                /* nothing */
        } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
                pr_warn("Filesystem is marked locked, mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        } else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
                        !sb_rdonly(sb)) {
                pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 
        err = -EINVAL;
index 8d6b7e35faf9a76d91ce9835598c13495adb2032..c83ece7facc5e466eb4b17fac368c0f63cf1048a 100644 (file)
@@ -150,7 +150,6 @@ static int hpfs_readdir(struct file *file, struct dir_context *ctx)
                        if (unlikely(ret < 0))
                                goto out;
                        ctx->pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1;
-                       file->f_version = inode->i_version;
                }
                next_pos = ctx->pos;
                if (!(de = map_pos_dirent(inode, &next_pos, &qbh))) {
index 3b834563b1f161352806249f0ef62b3e552ecee6..a4ad18afbdec7ffba2eb5cd46c59879433ba6da9 100644 (file)
@@ -419,7 +419,6 @@ int hpfs_add_dirent(struct inode *i,
                c = 1;
                goto ret;
        }       
-       i->i_version++;
        c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0);
        ret:
        return c;
@@ -726,7 +725,6 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de,
                        return 2;
                }
        }
-       i->i_version++;
        for_all_poss(i, hpfs_pos_del, (t = get_pos(dnode, de)) + 1, 1);
        hpfs_delete_de(i->i_sb, dnode, de);
        hpfs_mark_4buffers_dirty(qbh);
index e0e60b1484006f9ca3d396399221983d4171eab9..7c49f1ef0c850320b351397e0c1f2d93f267131a 100644 (file)
@@ -288,7 +288,7 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
                                        goto bail;
                                }
                                if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) {
-                                       if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & MS_RDONLY) goto ok;
+                                       if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & SB_RDONLY) goto ok;
                                        hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
                                        goto bail;
                                }
index 1516fb4e28f409d045e00685b4a2c55732c088f0..f2c3ebcd309c326d6ac08bd168f13b8b460379ec 100644 (file)
@@ -78,7 +78,7 @@ void hpfs_error(struct super_block *s, const char *fmt, ...)
                        else {
                                pr_cont("; remounting read-only\n");
                                mark_dirty(s, 0);
-                               s->s_flags |= MS_RDONLY;
+                               s->s_flags |= SB_RDONLY;
                        }
                } else if (sb_rdonly(s))
                                pr_cont("; going on - but anything won't be destroyed because it's read-only\n");
@@ -235,7 +235,6 @@ static struct inode *hpfs_alloc_inode(struct super_block *sb)
        ei = kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
        if (!ei)
                return NULL;
-       ei->vfs_inode.i_version = 1;
        return &ei->vfs_inode;
 }
 
@@ -457,7 +456,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
 
        sync_filesystem(s);
 
-       *flags |= MS_NOATIME;
+       *flags |= SB_NOATIME;
 
        hpfs_lock(s);
        uid = sbi->sb_uid; gid = sbi->sb_gid;
@@ -488,7 +487,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
        sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk;
        sbi->sb_err = errs; sbi->sb_timeshift = timeshift;
 
-       if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
+       if (!(*flags & SB_RDONLY)) mark_dirty(s, 1);
 
        hpfs_unlock(s);
        return 0;
@@ -614,7 +613,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
                goto bail4;
        }
 
-       s->s_flags |= MS_NOATIME;
+       s->s_flags |= SB_NOATIME;
 
        /* Fill superblock stuff */
        s->s_magic = HPFS_SUPER_MAGIC;
index 1e76730aac0deb99df8e39165d959063da93225d..8a85f3f53446521991550583a0f106dd7af042c7 100644 (file)
@@ -639,11 +639,11 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
                /*
-                * page_put due to reference from alloc_huge_page()
                 * unlock_page because locked by add_to_page_cache()
+                * page_put due to reference from alloc_huge_page()
                 */
-               put_page(page);
                unlock_page(page);
+               put_page(page);
        }
 
        if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
index fd401028a309e2d260ace4aaba996b8838b13fb1..03102d6ef044d484ee9d7d1c9731437a4418d2c9 100644 (file)
@@ -416,7 +416,7 @@ void inode_add_lru(struct inode *inode)
 {
        if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
                                I_FREEING | I_WILL_FREE)) &&
-           !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
+           !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
                inode_lru_list_add(inode);
 }
 
@@ -595,7 +595,7 @@ static void dispose_list(struct list_head *head)
  * @sb:                superblock to operate on
  *
  * Make sure that no inodes with zero refcount are retained.  This is
- * called by superblock shutdown after having MS_ACTIVE flag removed,
+ * called by superblock shutdown after having SB_ACTIVE flag removed,
  * so any inode reaching zero refcount during or after that call will
  * be immediately evicted.
  */
@@ -1492,7 +1492,7 @@ static void iput_final(struct inode *inode)
        else
                drop = generic_drop_inode(inode);
 
-       if (!drop && (sb->s_flags & MS_ACTIVE)) {
+       if (!drop && (sb->s_flags & SB_ACTIVE)) {
                inode_add_lru(inode);
                spin_unlock(&inode->i_lock);
                return;
@@ -1644,7 +1644,7 @@ int generic_update_time(struct inode *inode, struct timespec *time, int flags)
        if (flags & S_MTIME)
                inode->i_mtime = *time;
 
-       if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
+       if (!(inode->i_sb->s_flags & SB_LAZYTIME) || (flags & S_VERSION))
                iflags |= I_DIRTY_SYNC;
        __mark_inode_dirty(inode, iflags);
        return 0;
@@ -1691,7 +1691,7 @@ bool __atime_needs_update(const struct path *path, struct inode *inode,
 
        if (IS_NOATIME(inode))
                return false;
-       if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+       if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
                return false;
 
        if (mnt->mnt_flags & MNT_NOATIME)
index 447a24d77b894ef733412ba201cadcaa9a226f7e..bc258a4402f6afbc921a508db37f95002f2ff7f0 100644 (file)
@@ -114,7 +114,7 @@ static void destroy_inodecache(void)
 static int isofs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       if (!(*flags & MS_RDONLY))
+       if (!(*flags & SB_RDONLY))
                return -EROFS;
        return 0;
 }
index e96c6b05e43e786ae2dc61bd2cea74fc88631887..d8c274d39ddb986c38cc9482c11de8711b9a8964 100644 (file)
@@ -409,10 +409,10 @@ int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
                mutex_unlock(&c->alloc_sem);
        }
 
-       if (!(*flags & MS_RDONLY))
+       if (!(*flags & SB_RDONLY))
                jffs2_start_garbage_collect_thread(c);
 
-       *flags |= MS_NOATIME;
+       *flags |= SB_NOATIME;
        return 0;
 }
 
index 824e61ede465fd6ec4c433785dcdfb2da96dc848..c2fbec19c6167c8ea1d0393e9f3b57d6d851b361 100644 (file)
@@ -59,7 +59,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
 }
 
 
-#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY)
+#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & SB_RDONLY)
 
 #define SECTOR_ADDR(x) ( (((unsigned long)(x) / c->sector_size) * c->sector_size) )
 #ifndef CONFIG_JFFS2_FS_WRITEBUFFER
index 153f1c6eb16932a9b87fc9a3ecd2e06b5955c245..f60dee7faf0373f12bdfcf7f3d5f3883ec86b16e 100644 (file)
@@ -301,10 +301,10 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_op = &jffs2_super_operations;
        sb->s_export_op = &jffs2_export_ops;
-       sb->s_flags = sb->s_flags | MS_NOATIME;
+       sb->s_flags = sb->s_flags | SB_NOATIME;
        sb->s_xattr = jffs2_xattr_handlers;
 #ifdef CONFIG_JFFS2_FS_POSIX_ACL
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 #endif
        ret = jffs2_do_fill_super(sb, data, silent);
        return ret;
index 2f7b3af5b8b7aa9fef38db0c464f5a4d013aff7b..90373aebfdca16057bb14bff3097b1581b9d205d 100644 (file)
@@ -87,7 +87,7 @@ static void jfs_handle_error(struct super_block *sb)
        else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
                jfs_err("ERROR: (device %s): remounting filesystem as read-only",
                        sb->s_id);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 
        /* nothing is done for continue beyond marking the superblock dirty */
@@ -477,7 +477,7 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
                        return rc;
        }
 
-       if (sb_rdonly(sb) && !(*flags & MS_RDONLY)) {
+       if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
                /*
                 * Invalidate any previously read metadata.  fsck may have
                 * changed the on-disk data since we mounted r/o
@@ -488,12 +488,12 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
                ret = jfs_mount_rw(sb, 1);
 
                /* mark the fs r/w for quota activity */
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
 
                dquot_resume(sb, -1);
                return ret;
        }
-       if (!sb_rdonly(sb) && (*flags & MS_RDONLY)) {
+       if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
                rc = dquot_suspend(sb, -1);
                if (rc < 0)
                        return rc;
@@ -545,7 +545,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
        sbi->flag = flag;
 
 #ifdef CONFIG_JFS_POSIX_ACL
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 #endif
 
        if (newLVSize) {
index 95a7c88baed9d32284c073c46e248d670abca1b5..26dd9a50f38382a069c2680260d0a619d0db60e3 100644 (file)
@@ -335,7 +335,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
                        deactivate_locked_super(sb);
                        return ERR_PTR(error);
                }
-               sb->s_flags |= MS_ACTIVE;
+               sb->s_flags |= SB_ACTIVE;
 
                mutex_lock(&kernfs_mutex);
                list_add(&info->node, &root->supers);
index 3aabe553fc4500a864459bb08f520dd820c94420..7ff3cb904acdf8042c7c169dc9816764f0319629 100644 (file)
@@ -246,7 +246,7 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
        struct inode *root;
        struct qstr d_name = QSTR_INIT(name, strlen(name));
 
-       s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER,
+       s = sget_userns(fs_type, NULL, set_anon_super, SB_KERNMOUNT|SB_NOUSER,
                        &init_user_ns, NULL);
        if (IS_ERR(s))
                return ERR_CAST(s);
@@ -277,7 +277,7 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
        d_instantiate(dentry, root);
        s->s_root = dentry;
        s->s_d_op = dops;
-       s->s_flags |= MS_ACTIVE;
+       s->s_flags |= SB_ACTIVE;
        return dget(s->s_root);
 
 Enomem:
@@ -578,7 +578,7 @@ int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *c
        spin_lock(&pin_fs_lock);
        if (unlikely(!*mount)) {
                spin_unlock(&pin_fs_lock);
-               mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, NULL);
+               mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
                if (IS_ERR(mnt))
                        return PTR_ERR(mnt);
                spin_lock(&pin_fs_lock);
index 0d4e590e05498b69a7e93a35b7dcde8d93e56a7d..826a89184f90fc7d104980b15922dfa565146cf1 100644 (file)
@@ -578,8 +578,10 @@ static void nlm_complain_hosts(struct net *net)
 
                if (ln->nrhosts == 0)
                        return;
-               printk(KERN_WARNING "lockd: couldn't shutdown host module for net %p!\n", net);
-               dprintk("lockd: %lu hosts left in net %p:\n", ln->nrhosts, net);
+               pr_warn("lockd: couldn't shutdown host module for net %x!\n",
+                       net->ns.inum);
+               dprintk("lockd: %lu hosts left in net %x:\n", ln->nrhosts,
+                       net->ns.inum);
        } else {
                if (nrhosts == 0)
                        return;
@@ -590,9 +592,9 @@ static void nlm_complain_hosts(struct net *net)
        for_each_host(host, chain, nlm_server_hosts) {
                if (net && host->net != net)
                        continue;
-               dprintk("       %s (cnt %d use %d exp %ld net %p)\n",
+               dprintk("       %s (cnt %d use %d exp %ld net %x)\n",
                        host->h_name, atomic_read(&host->h_count),
-                       host->h_inuse, host->h_expires, host->net);
+                       host->h_inuse, host->h_expires, host->net->ns.inum);
        }
 }
 
@@ -605,7 +607,8 @@ nlm_shutdown_hosts_net(struct net *net)
        mutex_lock(&nlm_host_mutex);
 
        /* First, make all hosts eligible for gc */
-       dprintk("lockd: nuking all hosts in net %p...\n", net);
+       dprintk("lockd: nuking all hosts in net %x...\n",
+               net ? net->ns.inum : 0);
        for_each_host(host, chain, nlm_server_hosts) {
                if (net && host->net != net)
                        continue;
@@ -618,9 +621,8 @@ nlm_shutdown_hosts_net(struct net *net)
 
        /* Then, perform a garbage collection pass */
        nlm_gc_hosts(net);
-       mutex_unlock(&nlm_host_mutex);
-
        nlm_complain_hosts(net);
+       mutex_unlock(&nlm_host_mutex);
 }
 
 /*
@@ -646,7 +648,8 @@ nlm_gc_hosts(struct net *net)
        struct hlist_node *next;
        struct nlm_host *host;
 
-       dprintk("lockd: host garbage collection for net %p\n", net);
+       dprintk("lockd: host garbage collection for net %x\n",
+               net ? net->ns.inum : 0);
        for_each_host(host, chain, nlm_server_hosts) {
                if (net && host->net != net)
                        continue;
@@ -662,9 +665,10 @@ nlm_gc_hosts(struct net *net)
                if (atomic_read(&host->h_count) || host->h_inuse
                 || time_before(jiffies, host->h_expires)) {
                        dprintk("nlm_gc_hosts skipping %s "
-                               "(cnt %d use %d exp %ld net %p)\n",
+                               "(cnt %d use %d exp %ld net %x)\n",
                                host->h_name, atomic_read(&host->h_count),
-                               host->h_inuse, host->h_expires, host->net);
+                               host->h_inuse, host->h_expires,
+                               host->net->ns.inum);
                        continue;
                }
                nlm_destroy_host_locked(host);
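
The lockd hunks above replace raw net pointers in dprintk()/pr_warn() output with the namespace's inode number; with %p now hashed by printk, ns.inum is both the more useful identifier for correlating messages and avoids exposing kernel addresses. A tiny sketch of the convention, with pr_debug() standing in for lockd's dprintk():

#include <linux/printk.h>
#include <net/net_namespace.h>

static void example_log_net(const char *what, struct net *net)
{
	/* ns.inum is the namespace's inode number: compact and stable */
	pr_debug("lockd: %s for net %x\n", what, net ? net->ns.inum : 0);
}
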
index 9fbbd11f9ecbbcc9e29aa989c33a58fc8ad727ff..96cfb2967ac7571d0ab6c7640198fbd7415deb22 100644 (file)
@@ -110,7 +110,8 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
        clnt = nsm_create(host->net, host->nodename);
        if (IS_ERR(clnt)) {
                dprintk("lockd: failed to create NSM upcall transport, "
-                       "status=%ld, net=%p\n", PTR_ERR(clnt), host->net);
+                       "status=%ld, net=%x\n", PTR_ERR(clnt),
+                       host->net->ns.inum);
                return PTR_ERR(clnt);
        }
 
index a8e3777c94dc6c44ae050168bf7906eecf71d085..9c36d614bf89602121427c27d443365689d31aac 100644 (file)
@@ -57,6 +57,9 @@ static struct task_struct     *nlmsvc_task;
 static struct svc_rqst         *nlmsvc_rqst;
 unsigned long                  nlmsvc_timeout;
 
+atomic_t nlm_ntf_refcnt = ATOMIC_INIT(0);
+DECLARE_WAIT_QUEUE_HEAD(nlm_ntf_wq);
+
 unsigned int lockd_net_id;
 
 /*
@@ -259,7 +262,7 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net)
        if (error < 0)
                goto err_bind;
        set_grace_period(net);
-       dprintk("lockd_up_net: per-net data created; net=%p\n", net);
+       dprintk("%s: per-net data created; net=%x\n", __func__, net->ns.inum);
        return 0;
 
 err_bind:
@@ -274,12 +277,15 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
        if (ln->nlmsvc_users) {
                if (--ln->nlmsvc_users == 0) {
                        nlm_shutdown_hosts_net(net);
+                       cancel_delayed_work_sync(&ln->grace_period_end);
+                       locks_end_grace(&ln->lockd_manager);
                        svc_shutdown_net(serv, net);
-                       dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
+                       dprintk("%s: per-net data destroyed; net=%x\n",
+                               __func__, net->ns.inum);
                }
        } else {
-               printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n",
-                               nlmsvc_task, net);
+               pr_err("%s: no users! task=%p, net=%x\n",
+                       __func__, nlmsvc_task, net->ns.inum);
                BUG();
        }
 }
@@ -290,7 +296,8 @@ static int lockd_inetaddr_event(struct notifier_block *this,
        struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
        struct sockaddr_in sin;
 
-       if (event != NETDEV_DOWN)
+       if ((event != NETDEV_DOWN) ||
+           !atomic_inc_not_zero(&nlm_ntf_refcnt))
                goto out;
 
        if (nlmsvc_rqst) {
@@ -301,6 +308,8 @@ static int lockd_inetaddr_event(struct notifier_block *this,
                svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
                        (struct sockaddr *)&sin);
        }
+       atomic_dec(&nlm_ntf_refcnt);
+       wake_up(&nlm_ntf_wq);
 
 out:
        return NOTIFY_DONE;
@@ -317,7 +326,8 @@ static int lockd_inet6addr_event(struct notifier_block *this,
        struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
        struct sockaddr_in6 sin6;
 
-       if (event != NETDEV_DOWN)
+       if ((event != NETDEV_DOWN) ||
+           !atomic_inc_not_zero(&nlm_ntf_refcnt))
                goto out;
 
        if (nlmsvc_rqst) {
@@ -329,6 +339,8 @@ static int lockd_inet6addr_event(struct notifier_block *this,
                svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
                        (struct sockaddr *)&sin6);
        }
+       atomic_dec(&nlm_ntf_refcnt);
+       wake_up(&nlm_ntf_wq);
 
 out:
        return NOTIFY_DONE;
@@ -345,10 +357,12 @@ static void lockd_unregister_notifiers(void)
 #if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
 #endif
+       wait_event(nlm_ntf_wq, atomic_read(&nlm_ntf_refcnt) == 0);
 }
 
 static void lockd_svc_exit_thread(void)
 {
+       atomic_dec(&nlm_ntf_refcnt);
        lockd_unregister_notifiers();
        svc_exit_thread(nlmsvc_rqst);
 }
@@ -373,6 +387,7 @@ static int lockd_start_svc(struct svc_serv *serv)
                goto out_rqst;
        }
 
+       atomic_inc(&nlm_ntf_refcnt);
        svc_sock_update_bufs(serv);
        serv->sv_maxconn = nlm_max_connections;
 
@@ -676,6 +691,17 @@ static int lockd_init_net(struct net *net)
 
 static void lockd_exit_net(struct net *net)
 {
+       struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+       WARN_ONCE(!list_empty(&ln->lockd_manager.list),
+                 "net %x %s: lockd_manager.list is not empty\n",
+                 net->ns.inum, __func__);
+       WARN_ONCE(!list_empty(&ln->nsm_handles),
+                 "net %x %s: nsm_handles list is not empty\n",
+                 net->ns.inum, __func__);
+       WARN_ONCE(delayed_work_pending(&ln->grace_period_end),
+                 "net %x %s: grace_period_end was not cancelled\n",
+                 net->ns.inum, __func__);
 }
 
 static struct pernet_operations lockd_net_ops = {
index a563ddbc19e6935e91fe42a6946ee937efab482c..4ec3d6e03e76dc0909c25fd90b4c9e2ee0dd33a8 100644 (file)
@@ -370,7 +370,7 @@ nlmsvc_mark_resources(struct net *net)
 {
        struct nlm_host hint;
 
-       dprintk("lockd: nlmsvc_mark_resources for net %p\n", net);
+       dprintk("lockd: %s for net %x\n", __func__, net ? net->ns.inum : 0);
        hint.net = net;
        nlm_traverse_files(&hint, nlmsvc_mark_host, NULL);
 }
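
The lockd hunks above also add a reference count plus a wait queue around the inetaddr/inet6addr notifiers so that shutting the service down cannot race with a callback still dereferencing nlmsvc_rqst. A simplified sketch of that scheme with illustrative names (the real code ties the initial reference to service start and stop):

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t example_ntf_refcnt = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(example_ntf_wq);

static void example_service_started(void)
{
	/* the service holds one reference while it is up */
	atomic_inc(&example_ntf_refcnt);
}

static int example_notifier_event(void)
{
	/* refuse to run if the service is already on its way down */
	if (!atomic_inc_not_zero(&example_ntf_refcnt))
		return 0;
	/* ... safe to touch the service's data here ... */
	atomic_dec(&example_ntf_refcnt);
	wake_up(&example_ntf_wq);
	return 0;
}

static void example_service_stopping(void)
{
	atomic_dec(&example_ntf_refcnt);	/* drop the service's own reference */
	/* wait until no callback still holds a reference */
	wait_event(example_ntf_wq, atomic_read(&example_ntf_refcnt) == 0);
}
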
index 1bd71c4d663a8c0356c457905293b5e9e35f2c9a..21b4dfa289eea6e6575c78e966efa691eaeff494 100644 (file)
 
 static inline bool is_remote_lock(struct file *filp)
 {
-       return likely(!(filp->f_path.dentry->d_sb->s_flags & MS_NOREMOTELOCK));
+       return likely(!(filp->f_path.dentry->d_sb->s_flags & SB_NOREMOTELOCK));
 }
 
 static bool lease_breaking(struct file_lock *fl)
index d818fd23678700bf8435af2fa64ed62dfdba4d2c..b8b8b9ced9f81c47a1f76da3ef61c2f9d4645c78 100644 (file)
@@ -269,6 +269,9 @@ static unsigned long mb_cache_count(struct shrinker *shrink,
        struct mb_cache *cache = container_of(shrink, struct mb_cache,
                                              c_shrink);
 
+       /* Unlikely, but not impossible */
+       if (unlikely(cache->c_entry_count < 0))
+               return 0;
        return cache->c_entry_count;
 }
 
index b6829d67964324783f182be42be939753f07a7d0..72e308c3e66b91fa9a915ebcb6c31f7f62c432bc 100644 (file)
@@ -125,9 +125,9 @@ static int minix_remount (struct super_block * sb, int * flags, char * data)
 
        sync_filesystem(sb);
        ms = sbi->s_ms;
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                return 0;
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                if (ms->s_state & MINIX_VALID_FS ||
                    !(sbi->s_mount_state & MINIX_VALID_FS))
                        return 0;
index f0c7a7b9b6ca7562217746369cd8d5d82d43a99f..9cc91fb7f156541bd53243b35c2823bbf9ca1133 100644 (file)
@@ -1129,18 +1129,9 @@ static int follow_automount(struct path *path, struct nameidata *nd,
         * of the daemon to instantiate them before they can be used.
         */
        if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
-                          LOOKUP_OPEN | LOOKUP_CREATE |
-                          LOOKUP_AUTOMOUNT))) {
-               /* Positive dentry that isn't meant to trigger an
-                * automount, EISDIR will allow it to be used,
-                * otherwise there's no mount here "now" so return
-                * ENOENT.
-                */
-               if (path->dentry->d_inode)
-                       return -EISDIR;
-               else
-                       return -ENOENT;
-       }
+                          LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
+           path->dentry->d_inode)
+               return -EISDIR;
 
        if (path->dentry->d_sb->s_user_ns != &init_user_ns)
                return -EACCES;
index e158ec6b527b2d72341e096f76e628b5e61ea4cf..9d1374ab6e06f2cd7b57aedf196c53a745a1a683 100644 (file)
@@ -2826,6 +2826,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
                            SB_DIRSYNC |
                            SB_SILENT |
                            SB_POSIXACL |
+                           SB_LAZYTIME |
                            SB_I_VERSION);
 
        if (flags & MS_REMOUNT)
index 129f1937fa2c11527633c98a8840c2aae011fa0e..41de88cdc053fa4c6f8921cd2ee29f1a911642a4 100644 (file)
@@ -103,7 +103,7 @@ static void destroy_inodecache(void)
 static int ncp_remount(struct super_block *sb, int *flags, char* data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
+       *flags |= SB_NODIRATIME;
        return 0;
 }
 
@@ -547,7 +547,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
        else
                default_bufsize = 1024;
 
-       sb->s_flags |= MS_NODIRATIME;   /* probably even noatime */
+       sb->s_flags |= SB_NODIRATIME;   /* probably even noatime */
        sb->s_maxbytes = 0xFFFFFFFFU;
        sb->s_blocksize = 1024; /* Eh...  Is this correct? */
        sb->s_blocksize_bits = 10;
index 0ac2fb1c6b634626cf1f98f4423841ad103d08c2..b9129e2befeaa4186138bbd84e78d7ca2d128370 100644 (file)
@@ -291,12 +291,23 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
        const struct sockaddr *sap = data->addr;
        struct nfs_net *nn = net_generic(data->net, nfs_net_id);
 
+again:
        list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
                const struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
                /* Don't match clients that failed to initialise properly */
                if (clp->cl_cons_state < 0)
                        continue;
 
+               /* If a client is still initializing then we need to wait */
+               if (clp->cl_cons_state > NFS_CS_READY) {
+                       refcount_inc(&clp->cl_count);
+                       spin_unlock(&nn->nfs_client_lock);
+                       nfs_wait_client_init_complete(clp);
+                       nfs_put_client(clp);
+                       spin_lock(&nn->nfs_client_lock);
+                       goto again;
+               }
+
                /* Different NFS versions cannot share the same nfs_client */
                if (clp->rpc_ops != data->nfs_mod->rpc_ops)
                        continue;
index e51ae52ed14ff5ef6bc28dd4d9e60d1f33a9fb6c..2f3f86726f5b96cdbdf426a0ee7264b6fd3aa5ae 100644 (file)
@@ -1256,7 +1256,7 @@ static int nfs_dentry_delete(const struct dentry *dentry)
                /* Unhash it, so that ->d_iput() would be called */
                return 1;
        }
-       if (!(dentry->d_sb->s_flags & MS_ACTIVE)) {
+       if (!(dentry->d_sb->s_flags & SB_ACTIVE)) {
                /* Unhash it, so that ancestors of killed async unlink
                 * files will be cleaned up during umount */
                return 1;
index 38b93d54c02e2d64111e5ff522795badf27fdcf7..b992d2382ffa373d038f84761ab19f9bfaa6a9e9 100644 (file)
@@ -752,7 +752,7 @@ int nfs_getattr(const struct path *path, struct kstat *stat,
         * Note that we only have to check the vfsmount flags here:
         *  - NFS always sets S_NOATIME by so checking it would give a
         *    bogus result
-        *  - NFS never sets MS_NOATIME or MS_NODIRATIME so there is
+        *  - NFS never sets SB_NOATIME or SB_NODIRATIME so there is
         *    no point in checking those.
         */
        if ((path->mnt->mnt_flags & MNT_NOATIME) ||
index 5ab17fd4700a69bff2894a9d382488c5da717411..8357ff69962f22caae59d1ac357058d89800e963 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/nfs_page.h>
 #include <linux/wait_bit.h>
 
-#define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
+#define NFS_MS_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
 
 extern const struct export_operations nfs_export_ops;
 
index 12bbab0becb420463bb37d4eefe0c2a9ec56796d..65a7e5da508c3e3019dab617b3ba92521b96cd0b 100644 (file)
@@ -404,15 +404,19 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
        if (error < 0)
                goto error;
 
-       if (!nfs4_has_session(clp))
-               nfs_mark_client_ready(clp, NFS_CS_READY);
-
        error = nfs4_discover_server_trunking(clp, &old);
        if (error < 0)
                goto error;
 
-       if (clp != old)
+       if (clp != old) {
                clp->cl_preserve_clid = true;
+               /*
+                * Mark the client as having failed initialization so other
+                * processes walking the nfs_client_list in nfs_match_client()
+                * won't try to use it.
+                */
+               nfs_mark_client_ready(clp, -EPERM);
+       }
        nfs_put_client(clp);
        clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags);
        return old;
@@ -539,6 +543,9 @@ int nfs40_walk_client_list(struct nfs_client *new,
        spin_lock(&nn->nfs_client_lock);
        list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
 
+               if (pos == new)
+                       goto found;
+
                status = nfs4_match_client(pos, new, &prev, nn);
                if (status < 0)
                        goto out_unlock;
@@ -559,6 +566,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
                 * way that a SETCLIENTID_CONFIRM to pos can succeed is
                 * if new and pos point to the same server:
                 */
+found:
                refcount_inc(&pos->cl_count);
                spin_unlock(&nn->nfs_client_lock);
 
@@ -572,6 +580,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
                case 0:
                        nfs4_swap_callback_idents(pos, new);
                        pos->cl_confirm = new->cl_confirm;
+                       nfs_mark_client_ready(pos, NFS_CS_READY);
 
                        prev = NULL;
                        *result = pos;
index 54fd56d715a8c34a20e0dae0054bfee16eb830bf..e4f4a09ed9f49afd5841852ee82647b3dc286e13 100644 (file)
@@ -71,8 +71,8 @@ const nfs4_stateid zero_stateid = {
 };
 const nfs4_stateid invalid_stateid = {
        {
-               .seqid = cpu_to_be32(0xffffffffU),
-               .other = { 0 },
+               /* Funky initialiser keeps older gcc versions happy */
+               .data = { 0xff, 0xff, 0xff, 0xff, 0 },
        },
        .type = NFS4_INVALID_STATEID_TYPE,
 };
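
The invalid_stateid change sidesteps a quirk of older gcc, which cannot handle designated initialisers that reach through nested anonymous structs/unions; initialising the flat .data byte view works everywhere. A standalone sketch of the same trick, using an illustrative layout rather than the real nfs4_stateid:

    /* Illustrative layout only; in the kernel type the union and the
     * inner struct are anonymous, which is what trips old compilers
     * when the named-member form (.seqid = ...) is used. */
    struct demo_stateid {
            union {
                    char data[16];
                    struct {
                            unsigned int seqid;
                            char other[12];
                    } fields;
            } u;
            int type;
    };

    static const struct demo_stateid demo_invalid = {
            .u = { .data = { 0xff, 0xff, 0xff, 0xff, 0 } },
            .type = 1,
    };
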
index 43cadb28db6ef5a2f4e4a1d98eb6d98b1d9207b3..29bacdc56f6a9fcf83225844360088e180d32ad7 100644 (file)
@@ -813,9 +813,9 @@ int nfs_show_stats(struct seq_file *m, struct dentry *root)
         */
        seq_printf(m, "\n\topts:\t");
        seq_puts(m, sb_rdonly(root->d_sb) ? "ro" : "rw");
-       seq_puts(m, root->d_sb->s_flags & MS_SYNCHRONOUS ? ",sync" : "");
-       seq_puts(m, root->d_sb->s_flags & MS_NOATIME ? ",noatime" : "");
-       seq_puts(m, root->d_sb->s_flags & MS_NODIRATIME ? ",nodiratime" : "");
+       seq_puts(m, root->d_sb->s_flags & SB_SYNCHRONOUS ? ",sync" : "");
+       seq_puts(m, root->d_sb->s_flags & SB_NOATIME ? ",noatime" : "");
+       seq_puts(m, root->d_sb->s_flags & SB_NODIRATIME ? ",nodiratime" : "");
        nfs_show_mount_options(m, nfss, 1);
 
        seq_printf(m, "\n\tage:\t%lu", (jiffies - nfss->mount_time) / HZ);
@@ -2296,11 +2296,11 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
        /*
         * noac is a special case. It implies -o sync, but that's not
         * necessarily reflected in the mtab options. do_remount_sb
-        * will clear MS_SYNCHRONOUS if -o sync wasn't specified in the
+        * will clear SB_SYNCHRONOUS if -o sync wasn't specified in the
         * remount options, so we have to explicitly reset it.
         */
        if (data->flags & NFS_MOUNT_NOAC)
-               *flags |= MS_SYNCHRONOUS;
+               *flags |= SB_SYNCHRONOUS;
 
        /* compare new mount options with old ones */
        error = nfs_compare_remount_data(nfss, data);
@@ -2349,7 +2349,7 @@ void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
                /* The VFS shouldn't apply the umask to mode bits. We will do
                 * so ourselves when necessary.
                 */
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
                sb->s_time_gran = 1;
                sb->s_export_op = &nfs_export_ops;
        }
@@ -2379,7 +2379,7 @@ static void nfs_clone_super(struct super_block *sb,
                /* The VFS shouldn't apply the umask to mode bits. We will do
                 * so ourselves when necessary.
                 */
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
        }
 
        nfs_initialise_sb(sb);
@@ -2600,11 +2600,11 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
 
        /* -o noac implies -o sync */
        if (server->flags & NFS_MOUNT_NOAC)
-               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+               sb_mntdata.mntflags |= SB_SYNCHRONOUS;
 
        if (mount_info->cloned != NULL && mount_info->cloned->sb != NULL)
-               if (mount_info->cloned->sb->s_flags & MS_SYNCHRONOUS)
-                       sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+               if (mount_info->cloned->sb->s_flags & SB_SYNCHRONOUS)
+                       sb_mntdata.mntflags |= SB_SYNCHRONOUS;
 
        /* Get a superblock - note that we may end up sharing one that already exists */
        s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata);
@@ -2641,7 +2641,7 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
        if (error)
                goto error_splat_root;
 
-       s->s_flags |= MS_ACTIVE;
+       s->s_flags |= SB_ACTIVE;
 
 out:
        return mntroot;
index 5b5f464f6f2ada7cdfd2ea80d95d0c048ccee3f6..4a379d7918f23e1130468c2f58bfea3623035116 100644 (file)
@@ -1890,6 +1890,8 @@ int nfs_commit_inode(struct inode *inode, int how)
        if (res)
                error = nfs_generic_commit_list(inode, &head, how, &cinfo);
        nfs_commit_end(cinfo.mds);
+       if (res == 0)
+               return res;
        if (error < 0)
                goto out_error;
        if (!may_wait)
index 897b299db55e01e291641b35d25f4c903962fec5..5be08f02a76bcb7f405bce8169f53ebf34c452a2 100644 (file)
@@ -30,7 +30,11 @@ locks_start_grace(struct net *net, struct lock_manager *lm)
        struct list_head *grace_list = net_generic(net, grace_net_id);
 
        spin_lock(&grace_lock);
-       list_add(&lm->list, grace_list);
+       if (list_empty(&lm->list))
+               list_add(&lm->list, grace_list);
+       else
+               WARN(1, "double list_add attempt detected in net %x %s\n",
+                    net->ns.inum, (net == &init_net) ? "(init_net)" : "");
        spin_unlock(&grace_lock);
 }
 EXPORT_SYMBOL_GPL(locks_start_grace);
@@ -104,7 +108,9 @@ grace_exit_net(struct net *net)
 {
        struct list_head *grace_list = net_generic(net, grace_net_id);
 
-       BUG_ON(!list_empty(grace_list));
+       WARN_ONCE(!list_empty(grace_list),
+                 "net %x %s: grace_list is not empty\n",
+                 net->ns.inum, __func__);
 }
 
 static struct pernet_operations grace_net_ops = {
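
The grace-period hunks above replace a hard BUG_ON with a warning and refuse to add a lock manager to the per-net grace list twice. The guarded insert relies on list_empty() meaning "not linked anywhere", which in turn requires the node to be set up with INIT_LIST_HEAD()/LIST_HEAD() and removed with list_del_init(). A minimal sketch with hypothetical names:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/bug.h>

    struct demo_manager {
            struct list_head list;  /* initialised with INIT_LIST_HEAD() */
    };

    static DEFINE_SPINLOCK(demo_lock);
    static LIST_HEAD(demo_list);

    static void demo_start(struct demo_manager *m)
    {
            spin_lock(&demo_lock);
            if (list_empty(&m->list))               /* not linked yet */
                    list_add(&m->list, &demo_list);
            else
                    WARN(1, "double list_add attempt detected\n");
            spin_unlock(&demo_lock);
    }
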
index 697f8ae7792d1304e3cec035fd56185eb2ee02df..f650e475d8f0d84af1bb3013b6b35ef77421cde2 100644 (file)
@@ -60,6 +60,9 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
                                gi->gid[i] = exp->ex_anon_gid;
                        else
                                gi->gid[i] = rqgi->gid[i];
+
+                       /* Each thread allocates its own gi, no race */
+                       groups_sort(gi);
                }
        } else {
                gi = get_group_info(rqgi);
index 46b48dbbdd3255260c2b505b49d87b8d2e874100..8ceb25a10ea0df002cae637d137d0f0e0d55a93b 100644 (file)
@@ -232,7 +232,7 @@ static struct cache_head *expkey_alloc(void)
                return NULL;
 }
 
-static struct cache_detail svc_expkey_cache_template = {
+static const struct cache_detail svc_expkey_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = EXPKEY_HASHMAX,
        .name           = "nfsd.fh",
@@ -748,7 +748,7 @@ static struct cache_head *svc_export_alloc(void)
                return NULL;
 }
 
-static struct cache_detail svc_export_cache_template = {
+static const struct cache_detail svc_export_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = EXPORT_HASHMAX,
        .name           = "nfsd.export",
@@ -1230,7 +1230,7 @@ nfsd_export_init(struct net *net)
        int rv;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       dprintk("nfsd: initializing export module (net: %p).\n", net);
+       dprintk("nfsd: initializing export module (net: %x).\n", net->ns.inum);
 
        nn->svc_export_cache = cache_create_net(&svc_export_cache_template, net);
        if (IS_ERR(nn->svc_export_cache))
@@ -1278,7 +1278,7 @@ nfsd_export_shutdown(struct net *net)
 {
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       dprintk("nfsd: shutting down export module (net: %p).\n", net);
+       dprintk("nfsd: shutting down export module (net: %x).\n", net->ns.inum);
 
        cache_unregister_net(nn->svc_expkey_cache, net);
        cache_unregister_net(nn->svc_export_cache, net);
@@ -1286,5 +1286,5 @@ nfsd_export_shutdown(struct net *net)
        cache_destroy_net(nn->svc_export_cache, net);
        svcauth_unix_purge(net);
 
-       dprintk("nfsd: export shutdown complete (net: %p).\n", net);
+       dprintk("nfsd: export shutdown complete (net: %x).\n", net->ns.inum);
 }
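
Switching these dprintk()s from %p to net->ns.inum prints the namespace's inode number (the same identifier visible via /proc/<pid>/ns/net) instead of an opaque, possibly hashed kernel pointer. A small sketch of the idiom, with a hypothetical helper:

    #include <net/net_namespace.h>
    #include <linux/printk.h>

    static void demo_log_net(struct net *net)
    {
            pr_info("operating on net %x%s\n", net->ns.inum,
                    net == &init_net ? " (init_net)" : "");
    }
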
index 1c91391f48055699bb5c9ffbe64ca31d709c7518..36358d435cb044a3a740756ff8e247e77020bfac 100644 (file)
@@ -119,6 +119,9 @@ struct nfsd_net {
        u32 clverifier_counter;
 
        struct svc_serv *nfsd_serv;
+
+       wait_queue_head_t ntf_wq;
+       atomic_t ntf_refcnt;
 };
 
 /* Simple check to find out if a given net was properly initialized */
index 6b9b6cca469f427fed55ec5d892141e38be23eb4..a5bb76593ce72c280ddbd8e5957beddea49ec413 100644 (file)
@@ -178,7 +178,7 @@ static struct ent *idtoname_lookup(struct cache_detail *, struct ent *);
 static struct ent *idtoname_update(struct cache_detail *, struct ent *,
                                   struct ent *);
 
-static struct cache_detail idtoname_cache_template = {
+static const struct cache_detail idtoname_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = ENT_HASHMAX,
        .name           = "nfs4.idtoname",
@@ -341,7 +341,7 @@ static struct ent *nametoid_update(struct cache_detail *, struct ent *,
                                   struct ent *);
 static int         nametoid_parse(struct cache_detail *, char *, int);
 
-static struct cache_detail nametoid_cache_template = {
+static const struct cache_detail nametoid_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = ENT_HASHMAX,
        .name           = "nfs4.nametoid",
index b82817767b9da4ea6e8e3fc0cde8e6f068756ca7..b29b5a185a2cb444f95fce2685381755d05d9429 100644 (file)
@@ -63,12 +63,16 @@ static const stateid_t zero_stateid = {
 static const stateid_t currentstateid = {
        .si_generation = 1,
 };
+static const stateid_t close_stateid = {
+       .si_generation = 0xffffffffU,
+};
 
 static u64 current_sessionid = 1;
 
 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
 #define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
+#define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
 
 /* forward declarations */
 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
@@ -83,6 +87,11 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
  */
 static DEFINE_SPINLOCK(state_lock);
 
+enum nfsd4_st_mutex_lock_subclass {
+       OPEN_STATEID_MUTEX = 0,
+       LOCK_STATEID_MUTEX = 1,
+};
+
 /*
  * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
  * the refcount on the open stateid to drop.
@@ -3562,7 +3571,9 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
                /* ignore lock owners */
                if (local->st_stateowner->so_is_open_owner == 0)
                        continue;
-               if (local->st_stateowner == &oo->oo_owner) {
+               if (local->st_stateowner != &oo->oo_owner)
+                       continue;
+               if (local->st_stid.sc_type == NFS4_OPEN_STID) {
                        ret = local;
                        refcount_inc(&ret->st_stid.sc_count);
                        break;
@@ -3571,6 +3582,52 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
        return ret;
 }
 
+static __be32
+nfsd4_verify_open_stid(struct nfs4_stid *s)
+{
+       __be32 ret = nfs_ok;
+
+       switch (s->sc_type) {
+       default:
+               break;
+       case NFS4_CLOSED_STID:
+       case NFS4_CLOSED_DELEG_STID:
+               ret = nfserr_bad_stateid;
+               break;
+       case NFS4_REVOKED_DELEG_STID:
+               ret = nfserr_deleg_revoked;
+       }
+       return ret;
+}
+
+/* Lock the stateid st_mutex, and deal with races with CLOSE */
+static __be32
+nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
+{
+       __be32 ret;
+
+       mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
+       ret = nfsd4_verify_open_stid(&stp->st_stid);
+       if (ret != nfs_ok)
+               mutex_unlock(&stp->st_mutex);
+       return ret;
+}
+
+static struct nfs4_ol_stateid *
+nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
+{
+       struct nfs4_ol_stateid *stp;
+       for (;;) {
+               spin_lock(&fp->fi_lock);
+               stp = nfsd4_find_existing_open(fp, open);
+               spin_unlock(&fp->fi_lock);
+               if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
+                       break;
+               nfs4_put_stid(&stp->st_stid);
+       }
+       return stp;
+}
+
 static struct nfs4_openowner *
 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
                           struct nfsd4_compound_state *cstate)
@@ -3613,8 +3670,9 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
        stp = open->op_stp;
        /* We are moving these outside of the spinlocks to avoid the warnings */
        mutex_init(&stp->st_mutex);
-       mutex_lock(&stp->st_mutex);
+       mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
 
+retry:
        spin_lock(&oo->oo_owner.so_client->cl_lock);
        spin_lock(&fp->fi_lock);
 
@@ -3639,7 +3697,11 @@ out_unlock:
        spin_unlock(&fp->fi_lock);
        spin_unlock(&oo->oo_owner.so_client->cl_lock);
        if (retstp) {
-               mutex_lock(&retstp->st_mutex);
+               /* Handle races with CLOSE */
+               if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+                       nfs4_put_stid(&retstp->st_stid);
+                       goto retry;
+               }
                /* To keep mutex tracking happy */
                mutex_unlock(&stp->st_mutex);
                stp = retstp;
@@ -4449,6 +4511,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
        struct nfs4_ol_stateid *stp = NULL;
        struct nfs4_delegation *dp = NULL;
        __be32 status;
+       bool new_stp = false;
 
        /*
         * Lookup file; if found, lookup stateid and check open request,
@@ -4460,9 +4523,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
                status = nfs4_check_deleg(cl, open, &dp);
                if (status)
                        goto out;
-               spin_lock(&fp->fi_lock);
-               stp = nfsd4_find_existing_open(fp, open);
-               spin_unlock(&fp->fi_lock);
+               stp = nfsd4_find_and_lock_existing_open(fp, open);
        } else {
                open->op_file = NULL;
                status = nfserr_bad_stateid;
@@ -4470,35 +4531,31 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
                        goto out;
        }
 
+       if (!stp) {
+               stp = init_open_stateid(fp, open);
+               if (!open->op_stp)
+                       new_stp = true;
+       }
+
        /*
         * OPEN the file, or upgrade an existing OPEN.
         * If truncate fails, the OPEN fails.
+        *
+        * stp is already locked.
         */
-       if (stp) {
+       if (!new_stp) {
                /* Stateid was found, this is an OPEN upgrade */
-               mutex_lock(&stp->st_mutex);
                status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
                if (status) {
                        mutex_unlock(&stp->st_mutex);
                        goto out;
                }
        } else {
-               /* stp is returned locked. */
-               stp = init_open_stateid(fp, open);
-               /* See if we lost the race to some other thread */
-               if (stp->st_access_bmap != 0) {
-                       status = nfs4_upgrade_open(rqstp, fp, current_fh,
-                                               stp, open);
-                       if (status) {
-                               mutex_unlock(&stp->st_mutex);
-                               goto out;
-                       }
-                       goto upgrade_out;
-               }
                status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
                if (status) {
-                       mutex_unlock(&stp->st_mutex);
+                       stp->st_stid.sc_type = NFS4_CLOSED_STID;
                        release_open_stateid(stp);
+                       mutex_unlock(&stp->st_mutex);
                        goto out;
                }
 
@@ -4507,7 +4564,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
                if (stp->st_clnt_odstate == open->op_odstate)
                        open->op_odstate = NULL;
        }
-upgrade_out:
+
        nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
        mutex_unlock(&stp->st_mutex);
 
@@ -4734,7 +4791,7 @@ nfs4_laundromat(struct nfsd_net *nn)
        spin_unlock(&nn->blocked_locks_lock);
 
        while (!list_empty(&reaplist)) {
-               nbl = list_first_entry(&nn->blocked_locks_lru,
+               nbl = list_first_entry(&reaplist,
                                        struct nfsd4_blocked_lock, nbl_lru);
                list_del_init(&nbl->nbl_lru);
                posix_unblock_lock(&nbl->nbl_lock);
@@ -4855,6 +4912,18 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s
        return nfserr_old_stateid;
 }
 
+static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
+{
+       __be32 ret;
+
+       spin_lock(&s->sc_lock);
+       ret = nfsd4_verify_open_stid(s);
+       if (ret == nfs_ok)
+               ret = check_stateid_generation(in, &s->sc_stateid, has_session);
+       spin_unlock(&s->sc_lock);
+       return ret;
+}
+
 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
 {
        if (ols->st_stateowner->so_is_open_owner &&
@@ -4868,7 +4937,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
        struct nfs4_stid *s;
        __be32 status = nfserr_bad_stateid;
 
-       if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+       if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
+               CLOSE_STATEID(stateid))
                return status;
        /* Client debugging aid. */
        if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
@@ -4883,7 +4953,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
        s = find_stateid_locked(cl, stateid);
        if (!s)
                goto out_unlock;
-       status = check_stateid_generation(stateid, &s->sc_stateid, 1);
+       status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
        if (status)
                goto out_unlock;
        switch (s->sc_type) {
@@ -4926,7 +4996,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
        else if (typemask & NFS4_DELEG_STID)
                typemask |= NFS4_REVOKED_DELEG_STID;
 
-       if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+       if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
+               CLOSE_STATEID(stateid))
                return nfserr_bad_stateid;
        status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
        if (status == nfserr_stale_clientid) {
@@ -5044,7 +5115,7 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
                                &s, nn);
        if (status)
                return status;
-       status = check_stateid_generation(stateid, &s->sc_stateid,
+       status = nfsd4_stid_check_stateid_generation(stateid, s,
                        nfsd4_has_session(cstate));
        if (status)
                goto out;
@@ -5098,7 +5169,9 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
        struct nfs4_ol_stateid *stp = openlockstateid(s);
        __be32 ret;
 
-       mutex_lock(&stp->st_mutex);
+       ret = nfsd4_lock_ol_stateid(stp);
+       if (ret)
+               goto out_put_stid;
 
        ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
        if (ret)
@@ -5109,11 +5182,13 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
                            lockowner(stp->st_stateowner)))
                goto out;
 
+       stp->st_stid.sc_type = NFS4_CLOSED_STID;
        release_lock_stateid(stp);
        ret = nfs_ok;
 
 out:
        mutex_unlock(&stp->st_mutex);
+out_put_stid:
        nfs4_put_stid(s);
        return ret;
 }
@@ -5133,6 +5208,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        s = find_stateid_locked(cl, stateid);
        if (!s)
                goto out_unlock;
+       spin_lock(&s->sc_lock);
        switch (s->sc_type) {
        case NFS4_DELEG_STID:
                ret = nfserr_locks_held;
@@ -5144,11 +5220,13 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                ret = nfserr_locks_held;
                break;
        case NFS4_LOCK_STID:
+               spin_unlock(&s->sc_lock);
                refcount_inc(&s->sc_count);
                spin_unlock(&cl->cl_lock);
                ret = nfsd4_free_lock_stateid(stateid, s);
                goto out;
        case NFS4_REVOKED_DELEG_STID:
+               spin_unlock(&s->sc_lock);
                dp = delegstateid(s);
                list_del_init(&dp->dl_recall_lru);
                spin_unlock(&cl->cl_lock);
@@ -5157,6 +5235,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                goto out;
        /* Default falls through and returns nfserr_bad_stateid */
        }
+       spin_unlock(&s->sc_lock);
 out_unlock:
        spin_unlock(&cl->cl_lock);
 out:
@@ -5179,15 +5258,9 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
        status = nfsd4_check_seqid(cstate, sop, seqid);
        if (status)
                return status;
-       if (stp->st_stid.sc_type == NFS4_CLOSED_STID
-               || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
-               /*
-                * "Closed" stateid's exist *only* to return
-                * nfserr_replay_me from the previous step, and
-                * revoked delegations are kept only for free_stateid.
-                */
-               return nfserr_bad_stateid;
-       mutex_lock(&stp->st_mutex);
+       status = nfsd4_lock_ol_stateid(stp);
+       if (status != nfs_ok)
+               return status;
        status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
        if (status == nfs_ok)
                status = nfs4_check_fh(current_fh, &stp->st_stid);
@@ -5367,7 +5440,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
        bool unhashed;
        LIST_HEAD(reaplist);
 
-       s->st_stid.sc_type = NFS4_CLOSED_STID;
        spin_lock(&clp->cl_lock);
        unhashed = unhash_open_stateid(s, &reaplist);
 
@@ -5407,10 +5479,17 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        nfsd4_bump_seqid(cstate, status);
        if (status)
                goto out; 
+
+       stp->st_stid.sc_type = NFS4_CLOSED_STID;
        nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
-       mutex_unlock(&stp->st_mutex);
 
        nfsd4_close_open_stateid(stp);
+       mutex_unlock(&stp->st_mutex);
+
+       /* See RFC5661 section 18.2.4 */
+       if (stp->st_stid.sc_client->cl_minorversion)
+               memcpy(&close->cl_stateid, &close_stateid,
+                               sizeof(close->cl_stateid));
 
        /* put reference from nfs4_preprocess_seqid_op */
        nfs4_put_stid(&stp->st_stid);
@@ -5436,7 +5515,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        if (status)
                goto out;
        dp = delegstateid(s);
-       status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
+       status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
        if (status)
                goto put_stateid;
 
@@ -5642,14 +5721,41 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
        return ret;
 }
 
-static void
+static struct nfs4_ol_stateid *
+find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
+{
+       struct nfs4_ol_stateid *lst;
+       struct nfs4_client *clp = lo->lo_owner.so_client;
+
+       lockdep_assert_held(&clp->cl_lock);
+
+       list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
+               if (lst->st_stid.sc_type != NFS4_LOCK_STID)
+                       continue;
+               if (lst->st_stid.sc_file == fp) {
+                       refcount_inc(&lst->st_stid.sc_count);
+                       return lst;
+               }
+       }
+       return NULL;
+}
+
+static struct nfs4_ol_stateid *
 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
                  struct nfs4_file *fp, struct inode *inode,
                  struct nfs4_ol_stateid *open_stp)
 {
        struct nfs4_client *clp = lo->lo_owner.so_client;
+       struct nfs4_ol_stateid *retstp;
 
-       lockdep_assert_held(&clp->cl_lock);
+       mutex_init(&stp->st_mutex);
+       mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
+retry:
+       spin_lock(&clp->cl_lock);
+       spin_lock(&fp->fi_lock);
+       retstp = find_lock_stateid(lo, fp);
+       if (retstp)
+               goto out_unlock;
 
        refcount_inc(&stp->st_stid.sc_count);
        stp->st_stid.sc_type = NFS4_LOCK_STID;
@@ -5659,29 +5765,22 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = open_stp->st_deny_bmap;
        stp->st_openstp = open_stp;
-       mutex_init(&stp->st_mutex);
        list_add(&stp->st_locks, &open_stp->st_locks);
        list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
-       spin_lock(&fp->fi_lock);
        list_add(&stp->st_perfile, &fp->fi_stateids);
+out_unlock:
        spin_unlock(&fp->fi_lock);
-}
-
-static struct nfs4_ol_stateid *
-find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
-{
-       struct nfs4_ol_stateid *lst;
-       struct nfs4_client *clp = lo->lo_owner.so_client;
-
-       lockdep_assert_held(&clp->cl_lock);
-
-       list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
-               if (lst->st_stid.sc_file == fp) {
-                       refcount_inc(&lst->st_stid.sc_count);
-                       return lst;
+       spin_unlock(&clp->cl_lock);
+       if (retstp) {
+               if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+                       nfs4_put_stid(&retstp->st_stid);
+                       goto retry;
                }
+               /* To keep mutex tracking happy */
+               mutex_unlock(&stp->st_mutex);
+               stp = retstp;
        }
-       return NULL;
+       return stp;
 }
 
 static struct nfs4_ol_stateid *
@@ -5694,26 +5793,25 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
        struct nfs4_openowner *oo = openowner(ost->st_stateowner);
        struct nfs4_client *clp = oo->oo_owner.so_client;
 
+       *new = false;
        spin_lock(&clp->cl_lock);
        lst = find_lock_stateid(lo, fi);
-       if (lst == NULL) {
-               spin_unlock(&clp->cl_lock);
-               ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
-               if (ns == NULL)
-                       return NULL;
-
-               spin_lock(&clp->cl_lock);
-               lst = find_lock_stateid(lo, fi);
-               if (likely(!lst)) {
-                       lst = openlockstateid(ns);
-                       init_lock_stateid(lst, lo, fi, inode, ost);
-                       ns = NULL;
-                       *new = true;
-               }
-       }
        spin_unlock(&clp->cl_lock);
-       if (ns)
+       if (lst != NULL) {
+               if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
+                       goto out;
+               nfs4_put_stid(&lst->st_stid);
+       }
+       ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
+       if (ns == NULL)
+               return NULL;
+
+       lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
+       if (lst == openlockstateid(ns))
+               *new = true;
+       else
                nfs4_put_stid(ns);
+out:
        return lst;
 }
 
@@ -5750,7 +5848,6 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
        struct nfs4_lockowner *lo;
        struct nfs4_ol_stateid *lst;
        unsigned int strhashval;
-       bool hashed;
 
        lo = find_lockowner_str(cl, &lock->lk_new_owner);
        if (!lo) {
@@ -5766,25 +5863,12 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
                        goto out;
        }
 
-retry:
        lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
        if (lst == NULL) {
                status = nfserr_jukebox;
                goto out;
        }
 
-       mutex_lock(&lst->st_mutex);
-
-       /* See if it's still hashed to avoid race with FREE_STATEID */
-       spin_lock(&cl->cl_lock);
-       hashed = !list_empty(&lst->st_perfile);
-       spin_unlock(&cl->cl_lock);
-
-       if (!hashed) {
-               mutex_unlock(&lst->st_mutex);
-               nfs4_put_stid(&lst->st_stid);
-               goto retry;
-       }
        status = nfs_ok;
        *plst = lst;
 out:
@@ -5990,14 +6074,16 @@ out:
                    seqid_mutating_err(ntohl(status)))
                        lock_sop->lo_owner.so_seqid++;
 
-               mutex_unlock(&lock_stp->st_mutex);
-
                /*
                 * If this is a new, never-before-used stateid, and we are
                 * returning an error, then just go ahead and release it.
                 */
-               if (status && new)
+               if (status && new) {
+                       lock_stp->st_stid.sc_type = NFS4_CLOSED_STID;
                        release_lock_stateid(lock_stp);
+               }
+
+               mutex_unlock(&lock_stp->st_mutex);
 
                nfs4_put_stid(&lock_stp->st_stid);
        }
@@ -7017,6 +7103,10 @@ static int nfs4_state_create_net(struct net *net)
                INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
        nn->conf_name_tree = RB_ROOT;
        nn->unconf_name_tree = RB_ROOT;
+       nn->boot_time = get_seconds();
+       nn->grace_ended = false;
+       nn->nfsd4_manager.block_opens = true;
+       INIT_LIST_HEAD(&nn->nfsd4_manager.list);
        INIT_LIST_HEAD(&nn->client_lru);
        INIT_LIST_HEAD(&nn->close_lru);
        INIT_LIST_HEAD(&nn->del_recall_lru);
@@ -7074,9 +7164,6 @@ nfs4_state_start_net(struct net *net)
        ret = nfs4_state_create_net(net);
        if (ret)
                return ret;
-       nn->boot_time = get_seconds();
-       nn->grace_ended = false;
-       nn->nfsd4_manager.block_opens = true;
        locks_start_grace(net, &nn->nfsd4_manager);
        nfsd4_client_tracking_init(net);
        printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
@@ -7153,7 +7240,7 @@ nfs4_state_shutdown_net(struct net *net)
        spin_unlock(&nn->blocked_locks_lock);
 
        while (!list_empty(&reaplist)) {
-               nbl = list_first_entry(&nn->blocked_locks_lru,
+               nbl = list_first_entry(&reaplist,
                                        struct nfsd4_blocked_lock, nbl_lru);
                list_del_init(&nbl->nbl_lru);
                posix_unblock_lock(&nbl->nbl_lock);
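
Most of the nfs4state.c hunks above share one shape: take the stateid's st_mutex with an explicit lockdep subclass (OPEN_STATEID_MUTEX vs. LOCK_STATEID_MUTEX, so holding both kinds at once is not flagged as recursive locking), then re-check that the stateid was not closed while unlocked, dropping it and retrying the lookup if it was. A distilled sketch of that lock-then-verify step, with hypothetical types:

    #include <linux/mutex.h>
    #include <linux/errno.h>

    enum { DEMO_OPEN_MUTEX = 0, DEMO_LOCK_MUTEX = 1 };  /* lockdep subclasses */

    struct demo_stid {
            struct mutex    mtx;
            bool            closed;
    };

    /* Returns 0 with mtx held, or -EAGAIN if the object was closed
     * between lookup and locking; callers loop and look it up again. */
    static int demo_lock_and_verify(struct demo_stid *st, unsigned int subclass)
    {
            mutex_lock_nested(&st->mtx, subclass);
            if (st->closed) {
                    mutex_unlock(&st->mtx);
                    return -EAGAIN;
            }
            return 0;
    }
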
index 6493df6b1bd5f192646d1f63b2230af840833651..d107b4426f7eb15443188e8538c4b6e8e99d4fa8 100644 (file)
@@ -1241,6 +1241,9 @@ static __net_init int nfsd_init_net(struct net *net)
        nn->nfsd4_grace = 90;
        nn->clverifier_counter = prandom_u32();
        nn->clientid_counter = prandom_u32();
+
+       atomic_set(&nn->ntf_refcnt, 0);
+       init_waitqueue_head(&nn->ntf_wq);
        return 0;
 
 out_idmap_error:
index 33117d4ffce0753e987ff8fc67b03c719b180676..89cb484f1cfbeccde41d872298c7e08aaadd5b87 100644 (file)
@@ -335,7 +335,8 @@ static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        struct sockaddr_in sin;
 
-       if (event != NETDEV_DOWN)
+       if ((event != NETDEV_DOWN) ||
+           !atomic_inc_not_zero(&nn->ntf_refcnt))
                goto out;
 
        if (nn->nfsd_serv) {
@@ -344,6 +345,8 @@ static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
                sin.sin_addr.s_addr = ifa->ifa_local;
                svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin);
        }
+       atomic_dec(&nn->ntf_refcnt);
+       wake_up(&nn->ntf_wq);
 
 out:
        return NOTIFY_DONE;
@@ -363,7 +366,8 @@ static int nfsd_inet6addr_event(struct notifier_block *this,
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        struct sockaddr_in6 sin6;
 
-       if (event != NETDEV_DOWN)
+       if ((event != NETDEV_DOWN) ||
+           !atomic_inc_not_zero(&nn->ntf_refcnt))
                goto out;
 
        if (nn->nfsd_serv) {
@@ -374,7 +378,8 @@ static int nfsd_inet6addr_event(struct notifier_block *this,
                        sin6.sin6_scope_id = ifa->idev->dev->ifindex;
                svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6);
        }
-
+       atomic_dec(&nn->ntf_refcnt);
+       wake_up(&nn->ntf_wq);
 out:
        return NOTIFY_DONE;
 }
@@ -391,6 +396,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
 {
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
+       atomic_dec(&nn->ntf_refcnt);
        /* check if the notifier still has clients */
        if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
                unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
@@ -398,6 +404,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
                unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
 #endif
        }
+       wait_event(nn->ntf_wq, atomic_read(&nn->ntf_refcnt) == 0);
 
        /*
         * write_ports can create the server without actually starting
@@ -517,6 +524,7 @@ int nfsd_create_serv(struct net *net)
                register_inet6addr_notifier(&nfsd_inet6addr_notifier);
 #endif
        }
+       atomic_inc(&nn->ntf_refcnt);
        ktime_get_real_ts64(&nn->nfssvc_boot); /* record boot time */
        return 0;
 }
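
The new ntf_refcnt/ntf_wq pair keeps an in-flight address notifier from racing with nfsd shutdown for the same net: the notifier only proceeds if it can take a reference that shutdown has not already dropped to zero, and shutdown waits until all such references drain. A compact sketch of that handshake with generic names (the real code also folds in the global notifier registration):

    #include <linux/atomic.h>
    #include <linux/wait.h>

    static atomic_t demo_users = ATOMIC_INIT(1);    /* 1 == "service is up" */
    static DECLARE_WAIT_QUEUE_HEAD(demo_users_wq);

    static void demo_notifier_body(void)
    {
            if (!atomic_inc_not_zero(&demo_users))  /* shutdown already started */
                    return;
            /* ... safely touch the per-net server state here ... */
            atomic_dec(&demo_users);
            wake_up(&demo_users_wq);
    }

    static void demo_shutdown(void)
    {
            atomic_dec(&demo_users);                /* drop the "up" reference */
            wait_event(demo_users_wq, atomic_read(&demo_users) == 0);
            /* no notifier can still be inside demo_notifier_body() */
    }
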
index f572538dcc4f533542b79560f738d42d3e2cf478..9f3ffba41533ebf610b0a8649f31892ebab95be3 100644 (file)
@@ -1979,7 +1979,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
                                             struct the_nilfs *nilfs)
 {
        struct nilfs_inode_info *ii, *n;
-       int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
+       int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
        int defer_iput = false;
 
        spin_lock(&nilfs->ns_inode_lock);
index 3ce20cd44a20960b6487570478570ee453d59b4e..3073b646e1bacf7c33aa8fd458ca8dfd7cb30884 100644 (file)
@@ -141,7 +141,7 @@ void __nilfs_error(struct super_block *sb, const char *function,
 
                if (nilfs_test_opt(nilfs, ERRORS_RO)) {
                        printk(KERN_CRIT "Remounting filesystem read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
        }
 
@@ -869,7 +869,7 @@ int nilfs_store_magic_and_option(struct super_block *sb,
 
        /* FS independent flags */
 #ifdef NILFS_ATIME_DISABLE
-       sb->s_flags |= MS_NOATIME;
+       sb->s_flags |= SB_NOATIME;
 #endif
 
        nilfs_set_default_options(sb, sbp);
@@ -1133,7 +1133,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
                err = -EINVAL;
                goto restore_opts;
        }
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL);
 
        err = -EINVAL;
 
@@ -1143,12 +1143,12 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
                goto restore_opts;
        }
 
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                goto out;
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                /* Shutting down log writer */
                nilfs_detach_log_writer(sb);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
 
                /*
                 * Remounting a valid RW partition RDONLY, so set
@@ -1178,7 +1178,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
                        goto restore_opts;
                }
 
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
 
                root = NILFS_I(d_inode(sb->s_root))->i_root;
                err = nilfs_attach_log_writer(sb, root);
@@ -1212,7 +1212,7 @@ static int nilfs_parse_snapshot_option(const char *option,
        const char *msg = NULL;
        int err;
 
-       if (!(sd->flags & MS_RDONLY)) {
+       if (!(sd->flags & SB_RDONLY)) {
                msg = "read-only option is not specified";
                goto parse_error;
        }
@@ -1286,7 +1286,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
        struct dentry *root_dentry;
        int err, s_new = false;
 
-       if (!(flags & MS_RDONLY))
+       if (!(flags & SB_RDONLY))
                mode |= FMODE_WRITE;
 
        sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type);
@@ -1327,14 +1327,14 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
                snprintf(s->s_id, sizeof(s->s_id), "%pg", sd.bdev);
                sb_set_blocksize(s, block_size(sd.bdev));
 
-               err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
+               err = nilfs_fill_super(s, data, flags & SB_SILENT ? 1 : 0);
                if (err)
                        goto failed_super;
 
-               s->s_flags |= MS_ACTIVE;
+               s->s_flags |= SB_ACTIVE;
        } else if (!sd.cno) {
                if (nilfs_tree_is_busy(s->s_root)) {
-                       if ((flags ^ s->s_flags) & MS_RDONLY) {
+                       if ((flags ^ s->s_flags) & SB_RDONLY) {
                                nilfs_msg(s, KERN_ERR,
                                          "the device already has a %s mount.",
                                          sb_rdonly(s) ? "read-only" : "read/write");
index afebb5067cec8344a3aac02070b369be0a152b51..1a85317e83f0f751332118daf559fc9ba79fb560 100644 (file)
@@ -220,7 +220,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
 
        if (!valid_fs) {
                nilfs_msg(sb, KERN_WARNING, "mounting unchecked fs");
-               if (s_flags & MS_RDONLY) {
+               if (s_flags & SB_RDONLY) {
                        nilfs_msg(sb, KERN_INFO,
                                  "recovery required for readonly filesystem");
                        nilfs_msg(sb, KERN_INFO,
@@ -286,7 +286,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
        if (valid_fs)
                goto skip_recovery;
 
-       if (s_flags & MS_RDONLY) {
+       if (s_flags & SB_RDONLY) {
                __u64 features;
 
                if (nilfs_test_opt(nilfs, NORECOVERY)) {
@@ -309,7 +309,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
                        err = -EROFS;
                        goto failed_unload;
                }
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
        } else if (nilfs_test_opt(nilfs, NORECOVERY)) {
                nilfs_msg(sb, KERN_ERR,
                          "recovery cancelled because norecovery option was specified for a read/write mount");
index 81d8959b6aef9c6595509b35efaede5ef1ebf3b5..219b269c737e6e0d85f05fb142e712c918d34dfc 100644 (file)
@@ -67,7 +67,7 @@ void fsnotify_unmount_inodes(struct super_block *sb)
 
                /*
                 * If i_count is zero, the inode cannot have any watches and
-                * doing an __iget/iput with MS_ACTIVE clear would actually
+                * doing an __iget/iput with SB_ACTIVE clear would actually
                 * evict all inodes with zero i_count from icache which is
                 * unnecessarily violent and may in fact be illegal to do.
                 */
index ef243e14b6ebd2d8c57113ced959fb2f2a772a6f..7c6f76d29f5649bfcfa446b51c88ceaffe9e356a 100644 (file)
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -255,5 +255,5 @@ void __init nsfs_init(void)
        nsfs_mnt = kern_mount(&nsfs);
        if (IS_ERR(nsfs_mnt))
                panic("can't set nsfs up\n");
-       nsfs_mnt->mnt_sb->s_flags &= ~MS_NOUSER;
+       nsfs_mnt->mnt_sb->s_flags &= ~SB_NOUSER;
 }
index 3f70f041dbe9d7e0f79b5970da1d7b5d14ca584e..bb7159f697f2f3f45713ef26a327b1339f1a30bc 100644 (file)
@@ -473,7 +473,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
 
 #ifndef NTFS_RW
        /* For read-only compiled driver, enforce read-only flag. */
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
 #else /* NTFS_RW */
        /*
         * For the read-write compiled driver, if we are remounting read-write,
@@ -487,7 +487,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
         * When remounting read-only, mark the volume clean if no volume errors
         * have occurred.
         */
-       if (sb_rdonly(sb) && !(*flags & MS_RDONLY)) {
+       if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
                static const char *es = ".  Cannot remount read-write.";
 
                /* Remounting read-write. */
@@ -548,7 +548,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
                        NVolSetErrors(vol);
                        return -EROFS;
                }
-       } else if (!sb_rdonly(sb) && (*flags & MS_RDONLY)) {
+       } else if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
                /* Remounting read-only. */
                if (!NVolErrors(vol)) {
                        if (ntfs_clear_volume_flags(vol, VOLUME_IS_DIRTY))
@@ -1799,7 +1799,7 @@ static bool load_system_files(ntfs_volume *vol)
                                                es3);
                                goto iput_mirr_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s",
                                        !vol->mftmirr_ino ? es1 : es2, es3);
                } else
@@ -1937,7 +1937,7 @@ get_ctx_vol_failed:
                                                es1, es2);
                                goto iput_vol_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -1974,7 +1974,7 @@ get_ctx_vol_failed:
                                }
                                goto iput_logfile_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -2019,7 +2019,7 @@ get_ctx_vol_failed:
                                                es1, es2);
                                goto iput_root_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -2042,7 +2042,7 @@ get_ctx_vol_failed:
                        goto iput_root_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                /*
                 * Do not set NVolErrors() because ntfs_remount() might manage
                 * to set the dirty flag in which case all would be well.
@@ -2055,7 +2055,7 @@ get_ctx_vol_failed:
         * If (still) a read-write mount, set the NT4 compatibility flag on
         * newer NTFS version volumes.
         */
-       if (!(sb->s_flags & MS_RDONLY) && (vol->major_ver > 1) &&
+       if (!(sb->s_flags & SB_RDONLY) && (vol->major_ver > 1) &&
                        ntfs_set_volume_flags(vol, VOLUME_MOUNTED_ON_NT4)) {
                static const char *es1 = "Failed to set NT4 compatibility flag";
                static const char *es2 = ".  Run chkdsk.";
@@ -2069,7 +2069,7 @@ get_ctx_vol_failed:
                        goto iput_root_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                NVolSetErrors(vol);
        }
 #endif
@@ -2087,7 +2087,7 @@ get_ctx_vol_failed:
                        goto iput_root_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                NVolSetErrors(vol);
        }
 #endif /* NTFS_RW */
@@ -2128,7 +2128,7 @@ get_ctx_vol_failed:
                                                es1, es2);
                                goto iput_quota_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -2150,7 +2150,7 @@ get_ctx_vol_failed:
                        goto iput_quota_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                NVolSetErrors(vol);
        }
        /*
@@ -2171,7 +2171,7 @@ get_ctx_vol_failed:
                                                es1, es2);
                                goto iput_usnjrnl_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -2194,7 +2194,7 @@ get_ctx_vol_failed:
                        goto iput_usnjrnl_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                NVolSetErrors(vol);
        }
 #endif /* NTFS_RW */
@@ -2728,7 +2728,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
        lockdep_off();
        ntfs_debug("Entering.");
 #ifndef NTFS_RW
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
 #endif /* ! NTFS_RW */
        /* Allocate a new ntfs_volume and place it in sb->s_fs_info. */
        sb->s_fs_info = kmalloc(sizeof(ntfs_volume), GFP_NOFS);
index 8d779227370ab1d121fdc2fc6f546f7844e95a5b..bebe59feca5873a766cd28becd269cc576cdfc6d 100644 (file)
@@ -140,7 +140,7 @@ static void o2net_rx_until_empty(struct work_struct *work);
 static void o2net_shutdown_sc(struct work_struct *work);
 static void o2net_listen_data_ready(struct sock *sk);
 static void o2net_sc_send_keep_req(struct work_struct *work);
-static void o2net_idle_timer(unsigned long data);
+static void o2net_idle_timer(struct timer_list *t);
 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
 static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc);
 
@@ -450,8 +450,7 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
        INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
        INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
 
-       setup_timer(&sc->sc_idle_timeout, o2net_idle_timer,
-                   (unsigned long)sc);
+       timer_setup(&sc->sc_idle_timeout, o2net_idle_timer, 0);
 
        sclog(sc, "alloced\n");
 
@@ -1517,9 +1516,9 @@ static void o2net_sc_send_keep_req(struct work_struct *work)
 /* socket shutdown does a del_timer_sync against this as it tears down.
  * we can't start this timer until we've got to the point in sc buildup
  * where shutdown is going to be involved */
-static void o2net_idle_timer(unsigned long data)
+static void o2net_idle_timer(struct timer_list *t)
 {
-       struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
+       struct o2net_sock_container *sc = from_timer(sc, t, sc_idle_timeout);
        struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
 #ifdef CONFIG_DEBUG_FS
        unsigned long msecs = ktime_to_ms(ktime_get()) -
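
The o2net hunk converts the idle timer from the old setup_timer()/unsigned-long-data style to timer_setup(), where the callback receives the struct timer_list pointer and recovers its container with from_timer(). A self-contained sketch of the new style (names are illustrative):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct demo_conn {
            struct timer_list idle_timer;
            /* ... */
    };

    static void demo_idle_timer(struct timer_list *t)
    {
            /* Recover the containing object from the timer_list pointer. */
            struct demo_conn *conn = from_timer(conn, t, idle_timer);

            /* ... handle the idle timeout for conn ... */
            (void)conn;
    }

    static void demo_conn_init(struct demo_conn *conn)
    {
            timer_setup(&conn->idle_timer, demo_idle_timer, 0);
            mod_timer(&conn->idle_timer, jiffies + 30 * HZ);
    }
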
index dc455d45a66aed68bd148cf438081ef3f4b4af02..a1d05105547267df1a9f136cf34f2e090fac99b7 100644 (file)
@@ -227,7 +227,7 @@ int ocfs2_should_update_atime(struct inode *inode,
                return 0;
 
        if ((inode->i_flags & S_NOATIME) ||
-           ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
+           ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;
 
        /*
index 040bbb6a6e4b80fd4fa313f7a0b014505343f95d..80efa5699fb0c3db95c3f6490feed61ada1f3f90 100644 (file)
@@ -675,9 +675,9 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
        }
 
        /* We're going to/from readonly mode. */
-       if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
+       if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
                /* Disable quota accounting before remounting RO */
-               if (*flags & MS_RDONLY) {
+               if (*flags & SB_RDONLY) {
                        ret = ocfs2_susp_quotas(osb, 0);
                        if (ret < 0)
                                goto out;
@@ -691,8 +691,8 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
                        goto unlock_osb;
                }
 
-               if (*flags & MS_RDONLY) {
-                       sb->s_flags |= MS_RDONLY;
+               if (*flags & SB_RDONLY) {
+                       sb->s_flags |= SB_RDONLY;
                        osb->osb_flags |= OCFS2_OSB_SOFT_RO;
                } else {
                        if (osb->osb_flags & OCFS2_OSB_ERROR_FS) {
@@ -709,14 +709,14 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
                                ret = -EINVAL;
                                goto unlock_osb;
                        }
-                       sb->s_flags &= ~MS_RDONLY;
+                       sb->s_flags &= ~SB_RDONLY;
                        osb->osb_flags &= ~OCFS2_OSB_SOFT_RO;
                }
                trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags);
 unlock_osb:
                spin_unlock(&osb->osb_lock);
                /* Enable quota accounting after remounting RW */
-               if (!ret && !(*flags & MS_RDONLY)) {
+               if (!ret && !(*flags & SB_RDONLY)) {
                        if (sb_any_quota_suspended(sb))
                                ret = ocfs2_susp_quotas(osb, 1);
                        else
@@ -724,7 +724,7 @@ unlock_osb:
                        if (ret < 0) {
                                /* Return back changes... */
                                spin_lock(&osb->osb_lock);
-                               sb->s_flags |= MS_RDONLY;
+                               sb->s_flags |= SB_RDONLY;
                                osb->osb_flags |= OCFS2_OSB_SOFT_RO;
                                spin_unlock(&osb->osb_lock);
                                goto out;
@@ -744,9 +744,9 @@ unlock_osb:
                if (!ocfs2_is_hard_readonly(osb))
                        ocfs2_set_journal_params(osb);
 
-               sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+               sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
                        ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ?
-                                                       MS_POSIXACL : 0);
+                                                       SB_POSIXACL : 0);
        }
 out:
        return ret;
@@ -1057,10 +1057,10 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_magic = OCFS2_SUPER_MAGIC;
 
-       sb->s_flags = (sb->s_flags & ~(MS_POSIXACL | MS_NOSEC)) |
-               ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~(SB_POSIXACL | SB_NOSEC)) |
+               ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? SB_POSIXACL : 0);
 
-       /* Hard readonly mode only if: bdev_read_only, MS_RDONLY,
+       /* Hard readonly mode only if: bdev_read_only, SB_RDONLY,
         * heartbeat=none */
        if (bdev_read_only(sb->s_bdev)) {
                if (!sb_rdonly(sb)) {
@@ -2057,7 +2057,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
        sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
        sb->s_xattr = ocfs2_xattr_handlers;
        sb->s_time_gran = 1;
-       sb->s_flags |= MS_NOATIME;
+       sb->s_flags |= SB_NOATIME;
        /* this is needed to support O_LARGEFILE */
        cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits);
        bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
@@ -2568,7 +2568,7 @@ static int ocfs2_handle_error(struct super_block *sb)
                        return rv;
 
                pr_crit("OCFS2: File system is now read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                ocfs2_set_ro_flag(osb, 0);
        }
 
index 5fdf269ba82e393c1bff315b260ec45b7a1787b2..c5898c59d4118d8dbd276f32bdf65e23eff52919 100644 (file)
@@ -901,7 +901,7 @@ static int ocfs2_xattr_list_entry(struct super_block *sb,
 
        case OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS:
        case OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT:
-               if (!(sb->s_flags & MS_POSIXACL))
+               if (!(sb->s_flags & SB_POSIXACL))
                        return 0;
                break;
 
index 13215f26e321902fde7cd0143763146f93df82ab..2200662a9bf186ae54dfc92573cfe00f589c1858 100644 (file)
@@ -369,7 +369,7 @@ static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
 static int openprom_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NOATIME;
+       *flags |= SB_NOATIME;
        return 0;
 }
 
@@ -386,7 +386,7 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent)
        struct op_inode_info *oi;
        int ret;
 
-       s->s_flags |= MS_NOATIME;
+       s->s_flags |= SB_NOATIME;
        s->s_blocksize = 1024;
        s->s_blocksize_bits = 10;
        s->s_magic = OPENPROM_SUPER_MAGIC;
index 366750eef2019201146b5fe3a21f2f3a2bfa4da4..36f1390b5ed7d6d324471ca145c529cda5477555 100644 (file)
@@ -40,7 +40,7 @@ static int orangefs_show_options(struct seq_file *m, struct dentry *root)
 {
        struct orangefs_sb_info_s *orangefs_sb = ORANGEFS_SB(root->d_sb);
 
-       if (root->d_sb->s_flags & MS_POSIXACL)
+       if (root->d_sb->s_flags & SB_POSIXACL)
                seq_puts(m, ",acl");
        if (orangefs_sb->flags & ORANGEFS_OPT_INTR)
                seq_puts(m, ",intr");
@@ -60,7 +60,7 @@ static int parse_mount_options(struct super_block *sb, char *options,
         * Force any potential flags that might be set from the mount
         * to zero, ie, initialize to unset.
         */
-       sb->s_flags &= ~MS_POSIXACL;
+       sb->s_flags &= ~SB_POSIXACL;
        orangefs_sb->flags &= ~ORANGEFS_OPT_INTR;
        orangefs_sb->flags &= ~ORANGEFS_OPT_LOCAL_LOCK;
 
@@ -73,7 +73,7 @@ static int parse_mount_options(struct super_block *sb, char *options,
                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_acl:
-                       sb->s_flags |= MS_POSIXACL;
+                       sb->s_flags |= SB_POSIXACL;
                        break;
                case Opt_intr:
                        orangefs_sb->flags |= ORANGEFS_OPT_INTR;
@@ -507,7 +507,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 
        ret = orangefs_fill_sb(sb,
              &new_op->downcall.resp.fs_mount, data,
-             flags & MS_SILENT ? 1 : 0);
+             flags & SB_SILENT ? 1 : 0);
 
        if (ret) {
                d = ERR_PTR(ret);
index cbfc196e5dc53b2cb2376524c5955af20bd07ea6..5ac4154668613d158d63301272039ee5f5b814e4 100644 (file)
@@ -24,6 +24,16 @@ config OVERLAY_FS_REDIRECT_DIR
          an overlay which has redirects on a kernel that doesn't support this
          feature will have unexpected results.
 
+config OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW
+       bool "Overlayfs: follow redirects even if redirects are turned off"
+       default y
+       depends on OVERLAY_FS
+       help
+         Disable this to get a possibly more secure configuration, but one
+         that might not be backward compatible with previous kernels.
+
+         For more information, see Documentation/filesystems/overlayfs.txt
+
 config OVERLAY_FS_INDEX
        bool "Overlayfs: turn on inodes index feature by default"
        depends on OVERLAY_FS
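
The new Kconfig option only picks the build-time default for following redirects when the redirect_dir feature is otherwise disabled; the per-mount behaviour is chosen with redirect_dir={on,follow,off,nofollow}, whose parsing is added further down in this series. A standalone sketch of the resulting policy table, assuming that parsing (hypothetical names, not overlayfs code):

    #include <stdbool.h>
    #include <string.h>

    struct redirect_policy {
            bool create;    /* may create new redirects (directory rename)   */
            bool follow;    /* may follow existing redirects during lookup   */
    };

    static int redirect_policy_from_mode(const char *mode, bool always_follow,
                                         struct redirect_policy *p)
    {
            p->create = false;
            p->follow = false;

            if (strcmp(mode, "on") == 0) {
                    p->create = true;
                    p->follow = true;           /* creation implies following */
            } else if (strcmp(mode, "follow") == 0) {
                    p->follow = true;
            } else if (strcmp(mode, "off") == 0) {
                    p->follow = always_follow;  /* REDIRECT_ALWAYS_FOLLOW default */
            } else if (strcmp(mode, "nofollow") != 0) {
                    return -1;                  /* unknown mode string */
            }
            return 0;
    }
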
index e13921824c70ce8061e545f21e408eda478d7644..f9788bc116a8d1b5137f805d9955995e68b31884 100644 (file)
@@ -887,7 +887,8 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir)
                spin_unlock(&dentry->d_lock);
        } else {
                kfree(redirect);
-               pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
+               pr_warn_ratelimited("overlayfs: failed to set redirect (%i)\n",
+                                   err);
                /* Fall back to userspace copy-up */
                err = -EXDEV;
        }
index 625ed8066570607b6140a0c22c39f152135a5c81..beb945e1963c0aac86fbce312ca9814821a1c33d 100644 (file)
@@ -435,7 +435,7 @@ int ovl_verify_index(struct dentry *index, struct ovl_path *lower,
 
        /* Check if index is orphan and don't warn before cleaning it */
        if (d_inode(index)->i_nlink == 1 &&
-           ovl_get_nlink(index, origin.dentry, 0) == 0)
+           ovl_get_nlink(origin.dentry, index, 0) == 0)
                err = -ENOENT;
 
        dput(origin.dentry);
@@ -681,6 +681,22 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                if (d.stop)
                        break;
 
+               /*
+                * Following redirects can have security consequences: it's like
+                * a symlink into the lower layer without the permission checks.
+                * This is only a problem if the upper layer is untrusted (e.g.
+                * it comes from a USB drive).  This can allow a non-readable file
+                * or directory to become readable.
+                *
+                * Following redirects only when redirects are enabled closes
+                * this attack vector when it is not needed.
+                */
+               err = -EPERM;
+               if (d.redirect && !ofs->config.redirect_follow) {
+                       pr_warn_ratelimited("overlay: refusing to follow redirect for (%pd2)\n", dentry);
+                       goto out_put;
+               }
+
                if (d.redirect && d.redirect[0] == '/' && poe != roe) {
                        poe = roe;
 
index 13eab09a6b6f33c04c90ad822aa8a0dbe12a4927..b489099ccd493a54e231c9ee7e3ebe18809398b5 100644 (file)
@@ -180,7 +180,7 @@ static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry)
 static inline struct dentry *ovl_do_tmpfile(struct dentry *dentry, umode_t mode)
 {
        struct dentry *ret = vfs_tmpfile(dentry, mode, 0);
-       int err = IS_ERR(ret) ? PTR_ERR(ret) : 0;
+       int err = PTR_ERR_OR_ZERO(ret);
 
        pr_debug("tmpfile(%pd2, 0%o) = %i\n", dentry, mode, err);
        return ret;
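
PTR_ERR_OR_ZERO() collapses the open-coded IS_ERR()/PTR_ERR() pair used here: it yields the negative errno encoded in an ERR_PTR value and 0 for a normal pointer. A standalone userspace model of the encoding (simplified; the kernel reserves the top MAX_ERRNO addresses for error values, and the demo_* names are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static int demo_is_err(const void *ptr)
    {
            return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    static void *demo_err_ptr(long err)                /* ~ ERR_PTR()         */
    {
            return (void *)(uintptr_t)err;
    }

    static long demo_ptr_err_or_zero(const void *ptr)  /* ~ PTR_ERR_OR_ZERO() */
    {
            return demo_is_err(ptr) ? (long)(intptr_t)ptr : 0;
    }

    int main(void)
    {
            int x = 0;
            void *ok = &x;
            void *bad = demo_err_ptr(-12);             /* -ENOMEM */

            printf("%ld %ld\n", demo_ptr_err_or_zero(ok),
                   demo_ptr_err_or_zero(bad));          /* prints: 0 -12 */
            return 0;
    }
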
index 752bab645879e5fce43e86d45e835d94d3c44b22..9d0bc03bf6e4563ef280fe5e8f695cc9f1e8abd5 100644 (file)
@@ -14,6 +14,8 @@ struct ovl_config {
        char *workdir;
        bool default_permissions;
        bool redirect_dir;
+       bool redirect_follow;
+       const char *redirect_mode;
        bool index;
 };
 
index 0daa4354fec4ae967da4ae43e81f7f833cd1095c..8c98578d27a1496922d1b862fa90af61da3dd290 100644 (file)
@@ -499,7 +499,7 @@ out:
        return err;
 
 fail:
-       pr_warn_ratelimited("overlay: failed to look up (%s) for ino (%i)\n",
+       pr_warn_ratelimited("overlayfs: failed to look up (%s) for ino (%i)\n",
                            p->name, err);
        goto out;
 }
@@ -663,7 +663,10 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
                        return PTR_ERR(rdt.cache);
        }
 
-       return iterate_dir(od->realfile, &rdt.ctx);
+       err = iterate_dir(od->realfile, &rdt.ctx);
+       ctx->pos = rdt.ctx.pos;
+
+       return err;
 }
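
The copy-back added above matters because getdents(2) progress on the merged directory is tracked through the caller's dir_context position, while iterate_dir() on the real underlying file only advances the wrapper context. A trivial standalone model of that propagation (hypothetical names, not overlayfs code):

    #include <stdio.h>

    struct demo_dir_context { long pos; };

    static void demo_iterate(struct demo_dir_context *ctx)
    {
            ctx->pos += 3;                  /* stand-in: three entries emitted */
    }

    static void demo_iterate_real(struct demo_dir_context *caller)
    {
            struct demo_dir_context wrapper = { .pos = caller->pos };

            demo_iterate(&wrapper);
            caller->pos = wrapper.pos;      /* the copy-back the hunk adds */
    }

    int main(void)
    {
            struct demo_dir_context ctx = { .pos = 0 };

            demo_iterate_real(&ctx);
            printf("pos advanced to %ld\n", ctx.pos);
            return 0;
    }
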
 
 
index be03578181d211ac03c18aad58ad469d0d6c2618..76440feb79f64ee0fce36713ed4fe4264ff70f11 100644 (file)
@@ -33,6 +33,13 @@ module_param_named(redirect_dir, ovl_redirect_dir_def, bool, 0644);
 MODULE_PARM_DESC(ovl_redirect_dir_def,
                 "Default to on or off for the redirect_dir feature");
 
+static bool ovl_redirect_always_follow =
+       IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW);
+module_param_named(redirect_always_follow, ovl_redirect_always_follow,
+                  bool, 0644);
+MODULE_PARM_DESC(ovl_redirect_always_follow,
+                "Follow redirects even if redirect_dir feature is turned off");
+
 static bool ovl_index_def = IS_ENABLED(CONFIG_OVERLAY_FS_INDEX);
 module_param_named(index, ovl_index_def, bool, 0644);
 MODULE_PARM_DESC(ovl_index_def,
@@ -232,6 +239,7 @@ static void ovl_free_fs(struct ovl_fs *ofs)
        kfree(ofs->config.lowerdir);
        kfree(ofs->config.upperdir);
        kfree(ofs->config.workdir);
+       kfree(ofs->config.redirect_mode);
        if (ofs->creator_cred)
                put_cred(ofs->creator_cred);
        kfree(ofs);
@@ -244,6 +252,7 @@ static void ovl_put_super(struct super_block *sb)
        ovl_free_fs(ofs);
 }
 
+/* Sync real dirty inodes in upper filesystem (if it exists) */
 static int ovl_sync_fs(struct super_block *sb, int wait)
 {
        struct ovl_fs *ofs = sb->s_fs_info;
@@ -252,14 +261,24 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
 
        if (!ofs->upper_mnt)
                return 0;
-       upper_sb = ofs->upper_mnt->mnt_sb;
-       if (!upper_sb->s_op->sync_fs)
+
+       /*
+        * If this is a sync(2) call or an emergency sync, all the super blocks
+        * will be iterated, including upper_sb, so no need to do anything.
+        *
+        * If this is a syncfs(2) call, then we do need to call
+        * sync_filesystem() on upper_sb, but it is enough to do so when we
+        * are called with wait == 1.
+        */
+       if (!wait)
                return 0;
 
-       /* real inodes have already been synced by sync_filesystem(ovl_sb) */
+       upper_sb = ofs->upper_mnt->mnt_sb;
+
        down_read(&upper_sb->s_umount);
-       ret = upper_sb->s_op->sync_fs(upper_sb, wait);
+       ret = sync_filesystem(upper_sb);
        up_read(&upper_sb->s_umount);
+
        return ret;
 }
 
@@ -295,6 +314,11 @@ static bool ovl_force_readonly(struct ovl_fs *ofs)
        return (!ofs->upper_mnt || !ofs->workdir);
 }
 
+static const char *ovl_redirect_mode_def(void)
+{
+       return ovl_redirect_dir_def ? "on" : "off";
+}
+
 /**
  * ovl_show_options
  *
@@ -313,12 +337,10 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
        }
        if (ofs->config.default_permissions)
                seq_puts(m, ",default_permissions");
-       if (ofs->config.redirect_dir != ovl_redirect_dir_def)
-               seq_printf(m, ",redirect_dir=%s",
-                          ofs->config.redirect_dir ? "on" : "off");
+       if (strcmp(ofs->config.redirect_mode, ovl_redirect_mode_def()) != 0)
+               seq_printf(m, ",redirect_dir=%s", ofs->config.redirect_mode);
        if (ofs->config.index != ovl_index_def)
-               seq_printf(m, ",index=%s",
-                          ofs->config.index ? "on" : "off");
+               seq_printf(m, ",index=%s", ofs->config.index ? "on" : "off");
        return 0;
 }
 
@@ -326,7 +348,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data)
 {
        struct ovl_fs *ofs = sb->s_fs_info;
 
-       if (!(*flags & MS_RDONLY) && ovl_force_readonly(ofs))
+       if (!(*flags & SB_RDONLY) && ovl_force_readonly(ofs))
                return -EROFS;
 
        return 0;
@@ -348,8 +370,7 @@ enum {
        OPT_UPPERDIR,
        OPT_WORKDIR,
        OPT_DEFAULT_PERMISSIONS,
-       OPT_REDIRECT_DIR_ON,
-       OPT_REDIRECT_DIR_OFF,
+       OPT_REDIRECT_DIR,
        OPT_INDEX_ON,
        OPT_INDEX_OFF,
        OPT_ERR,
@@ -360,8 +381,7 @@ static const match_table_t ovl_tokens = {
        {OPT_UPPERDIR,                  "upperdir=%s"},
        {OPT_WORKDIR,                   "workdir=%s"},
        {OPT_DEFAULT_PERMISSIONS,       "default_permissions"},
-       {OPT_REDIRECT_DIR_ON,           "redirect_dir=on"},
-       {OPT_REDIRECT_DIR_OFF,          "redirect_dir=off"},
+       {OPT_REDIRECT_DIR,              "redirect_dir=%s"},
        {OPT_INDEX_ON,                  "index=on"},
        {OPT_INDEX_OFF,                 "index=off"},
        {OPT_ERR,                       NULL}
@@ -390,10 +410,37 @@ static char *ovl_next_opt(char **s)
        return sbegin;
 }
 
+static int ovl_parse_redirect_mode(struct ovl_config *config, const char *mode)
+{
+       if (strcmp(mode, "on") == 0) {
+               config->redirect_dir = true;
+               /*
+                * It does not make sense to have redirect creation without
+                * redirect following.
+                */
+               config->redirect_follow = true;
+       } else if (strcmp(mode, "follow") == 0) {
+               config->redirect_follow = true;
+       } else if (strcmp(mode, "off") == 0) {
+               if (ovl_redirect_always_follow)
+                       config->redirect_follow = true;
+       } else if (strcmp(mode, "nofollow") != 0) {
+               pr_err("overlayfs: bad mount option \"redirect_dir=%s\"\n",
+                      mode);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int ovl_parse_opt(char *opt, struct ovl_config *config)
 {
        char *p;
 
+       config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
+       if (!config->redirect_mode)
+               return -ENOMEM;
+
        while ((p = ovl_next_opt(&opt)) != NULL) {
                int token;
                substring_t args[MAX_OPT_ARGS];
@@ -428,12 +475,11 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
                        config->default_permissions = true;
                        break;
 
-               case OPT_REDIRECT_DIR_ON:
-                       config->redirect_dir = true;
-                       break;
-
-               case OPT_REDIRECT_DIR_OFF:
-                       config->redirect_dir = false;
+               case OPT_REDIRECT_DIR:
+                       kfree(config->redirect_mode);
+                       config->redirect_mode = match_strdup(&args[0]);
+                       if (!config->redirect_mode)
+                               return -ENOMEM;
                        break;
 
                case OPT_INDEX_ON:
@@ -458,7 +504,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
                config->workdir = NULL;
        }
 
-       return 0;
+       return ovl_parse_redirect_mode(config, config->redirect_mode);
 }
 
 #define OVL_WORKDIR_NAME "work"
@@ -1160,7 +1206,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        if (!cred)
                goto out_err;
 
-       ofs->config.redirect_dir = ovl_redirect_dir_def;
        ofs->config.index = ovl_index_def;
        err = ovl_parse_opt((char *) data, &ofs->config);
        if (err)
@@ -1190,7 +1235,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                        goto out_err;
 
                if (!ofs->workdir)
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
 
                sb->s_stack_depth = ofs->upper_mnt->mnt_sb->s_stack_depth;
                sb->s_time_gran = ofs->upper_mnt->mnt_sb->s_time_gran;
@@ -1203,7 +1248,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 
        /* If the upper fs is nonexistent, we mark overlayfs r/o too */
        if (!ofs->upper_mnt)
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        else if (ofs->upper_mnt->mnt_sb != ofs->same_sb)
                ofs->same_sb = NULL;
 
@@ -1213,7 +1258,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                        goto out_free_oe;
 
                if (!ofs->indexdir)
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
        }
 
        /* Show index=off/on in /proc/mounts for any of the reasons above */
@@ -1227,7 +1272,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_op = &ovl_super_operations;
        sb->s_xattr = ovl_xattr_handlers;
        sb->s_fs_info = ofs;
-       sb->s_flags |= MS_POSIXACL | MS_NOREMOTELOCK;
+       sb->s_flags |= SB_POSIXACL | SB_NOREMOTELOCK;
 
        err = -ENOMEM;
        root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, 0));
index 31934cb9dfc8864fd0b2ac6f1e77f8a6b55c99f7..60316b52d6591459d4c25bc0d434c4bfea3d2fef 100644 (file)
@@ -443,8 +443,7 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
                save_stack_trace_tsk(task, &trace);
 
                for (i = 0; i < trace.nr_entries; i++) {
-                       seq_printf(m, "[<%pK>] %pB\n",
-                                  (void *)entries[i], (void *)entries[i]);
+                       seq_printf(m, "[<0>] %pB\n", (void *)entries[i]);
                }
                unlock_trace(task);
        }
@@ -2269,7 +2268,7 @@ static int show_timer(struct seq_file *m, void *v)
        notify = timer->it_sigev_notify;
 
        seq_printf(m, "ID: %d\n", timer->it_id);
-       seq_printf(m, "signal: %d/%p\n",
+       seq_printf(m, "signal: %d/%px\n",
                   timer->sigq->info.si_signo,
                   timer->sigq->info.si_value.sival_ptr);
        seq_printf(m, "notify: %s/%s.%d\n",
index 225f541f7078c937c6a4c25264069700025a0582..dd0f826224274bd20acbe6944dc7fb6a3709329a 100644 (file)
@@ -483,7 +483,7 @@ int proc_fill_super(struct super_block *s, void *data, int silent)
 
        /* User space would break if executables or devices appear on proc */
        s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
-       s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
+       s->s_flags |= SB_NODIRATIME | SB_NOSUID | SB_NOEXEC;
        s->s_blocksize = 1024;
        s->s_blocksize_bits = 10;
        s->s_magic = PROC_SUPER_MAGIC;
index 4e42aba97f2e3ed694aa5df146ecbf949c0b3f68..ede8e64974be240368d11ab47f3227f72a016e23 100644 (file)
@@ -91,7 +91,7 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
 {
        struct pid_namespace *ns;
 
-       if (flags & MS_KERNMOUNT) {
+       if (flags & SB_KERNMOUNT) {
                ns = data;
                data = NULL;
        } else {
index 7b635d17321377e4868554a6ad338a1bd413b3cc..b786840facd96e8dbb3cb0ce76125d915bc0c916 100644 (file)
@@ -45,10 +45,10 @@ struct proc_fs_info {
 static int show_sb_opts(struct seq_file *m, struct super_block *sb)
 {
        static const struct proc_fs_info fs_info[] = {
-               { MS_SYNCHRONOUS, ",sync" },
-               { MS_DIRSYNC, ",dirsync" },
-               { MS_MANDLOCK, ",mand" },
-               { MS_LAZYTIME, ",lazytime" },
+               { SB_SYNCHRONOUS, ",sync" },
+               { SB_DIRSYNC, ",dirsync" },
+               { SB_MANDLOCK, ",mand" },
+               { SB_LAZYTIME, ",lazytime" },
                { 0, NULL }
        };
        const struct proc_fs_info *fs_infop;
index 423159abd50182812656c6f73e421da272187670..691032107f8c78776668a93ca4c546dc2a46282a 100644 (file)
@@ -61,7 +61,7 @@ MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
 
 static int pstore_new_entry;
 
-static void pstore_timefunc(unsigned long);
+static void pstore_timefunc(struct timer_list *);
 static DEFINE_TIMER(pstore_timer, pstore_timefunc);
 
 static void pstore_dowork(struct work_struct *);
@@ -890,7 +890,7 @@ static void pstore_dowork(struct work_struct *work)
        pstore_get_records(1);
 }
 
-static void pstore_timefunc(unsigned long dummy)
+static void pstore_timefunc(struct timer_list *unused)
 {
        if (pstore_new_entry) {
                pstore_new_entry = 0;
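
These pstore hunks follow the timer API conversion in which callbacks receive the struct timer_list pointer itself rather than an unsigned long cookie; pstore's timer is a static DEFINE_TIMER, so the argument is simply unused. For a timer embedded in an object, the usual conversion pairs timer_setup() with from_timer(); a kernel-context sketch with hypothetical names:

    struct demo_dev {
            struct timer_list timer;
            int new_entry;
    };

    static void demo_timefunc(struct timer_list *t)
    {
            /* recover the containing object instead of casting a cookie */
            struct demo_dev *dev = from_timer(dev, t, timer);

            dev->new_entry = 0;
    }

    static void demo_arm(struct demo_dev *dev)
    {
            timer_setup(&dev->timer, demo_timefunc, 0);
            mod_timer(&dev->timer, jiffies + HZ);
    }
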
index 3a67cfb142d886c11558409c6711260ecf9dab70..3d46fe302fcb15372c40461a64107e02c29ee97d 100644 (file)
@@ -47,7 +47,7 @@ static int qnx4_remount(struct super_block *sb, int *flags, char *data)
        sync_filesystem(sb);
        qs = qnx4_sb(sb);
        qs->Version = QNX4_VERSION;
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -199,7 +199,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
 
        s->s_op = &qnx4_sops;
        s->s_magic = QNX4_SUPER_MAGIC;
-       s->s_flags |= MS_RDONLY;        /* Yup, read-only yet */
+       s->s_flags |= SB_RDONLY;        /* Yup, read-only yet */
 
        /* Check the superblock signature. Since the qnx4 code is
           dangerous, we should leave as quickly as possible
index 1192422a1c5628e5782961252e2a1bdab58237c7..4aeb26bcb4d029695226b15da569f8610b8f1585 100644 (file)
@@ -56,7 +56,7 @@ static int qnx6_show_options(struct seq_file *seq, struct dentry *root)
 static int qnx6_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -427,7 +427,7 @@ mmi_success:
        }
        s->s_op = &qnx6_sops;
        s->s_magic = QNX6_SUPER_MAGIC;
-       s->s_flags |= MS_RDONLY;        /* Yup, read-only yet */
+       s->s_flags |= SB_RDONLY;        /* Yup, read-only yet */
 
        /* ease the later tree level calculations */
        sbi = QNX6_SB(s);
index 39f1b0b0c76fbb24cec8b2388cbcfc11dfb0ae4b..020c597ef9b6e66a74f786d70302238d32dc729a 100644 (file)
@@ -941,12 +941,13 @@ static int dqinit_needed(struct inode *inode, int type)
 }
 
 /* This routine is guarded by s_umount semaphore */
-static void add_dquot_ref(struct super_block *sb, int type)
+static int add_dquot_ref(struct super_block *sb, int type)
 {
        struct inode *inode, *old_inode = NULL;
 #ifdef CONFIG_QUOTA_DEBUG
        int reserved = 0;
 #endif
+       int err = 0;
 
        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
@@ -966,7 +967,11 @@ static void add_dquot_ref(struct super_block *sb, int type)
                        reserved = 1;
 #endif
                iput(old_inode);
-               __dquot_initialize(inode, type);
+               err = __dquot_initialize(inode, type);
+               if (err) {
+                       iput(inode);
+                       goto out;
+               }
 
                /*
                 * We hold a reference to 'inode' so it couldn't have been
@@ -981,7 +986,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
        }
        spin_unlock(&sb->s_inode_list_lock);
        iput(old_inode);
-
+out:
 #ifdef CONFIG_QUOTA_DEBUG
        if (reserved) {
                quota_error(sb, "Writes happened before quota was turned on "
@@ -989,6 +994,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
                        "Please run quotacheck(8)");
        }
 #endif
+       return err;
 }
 
 /*
@@ -2379,10 +2385,11 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
        dqopt->flags |= dquot_state_flag(flags, type);
        spin_unlock(&dq_state_lock);
 
-       add_dquot_ref(sb, type);
-
-       return 0;
+       error = add_dquot_ref(sb, type);
+       if (error)
+               dquot_disable(sb, type, flags);
 
+       return error;
 out_file_init:
        dqopt->files[type] = NULL;
        iput(inode);
@@ -2985,7 +2992,8 @@ static int __init dquot_init(void)
        pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
                " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
 
-       register_shrinker(&dqcache_shrinker);
+       if (register_shrinker(&dqcache_shrinker))
+               panic("Cannot register dquot shrinker");
 
        return 0;
 }
index 11a48affa882415376e279aabe0e0df7b81c49fd..b13fc024d2eed8b3201ae1c6dda226a0031d1e8d 100644 (file)
@@ -2106,7 +2106,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
                        journal_end(th);
                        goto out_inserted_sd;
                }
-       } else if (inode->i_sb->s_flags & MS_POSIXACL) {
+       } else if (inode->i_sb->s_flags & SB_POSIXACL) {
                reiserfs_warning(inode->i_sb, "jdm-13090",
                                 "ACLs aren't enabled in the fs, "
                                 "but vfs thinks they are!");
index 69ff280bdfe889a77afac56a24ed6d51ad668aad..70057359fbaf3b7d98c8109dd0bcba1fe112b5cb 100644 (file)
@@ -1960,7 +1960,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
        /*
         * Cancel flushing of old commits. Note that neither of these works
         * will be requeued because superblock is being shutdown and doesn't
-        * have MS_ACTIVE set.
+        * have SB_ACTIVE set.
         */
        reiserfs_cancel_old_flush(sb);
        /* wait for all commits to finish */
@@ -4302,7 +4302,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
                 * Avoid queueing work when sb is being shut down. Transaction
                 * will be flushed on journal shutdown.
                 */
-               if (sb->s_flags & MS_ACTIVE)
+               if (sb->s_flags & SB_ACTIVE)
                        queue_delayed_work(REISERFS_SB(sb)->commit_wq,
                                           &journal->j_work, HZ / 10);
        }
@@ -4393,7 +4393,7 @@ void reiserfs_abort_journal(struct super_block *sb, int errno)
        if (!journal->j_errno)
                journal->j_errno = errno;
 
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        set_bit(J_ABORTED, &journal->j_state);
 
 #ifdef CONFIG_REISERFS_CHECK
index 64f49cafbc5bff7b7c34786e56851408f4f20b55..7e288d97adcbb7504f2c3c2953ca24debd770b01 100644 (file)
@@ -390,7 +390,7 @@ void __reiserfs_error(struct super_block *sb, const char *id,
                return;
 
        reiserfs_info(sb, "Remounting filesystem read-only\n");
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        reiserfs_abort_journal(sb, -EIO);
 }
 
@@ -409,7 +409,7 @@ void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...)
        printk(KERN_CRIT "REISERFS abort (device %s): %s\n", sb->s_id,
               error_buf);
 
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        reiserfs_abort_journal(sb, errno);
 }
 
index 5464ec517702f1bb18e0d3323ef57564ef271b53..1fc934d244592e2df6ee902e8606f39cd313055d 100644 (file)
@@ -121,7 +121,7 @@ void reiserfs_schedule_old_flush(struct super_block *s)
         * Avoid scheduling flush when sb is being shut down. It can race
         * with journal shutdown and free still queued delayed work.
         */
-       if (sb_rdonly(s) || !(s->s_flags & MS_ACTIVE))
+       if (sb_rdonly(s) || !(s->s_flags & SB_ACTIVE))
                return;
 
        spin_lock(&sbi->old_work_lock);
@@ -252,11 +252,11 @@ static int finish_unfinished(struct super_block *s)
 
 #ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
-       if (s->s_flags & MS_ACTIVE) {
+       if (s->s_flags & SB_ACTIVE) {
                ms_active_set = 0;
        } else {
                ms_active_set = 1;
-               s->s_flags |= MS_ACTIVE;
+               s->s_flags |= SB_ACTIVE;
        }
        /* Turn on quotas so that they are updated correctly */
        for (i = 0; i < REISERFS_MAXQUOTAS; i++) {
@@ -411,7 +411,7 @@ static int finish_unfinished(struct super_block *s)
        reiserfs_write_lock(s);
        if (ms_active_set)
                /* Restore the flag back */
-               s->s_flags &= ~MS_ACTIVE;
+               s->s_flags &= ~SB_ACTIVE;
 #endif
        pathrelse(&path);
        if (done)
@@ -1521,7 +1521,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                        goto out_err_unlock;
        }
 
-       if (*mount_flags & MS_RDONLY) {
+       if (*mount_flags & SB_RDONLY) {
                reiserfs_write_unlock(s);
                reiserfs_xattr_init(s, *mount_flags);
                /* remount read-only */
@@ -1567,7 +1567,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
 
                /* now it is safe to call journal_begin */
-               s->s_flags &= ~MS_RDONLY;
+               s->s_flags &= ~SB_RDONLY;
                err = journal_begin(&th, s, 10);
                if (err)
                        goto out_err_unlock;
@@ -1575,7 +1575,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                /* Mount a partition which is read-only, read-write */
                reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
                REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
-               s->s_flags &= ~MS_RDONLY;
+               s->s_flags &= ~SB_RDONLY;
                set_sb_umount_state(rs, REISERFS_ERROR_FS);
                if (!old_format_only(s))
                        set_sb_mnt_count(rs, sb_mnt_count(rs) + 1);
@@ -1590,7 +1590,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                goto out_err_unlock;
 
        reiserfs_write_unlock(s);
-       if (!(*mount_flags & MS_RDONLY)) {
+       if (!(*mount_flags & SB_RDONLY)) {
                dquot_resume(s, -1);
                reiserfs_write_lock(s);
                finish_unfinished(s);
@@ -2055,7 +2055,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
        if (bdev_read_only(s->s_bdev) && !sb_rdonly(s)) {
                SWARN(silent, s, "clm-7000",
                      "Detected readonly device, marking FS readonly");
-               s->s_flags |= MS_RDONLY;
+               s->s_flags |= SB_RDONLY;
        }
        args.objectid = REISERFS_ROOT_OBJECTID;
        args.dirid = REISERFS_ROOT_PARENT_OBJECTID;
@@ -2591,7 +2591,6 @@ out:
                return err;
        if (inode->i_size < off + len - towrite)
                i_size_write(inode, off + len - towrite);
-       inode->i_version++;
        inode->i_mtime = inode->i_ctime = current_time(inode);
        mark_inode_dirty(inode);
        return len - towrite;
index 46492fb37a4c6a44194f171fdab768acbbd17f06..5dbf5324bdda53377e57d38661ec3a835256c85f 100644 (file)
@@ -959,7 +959,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
 
 /*
  * We need to take a copy of the mount flags since things like
- * MS_RDONLY don't get set until *after* we're called.
+ * SB_RDONLY don't get set until *after* we're called.
  * mount_flags != mount_options
  */
 int reiserfs_xattr_init(struct super_block *s, int mount_flags)
@@ -971,7 +971,7 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
        if (err)
                goto error;
 
-       if (d_really_is_negative(privroot) && !(mount_flags & MS_RDONLY)) {
+       if (d_really_is_negative(privroot) && !(mount_flags & SB_RDONLY)) {
                inode_lock(d_inode(s->s_root));
                err = create_privroot(REISERFS_SB(s)->priv_root);
                inode_unlock(d_inode(s->s_root));
@@ -999,11 +999,11 @@ error:
                clear_bit(REISERFS_POSIXACL, &REISERFS_SB(s)->s_mount_opt);
        }
 
-       /* The super_block MS_POSIXACL must mirror the (no)acl mount option. */
+       /* The super_block SB_POSIXACL must mirror the (no)acl mount option. */
        if (reiserfs_posixacl(s))
-               s->s_flags |= MS_POSIXACL;
+               s->s_flags |= SB_POSIXACL;
        else
-               s->s_flags &= ~MS_POSIXACL;
+               s->s_flags &= ~SB_POSIXACL;
 
        return err;
 }
index 0186fe6d39f3b4d2e77497d4d34a7691204ae9fa..8f06fd1f3d692426a38011a77e5f6f8be0e1ad8f 100644 (file)
@@ -451,7 +451,7 @@ static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int romfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -502,7 +502,7 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_maxbytes = 0xFFFFFFFF;
        sb->s_magic = ROMFS_MAGIC;
-       sb->s_flags |= MS_RDONLY | MS_NOATIME;
+       sb->s_flags |= SB_RDONLY | SB_NOATIME;
        sb->s_op = &romfs_super_ops;
 
 #ifdef CONFIG_ROMFS_ON_MTD
index cf01e15a7b16dff288e2479014d20e0d787096d5..8a73b97217c8a5fe24f0e30047354fe058644018 100644 (file)
@@ -195,7 +195,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
                (u64) le64_to_cpu(sblk->id_table_start));
 
        sb->s_maxbytes = MAX_LFS_FILESIZE;
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        sb->s_op = &squashfs_super_ops;
 
        err = -ENOMEM;
@@ -373,7 +373,7 @@ static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int squashfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
index b072a8bab71a1464d3d06d9017a2a33d441e0ef9..5b2a24f0f263b65ad62de722ffe06d28b7bb8102 100644 (file)
@@ -35,11 +35,11 @@ static int flags_by_mnt(int mnt_flags)
 static int flags_by_sb(int s_flags)
 {
        int flags = 0;
-       if (s_flags & MS_SYNCHRONOUS)
+       if (s_flags & SB_SYNCHRONOUS)
                flags |= ST_SYNCHRONOUS;
-       if (s_flags & MS_MANDLOCK)
+       if (s_flags & SB_MANDLOCK)
                flags |= ST_MANDLOCK;
-       if (s_flags & MS_RDONLY)
+       if (s_flags & SB_RDONLY)
                flags |= ST_RDONLY;
        return flags;
 }
index d4e33e8f1e6fee3172e0e07e9d358587eea34bc4..7ff1349609e4874a35268876065490d77f5e01ab 100644 (file)
@@ -191,6 +191,24 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
 
        INIT_LIST_HEAD(&s->s_mounts);
        s->s_user_ns = get_user_ns(user_ns);
+       init_rwsem(&s->s_umount);
+       lockdep_set_class(&s->s_umount, &type->s_umount_key);
+       /*
+        * sget() can have s_umount recursion.
+        *
+        * When it cannot find a suitable sb, it allocates a new
+        * one (this one), and tries again to find a suitable old
+        * one.
+        *
+        * In case that succeeds, it will acquire the s_umount
+        * lock of the old one. Since these are clearly distinct
+        * locks, and this object isn't exposed yet, there's no
+        * risk of deadlocks.
+        *
+        * Annotate this by putting this lock in a different
+        * subclass.
+        */
+       down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
 
        if (security_sb_alloc(s))
                goto fail;
@@ -218,25 +236,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
                goto fail;
        if (list_lru_init_memcg(&s->s_inode_lru))
                goto fail;
-
-       init_rwsem(&s->s_umount);
-       lockdep_set_class(&s->s_umount, &type->s_umount_key);
-       /*
-        * sget() can have s_umount recursion.
-        *
-        * When it cannot find a suitable sb, it allocates a new
-        * one (this one), and tries again to find a suitable old
-        * one.
-        *
-        * In case that succeeds, it will acquire the s_umount
-        * lock of the old one. Since these are clearly distrinct
-        * locks, and this object isn't exposed yet, there's no
-        * risk of deadlocks.
-        *
-        * Annotate this by putting this lock in a different
-        * subclass.
-        */
-       down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
        s->s_count = 1;
        atomic_set(&s->s_active, 1);
        mutex_init(&s->s_vfs_rename_mutex);
index 20b8f82e115b647b9d6f29c0877a1a2e3d6fc44c..fb49510c5dcfa9710d9118404cf84094390c72e4 100644 (file)
@@ -30,7 +30,7 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
        void *ns;
        bool new_sb;
 
-       if (!(flags & MS_KERNMOUNT)) {
+       if (!(flags & SB_KERNMOUNT)) {
                if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET))
                        return ERR_PTR(-EPERM);
        }
index 3c47b7d5d4cf8e8d38eae5a7ab0484d3132e30e3..bec9f79adb25a207dca39feeb924b7e946f664db 100644 (file)
@@ -63,7 +63,7 @@ static int sysv_remount(struct super_block *sb, int *flags, char *data)
 
        sync_filesystem(sb);
        if (sbi->s_forced_ro)
-               *flags |= MS_RDONLY;
+               *flags |= SB_RDONLY;
        return 0;
 }
 
index 0d56e486b39225c597fa7f2dd86f8ec54d5e25d7..89765ddfb738c075b44003e0e1dfeea414d24c7f 100644 (file)
@@ -333,7 +333,7 @@ static int complete_read_super(struct super_block *sb, int silent, int size)
        /* set up enough so that it can read an inode */
        sb->s_op = &sysv_sops;
        if (sbi->s_forced_ro)
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        if (sbi->s_truncate)
                sb->s_d_op = &sysv_dentry_operations;
        root_inode = sysv_iget(sb, SYSV_ROOT_INO);
index a02aa59d1e245124dcead693b15cd128fa2b1f4e..dfe85069586ebe001eb9bba841405bb4a43e94bf 100644 (file)
@@ -1406,7 +1406,7 @@ int ubifs_update_time(struct inode *inode, struct timespec *time,
        if (flags & S_MTIME)
                inode->i_mtime = *time;
 
-       if (!(inode->i_sb->s_flags & MS_LAZYTIME))
+       if (!(inode->i_sb->s_flags & SB_LAZYTIME))
                iflags |= I_DIRTY_SYNC;
 
        release = ui->dirty;
index 3be28900bf3750364f79ac36803eb0eae1066f80..fe77e9625e84791d007821ef231e633a58539f64 100644 (file)
@@ -84,7 +84,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
        if (!c->ro_error) {
                c->ro_error = 1;
                c->no_chk_data_crc = 0;
-               c->vfs_sb->s_flags |= MS_RDONLY;
+               c->vfs_sb->s_flags |= SB_RDONLY;
                ubifs_warn(c, "switched to read-only mode, error %d", err);
                dump_stack();
        }
index 7503e7cdf8702a61ce91576316bfce10bd63e113..0beb285b143da6cfe9cc238ede64313f8f75a604 100644 (file)
@@ -968,7 +968,7 @@ static int parse_standard_option(const char *option)
 
        pr_notice("UBIFS: parse %s\n", option);
        if (!strcmp(option, "sync"))
-               return MS_SYNCHRONOUS;
+               return SB_SYNCHRONOUS;
        return 0;
 }
 
@@ -1160,8 +1160,8 @@ static int mount_ubifs(struct ubifs_info *c)
        size_t sz;
 
        c->ro_mount = !!sb_rdonly(c->vfs_sb);
-       /* Suppress error messages while probing if MS_SILENT is set */
-       c->probing = !!(c->vfs_sb->s_flags & MS_SILENT);
+       /* Suppress error messages while probing if SB_SILENT is set */
+       c->probing = !!(c->vfs_sb->s_flags & SB_SILENT);
 
        err = init_constants_early(c);
        if (err)
@@ -1852,7 +1852,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
                return err;
        }
 
-       if (c->ro_mount && !(*flags & MS_RDONLY)) {
+       if (c->ro_mount && !(*flags & SB_RDONLY)) {
                if (c->ro_error) {
                        ubifs_msg(c, "cannot re-mount R/W due to prior errors");
                        return -EROFS;
@@ -1864,7 +1864,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
                err = ubifs_remount_rw(c);
                if (err)
                        return err;
-       } else if (!c->ro_mount && (*flags & MS_RDONLY)) {
+       } else if (!c->ro_mount && (*flags & SB_RDONLY)) {
                if (c->ro_error) {
                        ubifs_msg(c, "cannot re-mount R/O due to prior errors");
                        return -EROFS;
@@ -2117,7 +2117,7 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
         */
        ubi = open_ubi(name, UBI_READONLY);
        if (IS_ERR(ubi)) {
-               if (!(flags & MS_SILENT))
+               if (!(flags & SB_SILENT))
                        pr_err("UBIFS error (pid: %d): cannot open \"%s\", error %d",
                               current->pid, name, (int)PTR_ERR(ubi));
                return ERR_CAST(ubi);
@@ -2143,18 +2143,18 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
                kfree(c);
                /* A new mount point for already mounted UBIFS */
                dbg_gen("this ubi volume is already mounted");
-               if (!!(flags & MS_RDONLY) != c1->ro_mount) {
+               if (!!(flags & SB_RDONLY) != c1->ro_mount) {
                        err = -EBUSY;
                        goto out_deact;
                }
        } else {
-               err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+               err = ubifs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
                if (err)
                        goto out_deact;
                /* We do not support atime */
-               sb->s_flags |= MS_ACTIVE;
+               sb->s_flags |= SB_ACTIVE;
 #ifndef CONFIG_UBIFS_ATIME_SUPPORT
-               sb->s_flags |= MS_NOATIME;
+               sb->s_flags |= SB_NOATIME;
 #else
                ubifs_msg(c, "full atime support is enabled.");
 #endif
index 63c7468147eb9b573db8270f55e8840febe99ffc..5ee7af879cc41ab9242e110fbbc7fa8a274d56f1 100644 (file)
@@ -1201,7 +1201,7 @@ struct ubifs_debug_info;
  * @need_recovery: %1 if the file-system needs recovery
  * @replaying: %1 during journal replay
  * @mounting: %1 while mounting
- * @probing: %1 while attempting to mount if MS_SILENT mount flag is set
+ * @probing: %1 while attempting to mount if SB_SILENT mount flag is set
  * @remounting_rw: %1 while re-mounting from R/O mode to R/W mode
  * @replay_list: temporary list used during journal replay
  * @replay_buds: list of buds to replay
@@ -1850,7 +1850,7 @@ __printf(2, 3)
 void ubifs_warn(const struct ubifs_info *c, const char *fmt, ...);
 /*
  * A conditional variant of 'ubifs_err()' which doesn't output anything
- * if probing (ie. MS_SILENT set).
+ * if probing (ie. SB_SILENT set).
  */
 #define ubifs_errc(c, fmt, ...)                                                \
 do {                                                                   \
index f80e0a0f24d3374d02b4b7fa50df7ed2372a4ebf..f73239a9a97daa4a9046252323ef888b27691589 100644 (file)
@@ -650,7 +650,7 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
        sync_filesystem(sb);
        if (lvidiu) {
                int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
-               if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
+               if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & SB_RDONLY))
                        return -EACCES;
        }
 
@@ -673,10 +673,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
        sbi->s_dmode = uopt.dmode;
        write_unlock(&sbi->s_cred_lock);
 
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                goto out_unlock;
 
-       if (*flags & MS_RDONLY)
+       if (*flags & SB_RDONLY)
                udf_close_lvid(sb);
        else
                udf_open_lvid(sb);
index b5cd79065ef9a84d72aa427252d81a0edd0abb2d..e727ee07dbe4ac1a824d8626f917d4a561e3eb0a 100644 (file)
@@ -115,7 +115,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
        
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);
 
@@ -205,7 +205,7 @@ do_more:
 
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
 
        if (overflow) {
@@ -567,7 +567,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
        
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);
 
@@ -688,7 +688,7 @@ cg_found:
 succed:
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);
 
index 916b4a4289334f277d481020ef51c8c7f17a11b7..e1ef0f0a135352992ecff800ca94044673ad0ad2 100644 (file)
@@ -112,7 +112,7 @@ void ufs_free_inode (struct inode * inode)
 
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        
        ufs_mark_sb_dirty(sb);
@@ -146,14 +146,14 @@ static void ufs2_init_inodes_chunk(struct super_block *sb,
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);
                unlock_buffer(bh);
-               if (sb->s_flags & MS_SYNCHRONOUS)
+               if (sb->s_flags & SB_SYNCHRONOUS)
                        sync_dirty_buffer(bh);
                brelse(bh);
        }
 
        fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb);
        ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
 
        UFSD("EXIT\n");
@@ -284,7 +284,7 @@ cg_found:
        }
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);
 
@@ -330,7 +330,7 @@ cg_found:
                ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, ts.tv_nsec);
                mark_buffer_dirty(bh);
                unlock_buffer(bh);
-               if (sb->s_flags & MS_SYNCHRONOUS)
+               if (sb->s_flags & SB_SYNCHRONOUS)
                        sync_dirty_buffer(bh);
                brelse(bh);
        }
index 6440003f8ddc62ea689512f4dc25525334d2a42a..4d497e9c68830a1d7a229841c18db5e11903c642 100644 (file)
@@ -282,7 +282,7 @@ void ufs_error (struct super_block * sb, const char * function,
                usb1->fs_clean = UFS_FSBAD;
                ubh_mark_buffer_dirty(USPI_UBH(uspi));
                ufs_mark_sb_dirty(sb);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        va_start(args, fmt);
        vaf.fmt = fmt;
@@ -320,7 +320,7 @@ void ufs_panic (struct super_block * sb, const char * function,
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        pr_crit("panic (device %s): %s: %pV\n",
                sb->s_id, function, &vaf);
        va_end(args);
@@ -905,7 +905,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=old is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        
@@ -921,7 +921,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=nextstep is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        
@@ -937,7 +937,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=nextstep-cd is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        
@@ -953,7 +953,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=openstep is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        
@@ -968,7 +968,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=hp is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        default:
@@ -1125,21 +1125,21 @@ magic_found:
                        break;
                case UFS_FSACTIVE:
                        pr_err("%s(): fs is active\n", __func__);
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        break;
                case UFS_FSBAD:
                        pr_err("%s(): fs is bad\n", __func__);
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        break;
                default:
                        pr_err("%s(): can't grok fs_clean 0x%x\n",
                               __func__, usb1->fs_clean);
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        break;
                }
        } else {
                pr_err("%s(): fs needs fsck\n", __func__);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 
        /*
@@ -1328,7 +1328,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
                return -EINVAL;
        }
 
-       if ((bool)(*mount_flags & MS_RDONLY) == sb_rdonly(sb)) {
+       if ((bool)(*mount_flags & SB_RDONLY) == sb_rdonly(sb)) {
                UFS_SB(sb)->s_mount_opt = new_mount_opt;
                mutex_unlock(&UFS_SB(sb)->s_lock);
                return 0;
@@ -1337,7 +1337,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
        /*
         * fs was mounted as rw, remounting ro
         */
-       if (*mount_flags & MS_RDONLY) {
+       if (*mount_flags & SB_RDONLY) {
                ufs_put_super_internal(sb);
                usb1->fs_time = cpu_to_fs32(sb, get_seconds());
                if ((flags & UFS_ST_MASK) == UFS_ST_SUN
@@ -1346,7 +1346,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
                        ufs_set_fs_state(sb, usb1, usb3,
                                UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
                ubh_mark_buffer_dirty (USPI_UBH(uspi));
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        } else {
        /*
         * fs was mounted as ro, remounting rw
@@ -1370,7 +1370,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
                        mutex_unlock(&UFS_SB(sb)->s_lock);
                        return -EPERM;
                }
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
 #endif
        }
        UFS_SB(sb)->s_mount_opt = new_mount_opt;
index 08df809e231521eace21df09737d6bb4fff99659..1210f684d3c28f9af8d8403c1f0222ef06dc380b 100644 (file)
@@ -5662,7 +5662,8 @@ xfs_bmap_collapse_extents(
                *done = true;
                goto del_cursor;
        }
-       XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
+       XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
+                               del_cursor);
 
        new_startoff = got.br_startoff - offset_shift_fsb;
        if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
@@ -5767,7 +5768,8 @@ xfs_bmap_insert_extents(
                        goto del_cursor;
                }
        }
-       XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
+       XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
+                               del_cursor);
 
        if (stop_fsb >= got.br_startoff + got.br_blockcount) {
                error = -EIO;
index de3f04a986565d7265fc694b80962000de8d7dc8..3b57ef0f2f76c758e6a9c8b89b7a0c470cdd09a9 100644 (file)
@@ -920,8 +920,7 @@ STATIC xfs_agnumber_t
 xfs_ialloc_ag_select(
        xfs_trans_t     *tp,            /* transaction pointer */
        xfs_ino_t       parent,         /* parent directory inode number */
-       umode_t         mode,           /* bits set to indicate file type */
-       int             okalloc)        /* ok to allocate more space */
+       umode_t         mode)           /* bits set to indicate file type */
 {
        xfs_agnumber_t  agcount;        /* number of ag's in the filesystem */
        xfs_agnumber_t  agno;           /* current ag number */
@@ -978,9 +977,6 @@ xfs_ialloc_ag_select(
                        return agno;
                }
 
-               if (!okalloc)
-                       goto nextag;
-
                if (!pag->pagf_init) {
                        error = xfs_alloc_pagf_init(mp, tp, agno, flags);
                        if (error)
@@ -1680,7 +1676,6 @@ xfs_dialloc(
        struct xfs_trans        *tp,
        xfs_ino_t               parent,
        umode_t                 mode,
-       int                     okalloc,
        struct xfs_buf          **IO_agbp,
        xfs_ino_t               *inop)
 {
@@ -1692,6 +1687,7 @@ xfs_dialloc(
        int                     noroom = 0;
        xfs_agnumber_t          start_agno;
        struct xfs_perag        *pag;
+       int                     okalloc = 1;
 
        if (*IO_agbp) {
                /*
@@ -1707,7 +1703,7 @@ xfs_dialloc(
         * We do not have an agbp, so select an initial allocation
         * group for inode allocation.
         */
-       start_agno = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
+       start_agno = xfs_ialloc_ag_select(tp, parent, mode);
        if (start_agno == NULLAGNUMBER) {
                *inop = NULLFSINO;
                return 0;
index d2bdcd5e7312e499deb91b29bc62cb91cc35e881..66a8de0b1caaad8d1ba9d5ed94fccc813780dca3 100644 (file)
@@ -81,7 +81,6 @@ xfs_dialloc(
        struct xfs_trans *tp,           /* transaction pointer */
        xfs_ino_t       parent,         /* parent inode (directory) */
        umode_t         mode,           /* mode bits for new inode */
-       int             okalloc,        /* ok to allocate more space */
        struct xfs_buf  **agbp,         /* buf for a.g. inode header */
        xfs_ino_t       *inop);         /* inode number allocated */
 
index 637b7a892313de51ed28f584b2b2b17f8e32a40c..f120fb20452f493ecd6fd29bfa09732770f28015 100644 (file)
@@ -318,8 +318,20 @@ xfs_scrub_dinode(
 
        /* di_mode */
        mode = be16_to_cpu(dip->di_mode);
-       if (mode & ~(S_IALLUGO | S_IFMT))
+       switch (mode & S_IFMT) {
+       case S_IFLNK:
+       case S_IFREG:
+       case S_IFDIR:
+       case S_IFCHR:
+       case S_IFBLK:
+       case S_IFIFO:
+       case S_IFSOCK:
+               /* mode is recognized */
+               break;
+       default:
                xfs_scrub_ino_set_corrupt(sc, ino, bp);
+               break;
+       }
 
        /* v1/v2 fields */
        switch (dip->di_version) {
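
The mode check rewritten above is stricter than the old test: mode & ~(S_IALLUGO | S_IFMT) only rejects bits outside the permission and file-type fields, so an S_IFMT encoding that names no real file type still passed, while the new switch accepts only the seven defined types. A standalone illustration with the <sys/stat.h> constants (not xfs code; 0170000 sets every S_IFMT bit and is not a valid type):

    #include <stdio.h>
    #include <sys/stat.h>

    static int mode_has_valid_type(unsigned int mode)
    {
            switch (mode & S_IFMT) {
            case S_IFLNK: case S_IFREG: case S_IFDIR: case S_IFCHR:
            case S_IFBLK: case S_IFIFO: case S_IFSOCK:
                    return 1;               /* a recognized file type */
            default:
                    return 0;
            }
    }

    int main(void)
    {
            unsigned int bogus = 0170000 | 0644;    /* all S_IFMT bits set */

            printf("old mask check: %s\n",
                   (bogus & ~(07777u | S_IFMT)) ? "corrupt" : "ok");
            printf("type switch:    %s\n",
                   mode_has_valid_type(bogus) ? "ok" : "corrupt");
            return 0;
    }
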
index 8e58ba8429464d7b57691fb06caf85eaaf6b13f6..3d9037eceaf1b81c2848e056bdc8e9309bc4d87f 100644 (file)
@@ -107,7 +107,7 @@ xfs_scrub_quota_item(
        unsigned long long              rcount;
        xfs_ino_t                       fs_icount;
 
-       offset = id * qi->qi_dqperchunk;
+       offset = id / qi->qi_dqperchunk;
 
        /*
         * We fed $id and DQNEXT into the xfs_qm_dqget call, which means
@@ -207,7 +207,7 @@ xfs_scrub_quota(
        xfs_dqid_t                      id = 0;
        uint                            dqtype;
        int                             nimaps;
-       int                             error;
+       int                             error = 0;
 
        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return -ENOENT;
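
The first hunk above fixes the id-to-offset mapping in the quota scrubber: with qi_dqperchunk dquot records stored per block of the quota file, quota id N is found at file block N / dqperchunk, not N * dqperchunk. A small worked example (the per-chunk count is illustrative; the real value depends on the filesystem block size):

    #include <stdio.h>

    int main(void)
    {
            unsigned int dqperchunk = 30;   /* records per block (example) */
            unsigned int id = 100;

            printf("block = %u\n", id / dqperchunk);    /* 3    (the fix)    */
            printf("slot  = %u\n", id % dqperchunk);    /* 10 within block 3 */
            printf("old   = %u\n", id * dqperchunk);    /* 3000, nonsense    */
            return 0;
    }
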
index 9c42c4efd01ec57bb1945ba671780cc2687e49c3..ab3aef2ae8233350f42647b77c0dff833521fe2f 100644 (file)
@@ -46,7 +46,6 @@
 #include "scrub/scrub.h"
 #include "scrub/common.h"
 #include "scrub/trace.h"
-#include "scrub/scrub.h"
 #include "scrub/btree.h"
 
 /*
index 472080e757887957b5cd7baf9f12a10d6b27ae71..86daed0e3a458dd16ab76251b2772400be1e9409 100644 (file)
@@ -26,7 +26,6 @@
 #include "xfs_mount.h"
 #include "xfs_defer.h"
 #include "xfs_da_format.h"
-#include "xfs_defer.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
 #include "xfs_trans.h"
index a3eeaba156c5ab8d7d34e6b4f217423c452b73dc..21e2d70884e18edc2c765584f201a8b04604837c 100644 (file)
@@ -399,7 +399,7 @@ xfs_map_blocks(
               (ip->i_df.if_flags & XFS_IFEXTENTS));
        ASSERT(offset <= mp->m_super->s_maxbytes);
 
-       if (offset + count > mp->m_super->s_maxbytes)
+       if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
                count = mp->m_super->s_maxbytes - offset;
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -896,13 +896,13 @@ xfs_writepage_map(
        struct writeback_control *wbc,
        struct inode            *inode,
        struct page             *page,
-       loff_t                  offset,
-       uint64_t              end_offset)
+       uint64_t                end_offset)
 {
        LIST_HEAD(submit_list);
        struct xfs_ioend        *ioend, *next;
        struct buffer_head      *bh, *head;
        ssize_t                 len = i_blocksize(inode);
+       uint64_t                offset;
        int                     error = 0;
        int                     count = 0;
        int                     uptodate = 1;
@@ -1146,7 +1146,7 @@ xfs_do_writepage(
                end_offset = offset;
        }
 
-       return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);
+       return xfs_writepage_map(wpc, wbc, inode, page, end_offset);
 
 redirty:
        redirty_page_for_writepage(wbc, page);
@@ -1265,7 +1265,7 @@ xfs_map_trim_size(
        if (mapping_size > size)
                mapping_size = size;
        if (offset < i_size_read(inode) &&
-           offset + mapping_size >= i_size_read(inode)) {
+           (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
                /* limit mapping to block that spans EOF */
                mapping_size = roundup_64(i_size_read(inode) - offset,
                                          i_blocksize(inode));
@@ -1312,7 +1312,7 @@ xfs_get_blocks(
        lockmode = xfs_ilock_data_map_shared(ip);
 
        ASSERT(offset <= mp->m_super->s_maxbytes);
-       if (offset + size > mp->m_super->s_maxbytes)
+       if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
                size = mp->m_super->s_maxbytes - offset;
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
index dd136f7275e4a2efede5a7612de243ddd1349abf..e5fb008d75e899aaa26948ae5044e55e59ca6f11 100644 (file)
@@ -389,7 +389,8 @@ xfs_bud_init(
 int
 xfs_bui_recover(
        struct xfs_mount                *mp,
-       struct xfs_bui_log_item         *buip)
+       struct xfs_bui_log_item         *buip,
+       struct xfs_defer_ops            *dfops)
 {
        int                             error = 0;
        unsigned int                    bui_type;
@@ -404,9 +405,7 @@ xfs_bui_recover(
        xfs_exntst_t                    state;
        struct xfs_trans                *tp;
        struct xfs_inode                *ip = NULL;
-       struct xfs_defer_ops            dfops;
        struct xfs_bmbt_irec            irec;
-       xfs_fsblock_t                   firstfsb;
 
        ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));
 
@@ -464,7 +463,6 @@ xfs_bui_recover(
 
        if (VFS_I(ip)->i_nlink == 0)
                xfs_iflags_set(ip, XFS_IRECOVERY);
-       xfs_defer_init(&dfops, &firstfsb);
 
        /* Process deferred bmap item. */
        state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
@@ -479,16 +477,16 @@ xfs_bui_recover(
                break;
        default:
                error = -EFSCORRUPTED;
-               goto err_dfops;
+               goto err_inode;
        }
        xfs_trans_ijoin(tp, ip, 0);
 
        count = bmap->me_len;
-       error = xfs_trans_log_finish_bmap_update(tp, budp, &dfops, type,
+       error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type,
                        ip, whichfork, bmap->me_startoff,
                        bmap->me_startblock, &count, state);
        if (error)
-               goto err_dfops;
+               goto err_inode;
 
        if (count > 0) {
                ASSERT(type == XFS_BMAP_UNMAP);
@@ -496,16 +494,11 @@ xfs_bui_recover(
                irec.br_blockcount = count;
                irec.br_startoff = bmap->me_startoff;
                irec.br_state = state;
-               error = xfs_bmap_unmap_extent(tp->t_mountp, &dfops, ip, &irec);
+               error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec);
                if (error)
-                       goto err_dfops;
+                       goto err_inode;
        }
 
-       /* Finish transaction, free inodes. */
-       error = xfs_defer_finish(&tp, &dfops);
-       if (error)
-               goto err_dfops;
-
        set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
        error = xfs_trans_commit(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -513,8 +506,6 @@ xfs_bui_recover(
 
        return error;
 
-err_dfops:
-       xfs_defer_cancel(&dfops);
 err_inode:
        xfs_trans_cancel(tp);
        if (ip) {
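
The xfs_bui_recover() change above stops the function from creating, finishing and cancelling its own struct xfs_defer_ops; it now only queues work on a list its caller owns, so all intents recovered in one pass can be finished together afterwards (the log-recovery hunks further down add that single finishing step). A minimal sketch of that shape, with invented names rather than the XFS API:

    #include <stdio.h>

    /* Hypothetical stand-in for a caller-owned deferred-ops list. */
    struct ops_list { int pending; };

    /* The recovery step only appends; it no longer finishes anything itself. */
    static int recover_one_item(struct ops_list *dfops, int item)
    {
        dfops->pending++;
        printf("recovered item %d, %d deferred op(s) queued\n",
               item, dfops->pending);
        return 0;
    }

    /* One place finishes everything, in the order it was queued. */
    static int finish_all(struct ops_list *dfops)
    {
        printf("finishing %d deferred op(s)\n", dfops->pending);
        dfops->pending = 0;
        return 0;
    }

    int main(void)
    {
        struct ops_list dfops = { 0 };

        for (int i = 1; i <= 3; i++)
            recover_one_item(&dfops, i);
        return finish_all(&dfops);
    }
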
index c867daae4a3ce54c97055e133b478dd3baf6db61..24b354a2c83641487acfeb76a336a6476b9b98b3 100644 (file)
@@ -93,6 +93,7 @@ struct xfs_bud_log_item *xfs_bud_init(struct xfs_mount *,
                struct xfs_bui_log_item *);
 void xfs_bui_item_free(struct xfs_bui_log_item *);
 void xfs_bui_release(struct xfs_bui_log_item *);
-int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip);
+int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip,
+               struct xfs_defer_ops *dfops);
 
 #endif /* __XFS_BMAP_ITEM_H__ */
index 4db6e8d780f6962475348a8dd318185a011250e8..4c6e86d861fda1a452dbeb20c1d3d64018627a20 100644 (file)
@@ -1815,22 +1815,27 @@ xfs_alloc_buftarg(
        btp->bt_daxdev = dax_dev;
 
        if (xfs_setsize_buftarg_early(btp, bdev))
-               goto error;
+               goto error_free;
 
        if (list_lru_init(&btp->bt_lru))
-               goto error;
+               goto error_free;
 
        if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
-               goto error;
+               goto error_lru;
 
        btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
        btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
        btp->bt_shrinker.seeks = DEFAULT_SEEKS;
        btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
-       register_shrinker(&btp->bt_shrinker);
+       if (register_shrinker(&btp->bt_shrinker))
+               goto error_pcpu;
        return btp;
 
-error:
+error_pcpu:
+       percpu_counter_destroy(&btp->bt_io_count);
+error_lru:
+       list_lru_destroy(&btp->bt_lru);
+error_free:
        kmem_free(btp);
        return NULL;
 }
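
register_shrinker() can fail here, so xfs_alloc_buftarg() now unwinds each initialised resource on its own label instead of funnelling every failure through one catch-all exit. The usual shape of that goto ladder, as a self-contained sketch with made-up resources standing in for the LRU, the per-cpu counter and the shrinker:

    #include <stdio.h>
    #include <stdlib.h>

    static int init_a(void) { return 0; }
    static int init_b(void) { return 0; }
    static int init_c(void) { return -1; }   /* pretend the last step fails */

    static void undo_b(void) { puts("undo b"); }
    static void undo_a(void) { puts("undo a"); }

    static void *setup(void)
    {
        void *obj = malloc(16);

        if (!obj)
            return NULL;
        if (init_a())
            goto err_free;
        if (init_b())
            goto err_a;
        if (init_c())
            goto err_b;
        return obj;

        /* Unwind strictly in reverse order of initialisation. */
    err_b:
        undo_b();
    err_a:
        undo_a();
    err_free:
        free(obj);
        return NULL;
    }

    int main(void)
    {
        return setup() ? 0 : 1;
    }
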
index d57c2db64e59385450b273757d7fe9798205bd5f..f248708c10ff7f64e61fb3cde307f42031322424 100644 (file)
@@ -970,14 +970,22 @@ xfs_qm_dqflush_done(
         * holding the lock before removing the dquot from the AIL.
         */
        if ((lip->li_flags & XFS_LI_IN_AIL) &&
-           lip->li_lsn == qip->qli_flush_lsn) {
+           ((lip->li_lsn == qip->qli_flush_lsn) ||
+            (lip->li_flags & XFS_LI_FAILED))) {
 
                /* xfs_trans_ail_delete() drops the AIL lock. */
                spin_lock(&ailp->xa_lock);
-               if (lip->li_lsn == qip->qli_flush_lsn)
+               if (lip->li_lsn == qip->qli_flush_lsn) {
                        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
-               else
+               } else {
+                       /*
+                        * Clear the failed state since we are about to drop the
+                        * flush lock
+                        */
+                       if (lip->li_flags & XFS_LI_FAILED)
+                               xfs_clear_li_failed(lip);
                        spin_unlock(&ailp->xa_lock);
+               }
        }
 
        /*
index 2c7a1629e064b4fd1f647affc3432797d058ae26..664dea105e76fee564a1feeb16a05387fe6b9000 100644 (file)
@@ -137,6 +137,26 @@ xfs_qm_dqunpin_wait(
        wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
 }
 
+/*
+ * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
+ * have failed during writeback.
+ *
+ * This informs the AIL that the dquot is already flush locked on the next push,
+ * and acquires a hold on the buffer to ensure that it isn't reclaimed before
+ * dirty data makes it to disk.
+ */
+STATIC void
+xfs_dquot_item_error(
+       struct xfs_log_item     *lip,
+       struct xfs_buf          *bp)
+{
+       struct xfs_dquot        *dqp;
+
+       dqp = DQUOT_ITEM(lip)->qli_dquot;
+       ASSERT(!completion_done(&dqp->q_flush));
+       xfs_set_li_failed(lip, bp);
+}
+
 STATIC uint
 xfs_qm_dquot_logitem_push(
        struct xfs_log_item     *lip,
@@ -144,13 +164,28 @@ xfs_qm_dquot_logitem_push(
                                              __acquires(&lip->li_ailp->xa_lock)
 {
        struct xfs_dquot        *dqp = DQUOT_ITEM(lip)->qli_dquot;
-       struct xfs_buf          *bp = NULL;
+       struct xfs_buf          *bp = lip->li_buf;
        uint                    rval = XFS_ITEM_SUCCESS;
        int                     error;
 
        if (atomic_read(&dqp->q_pincount) > 0)
                return XFS_ITEM_PINNED;
 
+       /*
+        * The buffer containing this item failed to be written back
+        * previously. Resubmit the buffer for IO
+        */
+       if (lip->li_flags & XFS_LI_FAILED) {
+               if (!xfs_buf_trylock(bp))
+                       return XFS_ITEM_LOCKED;
+
+               if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list))
+                       rval = XFS_ITEM_FLUSHING;
+
+               xfs_buf_unlock(bp);
+               return rval;
+       }
+
        if (!xfs_dqlock_nowait(dqp))
                return XFS_ITEM_LOCKED;
 
@@ -242,7 +277,8 @@ static const struct xfs_item_ops xfs_dquot_item_ops = {
        .iop_unlock     = xfs_qm_dquot_logitem_unlock,
        .iop_committed  = xfs_qm_dquot_logitem_committed,
        .iop_push       = xfs_qm_dquot_logitem_push,
-       .iop_committing = xfs_qm_dquot_logitem_committing
+       .iop_committing = xfs_qm_dquot_logitem_committing,
+       .iop_error      = xfs_dquot_item_error
 };
 
 /*
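
Together, the new .iop_error callback and the XFS_LI_FAILED branch in the push handler above amount to "remember that the buffer write failed, hold on to the buffer, and resubmit it on the next AIL push". A deliberately simplified stand-alone model of that retry behaviour, with none of the XFS locking or AIL details:

    #include <stdbool.h>
    #include <stdio.h>

    struct item {
        bool failed;     /* set by the write-error callback */
        int  attempts;
    };

    /* Pretend the first write attempt fails and later ones succeed. */
    static bool write_buffer(struct item *it)
    {
        return ++it->attempts > 1;
    }

    /* Error callback: just mark the item so the next push retries. */
    static void on_write_error(struct item *it)
    {
        it->failed = true;
    }

    /* Push handler: a failed item is resubmitted instead of being flushed again. */
    static void push(struct item *it)
    {
        if (it->failed) {
            if (write_buffer(it)) {
                it->failed = false;
                puts("resubmit succeeded, clearing failed state");
            }
            return;
        }
        if (!write_buffer(it))
            on_write_error(it);
    }

    int main(void)
    {
        struct item it = { 0 };

        push(&it);   /* first flush fails, item is marked failed */
        push(&it);   /* retry path resubmits the buffer */
        return it.failed ? 1 : 0;
    }
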
index 61d1cb7dc10d25dd894624d6094973b9c25328a6..b41952a4ddd851fe63a475be4f2b6bc8d7e47beb 100644 (file)
@@ -749,7 +749,6 @@ xfs_ialloc(
        xfs_nlink_t     nlink,
        dev_t           rdev,
        prid_t          prid,
-       int             okalloc,
        xfs_buf_t       **ialloc_context,
        xfs_inode_t     **ipp)
 {
@@ -765,7 +764,7 @@ xfs_ialloc(
         * Call the space management code to pick
         * the on-disk inode to be allocated.
         */
-       error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
+       error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
                            ialloc_context, &ino);
        if (error)
                return error;
@@ -957,7 +956,6 @@ xfs_dir_ialloc(
        xfs_nlink_t     nlink,
        dev_t           rdev,
        prid_t          prid,           /* project id */
-       int             okalloc,        /* ok to allocate new space */
        xfs_inode_t     **ipp,          /* pointer to inode; it will be
                                           locked. */
        int             *committed)
@@ -988,8 +986,8 @@ xfs_dir_ialloc(
         * transaction commit so that no other process can steal
         * the inode(s) that we've just allocated.
         */
-       code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
-                         &ialloc_context, &ip);
+       code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
+                       &ip);
 
        /*
         * Return an error if we were unable to allocate a new inode.
@@ -1061,7 +1059,7 @@ xfs_dir_ialloc(
                 * this call should always succeed.
                 */
                code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
-                                 okalloc, &ialloc_context, &ip);
+                                 &ialloc_context, &ip);
 
                /*
                 * If we get an error at this point, return to the caller
@@ -1182,11 +1180,6 @@ xfs_create(
                xfs_flush_inodes(mp);
                error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
        }
-       if (error == -ENOSPC) {
-               /* No space at all so try a "no-allocation" reservation */
-               resblks = 0;
-               error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
-       }
        if (error)
                goto out_release_inode;
 
@@ -1203,19 +1196,13 @@ xfs_create(
        if (error)
                goto out_trans_cancel;
 
-       if (!resblks) {
-               error = xfs_dir_canenter(tp, dp, name);
-               if (error)
-                       goto out_trans_cancel;
-       }
-
        /*
         * A newly created regular or special file just has one directory
         * entry pointing to it, but a directory also has the "." entry
         * pointing to itself.
         */
-       error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
-                              prid, resblks > 0, &ip, NULL);
+       error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip,
+                       NULL);
        if (error)
                goto out_trans_cancel;
 
@@ -1340,11 +1327,6 @@ xfs_create_tmpfile(
        tres = &M_RES(mp)->tr_create_tmpfile;
 
        error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
-       if (error == -ENOSPC) {
-               /* No space at all so try a "no-allocation" reservation */
-               resblks = 0;
-               error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
-       }
        if (error)
                goto out_release_inode;
 
@@ -1353,8 +1335,7 @@ xfs_create_tmpfile(
        if (error)
                goto out_trans_cancel;
 
-       error = xfs_dir_ialloc(&tp, dp, mode, 1, 0,
-                               prid, resblks > 0, &ip, NULL);
+       error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip, NULL);
        if (error)
                goto out_trans_cancel;
 
@@ -2400,6 +2381,24 @@ retry:
        return 0;
 }
 
+/*
+ * Free any local-format buffers sitting around before we reset to
+ * extents format.
+ */
+static inline void
+xfs_ifree_local_data(
+       struct xfs_inode        *ip,
+       int                     whichfork)
+{
+       struct xfs_ifork        *ifp;
+
+       if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
+               return;
+
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
+}
+
 /*
  * This is called to return an inode to the inode free list.
  * The inode should already be truncated to 0 length and have
@@ -2437,6 +2436,9 @@ xfs_ifree(
        if (error)
                return error;
 
+       xfs_ifree_local_data(ip, XFS_DATA_FORK);
+       xfs_ifree_local_data(ip, XFS_ATTR_FORK);
+
        VFS_I(ip)->i_mode = 0;          /* mark incore inode as free */
        ip->i_d.di_flags = 0;
        ip->i_d.di_dmevmask = 0;
index cc13c37637217e74e4c9425b34a710a5fd55e090..b2136af9289f3d854f88a549ec32efa455cfc78b 100644 (file)
@@ -428,7 +428,7 @@ xfs_extlen_t        xfs_get_extsz_hint(struct xfs_inode *ip);
 xfs_extlen_t   xfs_get_cowextsz_hint(struct xfs_inode *ip);
 
 int            xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t,
-                              xfs_nlink_t, dev_t, prid_t, int,
+                              xfs_nlink_t, dev_t, prid_t,
                               struct xfs_inode **, int *);
 
 /* from xfs_file.c */
index 33eb4fb2e3fd87b0848ebe2599b0a71b05f0d082..7ab52a8bc0a9e6dff904fe10b097eb3a478db9b9 100644 (file)
@@ -1213,7 +1213,7 @@ xfs_xattr_iomap_begin(
 
        ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
-                              &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
+                              &nimaps, XFS_BMAPI_ATTRFORK);
 out_unlock:
        xfs_iunlock(ip, lockmode);
 
index 38d4227895aef844192e67198df39be01f29c334..a503af96d780ecf9fd15f6c6d579bc9822abac6f 100644 (file)
@@ -781,17 +781,17 @@ xfs_log_mount_finish(
         * something to an unlinked inode, the irele won't cause
         * premature truncation and freeing of the inode, which results
         * in log recovery failure.  We have to evict the unreferenced
-        * lru inodes after clearing MS_ACTIVE because we don't
+        * lru inodes after clearing SB_ACTIVE because we don't
         * otherwise clean up the lru if there's a subsequent failure in
         * xfs_mountfs, which leads to us leaking the inodes if nothing
         * else (e.g. quotacheck) references the inodes before the
         * mount failure occurs.
         */
-       mp->m_super->s_flags |= MS_ACTIVE;
+       mp->m_super->s_flags |= SB_ACTIVE;
        error = xlog_recover_finish(mp->m_log);
        if (!error)
                xfs_log_work_queue(mp);
-       mp->m_super->s_flags &= ~MS_ACTIVE;
+       mp->m_super->s_flags &= ~SB_ACTIVE;
        evict_inodes(mp->m_super);
 
        /*
index 87b1c331f9ebfb7cefb708adc47b55890bf7ab9d..28d1abfe835eef3e9d87f7da1c7c805fef0488f4 100644 (file)
@@ -24,6 +24,7 @@
 #include "xfs_bit.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
+#include "xfs_defer.h"
 #include "xfs_da_format.h"
 #include "xfs_da_btree.h"
 #include "xfs_inode.h"
@@ -4716,7 +4717,8 @@ STATIC int
 xlog_recover_process_cui(
        struct xfs_mount                *mp,
        struct xfs_ail                  *ailp,
-       struct xfs_log_item             *lip)
+       struct xfs_log_item             *lip,
+       struct xfs_defer_ops            *dfops)
 {
        struct xfs_cui_log_item         *cuip;
        int                             error;
@@ -4729,7 +4731,7 @@ xlog_recover_process_cui(
                return 0;
 
        spin_unlock(&ailp->xa_lock);
-       error = xfs_cui_recover(mp, cuip);
+       error = xfs_cui_recover(mp, cuip, dfops);
        spin_lock(&ailp->xa_lock);
 
        return error;
@@ -4756,7 +4758,8 @@ STATIC int
 xlog_recover_process_bui(
        struct xfs_mount                *mp,
        struct xfs_ail                  *ailp,
-       struct xfs_log_item             *lip)
+       struct xfs_log_item             *lip,
+       struct xfs_defer_ops            *dfops)
 {
        struct xfs_bui_log_item         *buip;
        int                             error;
@@ -4769,7 +4772,7 @@ xlog_recover_process_bui(
                return 0;
 
        spin_unlock(&ailp->xa_lock);
-       error = xfs_bui_recover(mp, buip);
+       error = xfs_bui_recover(mp, buip, dfops);
        spin_lock(&ailp->xa_lock);
 
        return error;
@@ -4805,6 +4808,46 @@ static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
        }
 }
 
+/* Take all the collected deferred ops and finish them in order. */
+static int
+xlog_finish_defer_ops(
+       struct xfs_mount        *mp,
+       struct xfs_defer_ops    *dfops)
+{
+       struct xfs_trans        *tp;
+       int64_t                 freeblks;
+       uint                    resblks;
+       int                     error;
+
+       /*
+        * We're finishing the defer_ops that accumulated as a result of
+        * recovering unfinished intent items during log recovery.  We
+        * reserve an itruncate transaction because it is the largest
+        * permanent transaction type.  Since we're the only user of the fs
+        * right now, take 93% (15/16) of the available free blocks.  Use
+        * weird math to avoid a 64-bit division.
+        */
+       freeblks = percpu_counter_sum(&mp->m_fdblocks);
+       if (freeblks <= 0)
+               return -ENOSPC;
+       resblks = min_t(int64_t, UINT_MAX, freeblks);
+       resblks = (resblks * 15) >> 4;
+       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
+                       0, XFS_TRANS_RESERVE, &tp);
+       if (error)
+               return error;
+
+       error = xfs_defer_finish(&tp, dfops);
+       if (error)
+               goto out_cancel;
+
+       return xfs_trans_commit(tp);
+
+out_cancel:
+       xfs_trans_cancel(tp);
+       return error;
+}
+
 /*
  * When this is called, all of the log intent items which did not have
  * corresponding log done items should be in the AIL.  What we do now
@@ -4825,10 +4868,12 @@ STATIC int
 xlog_recover_process_intents(
        struct xlog             *log)
 {
-       struct xfs_log_item     *lip;
-       int                     error = 0;
+       struct xfs_defer_ops    dfops;
        struct xfs_ail_cursor   cur;
+       struct xfs_log_item     *lip;
        struct xfs_ail          *ailp;
+       xfs_fsblock_t           firstfsb;
+       int                     error = 0;
 #if defined(DEBUG) || defined(XFS_WARN)
        xfs_lsn_t               last_lsn;
 #endif
@@ -4839,6 +4884,7 @@ xlog_recover_process_intents(
 #if defined(DEBUG) || defined(XFS_WARN)
        last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
 #endif
+       xfs_defer_init(&dfops, &firstfsb);
        while (lip != NULL) {
                /*
                 * We're done when we see something other than an intent.
@@ -4859,6 +4905,12 @@ xlog_recover_process_intents(
                 */
                ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
 
+               /*
+                * NOTE: If your intent processing routine can create more
+                * deferred ops, you /must/ attach them to the dfops in this
+                * routine or else those subsequent intents will get
+                * replayed in the wrong order!
+                */
                switch (lip->li_type) {
                case XFS_LI_EFI:
                        error = xlog_recover_process_efi(log->l_mp, ailp, lip);
@@ -4867,10 +4919,12 @@ xlog_recover_process_intents(
                        error = xlog_recover_process_rui(log->l_mp, ailp, lip);
                        break;
                case XFS_LI_CUI:
-                       error = xlog_recover_process_cui(log->l_mp, ailp, lip);
+                       error = xlog_recover_process_cui(log->l_mp, ailp, lip,
+                                       &dfops);
                        break;
                case XFS_LI_BUI:
-                       error = xlog_recover_process_bui(log->l_mp, ailp, lip);
+                       error = xlog_recover_process_bui(log->l_mp, ailp, lip,
+                                       &dfops);
                        break;
                }
                if (error)
@@ -4880,6 +4934,11 @@ xlog_recover_process_intents(
 out:
        xfs_trans_ail_cursor_done(&cur);
        spin_unlock(&ailp->xa_lock);
+       if (error)
+               xfs_defer_cancel(&dfops);
+       else
+               error = xlog_finish_defer_ops(log->l_mp, &dfops);
+
        return error;
 }
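
The comment in xlog_finish_defer_ops() above describes the reservation as 93% (15/16) of the free blocks, computed as a clamp plus a multiply-and-shift rather than a 64-bit division. A small standalone check of that arithmetic (15/16 is 93.75%; the intermediate is widened here only to keep the sketch overflow-free):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t freeblks = 1000000;     /* pretend free-block counter sum */
        uint32_t resblks;

        if (freeblks <= 0)
            return 1;

        /* Clamp to a 32-bit count, then take 15/16 of it. */
        resblks = freeblks > UINT32_MAX ? UINT32_MAX : (uint32_t)freeblks;
        resblks = (uint32_t)(((uint64_t)resblks * 15) >> 4);   /* 937500 */

        printf("reserving %u of %lld free blocks\n",
               resblks, (long long)freeblks);
        return 0;
    }
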
 
index 010a13a201aad78382ae69fdcc8e3b8d5c451c1d..ec952dfad359f6ad08d33d234f0cd75200c5933b 100644 (file)
@@ -793,8 +793,8 @@ xfs_qm_qino_alloc(
                return error;
 
        if (need_alloc) {
-               error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
-                                                               &committed);
+               error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip,
+                               &committed);
                if (error) {
                        xfs_trans_cancel(tp);
                        return error;
index 8f2e2fac4255d63fc1dab2d268433e099aaf5aa8..3a55d6fc271b1e6d50aa6ce96c9e84b7c5886e43 100644 (file)
@@ -393,7 +393,8 @@ xfs_cud_init(
 int
 xfs_cui_recover(
        struct xfs_mount                *mp,
-       struct xfs_cui_log_item         *cuip)
+       struct xfs_cui_log_item         *cuip,
+       struct xfs_defer_ops            *dfops)
 {
        int                             i;
        int                             error = 0;
@@ -405,11 +406,9 @@ xfs_cui_recover(
        struct xfs_trans                *tp;
        struct xfs_btree_cur            *rcur = NULL;
        enum xfs_refcount_intent_type   type;
-       xfs_fsblock_t                   firstfsb;
        xfs_fsblock_t                   new_fsb;
        xfs_extlen_t                    new_len;
        struct xfs_bmbt_irec            irec;
-       struct xfs_defer_ops            dfops;
        bool                            requeue_only = false;
 
        ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags));
@@ -465,7 +464,6 @@ xfs_cui_recover(
                return error;
        cudp = xfs_trans_get_cud(tp, cuip);
 
-       xfs_defer_init(&dfops, &firstfsb);
        for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
                refc = &cuip->cui_format.cui_extents[i];
                refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
@@ -485,7 +483,7 @@ xfs_cui_recover(
                        new_len = refc->pe_len;
                } else
                        error = xfs_trans_log_finish_refcount_update(tp, cudp,
-                               &dfops, type, refc->pe_startblock, refc->pe_len,
+                               dfops, type, refc->pe_startblock, refc->pe_len,
                                &new_fsb, &new_len, &rcur);
                if (error)
                        goto abort_error;
@@ -497,21 +495,21 @@ xfs_cui_recover(
                        switch (type) {
                        case XFS_REFCOUNT_INCREASE:
                                error = xfs_refcount_increase_extent(
-                                               tp->t_mountp, &dfops, &irec);
+                                               tp->t_mountp, dfops, &irec);
                                break;
                        case XFS_REFCOUNT_DECREASE:
                                error = xfs_refcount_decrease_extent(
-                                               tp->t_mountp, &dfops, &irec);
+                                               tp->t_mountp, dfops, &irec);
                                break;
                        case XFS_REFCOUNT_ALLOC_COW:
                                error = xfs_refcount_alloc_cow_extent(
-                                               tp->t_mountp, &dfops,
+                                               tp->t_mountp, dfops,
                                                irec.br_startblock,
                                                irec.br_blockcount);
                                break;
                        case XFS_REFCOUNT_FREE_COW:
                                error = xfs_refcount_free_cow_extent(
-                                               tp->t_mountp, &dfops,
+                                               tp->t_mountp, dfops,
                                                irec.br_startblock,
                                                irec.br_blockcount);
                                break;
@@ -525,17 +523,12 @@ xfs_cui_recover(
        }
 
        xfs_refcount_finish_one_cleanup(tp, rcur, error);
-       error = xfs_defer_finish(&tp, &dfops);
-       if (error)
-               goto abort_defer;
        set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
        error = xfs_trans_commit(tp);
        return error;
 
 abort_error:
        xfs_refcount_finish_one_cleanup(tp, rcur, error);
-abort_defer:
-       xfs_defer_cancel(&dfops);
        xfs_trans_cancel(tp);
        return error;
 }
index 5b74dddfa64be728f74e7dd1f731983c1523ad1a..0e5327349a13ee5921808ed866ac02acc038d0df 100644 (file)
@@ -96,6 +96,7 @@ struct xfs_cud_log_item *xfs_cud_init(struct xfs_mount *,
                struct xfs_cui_log_item *);
 void xfs_cui_item_free(struct xfs_cui_log_item *);
 void xfs_cui_release(struct xfs_cui_log_item *);
-int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip);
+int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip,
+               struct xfs_defer_ops *dfops);
 
 #endif /* __XFS_REFCOUNT_ITEM_H__ */
index cc041a29eb70bbb7524e036c0e24843099480617..cf7c8f81bebb566a486f0f732a0fe6a1040022c4 100644 (file)
@@ -49,8 +49,6 @@
 #include "xfs_alloc.h"
 #include "xfs_quota_defs.h"
 #include "xfs_quota.h"
-#include "xfs_btree.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_reflink.h"
 #include "xfs_iomap.h"
 #include "xfs_rmap_btree.h"
index f663022353c0d98b681e51fe8578096d0fbf57bf..5122d3021117f00e20d6dd1e195c28666cc71076 100644 (file)
@@ -212,9 +212,9 @@ xfs_parseargs(
         */
        if (sb_rdonly(sb))
                mp->m_flags |= XFS_MOUNT_RDONLY;
-       if (sb->s_flags & MS_DIRSYNC)
+       if (sb->s_flags & SB_DIRSYNC)
                mp->m_flags |= XFS_MOUNT_DIRSYNC;
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                mp->m_flags |= XFS_MOUNT_WSYNC;
 
        /*
@@ -1312,7 +1312,7 @@ xfs_fs_remount(
        }
 
        /* ro -> rw */
-       if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
+       if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & SB_RDONLY)) {
                if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
                        xfs_warn(mp,
                "ro->rw transition prohibited on norecovery mount");
@@ -1368,7 +1368,7 @@ xfs_fs_remount(
        }
 
        /* rw -> ro */
-       if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
+       if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
                /* Free the per-AG metadata reservation pool. */
                error = xfs_fs_unreserve_ag_blocks(mp);
                if (error) {
index 5f2f32408011d2df4db8ddc6d41297640bab7101..fcc5dfc70aa0c93a635514cb7acececb9e05e2e6 100644 (file)
@@ -30,7 +30,7 @@ extern void xfs_qm_exit(void);
 
 #ifdef CONFIG_XFS_POSIX_ACL
 # define XFS_ACL_STRING                "ACLs, "
-# define set_posix_acl_flag(sb)        ((sb)->s_flags |= MS_POSIXACL)
+# define set_posix_acl_flag(sb)        ((sb)->s_flags |= SB_POSIXACL)
 #else
 # define XFS_ACL_STRING
 # define set_posix_acl_flag(sb)        do { } while (0)
index 68d3ca2c4968054646345dab2bd3a76f97490fb8..2e9e793a8f9dfa18e87078bce5133860d4de6d4d 100644 (file)
@@ -232,11 +232,6 @@ xfs_symlink(
        resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
 
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, resblks, 0, 0, &tp);
-       if (error == -ENOSPC && fs_blocks == 0) {
-               resblks = 0;
-               error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, 0, 0, 0,
-                               &tp);
-       }
        if (error)
                goto out_release_inode;
 
@@ -259,14 +254,6 @@ xfs_symlink(
        if (error)
                goto out_trans_cancel;
 
-       /*
-        * Check for ability to enter directory entry, if no space reserved.
-        */
-       if (!resblks) {
-               error = xfs_dir_canenter(tp, dp, link_name);
-               if (error)
-                       goto out_trans_cancel;
-       }
        /*
         * Initialize the bmap freelist prior to calling either
         * bmapi or the directory create code.
@@ -277,7 +264,7 @@ xfs_symlink(
         * Allocate an inode for the symlink.
         */
        error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
-                              prid, resblks > 0, &ip, NULL);
+                              prid, &ip, NULL);
        if (error)
                goto out_trans_cancel;
 
index 5d95fe34829438a0a2486bc50278d84b0d0714bf..35f3546b6af5237a78a9fe1bd3aa07cbb8503349 100644 (file)
@@ -24,7 +24,6 @@
 #include "xfs_mount.h"
 #include "xfs_defer.h"
 #include "xfs_da_format.h"
-#include "xfs_defer.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
 #include "xfs_da_btree.h"
index f849be28e0826683a496b7aaedf1f4c76749a95f..79287629c888dd735547e9c727e2d24ed523b241 100644 (file)
@@ -105,6 +105,7 @@ enum acpi_bus_device_type {
        ACPI_BUS_TYPE_THERMAL,
        ACPI_BUS_TYPE_POWER_BUTTON,
        ACPI_BUS_TYPE_SLEEP_BUTTON,
+       ACPI_BUS_TYPE_ECDT_EC,
        ACPI_BUS_DEVICE_TYPE_COUNT
 };
 
index 29c691265b49357bc0d036b71897348806c58e6c..14499757338f65416835330254b8c90a06918d64 100644 (file)
@@ -58,6 +58,7 @@
 #define ACPI_VIDEO_HID                 "LNXVIDEO"
 #define ACPI_BAY_HID                   "LNXIOBAY"
 #define ACPI_DOCK_HID                  "LNXDOCK"
+#define ACPI_ECDT_HID                  "LNXEC"
 /* Quirk for broken IBM BIOSes */
 #define ACPI_SMBUS_IBM_HID             "SMBUSIBM"
 
index 757dc6ffc7ba5f294bae554af3e6d1a01c1207e5..b234d54f2cb6e4c23a21db2af3b225264eccae2a 100644 (file)
@@ -805,15 +805,23 @@ static inline int pmd_trans_huge(pmd_t pmd)
 {
        return 0;
 }
-#ifndef __HAVE_ARCH_PMD_WRITE
+#ifndef pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        BUG();
        return 0;
 }
-#endif /* __HAVE_ARCH_PMD_WRITE */
+#endif /* pmd_write */
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+       BUG();
+       return 0;
+}
+#endif /* pud_write */
+
 #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
        (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
         !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
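
The pmd_write()/pud_write() rework above moves from the __HAVE_ARCH_PMD_WRITE feature macro to the "#ifndef pmd_write" idiom: an architecture that supplies its own helper also defines a macro of the same name, so the generic BUG() stub compiles out. A tiny illustration of the idiom with generic, invented names:

    #include <stdio.h>

    /* --- "arch" header: opt in by defining a macro named after the helper --- */
    #define HAVE_FAST_PATH 1
    #if HAVE_FAST_PATH
    static inline int my_helper(int x) { return x * 2; }
    #define my_helper my_helper
    #endif

    /* --- "generic" header: provide the fallback only if nobody else did --- */
    #ifndef my_helper
    static inline int my_helper(int x) { (void)x; return 0; }  /* stub */
    #endif

    int main(void)
    {
        printf("%d\n", my_helper(21));   /* prints 42: the arch version won */
        return 0;
    }
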
index 6abf0a3604dc391c3218a063473c737be662aa8a..38d9c5861ed8c110d5dbac1b6a807252b520f23e 100644 (file)
@@ -242,6 +242,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
                   unsigned int ivsize);
 ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
                        int offset, size_t size, int flags);
+void af_alg_free_resources(struct af_alg_async_req *areq);
 void af_alg_async_cb(struct crypto_async_request *_req, int err);
 unsigned int af_alg_poll(struct file *file, struct socket *sock,
                         poll_table *wait);
index f0b44c16e88f241721a4296019475abae6b7a3b0..c2bae8da642cbaef97f3de444a446a27df15dc18 100644 (file)
@@ -82,6 +82,14 @@ int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst);
 void ahash_free_instance(struct crypto_instance *inst);
 
+int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+                   unsigned int keylen);
+
+static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
+{
+       return alg->setkey != shash_no_setkey;
+}
+
 int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                            struct hash_alg_common *alg,
                            struct crypto_instance *inst);
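
crypto_shash_alg_has_setkey() above tells keyed and keyless hashes apart by comparing the ->setkey slot against the shared default stub shash_no_setkey, which is why the stub's declaration is now exposed in this header. The same trick in a self-contained form with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    struct algo {
        int (*setkey)(const unsigned char *key, unsigned int keylen);
    };

    /* Default stub installed when an algorithm does not take a key. */
    static int no_setkey(const unsigned char *key, unsigned int keylen)
    {
        (void)key; (void)keylen;
        return -1;
    }

    static int real_setkey(const unsigned char *key, unsigned int keylen)
    {
        (void)key; (void)keylen;
        return 0;
    }

    static bool has_setkey(const struct algo *a)
    {
        /* "Has a real setkey" means "the slot is not the shared stub". */
        return a->setkey != no_setkey;
    }

    int main(void)
    {
        struct algo keyed   = { .setkey = real_setkey };
        struct algo keyless = { .setkey = no_setkey };

        printf("keyed: %d, keyless: %d\n",
               has_setkey(&keyed), has_setkey(&keyless));
        return 0;
    }
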
index df9807a3caaea4d9f77e0797e594f3a8b12ddd11..5971577016a2b09bd484883e609860248ec57387 100644 (file)
@@ -24,6 +24,7 @@
 #define __DRM_CONNECTOR_H__
 
 #include <linux/list.h>
+#include <linux/llist.h>
 #include <linux/ctype.h>
 #include <linux/hdmi.h>
 #include <drm/drm_mode_object.h>
@@ -916,6 +917,15 @@ struct drm_connector {
        uint8_t num_h_tile, num_v_tile;
        uint8_t tile_h_loc, tile_v_loc;
        uint16_t tile_h_size, tile_v_size;
+
+       /**
+        * @free_node:
+        *
+        * List used only by &drm_connector_iter to be able to clean up a
+        * connector from any context, in conjunction with
+        * &drm_mode_config.connector_free_work.
+        */
+       struct llist_node free_node;
 };
 
 #define obj_to_connector(x) container_of(x, struct drm_connector, base)
index 6f35909b8add3221fe8ad4b4c108e5a1498c7f08..efe6d5a8e834168a82ded86e7c93341d93305b2d 100644 (file)
@@ -362,7 +362,8 @@ void
 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
                                   const struct drm_display_mode *mode,
                                   enum hdmi_quantization_range rgb_quant_range,
-                                  bool rgb_quant_range_selectable);
+                                  bool rgb_quant_range_selectable,
+                                  bool is_hdmi2_sink);
 
 /**
  * drm_eld_mnl - Get ELD monitor name length in bytes.
@@ -464,6 +465,8 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
                                     struct i2c_adapter *adapter);
 struct edid *drm_edid_duplicate(const struct edid *edid);
+void drm_reset_display_info(struct drm_connector *connector);
+u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
 int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 
 u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
index b21e827c5c78775742533d28f3baebfd6e0a9b5e..b0ce26d71296df77c835c6520a43c99b3b93b363 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/types.h>
 #include <linux/idr.h>
 #include <linux/workqueue.h>
+#include <linux/llist.h>
 
 #include <drm/drm_modeset_lock.h>
 
@@ -393,7 +394,7 @@ struct drm_mode_config {
 
        /**
         * @connector_list_lock: Protects @num_connector and
-        * @connector_list.
+        * @connector_list, and @connector_free_list.
         */
        spinlock_t connector_list_lock;
        /**
@@ -413,6 +414,21 @@ struct drm_mode_config {
         * &struct drm_connector_list_iter to walk this list.
         */
        struct list_head connector_list;
+       /**
+        * @connector_free_list:
+        *
+        * List of connector objects linked with &drm_connector.free_node.
+        * Protected by @connector_list_lock. Used by
+        * drm_for_each_connector_iter() and
+        * &struct drm_connector_list_iter to safely free connectors using
+        * @connector_free_work.
+        */
+       struct llist_head connector_free_list;
+       /**
+        * @connector_free_work: Work to clean up @connector_free_list.
+        */
+       struct work_struct connector_free_work;
+
        /**
         * @num_encoder:
         *
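
The new @connector_free_list/@connector_free_work pair points at the usual deferred-free pattern: any context may push a dying object onto a lock-free list, and a work item drains and frees the whole batch later. A userspace analog using an atomic push and a swap-out drain (not the kernel llist API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int id;
        struct node *next;
    };

    static _Atomic(struct node *) free_list;

    /* Producer side: may run from any context, takes no locks. */
    static void defer_free(struct node *n)
    {
        n->next = atomic_load(&free_list);
        while (!atomic_compare_exchange_weak(&free_list, &n->next, n))
            ;   /* retry; the failed CAS refreshed n->next with the new head */
    }

    /* "Work item": grab the whole list in one swap, then free at leisure. */
    static void drain(void)
    {
        struct node *n = atomic_exchange(&free_list, NULL);

        while (n) {
            struct node *next = n->next;
            printf("freeing node %d\n", n->id);
            free(n);
            n = next;
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            if (!n)
                return 1;
            n->id = i;
            defer_free(n);
        }
        drain();
        return 0;
    }
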
index 38a2b4770c35f0d440867851a3424d706588d989..593811362a9172fd49fa14f62567af2ee636acc6 100644 (file)
@@ -58,12 +58,21 @@ int ttm_pool_populate(struct ttm_tt *ttm);
  */
 void ttm_pool_unpopulate(struct ttm_tt *ttm);
 
+/**
+ * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
+ */
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
+
+/**
+ * Unpopulates and DMA unmaps pages as part of a
+ * ttm_dma_unpopulate() request */
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
+
 /**
  * Output the state of pools to debugfs file
  */
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
 
-
 #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 /**
  * Initialize pool allocator.
@@ -83,17 +92,6 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
 int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 
-
-/**
- * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
- */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
-
-/**
- * Unpopulates and DMA unmaps pages as part of a
- * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
-
 #else
 static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
                                          unsigned max_pages)
@@ -116,16 +114,6 @@ static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
                                      struct device *dev)
 {
 }
-
-static inline int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
-{
-       return -ENOMEM;
-}
-
-static inline void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
-{
-}
-
 #endif
 
 #endif
index 01ee473517e25b7a9cda5531c5f62411da6d17d4..9da6ce22803f03fc318a7fdd33af380eae67d4e6 100644 (file)
@@ -62,7 +62,7 @@ struct arch_timer_cpu {
        bool                    enabled;
 };
 
-int kvm_timer_hyp_init(void);
+int kvm_timer_hyp_init(bool);
 int kvm_timer_enable(struct kvm_vcpu *vcpu);
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
@@ -93,7 +93,4 @@ void kvm_timer_init_vhe(void);
 #define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer)
 #define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer)
 
-void enable_el1_phys_timer_access(void);
-void disable_el1_phys_timer_access(void);
-
 #endif
index 34dba516ef24ba622c924d82be90d74f9b6d423e..8c896540a72cf4e933556627fa04bce0bf1d2ce3 100644 (file)
@@ -26,6 +26,8 @@
 #include <linux/list.h>
 #include <linux/jump_label.h>
 
+#include <linux/irqchip/arm-gic-v4.h>
+
 #define VGIC_V3_MAX_CPUS       255
 #define VGIC_V2_MAX_CPUS       8
 #define VGIC_NR_IRQS_LEGACY     256
@@ -73,6 +75,9 @@ struct vgic_global {
        /* Only needed for the legacy KVM_CREATE_IRQCHIP */
        bool                    can_emulate_gicv2;
 
+       /* Hardware has GICv4? */
+       bool                    has_gicv4;
+
        /* GIC system register CPU interface */
        struct static_key_false gicv3_cpuif;
 
@@ -116,6 +121,7 @@ struct vgic_irq {
        bool hw;                        /* Tied to HW IRQ */
        struct kref refcount;           /* Used for LPIs */
        u32 hwintid;                    /* HW INTID number */
+       unsigned int host_irq;          /* linux irq corresponding to hwintid */
        union {
                u8 targets;                     /* GICv2 target VCPUs mask */
                u32 mpidr;                      /* GICv3 target VCPU */
@@ -232,6 +238,15 @@ struct vgic_dist {
 
        /* used by vgic-debug */
        struct vgic_state_iter *iter;
+
+       /*
+        * GICv4 ITS per-VM data, containing the IRQ domain, the VPE
+        * array, the property table pointer as well as allocation
+        * data. This essentially ties the Linux IRQ core and ITS
+        * together, and avoids leaking KVM's data structures anywhere
+        * else.
+        */
+       struct its_vm           its_vm;
 };
 
 struct vgic_v2_cpu_if {
@@ -250,6 +265,14 @@ struct vgic_v3_cpu_if {
        u32             vgic_ap0r[4];
        u32             vgic_ap1r[4];
        u64             vgic_lr[VGIC_V3_MAX_LRS];
+
+       /*
+        * GICv4 ITS per-VPE data, containing the doorbell IRQ, the
+        * pending table pointer, the its_vm pointer and a few other
+        * HW specific things. As for the its_vm structure, this is
+        * linking the Linux IRQ subsystem and the ITS together.
+        */
+       struct its_vpe  its_vpe;
 };
 
 struct vgic_cpu {
@@ -307,9 +330,10 @@ void kvm_vgic_init_cpu_hardware(void);
 
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
                        bool level, void *owner);
-int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq);
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
+                         u32 vintid);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 
@@ -349,4 +373,15 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);
 
 int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);
 
+struct kvm_kernel_irq_routing_entry;
+
+int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
+                              struct kvm_kernel_irq_routing_entry *irq_entry);
+
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
+                                struct kvm_kernel_irq_routing_entry *irq_entry);
+
+void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu);
+void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu);
+
 #endif /* __KVM_ARM_VGIC_H */
index 82f0c8fd7be8fd20951af806319f64f615f4ffc6..23d29b39f71e83e8a6a25540adc2e3f28702aec7 100644 (file)
@@ -492,6 +492,8 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 #define bio_set_dev(bio, bdev)                         \
 do {                                           \
+       if ((bio)->bi_disk != (bdev)->bd_disk)  \
+               bio_clear_flag(bio, BIO_THROTTLED);\
        (bio)->bi_disk = (bdev)->bd_disk;       \
        (bio)->bi_partno = (bdev)->bd_partno;   \
 } while (0)
index a1e628e032dad75bf1837a25e45b55a7f54ca2df..9e7d8bd776d227d2ba92b137af7230300f5b1d4a 100644 (file)
@@ -50,8 +50,6 @@ struct blk_issue_stat {
 struct bio {
        struct bio              *bi_next;       /* request queue link */
        struct gendisk          *bi_disk;
-       u8                      bi_partno;
-       blk_status_t            bi_status;
        unsigned int            bi_opf;         /* bottom bits req flags,
                                                 * top bits REQ_OP. Use
                                                 * accessors.
@@ -59,8 +57,8 @@ struct bio {
        unsigned short          bi_flags;       /* status, etc and bvec pool number */
        unsigned short          bi_ioprio;
        unsigned short          bi_write_hint;
-
-       struct bvec_iter        bi_iter;
+       blk_status_t            bi_status;
+       u8                      bi_partno;
 
        /* Number of segments in this BIO after
         * physical address coalescing is performed.
@@ -74,8 +72,9 @@ struct bio {
        unsigned int            bi_seg_front_size;
        unsigned int            bi_seg_back_size;
 
-       atomic_t                __bi_remaining;
+       struct bvec_iter        bi_iter;
 
+       atomic_t                __bi_remaining;
        bio_end_io_t            *bi_end_io;
 
        void                    *bi_private;
index 8089ca17db9ac65998ec9cf82f65743bb5c5abb9..0ce8a372d5069a7aca7810429a968d20e923d3d1 100644 (file)
@@ -135,7 +135,7 @@ typedef __u32 __bitwise req_flags_t;
 struct request {
        struct list_head queuelist;
        union {
-               call_single_data_t csd;
+               struct __call_single_data csd;
                u64 fifo_time;
        };
 
@@ -241,14 +241,24 @@ struct request {
        struct request *next_rq;
 };
 
+static inline bool blk_op_is_scsi(unsigned int op)
+{
+       return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
+}
+
+static inline bool blk_op_is_private(unsigned int op)
+{
+       return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
+}
+
 static inline bool blk_rq_is_scsi(struct request *rq)
 {
-       return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
+       return blk_op_is_scsi(req_op(rq));
 }
 
 static inline bool blk_rq_is_private(struct request *rq)
 {
-       return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
+       return blk_op_is_private(req_op(rq));
 }
 
 static inline bool blk_rq_is_passthrough(struct request *rq)
@@ -256,6 +266,13 @@ static inline bool blk_rq_is_passthrough(struct request *rq)
        return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
 }
 
+static inline bool bio_is_passthrough(struct bio *bio)
+{
+       unsigned op = bio_op(bio);
+
+       return blk_op_is_scsi(op) || blk_op_is_private(op);
+}
+
 static inline unsigned short req_get_ioprio(struct request *req)
 {
        return req->ioprio;
@@ -948,7 +965,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 extern void blk_rq_unprep_clone(struct request *rq);
 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
                                     struct request *rq);
-extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
+extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_queue_split(struct request_queue *, struct bio **);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
index c561b986bab0ebf886000ea34e377ea789138a3b..1632bb13ad8aed8cfeba2ccc69cfa458d02540bb 100644 (file)
  * In practice this is far bigger than any realistic pointer offset; this limit
  * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
  */
-#define BPF_MAX_VAR_OFF        (1ULL << 31)
+#define BPF_MAX_VAR_OFF        (1 << 29)
 /* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
  * that converting umax_value to int cannot overflow.
  */
-#define BPF_MAX_VAR_SIZ        INT_MAX
+#define BPF_MAX_VAR_SIZ        (1 << 29)
 
 /* Liveness marks, used for registers and spilled-regs (in stack slots).
  * Read marks propagate upwards until they find a write mark; they record that
index 3672353a0acda884be51fd3debba26ea50f43b09..52e611ab9a6cf6fde23dae53784f01bfb06ce448 100644 (file)
@@ -88,17 +88,22 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 
 /* Unreachable code */
 #ifdef CONFIG_STACK_VALIDATION
+/*
+ * These macros help objtool understand GCC code flow for unreachable code.
+ * The __COUNTER__ based labels are a hack to make each instance of the macros
+ * unique, to convince GCC not to merge duplicate inline asm statements.
+ */
 #define annotate_reachable() ({                                                \
-       asm("%c0:\n\t"                                                  \
-           ".pushsection .discard.reachable\n\t"                       \
-           ".long %c0b - .\n\t"                                        \
-           ".popsection\n\t" : : "i" (__COUNTER__));                   \
+       asm volatile("%c0:\n\t"                                         \
+                    ".pushsection .discard.reachable\n\t"              \
+                    ".long %c0b - .\n\t"                               \
+                    ".popsection\n\t" : : "i" (__COUNTER__));          \
 })
 #define annotate_unreachable() ({                                      \
-       asm("%c0:\n\t"                                                  \
-           ".pushsection .discard.unreachable\n\t"                     \
-           ".long %c0b - .\n\t"                                        \
-           ".popsection\n\t" : : "i" (__COUNTER__));                   \
+       asm volatile("%c0:\n\t"                                         \
+                    ".pushsection .discard.unreachable\n\t"            \
+                    ".long %c0b - .\n\t"                               \
+                    ".popsection\n\t" : : "i" (__COUNTER__));          \
 })
 #define ASM_UNREACHABLE                                                        \
        "999:\n\t"                                                      \
@@ -215,21 +220,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
- * compiler is aware of some particular ordering.  One way to make the
- * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
  *
- * In contrast to ACCESS_ONCE these two macros will also work on aggregate
- * data types like structs or unions. If the size of the accessed data
- * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
- * least two memcpy()s: one for the __builtin_memcpy() and then one for
- * the macro doing the copy of variable - '__u' allocated on the stack.
+ * These two macros will also work on aggregate data types like structs or
+ * unions. If the size of the accessed data type exceeds the word size of
+ * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
+ * fall back to memcpy(). There's at least two memcpy()s: one for the
+ * __builtin_memcpy() and then one for the macro doing the copy of variable
+ * - '__u' allocated on the stack.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
  * mutilate accesses that either do not require ordering or that interact
  * with an explicit memory barrier or atomic instruction that provides the
  * required ordering.
@@ -322,29 +327,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
        compiletime_assert(__native_word(t),                            \
                "Need native word sized stores/loads for atomicity.")
 
-/*
- * Prevent the compiler from merging or refetching accesses.  The compiler
- * is also forbidden from reordering successive instances of ACCESS_ONCE(),
- * but only when the compiler is aware of some particular ordering.  One way
- * to make the compiler aware of ordering is to put the two invocations of
- * ACCESS_ONCE() in different C statements.
- *
- * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
- * on a union member will work as long as the size of the member matches the
- * size of the union and the size is smaller than word size.
- *
- * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
- * between process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
- * mutilate accesses that either do not require ordering or that interact
- * with an explicit memory barrier or atomic instruction that provides the
- * required ordering.
- *
- * If possible use READ_ONCE()/WRITE_ONCE() instead.
- */
-#define __ACCESS_ONCE(x) ({ \
-        __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
-       (volatile typeof(x) *)&(x); })
-#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
-
 #endif /* __LINUX_COMPILER_H */
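
The rewritten comment above pitches READ_ONCE()/WRITE_ONCE() as the way to stop the compiler from merging, refetching or reordering accesses shared with an interrupt handler, now that ACCESS_ONCE() is gone. A deliberately simplified userspace model of the scalar case (volatile casts only; the real macros also handle aggregates by falling back to memcpy()):

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Simplified, scalar-only models of the READ_ONCE/WRITE_ONCE behaviour. */
    #define ONCE_READ(x)      (*(volatile __typeof__(x) *)&(x))
    #define ONCE_WRITE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

    static int stop;   /* shared between the main loop and the signal handler */

    static void on_alarm(int sig)
    {
        (void)sig;
        ONCE_WRITE(stop, 1);
    }

    int main(void)
    {
        signal(SIGALRM, on_alarm);
        alarm(1);

        /* Without the volatile access the compiler may hoist the load out of
         * the loop and spin forever; ONCE_READ forces a fresh load each time. */
        while (!ONCE_READ(stop))
            ;

        puts("stopped");
        return 0;
    }
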
index 0662a417febe34fb9e638857f13a6d52fcaf0d49..94a59ba7d422f4d3b4a53314d99fa4a1367fbcc4 100644 (file)
@@ -10,9 +10,6 @@
  */
 
 #include <linux/wait.h>
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-#include <linux/lockdep.h>
-#endif
 
 /*
  * struct completion - structure used to maintain state for a "completion"
 struct completion {
        unsigned int done;
        wait_queue_head_t wait;
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-       struct lockdep_map_cross map;
-#endif
 };
 
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-static inline void complete_acquire(struct completion *x)
-{
-       lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_);
-}
-
-static inline void complete_release(struct completion *x)
-{
-       lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_);
-}
-
-static inline void complete_release_commit(struct completion *x)
-{
-       lock_commit_crosslock((struct lockdep_map *)&x->map);
-}
-
-#define init_completion_map(x, m)                                      \
-do {                                                                   \
-       lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map,     \
-                       (m)->name, (m)->key, 0);                                \
-       __init_completion(x);                                           \
-} while (0)
-
-#define init_completion(x)                                             \
-do {                                                                   \
-       static struct lock_class_key __key;                             \
-       lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map,     \
-                       "(completion)" #x,                              \
-                       &__key, 0);                                     \
-       __init_completion(x);                                           \
-} while (0)
-#else
 #define init_completion_map(x, m) __init_completion(x)
 #define init_completion(x) __init_completion(x)
 static inline void complete_acquire(struct completion *x) {}
 static inline void complete_release(struct completion *x) {}
 static inline void complete_release_commit(struct completion *x) {}
-#endif
 
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-#define COMPLETION_INITIALIZER(work) \
-       { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
-       STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) }
-#else
 #define COMPLETION_INITIALIZER(work) \
        { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
-#endif
 
 #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
        (*({ init_completion_map(&(work), &(map)); &(work); }))
index 099058e1178b4d8529438450e28ad03b06497d32..631286535d0f126a13a366b924cae62f58d114e1 100644 (file)
@@ -83,6 +83,7 @@ extern int set_current_groups(struct group_info *);
 extern void set_groups(struct cred *, struct group_info *);
 extern int groups_search(const struct group_info *, kgid_t);
 extern bool may_setgroups(void);
+extern void groups_sort(struct group_info *);
 
 /*
  * The security context of a task
index f36ecc2a57128cdf5df582b0b7b00d125ffd7ca7..3b0ba54cc4d5b0ea9bc7a11d48b476b48e9e9e22 100644 (file)
@@ -216,6 +216,8 @@ static inline void debugfs_remove(struct dentry *dentry)
 static inline void debugfs_remove_recursive(struct dentry *dentry)
 { }
 
+const struct file_operations *debugfs_real_fops(const struct file *filp);
+
 static inline int debugfs_file_get(struct dentry *dentry)
 {
        return 0;
index e8f8e8fb244d649830dfc499163a1e8a7d0e476f..81ed9b2d84dcc78e1b2213e9a22efd0f4f384330 100644 (file)
@@ -704,7 +704,6 @@ static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
        return ret;
 }
 
-#ifdef CONFIG_HAS_DMA
 static inline int dma_get_cache_alignment(void)
 {
 #ifdef ARCH_DMA_MINALIGN
@@ -712,7 +711,6 @@ static inline int dma_get_cache_alignment(void)
 #endif
        return 1;
 }
-#endif
 
 /* flags for the coherent memory api */
 #define DMA_MEMORY_EXCLUSIVE           0x01
index 2995a271ec466c54117025cf819ce65931d5166c..511fbaabf6248b67220c16653e491f74e3f046e7 100644 (file)
@@ -1872,7 +1872,7 @@ struct super_operations {
  */
 #define __IS_FLG(inode, flg)   ((inode)->i_sb->s_flags & (flg))
 
-static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & MS_RDONLY; }
+static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; }
 #define IS_RDONLY(inode)       sb_rdonly((inode)->i_sb)
 #define IS_SYNC(inode)         (__IS_FLG(inode, SB_SYNCHRONOUS) || \
                                        ((inode)->i_flags & S_SYNC))
@@ -3088,7 +3088,8 @@ static inline int vfs_lstat(const char __user *name, struct kstat *stat)
 static inline int vfs_fstatat(int dfd, const char __user *filename,
                              struct kstat *stat, int flags)
 {
-       return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS);
+       return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
+                        stat, STATX_BASIC_STATS);
 }
 static inline int vfs_fstat(int fd, struct kstat *stat)
 {
@@ -3194,6 +3195,20 @@ static inline bool vma_is_dax(struct vm_area_struct *vma)
        return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
 }
 
+static inline bool vma_is_fsdax(struct vm_area_struct *vma)
+{
+       struct inode *inode;
+
+       if (!vma->vm_file)
+               return false;
+       if (!vma_is_dax(vma))
+               return false;
+       inode = file_inode(vma->vm_file);
+       if (inode->i_mode == S_IFCHR)
+               return false; /* device-dax */
+       return true;
+}
+
 static inline int iocb_flags(struct file *file)
 {
        int res = 0;
diff --git a/include/linux/htirq.h b/include/linux/htirq.h
deleted file mode 100644 (file)
index 127c39d..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_HTIRQ_H
-#define LINUX_HTIRQ_H
-
-struct pci_dev;
-struct irq_data;
-
-struct ht_irq_msg {
-       u32     address_lo;     /* low 32 bits of the ht irq message */
-       u32     address_hi;     /* high 32 bits of the it irq message */
-};
-
-typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
-                              struct ht_irq_msg *msg);
-
-struct ht_irq_cfg {
-       struct pci_dev *dev;
-        /* Update callback used to cope with buggy hardware */
-       ht_irq_update_t *update;
-       unsigned pos;
-       unsigned idx;
-       struct ht_irq_msg msg;
-};
-
-/* Helper functions.. */
-void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void mask_ht_irq(struct irq_data *data);
-void unmask_ht_irq(struct irq_data *data);
-
-/* The arch hook for getting things started */
-int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
-                     ht_irq_update_t *update);
-void arch_teardown_ht_irq(unsigned int irq);
-
-/* For drivers of buggy hardware */
-int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update);
-
-#endif /* LINUX_HTIRQ_H */
index fbf5b31d47eea91925b9275b9f7fa2784cd5fe56..82a25880714ac69860322edc3e69b4a81b83fb62 100644 (file)
@@ -239,14 +239,6 @@ static inline int pgd_write(pgd_t pgd)
 }
 #endif
 
-#ifndef pud_write
-static inline int pud_write(pud_t pud)
-{
-       BUG();
-       return 0;
-}
-#endif
-
 #define HUGETLB_ANON_FILE "anon_hugepage"
 
 enum {
index f3e97c5f94c96bbaf4ef338f9bad6145f552b1be..6c9336626592b0a07e4216ea97a72a050e7146d4 100644 (file)
@@ -708,6 +708,7 @@ struct vmbus_channel {
        u8 monitor_bit;
 
        bool rescind; /* got rescind msg */
+       struct completion rescind_event;
 
        u32 ringbuffer_gpadlhandle;
 
index 7c3a365f7e127ac29cfedc6b5debe81eb87abe74..fa14f834e4ede3a81e7e6182c889fec338b2495f 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/radix-tree.h>
 #include <linux/gfp.h>
 #include <linux/percpu.h>
+#include <linux/bug.h>
 
 struct idr {
        struct radix_tree_root  idr_rt;
index 34d59bfdce2d2b12b0f6d7b61c601bfc0e2cb518..464458d20b16501ef45633c09cb2b7705e9f3f84 100644 (file)
 #define LPTIM2_OUT     "lptim2_out"
 #define LPTIM3_OUT     "lptim3_out"
 
-#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
+#if IS_REACHABLE(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
 bool is_stm32_lptim_trigger(struct iio_trigger *trig);
 #else
 static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig)
 {
+#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
+       pr_warn_once("stm32 lptim_trigger not linked in\n");
+#endif
        return false;
 }
 #endif
similarity index 94%
rename from include/linux/pti.h
rename to include/linux/intel-pti.h
index b3ea01a3197efa5c30768fe00ee0f24aad6941aa..2710d72de3c926c2a4e1be36a921aa23ac107c31 100644 (file)
@@ -22,8 +22,8 @@
  * interface to write out it's contents for debugging a mobile system.
  */
 
-#ifndef PTI_H_
-#define PTI_H_
+#ifndef LINUX_INTEL_PTI_H_
+#define LINUX_INTEL_PTI_H_
 
 /* offset for last dword of any PTI message. Part of MIPI P1149.7 */
 #define PTI_LASTDWORD_DTS      0x30
@@ -40,4 +40,4 @@ struct pti_masterchannel *pti_request_masterchannel(u8 type,
                                                    const char *thread_name);
 void pti_release_masterchannel(struct pti_masterchannel *mc);
 
-#endif /*PTI_H_*/
+#endif /* LINUX_INTEL_PTI_H_ */
index cb18c6290ca87290996e636f3eda14eb03d26316..8415bf1a9776245b810c8f92fa16c98eb038a45f 100644 (file)
@@ -273,7 +273,8 @@ struct ipv6_pinfo {
                                                 * 100: prefer care-of address
                                                 */
                                dontfrag:1,
-                               autoflowlabel:1;
+                               autoflowlabel:1,
+                               autoflowlabel_set:1;
        __u8                    min_hopcount;
        __u8                    tclass;
        __be32                  rcv_flowinfo;
index b01d06db9101ae73b08952ba6e11abf86c1bbb6d..e140f69163b693b386bdc709719b4efc3d8a30b0 100644 (file)
@@ -211,6 +211,7 @@ struct irq_data {
  * IRQD_MANAGED_SHUTDOWN       - Interrupt was shutdown due to empty affinity
  *                               mask. Applies only to affinity managed irqs.
  * IRQD_SINGLE_TARGET          - IRQ allows only a single affinity target
+ * IRQD_DEFAULT_TRIGGER_SET    - Expected trigger has already been set
  */
 enum {
        IRQD_TRIGGER_MASK               = 0xf,
@@ -231,6 +232,7 @@ enum {
        IRQD_IRQ_STARTED                = (1 << 22),
        IRQD_MANAGED_SHUTDOWN           = (1 << 23),
        IRQD_SINGLE_TARGET              = (1 << 24),
+       IRQD_DEFAULT_TRIGGER_SET        = (1 << 25),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -260,18 +262,25 @@ static inline void irqd_mark_affinity_was_set(struct irq_data *d)
        __irqd_to_state(d) |= IRQD_AFFINITY_SET;
 }
 
+static inline bool irqd_trigger_type_was_set(struct irq_data *d)
+{
+       return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
+}
+
 static inline u32 irqd_get_trigger_type(struct irq_data *d)
 {
        return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
 }
 
 /*
- * Must only be called inside irq_chip.irq_set_type() functions.
+ * Must only be called inside irq_chip.irq_set_type() functions or
+ * from the DT/ACPI setup code.
  */
 static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
 {
        __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
        __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
+       __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
 }
 
 static inline bool irqd_is_level_type(struct irq_data *d)
index 447da8ca2156221749876d53c84430c62e729980..fa683ea5c7692ef4ef0c743b619a9ee61ebd9317 100644 (file)
@@ -109,6 +109,7 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map);
 int its_unmap_vlpi(int irq);
 int its_prop_update_vlpi(int irq, u8 config, bool inv);
 
+struct irq_domain_ops;
 int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
 
 #endif
index dd418955962bc23a6e30af9c2edd3fed57118f69..39fb3700f7a92aae1a6c3d417f5effbfb93d6494 100644 (file)
@@ -230,7 +230,7 @@ irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
        data->chip = chip;
 }
 
-static inline int irq_balancing_disabled(unsigned int irq)
+static inline bool irq_balancing_disabled(unsigned int irq)
 {
        struct irq_desc *desc;
 
@@ -238,7 +238,7 @@ static inline int irq_balancing_disabled(unsigned int irq)
        return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
 }
 
-static inline int irq_is_percpu(unsigned int irq)
+static inline bool irq_is_percpu(unsigned int irq)
 {
        struct irq_desc *desc;
 
@@ -246,7 +246,7 @@ static inline int irq_is_percpu(unsigned int irq)
        return desc->status_use_accessors & IRQ_PER_CPU;
 }
 
-static inline int irq_is_percpu_devid(unsigned int irq)
+static inline bool irq_is_percpu_devid(unsigned int irq)
 {
        struct irq_desc *desc;
 
index 708f337d780be3ee4c628fe0422e3a6752d0fb33..bd118a6c60cbf8c5dd28478239e44550d899b57c 100644 (file)
 #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
                         2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
 
-#ifndef CONFIG_64BIT
-# define KALLSYM_FMT "%08lx"
-#else
-# define KALLSYM_FMT "%016lx"
-#endif
-
 struct module;
 
 #ifdef CONFIG_KALLSYMS
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
index 3203e36b2ee81f746b6d87c16701cdc567274ebd..c1961761311dbfd5968d6ed64ea91ca3c7d25b0e 100644 (file)
@@ -118,8 +118,7 @@ struct kthread_delayed_work {
 
 #define KTHREAD_DELAYED_WORK_INIT(dwork, fn) {                         \
        .work = KTHREAD_WORK_INIT((dwork).work, (fn)),                  \
-       .timer = __TIMER_INITIALIZER((TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn,\
-                                    (TIMER_DATA_TYPE)&(dwork.timer),   \
+       .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,\
                                     TIMER_IRQSAFE),                    \
        }
 
@@ -165,10 +164,9 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
 #define kthread_init_delayed_work(dwork, fn)                           \
        do {                                                            \
                kthread_init_work(&(dwork)->work, (fn));                \
-               __setup_timer(&(dwork)->timer,                          \
-                             (TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn,\
-                             (TIMER_DATA_TYPE)&(dwork)->timer,         \
-                             TIMER_IRQSAFE);                           \
+               __init_timer(&(dwork)->timer,                           \
+                            kthread_delayed_work_timer_fn,             \
+                            TIMER_IRQSAFE);                            \
        } while (0)
 
 int kthread_worker_fn(void *worker_ptr);
index 2e754b7c282c8324778b60e7ea57940d9f72c22d..6bdd4b9f661154c386754654db238d13e0a13a31 100644 (file)
@@ -232,7 +232,7 @@ struct kvm_vcpu {
        struct mutex mutex;
        struct kvm_run *run;
 
-       int guest_fpu_loaded, guest_xcr0_loaded;
+       int guest_xcr0_loaded;
        struct swait_queue_head wq;
        struct pid __rcu *pid;
        int sigset_active;
@@ -715,6 +715,9 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
                         unsigned long len);
 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
 
+void kvm_sigset_activate(struct kvm_vcpu *vcpu);
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
+
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
similarity index 100%
rename from include/lib/libgcc.h
rename to include/linux/libgcc.h
index a842551fe0449a38df5510a39d3fdc4760caa0ee..2e75dc34bff5cd3e468571918329793eaf7a0911 100644 (file)
@@ -158,12 +158,6 @@ struct lockdep_map {
        int                             cpu;
        unsigned long                   ip;
 #endif
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-       /*
-        * Whether it's a crosslock.
-        */
-       int                             cross;
-#endif
 };
 
 static inline void lockdep_copy_map(struct lockdep_map *to,
@@ -267,95 +261,8 @@ struct held_lock {
        unsigned int hardirqs_off:1;
        unsigned int references:12;                                     /* 32 bits */
        unsigned int pin_count;
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-       /*
-        * Generation id.
-        *
-        * A value of cross_gen_id will be stored when holding this,
-        * which is globally increased whenever each crosslock is held.
-        */
-       unsigned int gen_id;
-#endif
-};
-
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#define MAX_XHLOCK_TRACE_ENTRIES 5
-
-/*
- * This is for keeping locks waiting for commit so that true dependencies
- * can be added at commit step.
- */
-struct hist_lock {
-       /*
-        * Id for each entry in the ring buffer. This is used to
-        * decide whether the ring buffer was overwritten or not.
-        *
-        * For example,
-        *
-        *           |<----------- hist_lock ring buffer size ------->|
-        *           pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
-        * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
-        *
-        *           where 'p' represents an acquisition in process
-        *           context, 'i' represents an acquisition in irq
-        *           context.
-        *
-        * In this example, the ring buffer was overwritten by
-        * acquisitions in irq context, that should be detected on
-        * rollback or commit.
-        */
-       unsigned int hist_id;
-
-       /*
-        * Seperate stack_trace data. This will be used at commit step.
-        */
-       struct stack_trace      trace;
-       unsigned long           trace_entries[MAX_XHLOCK_TRACE_ENTRIES];
-
-       /*
-        * Seperate hlock instance. This will be used at commit step.
-        *
-        * TODO: Use a smaller data structure containing only necessary
-        * data. However, we should make lockdep code able to handle the
-        * smaller one first.
-        */
-       struct held_lock        hlock;
 };
 
-/*
- * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
- * be called instead of lockdep_init_map().
- */
-struct cross_lock {
-       /*
-        * When more than one acquisition of crosslocks are overlapped,
-        * we have to perform commit for them based on cross_gen_id of
-        * the first acquisition, which allows us to add more true
-        * dependencies.
-        *
-        * Moreover, when no acquisition of a crosslock is in progress,
-        * we should not perform commit because the lock might not exist
-        * any more, which might cause incorrect memory access. So we
-        * have to track the number of acquisitions of a crosslock.
-        */
-       int nr_acquire;
-
-       /*
-        * Seperate hlock instance. This will be used at commit step.
-        *
-        * TODO: Use a smaller data structure containing only necessary
-        * data. However, we should make lockdep code able to handle the
-        * smaller one first.
-        */
-       struct held_lock        hlock;
-};
-
-struct lockdep_map_cross {
-       struct lockdep_map map;
-       struct cross_lock xlock;
-};
-#endif
-
 /*
  * Initialization, self-test and debugging-output methods:
  */
@@ -560,37 +467,6 @@ enum xhlock_context_t {
        XHLOCK_CTX_NR,
 };
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
-                                      const char *name,
-                                      struct lock_class_key *key,
-                                      int subclass);
-extern void lock_commit_crosslock(struct lockdep_map *lock);
-
-/*
- * What we essencially have to initialize is 'nr_acquire'. Other members
- * will be initialized in add_xlock().
- */
-#define STATIC_CROSS_LOCK_INIT() \
-       { .nr_acquire = 0,}
-
-#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
-       { .map.name = (_name), .map.key = (void *)(_key), \
-         .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }
-
-/*
- * To initialize a lockdep_map statically use this macro.
- * Note that _name must not be NULL.
- */
-#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
-       { .name = (_name), .key = (void *)(_key), .cross = 0, }
-
-extern void crossrelease_hist_start(enum xhlock_context_t c);
-extern void crossrelease_hist_end(enum xhlock_context_t c);
-extern void lockdep_invariant_state(bool force);
-extern void lockdep_init_task(struct task_struct *task);
-extern void lockdep_free_task(struct task_struct *task);
-#else /* !CROSSRELEASE */
 #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
 /*
  * To initialize a lockdep_map statically use this macro.
@@ -604,7 +480,6 @@ static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
 static inline void lockdep_invariant_state(bool force) {}
 static inline void lockdep_init_task(struct task_struct *task) {}
 static inline void lockdep_free_task(struct task_struct *task) {}
-#endif /* CROSSRELEASE */
 
 #ifdef CONFIG_LOCK_STAT
 
index a2a1318a3d0c8be0a1fb3d1a08fcf671ff9d8bee..c3d3f04d8cc689eddf217c0626e71d8c16530db5 100644 (file)
@@ -915,10 +915,10 @@ enum PDEV_STAT  {PDEV_STAT_IDLE, PDEV_STAT_RUN};
 #define LTR_L1SS_PWR_GATE_CHECK_CARD_EN        BIT(6)
 
 enum dev_aspm_mode {
-       DEV_ASPM_DISABLE = 0,
        DEV_ASPM_DYNAMIC,
        DEV_ASPM_BACKDOOR,
        DEV_ASPM_STATIC,
+       DEV_ASPM_DISABLE,
 };
 
 /*
index 895ec0c4942e68c43ca49f1fb77f5112344ca05a..a2246cf670badb96e6c11c4d13b233db0c93388f 100644 (file)
@@ -54,7 +54,7 @@ static inline struct page *new_page_nodemask(struct page *page,
        new_page = __alloc_pages_nodemask(gfp_mask, order,
                                preferred_nid, nodemask);
 
-       if (new_page && PageTransHuge(page))
+       if (new_page && PageTransHuge(new_page))
                prep_transhuge_page(new_page);
 
        return new_page;
index a886b51511abbf146c4c76309aa313e4bbf77dda..57b109c6e422784dd0498c46f9a1a6e96f3ef0ca 100644 (file)
@@ -556,6 +556,7 @@ struct mlx5_core_sriov {
 };
 
 struct mlx5_irq_info {
+       cpumask_var_t mask;
        char name[MLX5_MAX_IRQ_NAME];
 };
 
@@ -1048,7 +1049,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       enum mlx5_eq_type type);
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_start_eqs(struct mlx5_core_dev *dev);
-int mlx5_stop_eqs(struct mlx5_core_dev *dev);
+void mlx5_stop_eqs(struct mlx5_core_dev *dev);
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
index 38a7577a9ce71fbcf63c21e2911364795842daa8..d44ec5f41d4a04c72b25b4db1d6fb0217f8f1fa1 100644 (file)
@@ -147,7 +147,7 @@ enum {
        MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,
        MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,
        MLX5_CMD_OP_QUERY_Q_COUNTER               = 0x773,
-       MLX5_CMD_OP_SET_RATE_LIMIT                = 0x780,
+       MLX5_CMD_OP_SET_PP_RATE_LIMIT             = 0x780,
        MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,
        MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT      = 0x782,
        MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT     = 0x783,
@@ -7239,7 +7239,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
        u8         vxlan_udp_port[0x10];
 };
 
-struct mlx5_ifc_set_rate_limit_out_bits {
+struct mlx5_ifc_set_pp_rate_limit_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
 
@@ -7248,7 +7248,7 @@ struct mlx5_ifc_set_rate_limit_out_bits {
        u8         reserved_at_40[0x40];
 };
 
-struct mlx5_ifc_set_rate_limit_in_bits {
+struct mlx5_ifc_set_pp_rate_limit_in_bits {
        u8         opcode[0x10];
        u8         reserved_at_10[0x10];
 
@@ -7261,6 +7261,8 @@ struct mlx5_ifc_set_rate_limit_in_bits {
        u8         reserved_at_60[0x20];
 
        u8         rate_limit[0x20];
+
+       u8         reserved_at_a0[0x160];
 };
 
 struct mlx5_ifc_access_register_out_bits {
index ee073146aaa7c0085d4e212726be5d60ee317e5a..ea818ff739cdfbb433fc10634ed5ac77eacbc5b7 100644 (file)
@@ -377,6 +377,7 @@ enum page_entry_size {
 struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
+       int (*split)(struct vm_area_struct * area, unsigned long addr);
        int (*mremap)(struct vm_area_struct * area);
        int (*fault)(struct vm_fault *vmf);
        int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
@@ -1379,6 +1380,19 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
                    unsigned int gup_flags, struct page **pages, int *locked);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                    struct page **pages, unsigned int gup_flags);
+#ifdef CONFIG_FS_DAX
+long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
+                           unsigned int gup_flags, struct page **pages,
+                           struct vm_area_struct **vmas);
+#else
+static inline long get_user_pages_longterm(unsigned long start,
+               unsigned long nr_pages, unsigned int gup_flags,
+               struct page **pages, struct vm_area_struct **vmas)
+{
+       return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+}
+#endif /* CONFIG_FS_DAX */
+
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages);
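
The get_user_pages_longterm() helper added above keeps the get_user_pages() calling convention but, when CONFIG_FS_DAX is enabled, refuses long-term pins of fs-dax mappings; otherwise it simply falls back to get_user_pages(). A rough sketch of a caller pinning a user buffer for a long-lived mapping follows; demo_pin_user_buffer() and its error handling are assumptions for illustration, and mmap_sem must be held as for any gup call.

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/errno.h>

static long demo_pin_user_buffer(unsigned long uaddr, unsigned long npages,
				 struct page **pages)
{
	long pinned;

	down_read(&current->mm->mmap_sem);	/* gup requires mmap_sem */
	pinned = get_user_pages_longterm(uaddr, npages, FOLL_WRITE,
					 pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (pinned < 0)
		return pinned;		/* fault, or fs-dax refused the pin */

	if (pinned != npages) {
		/* partial pin: drop what we got and report failure */
		while (pinned--)
			put_page(pages[pinned]);
		return -EFAULT;
	}
	return pinned;
}
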
 
index 01c91d874a57f5edc7f4698b57918402a211d707..5bad038ac012e6e3047fd6a63a625c6814966d51 100644 (file)
@@ -66,6 +66,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
        return tsk->signal->oom_mm;
 }
 
+/*
+ * Use this helper if tsk->mm != mm and the victim mm needs a special
+ * handling. This is guaranteed to stay true after once set.
+ */
+static inline bool mm_is_oom_victim(struct mm_struct *mm)
+{
+       return test_bit(MMF_OOM_VICTIM, &mm->flags);
+}
+
 /*
  * Checks whether a page fault on the given mm is still reliable.
  * This is no longer true if the oom reaper started to reap the
index 96c94980d1ff383f9f45bac4ccd2f6ed6361525c..c170c9250c8b706e62e00f4b6a26149735bacba5 100644 (file)
@@ -1485,12 +1485,6 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
 static inline void pcie_ecrc_get_policy(char *str) { }
 #endif
 
-#ifdef CONFIG_HT_IRQ
-/* The functions a driver should call */
-int  ht_create_irq(struct pci_dev *dev, int idx);
-void ht_destroy_irq(unsigned int irq);
-#endif /* CONFIG_HT_IRQ */
-
 #ifdef CONFIG_PCI_ATS
 /* Address Translation Service */
 void pci_ats_init(struct pci_dev *dev);
@@ -1680,6 +1674,9 @@ static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
                                                unsigned int devfn)
 { return NULL; }
+static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
+                                       unsigned int bus, unsigned int devfn)
+{ return NULL; }
 
 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
index 2c9c87d8a0c18e5f5c1cf2a8e148504e4f3ad3a9..7546822a1d74f198d5c521bc2864c369e38d70e4 100644 (file)
@@ -15,6 +15,7 @@
 #define _LINUX_PERF_EVENT_H
 
 #include <uapi/linux/perf_event.h>
+#include <uapi/linux/bpf_perf_event.h>
 
 /*
  * Kernel-internal data types and definitions:
@@ -787,7 +788,7 @@ struct perf_output_handle {
 };
 
 struct bpf_perf_event_data_kern {
-       struct pt_regs *regs;
+       bpf_user_pt_regs_t *regs;
        struct perf_sample_data *data;
        struct perf_event *event;
 };
@@ -1177,6 +1178,9 @@ extern void perf_bp_event(struct perf_event *event, void *data);
                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
 # define perf_instruction_pointer(regs)        instruction_pointer(regs)
 #endif
+#ifndef perf_arch_bpf_user_pt_regs
+# define perf_arch_bpf_user_pt_regs(regs) regs
+#endif
 
 static inline bool has_branch_stack(struct perf_event *event)
 {
index 65d39115f06d8780399e469ed44fc1c90f074e69..492ed473ba7e440ad422c3a5c16833ef47e66374 100644 (file)
@@ -765,6 +765,7 @@ extern int pm_generic_poweroff_late(struct device *dev);
 extern int pm_generic_poweroff(struct device *dev);
 extern void pm_generic_complete(struct device *dev);
 
+extern void dev_pm_skip_next_resume_phases(struct device *dev);
 extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);
 
 #else /* !CONFIG_PM_SLEEP */
index 37b4bb2545b32dc82633df6b76e629b9999f900b..6866df4f31b59da506d56b3db29501a56d813635 100644 (file)
@@ -101,12 +101,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)
 
 /* Note: callers invoking this in a loop must use a compiler barrier,
  * for example cpu_relax(). Callers must hold producer_lock.
+ * Callers are responsible for making sure the pointer that is being
+ * queued points to valid data.
  */
 static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
 {
        if (unlikely(!r->size) || r->queue[r->producer])
                return -ENOSPC;
 
+       /* Make sure the pointer we are storing points to valid data. */
+       /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
+       smp_wmb();
+
        r->queue[r->producer++] = ptr;
        if (unlikely(r->producer >= r->size))
                r->producer = 0;
@@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
        if (ptr)
                __ptr_ring_discard_one(r);
 
+       /* Make sure anyone accessing data through the pointer is up to date. */
+       /* Pairs with smp_wmb in __ptr_ring_produce. */
+       smp_read_barrier_depends();
        return ptr;
 }
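
The smp_wmb() added in __ptr_ring_produce() pairs with the smp_read_barrier_depends() in __ptr_ring_consume(): a consumer that observes a non-NULL slot is guaranteed to also see the data the pointer refers to. A minimal sketch of that pattern using the locked ptr_ring_produce()/ptr_ring_consume() wrappers; the demo_* names are placeholders, not part of the patch.

#include <linux/ptr_ring.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct demo_item {			/* placeholder payload */
	int value;
};

static struct ptr_ring demo_ring;

static int demo_ring_setup(void)
{
	/* 128 slots; allocation may sleep */
	return ptr_ring_init(&demo_ring, 128, GFP_KERNEL);
}

static int demo_produce(int value)
{
	struct demo_item *item = kmalloc(sizeof(*item), GFP_KERNEL);

	if (!item)
		return -ENOMEM;
	item->value = value;	/* fully initialize before publishing */
	if (ptr_ring_produce(&demo_ring, item)) {	/* takes producer_lock */
		kfree(item);	/* ring full */
		return -ENOSPC;
	}
	return 0;
}

static void demo_consume_all(void)
{
	struct demo_item *item;

	/* takes consumer_lock; the barrier ensures item->value is visible */
	while ((item = ptr_ring_consume(&demo_ring)) != NULL) {
		pr_info("consumed %d\n", item->value);
		kfree(item);
	}
}
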
 
index d574361943ea832532cf93177e97a8c62d6c3430..fcbeed4053efbba81993fea4151a8a6a6cd6f5d1 100644 (file)
@@ -99,6 +99,8 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
                            struct rb_root *root);
 extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
                                struct rb_root *root);
+extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+                                  struct rb_root_cached *root);
 
 static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
                                struct rb_node **rb_link)
index a328e8181e49f3a0947dd713daeef35b9d7c831f..e4b257ff881bfe439a945d7487f5700f17a26740 100644 (file)
@@ -100,44 +100,6 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
                first->pprev = &n->next;
 }
 
-/**
- * hlist_nulls_add_tail_rcu
- * @n: the element to add to the hash list.
- * @h: the list to add to.
- *
- * Description:
- * Adds the specified element to the end of the specified hlist_nulls,
- * while permitting racing traversals.  NOTE: tail insertion requires
- * list traversal.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
- * or hlist_nulls_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
- * problems on Alpha CPUs.  Regardless of the type of CPU, the
- * list-traversal primitive must be guarded by rcu_read_lock().
- */
-static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
-                                       struct hlist_nulls_head *h)
-{
-       struct hlist_nulls_node *i, *last = NULL;
-
-       for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
-            i = hlist_nulls_next_rcu(i))
-               last = i;
-
-       if (last) {
-               n->next = last->next;
-               n->pprev = &last->next;
-               rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
-       } else {
-               hlist_nulls_add_head_rcu(n, h);
-       }
-}
-
 /**
  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
  * @tpos:      the type * to use as a loop cursor.
index cc0072e93e360722f40a19928c001a1feab272bb..857a72ceb794252eb8cb22c55ef85d17453c34a0 100644 (file)
@@ -10,9 +10,6 @@
  */
 typedef struct {
        arch_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
-       unsigned int break_lock;
-#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
        unsigned int magic, owner_cpu;
        void *owner;
index a5dc7c98b0a2e5d54814a2cd6d3ecc2ffd6f4ea1..d2588263a9893caa04d8854607207c41080927cc 100644 (file)
@@ -473,10 +473,10 @@ struct sched_dl_entity {
         * conditions between the inactive timer handler and the wakeup
         * code.
         */
-       int                             dl_throttled      : 1;
-       int                             dl_boosted        : 1;
-       int                             dl_yielded        : 1;
-       int                             dl_non_contending : 1;
+       unsigned int                    dl_throttled      : 1;
+       unsigned int                    dl_boosted        : 1;
+       unsigned int                    dl_yielded        : 1;
+       unsigned int                    dl_non_contending : 1;
 
        /*
         * Bandwidth enforcement timer. Each -deadline task has its
@@ -849,17 +849,6 @@ struct task_struct {
        struct held_lock                held_locks[MAX_LOCK_DEPTH];
 #endif
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#define MAX_XHLOCKS_NR 64UL
-       struct hist_lock *xhlocks; /* Crossrelease history locks */
-       unsigned int xhlock_idx;
-       /* For restoring at history boundaries */
-       unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
-       unsigned int hist_id;
-       /* For overwrite check at each context exit */
-       unsigned int hist_id_save[XHLOCK_CTX_NR];
-#endif
-
 #ifdef CONFIG_UBSAN
        unsigned int                    in_ubsan;
 #endif
@@ -1503,7 +1492,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from)
        __set_task_comm(tsk, from, false);
 }
 
-extern char *get_task_comm(char *to, struct task_struct *tsk);
+extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
+#define get_task_comm(buf, tsk) ({                     \
+       BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);     \
+       __get_task_comm(buf, sizeof(buf), tsk);         \
+})
 
 #ifdef CONFIG_SMP
 void scheduler_ipi(void);
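
The get_task_comm() wrapper above uses BUILD_BUG_ON() to insist that the destination is an actual TASK_COMM_LEN-sized array rather than a bare pointer, so truncation bugs are caught at compile time. A minimal sketch of a conforming caller; demo_log_comm() is only illustrative.

#include <linux/sched.h>
#include <linux/printk.h>

static void demo_log_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];	/* sizeof(comm) == TASK_COMM_LEN,
					 * so the BUILD_BUG_ON() is satisfied */

	get_task_comm(comm, tsk);
	pr_info("task %d comm %s\n", task_pid_nr(tsk), comm);
}
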
index 9c8847395b5e15347e150d9b99337a70217b97db..ec912d01126f4b01f5ac61ebae7f73f8e0527d23 100644 (file)
@@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm)
 #define MMF_UNSTABLE           22      /* mm is unstable for copy_from_user */
 #define MMF_HUGE_ZERO_PAGE     23      /* mm has ever used the global huge zero page */
 #define MMF_DISABLE_THP                24      /* disable THP for all VMAs */
+#define MMF_OOM_VICTIM         25      /* mm is the oom victim */
 #define MMF_DISABLE_THP_MASK   (1 << MMF_DISABLE_THP)
 
 #define MMF_INIT_MASK          (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
index e69402d4a8aecbdc6fe4a40e8ad7e01957badbd6..d609e6dc5bad00bb54f9258c077dc3220cbcb938 100644 (file)
@@ -184,7 +184,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
        struct serdev_device *serdev = ctrl->serdev;
 
        if (!serdev || !serdev->ops->receive_buf)
-               return -EINVAL;
+               return 0;
 
        return serdev->ops->receive_buf(serdev, data, count);
 }
index bc486ef23f20f91ce3ed183e935399d1e4c55e18..a38c80e9f91efee011f22bc4e6755f3e4d85ff69 100644 (file)
@@ -1406,8 +1406,7 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
 }
 
 /*
- * If users == 1, we are the only owner and are can avoid redundant
- * atomic change.
+ * If users == 1, we are the only owner and can avoid redundant atomic changes.
  */
 
 /**
index 7b2170bfd6e7dae432478fffdbc70e1408740394..bc6bb325d1bf7c03db223c568b891e5d33dc93ca 100644 (file)
@@ -126,7 +126,7 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
  *     for that name.  This appears in the sysfs "modalias" attribute
  *     for driver coldplugging, and in uevents used for hotplugging
  * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
- *     when not using a GPIO line)
+ *     not using a GPIO line)
  *
  * @statistics: statistics for the spi_device
  *
index a39186194cd6782ac0b6705f02af6179a86f7813..3bf273538840103c7b6c634b148b8c74ce754843 100644 (file)
@@ -107,16 +107,11 @@ do {                                                              \
 
 #define raw_spin_is_locked(lock)       arch_spin_is_locked(&(lock)->raw_lock)
 
-#ifdef CONFIG_GENERIC_LOCKBREAK
-#define raw_spin_is_contended(lock) ((lock)->break_lock)
-#else
-
 #ifdef arch_spin_is_contended
 #define raw_spin_is_contended(lock)    arch_spin_is_contended(&(lock)->raw_lock)
 #else
 #define raw_spin_is_contended(lock)    (((void)(lock), 0))
 #endif /*arch_spin_is_contended*/
-#endif
 
 /*
  * This barrier must provide two things:
index 73548eb13a5ddc82ea16c6a292a8d704471eb7ac..24b4e6f2c1a22fe2a6717f4d6076ea87d595994b 100644 (file)
@@ -19,9 +19,6 @@
 
 typedef struct raw_spinlock {
        arch_spinlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
-       unsigned int break_lock;
-#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
        unsigned int magic, owner_cpu;
        void *owner;
index 410ecf17de3ce591017e36a95f28de18ae54dcc1..cfd83eb2f926c74622f46ed931bb1c58277df49f 100644 (file)
@@ -259,7 +259,10 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
 {
        __kernel_size_t ret;
        size_t p_size = __builtin_object_size(p, 0);
-       if (p_size == (size_t)-1)
+
+       /* Work around gcc excess stack consumption issue */
+       if (p_size == (size_t)-1 ||
+           (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
                return __builtin_strlen(p);
        ret = strnlen(p, p_size);
        if (p_size <= ret)
index 270bad0e1bed137e9ce44727d9d79fde830e0006..40d2822f0e2f1d1a6aa1a84e2ec820a8f9df392e 100644 (file)
@@ -213,7 +213,7 @@ extern void __init cache_initialize(void);
 extern int cache_register_net(struct cache_detail *cd, struct net *net);
 extern void cache_unregister_net(struct cache_detail *cd, struct net *net);
 
-extern struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net);
+extern struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net);
 extern void cache_destroy_net(struct cache_detail *cd, struct net *net);
 
 extern void sunrpc_init_cache_detail(struct cache_detail *cd);
index e32dfe098e822f559810d785a685e05054974cc0..40839c02d28c04389218a72012f784efc8ca5ff8 100644 (file)
@@ -117,6 +117,12 @@ struct attribute_group {
        .show   = _name##_show,                                         \
 }
 
+#define __ATTR_RO_MODE(_name, _mode) {                                 \
+       .attr   = { .name = __stringify(_name),                         \
+                   .mode = VERIFY_OCTAL_PERMISSIONS(_mode) },          \
+       .show   = _name##_show,                                         \
+}
+
 #define __ATTR_WO(_name) {                                             \
        .attr   = { .name = __stringify(_name), .mode = S_IWUSR },      \
        .store  = _name##_store,                                        \
index df5d97a85e1a6674e76dcc51ef54a250cbbdb539..ca4a6361389b8a3b268ca5b0f4778662a1f7d315 100644 (file)
@@ -224,7 +224,8 @@ struct tcp_sock {
                rate_app_limited:1,  /* rate_{delivered,interval_us} limited? */
                fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
                fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
-               unused:3;
+               is_sack_reneg:1,    /* in recovery from loss with SACK reneg? */
+               unused:2;
        u8      nonagle     : 4,/* Disable Nagle algorithm?             */
                thin_lto    : 1,/* Use linear timeouts for thin streams */
                unused1     : 1,
index 7e9011101cb08f674a5c2ba698be5f67367a464c..d315c3d6725c499bde572fa61ab5068007c0ba5d 100644 (file)
@@ -136,13 +136,6 @@ struct timekeeper {
 extern void update_vsyscall(struct timekeeper *tk);
 extern void update_vsyscall_tz(void);
 
-#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
-
-extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
-                               struct clocksource *c, u32 mult,
-                               u64 cycle_last);
-extern void update_vsyscall_tz(void);
-
 #else
 
 static inline void update_vsyscall(struct timekeeper *tk)
index c198ab40c04fb37174e7dbd683abe82715dcfb16..b17bcce58bc493eb70580e483bf20fea54ab2fe7 100644 (file)
@@ -142,12 +142,6 @@ extern bool timekeeping_rtc_skipresume(void);
 
 extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
 
-/*
- * PPS accessor
- */
-extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
-                                       struct timespec64 *ts_real);
-
 /*
  * struct system_time_snapshot - simultaneous raw/real time capture with
  *     counter value
index bf781acfc6d820f555eefed29dbcdc7c8c4f7033..04af640ea95bd011cdec0101798b0aa7810233cb 100644 (file)
@@ -17,8 +17,7 @@ struct timer_list {
         */
        struct hlist_node       entry;
        unsigned long           expires;
-       void                    (*function)(unsigned long);
-       unsigned long           data;
+       void                    (*function)(struct timer_list *);
        u32                     flags;
 
 #ifdef CONFIG_LOCKDEP
@@ -64,13 +63,9 @@ struct timer_list {
 
 #define TIMER_TRACE_FLAGMASK   (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
 
-#define TIMER_DATA_TYPE                unsigned long
-#define TIMER_FUNC_TYPE                void (*)(TIMER_DATA_TYPE)
-
-#define __TIMER_INITIALIZER(_function, _data, _flags) {                \
+#define __TIMER_INITIALIZER(_function, _flags) {               \
                .entry = { .next = TIMER_ENTRY_STATIC },        \
                .function = (_function),                        \
-               .data = (_data),                                \
                .flags = (_flags),                              \
                __TIMER_LOCKDEP_MAP_INITIALIZER(                \
                        __FILE__ ":" __stringify(__LINE__))     \
@@ -78,108 +73,71 @@ struct timer_list {
 
 #define DEFINE_TIMER(_name, _function)                         \
        struct timer_list _name =                               \
-               __TIMER_INITIALIZER((TIMER_FUNC_TYPE)_function, 0, 0)
+               __TIMER_INITIALIZER(_function, 0)
 
-void init_timer_key(struct timer_list *timer, unsigned int flags,
+/*
+ * LOCKDEP and DEBUG timer interfaces.
+ */
+void init_timer_key(struct timer_list *timer,
+                   void (*func)(struct timer_list *), unsigned int flags,
                    const char *name, struct lock_class_key *key);
 
 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
 extern void init_timer_on_stack_key(struct timer_list *timer,
+                                   void (*func)(struct timer_list *),
                                    unsigned int flags, const char *name,
                                    struct lock_class_key *key);
-extern void destroy_timer_on_stack(struct timer_list *timer);
 #else
-static inline void destroy_timer_on_stack(struct timer_list *timer) { }
 static inline void init_timer_on_stack_key(struct timer_list *timer,
-                                          unsigned int flags, const char *name,
+                                          void (*func)(struct timer_list *),
+                                          unsigned int flags,
+                                          const char *name,
                                           struct lock_class_key *key)
 {
-       init_timer_key(timer, flags, name, key);
+       init_timer_key(timer, func, flags, name, key);
 }
 #endif
 
 #ifdef CONFIG_LOCKDEP
-#define __init_timer(_timer, _flags)                                   \
+#define __init_timer(_timer, _fn, _flags)                              \
        do {                                                            \
                static struct lock_class_key __key;                     \
-               init_timer_key((_timer), (_flags), #_timer, &__key);    \
+               init_timer_key((_timer), (_fn), (_flags), #_timer, &__key);\
        } while (0)
 
-#define __init_timer_on_stack(_timer, _flags)                          \
+#define __init_timer_on_stack(_timer, _fn, _flags)                     \
        do {                                                            \
                static struct lock_class_key __key;                     \
-               init_timer_on_stack_key((_timer), (_flags), #_timer, &__key); \
+               init_timer_on_stack_key((_timer), (_fn), (_flags),      \
+                                       #_timer, &__key);                \
        } while (0)
 #else
-#define __init_timer(_timer, _flags)                                   \
-       init_timer_key((_timer), (_flags), NULL, NULL)
-#define __init_timer_on_stack(_timer, _flags)                          \
-       init_timer_on_stack_key((_timer), (_flags), NULL, NULL)
+#define __init_timer(_timer, _fn, _flags)                              \
+       init_timer_key((_timer), (_fn), (_flags), NULL, NULL)
+#define __init_timer_on_stack(_timer, _fn, _flags)                     \
+       init_timer_on_stack_key((_timer), (_fn), (_flags), NULL, NULL)
 #endif
 
-#define init_timer(timer)                                              \
-       __init_timer((timer), 0)
-
-#define __setup_timer(_timer, _fn, _data, _flags)                      \
-       do {                                                            \
-               __init_timer((_timer), (_flags));                       \
-               (_timer)->function = (_fn);                             \
-               (_timer)->data = (_data);                               \
-       } while (0)
-
-#define __setup_timer_on_stack(_timer, _fn, _data, _flags)             \
-       do {                                                            \
-               __init_timer_on_stack((_timer), (_flags));              \
-               (_timer)->function = (_fn);                             \
-               (_timer)->data = (_data);                               \
-       } while (0)
-
-#define setup_timer(timer, fn, data)                                   \
-       __setup_timer((timer), (fn), (data), 0)
-#define setup_pinned_timer(timer, fn, data)                            \
-       __setup_timer((timer), (fn), (data), TIMER_PINNED)
-#define setup_deferrable_timer(timer, fn, data)                                \
-       __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
-#define setup_pinned_deferrable_timer(timer, fn, data)                 \
-       __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
-#define setup_timer_on_stack(timer, fn, data)                          \
-       __setup_timer_on_stack((timer), (fn), (data), 0)
-#define setup_pinned_timer_on_stack(timer, fn, data)                   \
-       __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED)
-#define setup_deferrable_timer_on_stack(timer, fn, data)               \
-       __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE)
-#define setup_pinned_deferrable_timer_on_stack(timer, fn, data)                \
-       __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
+/**
+ * timer_setup - prepare a timer for first use
+ * @timer: the timer in question
+ * @callback: the function to call when timer expires
+ * @flags: any TIMER_* flags
+ *
+ * Regular timer initialization should use either DEFINE_TIMER() above,
+ * or timer_setup(). For timers on the stack, timer_setup_on_stack() must
+ * be used and must be balanced with a call to destroy_timer_on_stack().
+ */
+#define timer_setup(timer, callback, flags)                    \
+       __init_timer((timer), (callback), (flags))
 
-#ifndef CONFIG_LOCKDEP
-static inline void timer_setup(struct timer_list *timer,
-                              void (*callback)(struct timer_list *),
-                              unsigned int flags)
-{
-       __setup_timer(timer, (TIMER_FUNC_TYPE)callback,
-                     (TIMER_DATA_TYPE)timer, flags);
-}
+#define timer_setup_on_stack(timer, callback, flags)           \
+       __init_timer_on_stack((timer), (callback), (flags))
 
-static inline void timer_setup_on_stack(struct timer_list *timer,
-                              void (*callback)(struct timer_list *),
-                              unsigned int flags)
-{
-       __setup_timer_on_stack(timer, (TIMER_FUNC_TYPE)callback,
-                              (TIMER_DATA_TYPE)timer, flags);
-}
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+extern void destroy_timer_on_stack(struct timer_list *timer);
 #else
-/*
- * Under LOCKDEP, the timer lock_class_key (set up in __init_timer) needs
- * to be tied to the caller's context, so an inline (above) won't work. We
- * do want to keep the inline for argument type checking, though.
- */
-# define timer_setup(timer, callback, flags)                           \
-               __setup_timer((timer), (TIMER_FUNC_TYPE)(callback),     \
-                             (TIMER_DATA_TYPE)(timer), (flags))
-# define timer_setup_on_stack(timer, callback, flags)                  \
-               __setup_timer_on_stack((timer),                         \
-                                      (TIMER_FUNC_TYPE)(callback),     \
-                                      (TIMER_DATA_TYPE)(timer), (flags))
+static inline void destroy_timer_on_stack(struct timer_list *timer) { }
 #endif
 
 #define from_timer(var, callback_timer, timer_fieldname) \
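
The timer.h, kthread and workqueue hunks above complete the conversion from the old (function, data) timer callbacks to timer_setup(), where the callback receives the struct timer_list pointer and recovers its container with from_timer(). A sketch of how the new pattern might look in a driver; struct demo_dev and the one-second period are purely illustrative.

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_dev {			/* hypothetical driver state */
	struct timer_list poll_timer;
	unsigned long polls;
};

static void demo_poll_fn(struct timer_list *t)
{
	/* recover the containing structure from the timer pointer */
	struct demo_dev *dev = from_timer(dev, t, poll_timer);

	dev->polls++;
	mod_timer(&dev->poll_timer, jiffies + HZ);	/* re-arm in 1s */
}

static void demo_dev_start(struct demo_dev *dev)
{
	timer_setup(&dev->poll_timer, demo_poll_fn, 0);
	mod_timer(&dev->poll_timer, jiffies + HZ);
}

static void demo_dev_stop(struct demo_dev *dev)
{
	del_timer_sync(&dev->poll_timer);
}
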
index d24991c1fef33343d7b7bc65d6b9ec67d8d9f575..b95ffb2188abaaac85dc992c6545d33b946495d1 100644 (file)
@@ -18,7 +18,7 @@
  */
 struct trace_export {
        struct trace_export __rcu       *next;
-       void (*write)(const void *, unsigned int);
+       void (*write)(struct trace_export *, const void *, unsigned int);
 };
 
 int register_ftrace_export(struct trace_export *export);
index a69877734c4eb033b7a7a7f2989e1da241793782..e2ec3582e54937d3818afed3e253440fc23541a0 100644 (file)
@@ -82,6 +82,7 @@ struct usbnet {
 #              define EVENT_RX_KILL    10
 #              define EVENT_LINK_CHANGE        11
 #              define EVENT_SET_RX_MODE        12
+#              define EVENT_NO_IP_ALIGN        13
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
index 01a050fc6650ab055315d1dd00f5309bbe263450..4a54ef96aff5b0ba8e9bf53cc754d09c0dd4dd55 100644 (file)
@@ -176,8 +176,7 @@ struct execute_work {
 
 #define __DELAYED_WORK_INITIALIZER(n, f, tflags) {                     \
        .work = __WORK_INITIALIZER((n).work, (f)),                      \
-       .timer = __TIMER_INITIALIZER((TIMER_FUNC_TYPE)delayed_work_timer_fn,\
-                                    (TIMER_DATA_TYPE)&(n.timer),       \
+       .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\
                                     (tflags) | TIMER_IRQSAFE),         \
        }
 
@@ -242,19 +241,17 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 #define __INIT_DELAYED_WORK(_work, _func, _tflags)                     \
        do {                                                            \
                INIT_WORK(&(_work)->work, (_func));                     \
-               __setup_timer(&(_work)->timer,                          \
-                             (TIMER_FUNC_TYPE)delayed_work_timer_fn,   \
-                             (TIMER_DATA_TYPE)&(_work)->timer,         \
-                             (_tflags) | TIMER_IRQSAFE);               \
+               __init_timer(&(_work)->timer,                           \
+                            delayed_work_timer_fn,                     \
+                            (_tflags) | TIMER_IRQSAFE);                \
        } while (0)
 
 #define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)             \
        do {                                                            \
                INIT_WORK_ONSTACK(&(_work)->work, (_func));             \
-               __setup_timer_on_stack(&(_work)->timer,                 \
-                                      (TIMER_FUNC_TYPE)delayed_work_timer_fn,\
-                                      (TIMER_DATA_TYPE)&(_work)->timer,\
-                                      (_tflags) | TIMER_IRQSAFE);      \
+               __init_timer_on_stack(&(_work)->timer,                  \
+                                     delayed_work_timer_fn,            \
+                                     (_tflags) | TIMER_IRQSAFE);       \
        } while (0)
 
 #define INIT_DELAYED_WORK(_work, _func)                                        \
index f42d85631d1711fd0085141fc1e61b0c31cd1ddd..fdfd04e348f698b3d108228868866072164d31b7 100644 (file)
@@ -308,7 +308,7 @@ static inline void cgroup_writeback_umount(void)
 void laptop_io_completion(struct backing_dev_info *info);
 void laptop_sync_completion(void);
 void laptop_mode_sync(struct work_struct *work);
-void laptop_mode_timer_fn(unsigned long data);
+void laptop_mode_timer_fn(struct timer_list *t);
 #else
 static inline void laptop_sync_completion(void) { }
 #endif
index 8b8118a7fadbc74a9aa879dc934de0442bb3a013..cb4d92b79cd932eda4e178861d8345683b329bdb 100644 (file)
@@ -3226,7 +3226,6 @@ struct cfg80211_ops {
  * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN.
  * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing
  *     auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH.
- * @WIPHY_FLAG_SUPPORTS_SCHED_SCAN: The device supports scheduled scans.
  * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the
  *     firmware.
  * @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP.
index 2fdb29ca74c2ab26068c57e0134e58b35019ac0e..fdad41469b6521bed2d666cf1cf7925579ec8111 100644 (file)
@@ -44,10 +44,10 @@ struct guehdr {
 #else
 #error  "Please fix <asm/byteorder.h>"
 #endif
-                       __u8    proto_ctype;
-                       __u16   flags;
+                       __u8    proto_ctype;
+                       __be16  flags;
                };
-               __u32 word;
+               __be32  word;
        };
 };
 
@@ -84,11 +84,10 @@ static inline size_t guehdr_priv_flags_len(__be32 flags)
  * if there is an unknown standard or private flags, or the options length for
  * the flags exceeds the options length specific in hlen of the GUE header.
  */
-static inline int validate_gue_flags(struct guehdr *guehdr,
-                                    size_t optlen)
+static inline int validate_gue_flags(struct guehdr *guehdr, size_t optlen)
 {
+       __be16 flags = guehdr->flags;
        size_t len;
-       __be32 flags = guehdr->flags;
 
        if (flags & ~GUE_FLAGS_ALL)
                return 1;
@@ -101,12 +100,13 @@ static inline int validate_gue_flags(struct guehdr *guehdr,
                /* Private flags are last four bytes accounted in
                 * guehdr_flags_len
                 */
-               flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV);
+               __be32 pflags = *(__be32 *)((void *)&guehdr[1] +
+                                           len - GUE_LEN_PRIV);
 
-               if (flags & ~GUE_PFLAGS_ALL)
+               if (pflags & ~GUE_PFLAGS_ALL)
                        return 1;
 
-               len += guehdr_priv_flags_len(flags);
+               len += guehdr_priv_flags_len(pflags);
                if (len > optlen)
                        return 1;
        }
index 9896f46cbbf11235395d75a5ec18a14736ee099d..af8addbaa3c188a896b74ff9646b6fdd692d1c8e 100644 (file)
@@ -34,6 +34,7 @@
 #include <net/flow_dissector.h>
 
 #define IPV4_MAX_PMTU          65535U          /* RFC 2675, Section 5.1 */
+#define IPV4_MIN_MTU           68                      /* RFC 791 */
 
 struct sock;
 
index 0105445cab83d32008b3526794c077f4bfbd9816..8e08b6da72f325bd4a623191e886fb1b746644d7 100644 (file)
@@ -694,9 +694,7 @@ struct tc_cls_matchall_offload {
 };
 
 enum tc_clsbpf_command {
-       TC_CLSBPF_ADD,
-       TC_CLSBPF_REPLACE,
-       TC_CLSBPF_DESTROY,
+       TC_CLSBPF_OFFLOAD,
        TC_CLSBPF_STATS,
 };
 
@@ -705,6 +703,7 @@ struct tc_cls_bpf_offload {
        enum tc_clsbpf_command command;
        struct tcf_exts *exts;
        struct bpf_prog *prog;
+       struct bpf_prog *oldprog;
        const char *name;
        bool exts_integrated;
        u32 gen_flags;
index 9a9347710701458a74953ef0407714865a13298a..9665582c4687e41bf5dd081894c0be89a40b89b6 100644 (file)
@@ -168,6 +168,17 @@ static inline void red_set_vars(struct red_vars *v)
        v->qcount       = -1;
 }
 
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
+{
+       if (fls(qth_min) + Wlog > 32)
+               return false;
+       if (fls(qth_max) + Wlog > 32)
+               return false;
+       if (qth_max < qth_min)
+               return false;
+       return true;
+}
+
 static inline void red_set_parms(struct red_parms *p,
                                 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
                                 u8 Scell_log, u8 *stab, u32 max_P)
@@ -179,7 +190,7 @@ static inline void red_set_parms(struct red_parms *p,
        p->qth_max      = qth_max << Wlog;
        p->Wlog         = Wlog;
        p->Plog         = Plog;
-       if (delta < 0)
+       if (delta <= 0)
                delta = 1;
        p->qth_delta    = delta;
        if (!max_P) {
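
The new red_check_params() helper above rejects Wlog values that would shift qth_min/qth_max past 32 bits, and inverted thresholds, before red_set_parms() runs. A hedged sketch of how a RED-based qdisc's configuration path might use it; demo_red_validate() is illustrative, while struct tc_red_qopt is the existing tc UAPI.

#include <net/red.h>
#include <linux/pkt_sched.h>
#include <linux/errno.h>

static int demo_red_validate(const struct tc_red_qopt *ctl)
{
	/* Reject parameters whose shifted thresholds would overflow 32 bits,
	 * or whose min/max thresholds are inverted. */
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
		return -EINVAL;
	return 0;
}
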
index 65d0d25f2648f645bad707c348cbb6454ef75cd5..83a3e47d5845b99fa61799a15b29e7247d478c72 100644 (file)
@@ -71,6 +71,7 @@ struct Qdisc {
                                      * qdisc_tree_decrease_qlen() should stop.
                                      */
 #define TCQ_F_INVISIBLE                0x80 /* invisible by default in dump */
+#define TCQ_F_OFFLOADED                0x200 /* qdisc is offloaded to HW */
        u32                     limit;
        const struct Qdisc_ops  *ops;
        struct qdisc_size_table __rcu *stab;
index 16f949eef52fdfd7c90fa15b44093334d1355aaf..2f8f93da5dc2660f4db37c04f8a434809b3120a1 100644 (file)
@@ -503,7 +503,8 @@ struct sctp_datamsg {
        /* Did the messenge fail to send? */
        int send_error;
        u8 send_failed:1,
-          can_delay;       /* should this message be Nagle delayed */
+          can_delay:1, /* should this message be Nagle delayed */
+          abandoned:1; /* should this message be abandoned */
 };
 
 struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
index 79e1a2c7912c03d8281d449609d57cc909138a3b..9155da42269208b358df8535b14dfd3dba509365 100644 (file)
@@ -685,11 +685,7 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
 
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
-       if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
-           sk->sk_family == AF_INET6)
-               hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
-       else
-               hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+       hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
index 524cee4f4c817b3583385e9ae9f991503ab934fd..01dbfea3267277ad5efacef4b59b4a1c615de66b 100644 (file)
@@ -14,7 +14,6 @@ struct tcf_sample {
        struct psample_group __rcu *psample_group;
        u32 psample_group_num;
        struct list_head tcfm_list;
-       struct rcu_head rcu;
 };
 #define to_sample(a) ((struct tcf_sample *)a)
 
index 4e09398009c10a72478b43d3cffc24ba01612b91..6da880d2f022c0cfd787a62af2bb7d222348af32 100644 (file)
@@ -844,12 +844,11 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb)
 }
 #endif
 
-/* TCP_SKB_CB reference means this can not be used from early demux */
 static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
        if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
-           skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
+           skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
                return true;
 #endif
        return false;
@@ -1056,7 +1055,7 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
                            struct rate_sample *rs);
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
-                 struct rate_sample *rs);
+                 bool is_sack_reneg, struct rate_sample *rs);
 void tcp_rate_check_app_limited(struct sock *sk);
 
 /* These functions determine how the current flow behaves in respect of SACK
index 0f9cbf96c093d86ae926ae5380f82eb454fecd02..6df6fe0c21980b4eb686bc20ff10f0c2bf28280b 100644 (file)
@@ -159,11 +159,11 @@ struct expander_device {
 
 struct sata_device {
        unsigned int class;
-       struct smp_resp        rps_resp; /* report_phy_sata_resp */
        u8     port_no;        /* port number, if this is a PM (Port) */
 
        struct ata_port *ap;
        struct ata_host ata_host;
+       struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
        u8     fis[ATA_RESP_FIS_SIZE];
 };
 
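Moving rps_resp to the end of the struct with ____cacheline_aligned keeps the DMA'd response buffer on its own cacheline, so cache writeback of neighbouring CPU-owned fields cannot clobber what the controller writes (and vice versa) on non-coherent platforms. The general idiom, as a hedged sketch with made-up field names:

    #include <linux/cache.h>

    struct dma_user {
            int cpu_only_state;             /* touched only by the CPU */

            /* The device-written buffer starts on its own cacheline so that
             * writeback/invalidation of the fields above cannot overlap the
             * area the device DMAs into. */
            unsigned char dma_buf[128] ____cacheline_aligned;
    };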
index f5db145e68ecae901ed071a70fe95db4045791b8..2c8d8115469dce4c2fc92bbf2ee91a1e0bccae5f 100644 (file)
@@ -182,6 +182,7 @@ enum tcm_sense_reason_table {
        TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE   = R(0x1a),
        TCM_TOO_MANY_SEGMENT_DESCS              = R(0x1b),
        TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE  = R(0x1c),
+       TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d),
 #undef R
 };
 
@@ -490,6 +491,7 @@ struct se_cmd {
 #define CMD_T_STOP             (1 << 5)
 #define CMD_T_TAS              (1 << 10)
 #define CMD_T_FABRIC_STOP      (1 << 11)
+#define CMD_T_PRE_EXECUTE      (1 << 12)
        spinlock_t              t_state_lock;
        struct kref             cmd_kref;
        struct completion       t_transport_stop_comp;
index e4b0b8e099325f2801e4f3af168004c603c23794..2c735a3e66133fc08740b4df6d64919c491c9d1e 100644 (file)
@@ -211,7 +211,7 @@ TRACE_EVENT(kvm_ack_irq,
        { KVM_TRACE_MMIO_WRITE, "write" }
 
 TRACE_EVENT(kvm_mmio,
-       TP_PROTO(int type, int len, u64 gpa, u64 val),
+       TP_PROTO(int type, int len, u64 gpa, void *val),
        TP_ARGS(type, len, gpa, val),
 
        TP_STRUCT__entry(
@@ -225,7 +225,10 @@ TRACE_EVENT(kvm_mmio,
                __entry->type           = type;
                __entry->len            = len;
                __entry->gpa            = gpa;
-               __entry->val            = val;
+               __entry->val            = 0;
+               if (val)
+                       memcpy(&__entry->val, val,
+                              min_t(u32, sizeof(__entry->val), len));
        ),
 
        TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
index f5024c560d8ff028b8952083625f66cde6d77f97..9c4eb33c5a1d35d9fcfa83b8386a7c4be3ef7c9e 100644 (file)
@@ -56,15 +56,18 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
 
 #include <trace/define_trace.h>
 
-#else /* !CONFIG_PREEMPTIRQ_EVENTS */
+#endif /* !CONFIG_PREEMPTIRQ_EVENTS */
 
+#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING)
 #define trace_irq_enable(...)
 #define trace_irq_disable(...)
-#define trace_preempt_enable(...)
-#define trace_preempt_disable(...)
 #define trace_irq_enable_rcuidle(...)
 #define trace_irq_disable_rcuidle(...)
+#endif
+
+#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT)
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
 #define trace_preempt_enable_rcuidle(...)
 #define trace_preempt_disable_rcuidle(...)
-
 #endif
index 306b31de519417790a50ab9dcb510faafd44cb30..bc01e06bc7167fb2557ccb94e21c58e55f9c8fdb 100644 (file)
@@ -116,9 +116,9 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
         * RUNNING (we will not have dequeued if state != RUNNING).
         */
        if (preempt)
-               return TASK_STATE_MAX;
+               return TASK_REPORT_MAX;
 
-       return task_state_index(p);
+       return 1 << task_state_index(p);
 }
 #endif /* CREATE_TRACE_POINTS */
 
@@ -164,7 +164,7 @@ TRACE_EVENT(sched_switch,
                                { 0x40, "P" }, { 0x80, "I" }) :
                  "R",
 
-               __entry->prev_state & TASK_STATE_MAX ? "+" : "",
+               __entry->prev_state & TASK_REPORT_MAX ? "+" : "",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
 );
 
index 07cccca6cbf1762684152146372e35e1cd758338..ab34c561f26bec42a8ff32cb6f7e911447f03af2 100644 (file)
                tcp_state_name(TCP_CLOSING),            \
                tcp_state_name(TCP_NEW_SYN_RECV))
 
+#define TP_STORE_V4MAPPED(__entry, saddr, daddr)               \
+       do {                                                    \
+               struct in6_addr *pin6;                          \
+                                                               \
+               pin6 = (struct in6_addr *)__entry->saddr_v6;    \
+               ipv6_addr_set_v4mapped(saddr, pin6);            \
+               pin6 = (struct in6_addr *)__entry->daddr_v6;    \
+               ipv6_addr_set_v4mapped(daddr, pin6);            \
+       } while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6)          \
+       do {                                                            \
+               if (sk->sk_family == AF_INET6) {                        \
+                       struct in6_addr *pin6;                          \
+                                                                       \
+                       pin6 = (struct in6_addr *)__entry->saddr_v6;    \
+                       *pin6 = saddr6;                                 \
+                       pin6 = (struct in6_addr *)__entry->daddr_v6;    \
+                       *pin6 = daddr6;                                 \
+               } else {                                                \
+                       TP_STORE_V4MAPPED(__entry, saddr, daddr);       \
+               }                                                       \
+       } while (0)
+#else
+#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6)  \
+       TP_STORE_V4MAPPED(__entry, saddr, daddr)
+#endif
+
 /*
  * tcp event with arguments sk and skb
  *
@@ -50,7 +79,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
 
        TP_fast_assign(
                struct inet_sock *inet = inet_sk(sk);
-               struct in6_addr *pin6;
                __be32 *p32;
 
                __entry->skbaddr = skb;
@@ -65,20 +93,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
                p32 = (__be32 *) __entry->daddr;
                *p32 =  inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == AF_INET6) {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       *pin6 = sk->sk_v6_rcv_saddr;
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       *pin6 = sk->sk_v6_daddr;
-               } else
-#endif
-               {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-               }
+               TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+                             sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
        ),
 
        TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -127,7 +143,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
 
        TP_fast_assign(
                struct inet_sock *inet = inet_sk(sk);
-               struct in6_addr *pin6;
                __be32 *p32;
 
                __entry->skaddr = sk;
@@ -141,20 +156,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
                p32 = (__be32 *) __entry->daddr;
                *p32 =  inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == AF_INET6) {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       *pin6 = sk->sk_v6_rcv_saddr;
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       *pin6 = sk->sk_v6_daddr;
-               } else
-#endif
-               {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-               }
+               TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+                              sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
        ),
 
        TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -197,7 +200,6 @@ TRACE_EVENT(tcp_set_state,
 
        TP_fast_assign(
                struct inet_sock *inet = inet_sk(sk);
-               struct in6_addr *pin6;
                __be32 *p32;
 
                __entry->skaddr = sk;
@@ -213,20 +215,8 @@ TRACE_EVENT(tcp_set_state,
                p32 = (__be32 *) __entry->daddr;
                *p32 =  inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == AF_INET6) {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       *pin6 = sk->sk_v6_rcv_saddr;
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       *pin6 = sk->sk_v6_daddr;
-               } else
-#endif
-               {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-               }
+               TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+                              sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
        ),
 
        TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
@@ -256,7 +246,6 @@ TRACE_EVENT(tcp_retransmit_synack,
 
        TP_fast_assign(
                struct inet_request_sock *ireq = inet_rsk(req);
-               struct in6_addr *pin6;
                __be32 *p32;
 
                __entry->skaddr = sk;
@@ -271,20 +260,8 @@ TRACE_EVENT(tcp_retransmit_synack,
                p32 = (__be32 *) __entry->daddr;
                *p32 = ireq->ir_rmt_addr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == AF_INET6) {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       *pin6 = ireq->ir_v6_loc_addr;
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       *pin6 = ireq->ir_v6_rmt_addr;
-               } else
-#endif
-               {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       ipv6_addr_set_v4mapped(ireq->ir_loc_addr, pin6);
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       ipv6_addr_set_v4mapped(ireq->ir_rmt_addr, pin6);
-               }
+               TP_STORE_ADDRS(__entry, ireq->ir_loc_addr, ireq->ir_rmt_addr,
+                             ireq->ir_v6_loc_addr, ireq->ir_v6_rmt_addr);
        ),
 
        TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
index 4cd0f05d01134d1a1e2d5bd231407bfd7d92d250..8989a92c571a2d7036b74b233913b588e4e4248c 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/netdevice.h>
 #include <linux/filter.h>
 #include <linux/tracepoint.h>
+#include <linux/bpf.h>
 
 #define __XDP_ACT_MAP(FN)      \
        FN(ABORTED)             \
diff --git a/include/uapi/asm-generic/bpf_perf_event.h b/include/uapi/asm-generic/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..53815d2
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
+
+#include <linux/ptrace.h>
+
+/* Export kernel pt_regs structure */
+typedef struct pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__ */
index 90fc490f973f9eb1c8a17d8c4eced4f12452de9e..821f71a2e48fa67b8ef039b891fff1098c066de5 100644 (file)
@@ -91,7 +91,7 @@ PTR_FIELD(PTR_GEN,                    0,  8)
 
 #define PTR_CHECK_DEV                  ((1 << PTR_DEV_BITS) - 1)
 
-#define PTR(gen, offset, dev)                                          \
+#define MAKE_PTR(gen, offset, dev)                                     \
        ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
 
 /* Bkey utility code */
index 73445ef07ddada2a20f7309baf9c1e832103f637..940b04772af801345a07687c0f920a5dd9ce4cfb 100644 (file)
@@ -76,7 +76,7 @@ struct bfs_super_block {
 #define BFS_FILEBLOCKS(ip) \
         ((ip)->i_sblock == 0 ? 0 : (le32_to_cpu((ip)->i_eblock) + 1) -  le32_to_cpu((ip)->i_sblock))
 #define BFS_UNCLEAN(bfs_sb, sb)        \
-       ((le32_to_cpu(bfs_sb->s_from) != -1) && (le32_to_cpu(bfs_sb->s_to) != -1) && !(sb->s_flags & MS_RDONLY))
+       ((le32_to_cpu(bfs_sb->s_from) != -1) && (le32_to_cpu(bfs_sb->s_to) != -1) && !(sb->s_flags & SB_RDONLY))
 
 
 #endif /* _LINUX_BFS_FS_H */
index af549d4ecf1b6e76522c6a157db98d48e4190b30..8f95303f9d807d10d4fd6850d91a2486b0a490ec 100644 (file)
@@ -8,11 +8,10 @@
 #ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
 #define _UAPI__LINUX_BPF_PERF_EVENT_H__
 
-#include <linux/types.h>
-#include <linux/ptrace.h>
+#include <asm/bpf_perf_event.h>
 
 struct bpf_perf_event_data {
-       struct pt_regs regs;
+       bpf_user_pt_regs_t regs;
        __u64 sample_period;
 };
 
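The uapi header now pulls bpf_user_pt_regs_t from asm/bpf_perf_event.h, so an architecture whose user-visible register set differs from the kernel's struct pt_regs can override the asm-generic fallback added above. Purely as an illustration of what such an override looks like (a sketch, not a copy of any real arch header):

    /* arch/<arch>/include/uapi/asm/bpf_perf_event.h (illustrative only) */
    #ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
    #define _UAPI__ASM_BPF_PERF_EVENT_H__

    #include <asm/ptrace.h>

    /* Hand BPF programs the user-visible register layout rather than the
     * kernel-internal pt_regs. */
    typedef struct user_pt_regs bpf_user_pt_regs_t;

    #endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */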
index 731d0df722e3a0422edc8dee848393b4785a9a40..6e80501368aee68e77f800349c25d53adb56b7c6 100644 (file)
@@ -233,29 +233,29 @@ struct kfd_ioctl_wait_events_args {
 };
 
 struct kfd_ioctl_set_scratch_backing_va_args {
-       uint64_t va_addr;       /* to KFD */
-       uint32_t gpu_id;        /* to KFD */
-       uint32_t pad;
+       __u64 va_addr;  /* to KFD */
+       __u32 gpu_id;   /* to KFD */
+       __u32 pad;
 };
 
 struct kfd_ioctl_get_tile_config_args {
        /* to KFD: pointer to tile array */
-       uint64_t tile_config_ptr;
+       __u64 tile_config_ptr;
        /* to KFD: pointer to macro tile array */
-       uint64_t macro_tile_config_ptr;
+       __u64 macro_tile_config_ptr;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
-       uint32_t num_tile_configs;
+       __u32 num_tile_configs;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
-       uint32_t num_macro_tile_configs;
+       __u32 num_macro_tile_configs;
 
-       uint32_t gpu_id;                /* to KFD */
-       uint32_t gb_addr_config;        /* from KFD */
-       uint32_t num_banks;             /* from KFD */
-       uint32_t num_ranks;             /* from KFD */
+       __u32 gpu_id;           /* to KFD */
+       __u32 gb_addr_config;   /* from KFD */
+       __u32 num_banks;                /* from KFD */
+       __u32 num_ranks;                /* from KFD */
        /* struct size can be extended later if needed
         * without breaking ABI compatibility
         */
index 282d7613fce8788bc466913d7fcacc960dd1c6de..496e59a2738ba99308f438e1f0509e66e17086cb 100644 (file)
@@ -630,9 +630,9 @@ struct kvm_s390_irq {
 
 struct kvm_s390_irq_state {
        __u64 buf;
-       __u32 flags;
+       __u32 flags;        /* will stay unused for compatibility reasons */
        __u32 len;
-       __u32 reserved[4];
+       __u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
 /* for KVM_SET_GUEST_DEBUG */
index af3cc2f4e1ad00dff0e011a4e94e0905d085d0ca..37b5096ae97be4e6115b0941b82918e11250ee6b 100644 (file)
@@ -256,7 +256,6 @@ struct tc_red_qopt {
 #define TC_RED_ECN             1
 #define TC_RED_HARDDROP                2
 #define TC_RED_ADAPTATIVE      4
-#define TC_RED_OFFLOADED       8
 };
 
 struct tc_red_xstats {
index d8b5f80c2ea66dd2d75600c1a556c2bc9cfa101f..843e29aa3cacf0e06beea9b59af5b5c7acc9ca3b 100644 (file)
@@ -557,6 +557,7 @@ enum {
        TCA_PAD,
        TCA_DUMP_INVISIBLE,
        TCA_CHAIN,
+       TCA_HW_OFFLOAD,
        __TCA_MAX
 };
 
index 41a0a81b01e6bc4c4c5a65f6c36f4be9b01c160e..c4c79aa331bd123e9d340ec7283ec97e89699584 100644 (file)
@@ -880,6 +880,8 @@ struct usb_wireless_cap_descriptor {        /* Ultra Wide Band */
        __u8  bReserved;
 } __attribute__((packed));
 
+#define USB_DT_USB_WIRELESS_CAP_SIZE   11
+
 /* USB 2.0 Extension descriptor */
 #define        USB_CAP_TYPE_EXT                2
 
@@ -1072,6 +1074,7 @@ struct usb_ptm_cap_descriptor {
        __u8  bDevCapabilityType;
 } __attribute__((packed));
 
+#define USB_DT_USB_PTM_ID_SIZE         3
 /*
  * The size of the descriptor for the Sublink Speed Attribute Count
  * (SSAC) specified in bmAttributes[4:0].
index dfec3809e7404f9658d51a20aae3869cd7aab9c3..e96e3a14533cda199963fe96b97dc78779c66037 100644 (file)
@@ -588,6 +588,12 @@ asmlinkage __visible void __init start_kernel(void)
                local_irq_disable();
        radix_tree_init();
 
+       /*
+        * Set up housekeeping before setting up workqueues to allow the unbound
+        * workqueue to take non-housekeeping into account.
+        */
+       housekeeping_init();
+
        /*
         * Allow workqueue creation and work item queueing/cancelling
         * early.  Work item execution depends on kthreads and starts after
@@ -605,7 +611,6 @@ asmlinkage __visible void __init start_kernel(void)
        early_irq_init();
        init_IRQ();
        tick_init();
-       housekeeping_init();
        rcu_init_nohz();
        init_timers();
        hrtimers_init();
index d240256263103f89972ab5e62201549a7074a829..9649ecd8a73a704fe07e3d678ac96723d33aec05 100644 (file)
@@ -331,7 +331,7 @@ static struct dentry *mqueue_mount(struct file_system_type *fs_type,
                         void *data)
 {
        struct ipc_namespace *ns;
-       if (flags & MS_KERNMOUNT) {
+       if (flags & SB_KERNMOUNT) {
                ns = data;
                data = NULL;
        } else {
index b9f8686a84cf1a5ee9d2b92d21579af11d8690aa..86b50aa26ee80adac9ba7ac52248c64cbea19b26 100644 (file)
@@ -1447,7 +1447,8 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
        rcu_read_lock();
        prog = rcu_dereference(progs)->progs;
        for (; *prog; prog++)
-               cnt++;
+               if (*prog != &dummy_bpf_prog.prog)
+                       cnt++;
        rcu_read_unlock();
        return cnt;
 }
index e469e05c8e83bc3256378644e3f3c26555651261..3905d4bc5b80d74f0b8f9e2e8f8526a0115ce239 100644 (file)
@@ -114,6 +114,7 @@ static void htab_free_elems(struct bpf_htab *htab)
                pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
                                         htab->map.key_size);
                free_percpu(pptr);
+               cond_resched();
        }
 free_elems:
        bpf_map_area_free(htab->elems);
@@ -159,6 +160,7 @@ static int prealloc_init(struct bpf_htab *htab)
                        goto free_elems;
                htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
                                  pptr);
+               cond_resched();
        }
 
 skip_percpu_elems:
index 68ec884440b75da08824249db74bb992f6d938ce..8455b89d1bbf698f86c44bd1e1846b4c7d4ba60f 100644 (file)
@@ -1,3 +1,18 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
 #include <linux/bpf.h>
 #include <linux/bpf_verifier.h>
 #include <linux/bug.h>
index d4593571c4049b8d046f53f81f8e17911a21e0c9..04b24876cd23c83c9502afc60853c871ee3fee13 100644 (file)
@@ -1059,6 +1059,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
                break;
        case PTR_TO_STACK:
                pointer_desc = "stack ";
+               /* The stack spill tracking logic in check_stack_write()
+                * and check_stack_read() relies on stack accesses being
+                * aligned.
+                */
+               strict = true;
                break;
        default:
                break;
@@ -1067,6 +1072,29 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
                                           strict);
 }
 
+/* truncate register to smaller size (in bytes)
+ * must be called with size < BPF_REG_SIZE
+ */
+static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
+{
+       u64 mask;
+
+       /* clear high bits in bit representation */
+       reg->var_off = tnum_cast(reg->var_off, size);
+
+       /* fix arithmetic bounds */
+       mask = ((u64)1 << (size * 8)) - 1;
+       if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
+               reg->umin_value &= mask;
+               reg->umax_value &= mask;
+       } else {
+               reg->umin_value = 0;
+               reg->umax_value = mask;
+       }
+       reg->smin_value = reg->umin_value;
+       reg->smax_value = reg->umax_value;
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -1200,9 +1228,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
        if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
            regs[value_regno].type == SCALAR_VALUE) {
                /* b/h/w load zero-extends, mark upper bits as known 0 */
-               regs[value_regno].var_off =
-                       tnum_cast(regs[value_regno].var_off, size);
-               __update_reg_bounds(&regs[value_regno]);
+               coerce_reg_to_size(&regs[value_regno], size);
        }
        return err;
 }
@@ -1282,6 +1308,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
                tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
                verbose(env, "invalid variable stack read R%d var_off=%s\n",
                        regno, tn_buf);
+               return -EACCES;
        }
        off = regs[regno].off + regs[regno].var_off.value;
        if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
@@ -1674,7 +1701,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
                return -EINVAL;
        }
 
+       /* With LD_ABS/IND some JITs save/restore skb from r1. */
        changes_data = bpf_helper_changes_pkt_data(fn->func);
+       if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
+               verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
+                       func_id_name(func_id), func_id);
+               return -EINVAL;
+       }
 
        memset(&meta, 0, sizeof(meta));
        meta.pkt_access = fn->pkt_access;
@@ -1766,14 +1799,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
        return 0;
 }
 
-static void coerce_reg_to_32(struct bpf_reg_state *reg)
-{
-       /* clear high 32 bits */
-       reg->var_off = tnum_cast(reg->var_off, 4);
-       /* Update bounds */
-       __update_reg_bounds(reg);
-}
-
 static bool signed_add_overflows(s64 a, s64 b)
 {
        /* Do the add in u64, where overflow is well-defined */
@@ -1794,6 +1819,41 @@ static bool signed_sub_overflows(s64 a, s64 b)
        return res > a;
 }
 
+static bool check_reg_sane_offset(struct bpf_verifier_env *env,
+                                 const struct bpf_reg_state *reg,
+                                 enum bpf_reg_type type)
+{
+       bool known = tnum_is_const(reg->var_off);
+       s64 val = reg->var_off.value;
+       s64 smin = reg->smin_value;
+
+       if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
+               verbose(env, "math between %s pointer and %lld is not allowed\n",
+                       reg_type_str[type], val);
+               return false;
+       }
+
+       if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
+               verbose(env, "%s pointer offset %d is not allowed\n",
+                       reg_type_str[type], reg->off);
+               return false;
+       }
+
+       if (smin == S64_MIN) {
+               verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
+                       reg_type_str[type]);
+               return false;
+       }
+
+       if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
+               verbose(env, "value %lld makes %s pointer be out of bounds\n",
+                       smin, reg_type_str[type]);
+               return false;
+       }
+
+       return true;
+}
+
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
  * If we return -EACCES, caller may want to try again treating pointer as a
@@ -1830,29 +1890,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
        if (BPF_CLASS(insn->code) != BPF_ALU64) {
                /* 32-bit ALU ops on pointers produce (meaningless) scalars */
-               if (!env->allow_ptr_leaks)
-                       verbose(env,
-                               "R%d 32-bit pointer arithmetic prohibited\n",
-                               dst);
+               verbose(env,
+                       "R%d 32-bit pointer arithmetic prohibited\n",
+                       dst);
                return -EACCES;
        }
 
        if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
-                               dst);
+               verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
+                       dst);
                return -EACCES;
        }
        if (ptr_reg->type == CONST_PTR_TO_MAP) {
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
-                               dst);
+               verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
+                       dst);
                return -EACCES;
        }
        if (ptr_reg->type == PTR_TO_PACKET_END) {
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
-                               dst);
+               verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
+                       dst);
                return -EACCES;
        }
 
@@ -1862,6 +1918,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        dst_reg->type = ptr_reg->type;
        dst_reg->id = ptr_reg->id;
 
+       if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
+           !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
+               return -EINVAL;
+
        switch (opcode) {
        case BPF_ADD:
                /* We can take a fixed offset as long as it doesn't overflow
@@ -1915,9 +1975,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        case BPF_SUB:
                if (dst_reg == off_reg) {
                        /* scalar -= pointer.  Creates an unknown scalar */
-                       if (!env->allow_ptr_leaks)
-                               verbose(env, "R%d tried to subtract pointer from scalar\n",
-                                       dst);
+                       verbose(env, "R%d tried to subtract pointer from scalar\n",
+                               dst);
                        return -EACCES;
                }
                /* We don't allow subtraction from FP, because (according to
@@ -1925,9 +1984,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                 * be able to deal with it.
                 */
                if (ptr_reg->type == PTR_TO_STACK) {
-                       if (!env->allow_ptr_leaks)
-                               verbose(env, "R%d subtraction from stack pointer prohibited\n",
-                                       dst);
+                       verbose(env, "R%d subtraction from stack pointer prohibited\n",
+                               dst);
                        return -EACCES;
                }
                if (known && (ptr_reg->off - smin_val ==
@@ -1976,28 +2034,30 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        case BPF_AND:
        case BPF_OR:
        case BPF_XOR:
-               /* bitwise ops on pointers are troublesome, prohibit for now.
-                * (However, in principle we could allow some cases, e.g.
-                * ptr &= ~3 which would reduce min_value by 3.)
-                */
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
-                               dst, bpf_alu_string[opcode >> 4]);
+               /* bitwise ops on pointers are troublesome, prohibit. */
+               verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
+                       dst, bpf_alu_string[opcode >> 4]);
                return -EACCES;
        default:
                /* other operators (e.g. MUL,LSH) produce non-pointer results */
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
-                               dst, bpf_alu_string[opcode >> 4]);
+               verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
+                       dst, bpf_alu_string[opcode >> 4]);
                return -EACCES;
        }
 
+       if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
+               return -EINVAL;
+
        __update_reg_bounds(dst_reg);
        __reg_deduce_bounds(dst_reg);
        __reg_bound_offset(dst_reg);
        return 0;
 }
 
+/* WARNING: This function does calculations on 64-bit values, but the actual
+ * execution may occur on 32-bit values. Therefore, things like bitshifts
+ * need extra checks in the 32-bit case.
+ */
 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                                      struct bpf_insn *insn,
                                      struct bpf_reg_state *dst_reg,
@@ -2008,12 +2068,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        bool src_known, dst_known;
        s64 smin_val, smax_val;
        u64 umin_val, umax_val;
+       u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 
-       if (BPF_CLASS(insn->code) != BPF_ALU64) {
-               /* 32-bit ALU ops are (32,32)->64 */
-               coerce_reg_to_32(dst_reg);
-               coerce_reg_to_32(&src_reg);
-       }
        smin_val = src_reg.smin_value;
        smax_val = src_reg.smax_value;
        umin_val = src_reg.umin_value;
@@ -2021,6 +2077,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        src_known = tnum_is_const(src_reg.var_off);
        dst_known = tnum_is_const(dst_reg->var_off);
 
+       if (!src_known &&
+           opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
+               __mark_reg_unknown(dst_reg);
+               return 0;
+       }
+
        switch (opcode) {
        case BPF_ADD:
                if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
@@ -2149,9 +2211,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                __update_reg_bounds(dst_reg);
                break;
        case BPF_LSH:
-               if (umax_val > 63) {
-                       /* Shifts greater than 63 are undefined.  This includes
-                        * shifts by a negative number.
+               if (umax_val >= insn_bitness) {
+                       /* Shifts greater than 31 or 63 are undefined.
+                        * This includes shifts by a negative number.
                         */
                        mark_reg_unknown(env, regs, insn->dst_reg);
                        break;
@@ -2177,27 +2239,29 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                __update_reg_bounds(dst_reg);
                break;
        case BPF_RSH:
-               if (umax_val > 63) {
-                       /* Shifts greater than 63 are undefined.  This includes
-                        * shifts by a negative number.
+               if (umax_val >= insn_bitness) {
+                       /* Shifts greater than 31 or 63 are undefined.
+                        * This includes shifts by a negative number.
                         */
                        mark_reg_unknown(env, regs, insn->dst_reg);
                        break;
                }
-               /* BPF_RSH is an unsigned shift, so make the appropriate casts */
-               if (dst_reg->smin_value < 0) {
-                       if (umin_val) {
-                               /* Sign bit will be cleared */
-                               dst_reg->smin_value = 0;
-                       } else {
-                               /* Lost sign bit information */
-                               dst_reg->smin_value = S64_MIN;
-                               dst_reg->smax_value = S64_MAX;
-                       }
-               } else {
-                       dst_reg->smin_value =
-                               (u64)(dst_reg->smin_value) >> umax_val;
-               }
+               /* BPF_RSH is an unsigned shift.  If the value in dst_reg might
+                * be negative, then either:
+                * 1) src_reg might be zero, so the sign bit of the result is
+                *    unknown, so we lose our signed bounds
+                * 2) it's known negative, thus the unsigned bounds capture the
+                *    signed bounds
+                * 3) the signed bounds cross zero, so they tell us nothing
+                *    about the result
+                * If the value in dst_reg is known nonnegative, then again the
+                * unsigned bounds capture the signed bounds.
+                * Thus, in all cases it suffices to blow away our signed bounds
+                * and rely on inferring new ones from the unsigned bounds and
+                * var_off of the result.
+                */
+               dst_reg->smin_value = S64_MIN;
+               dst_reg->smax_value = S64_MAX;
                if (src_known)
                        dst_reg->var_off = tnum_rshift(dst_reg->var_off,
                                                       umin_val);
@@ -2213,6 +2277,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                break;
        }
 
+       if (BPF_CLASS(insn->code) != BPF_ALU64) {
+               /* 32-bit ALU ops are (32,32)->32 */
+               coerce_reg_to_size(dst_reg, 4);
+               coerce_reg_to_size(&src_reg, 4);
+       }
+
        __reg_deduce_bounds(dst_reg);
        __reg_bound_offset(dst_reg);
        return 0;
@@ -2227,7 +2297,6 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
        struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
        struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
        u8 opcode = BPF_OP(insn->code);
-       int rc;
 
        dst_reg = &regs[insn->dst_reg];
        src_reg = NULL;
@@ -2238,43 +2307,29 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                if (src_reg->type != SCALAR_VALUE) {
                        if (dst_reg->type != SCALAR_VALUE) {
                                /* Combining two pointers by any ALU op yields
-                                * an arbitrary scalar.
+                                * an arbitrary scalar. Disallow all math except
+                                * pointer subtraction
                                 */
-                               if (!env->allow_ptr_leaks) {
-                                       verbose(env, "R%d pointer %s pointer prohibited\n",
-                                               insn->dst_reg,
-                                               bpf_alu_string[opcode >> 4]);
-                                       return -EACCES;
+                               if (opcode == BPF_SUB) {
+                                       mark_reg_unknown(env, regs, insn->dst_reg);
+                                       return 0;
                                }
-                               mark_reg_unknown(env, regs, insn->dst_reg);
-                               return 0;
+                               verbose(env, "R%d pointer %s pointer prohibited\n",
+                                       insn->dst_reg,
+                                       bpf_alu_string[opcode >> 4]);
+                               return -EACCES;
                        } else {
                                /* scalar += pointer
                                 * This is legal, but we have to reverse our
                                 * src/dest handling in computing the range
                                 */
-                               rc = adjust_ptr_min_max_vals(env, insn,
-                                                            src_reg, dst_reg);
-                               if (rc == -EACCES && env->allow_ptr_leaks) {
-                                       /* scalar += unknown scalar */
-                                       __mark_reg_unknown(&off_reg);
-                                       return adjust_scalar_min_max_vals(
-                                                       env, insn,
-                                                       dst_reg, off_reg);
-                               }
-                               return rc;
+                               return adjust_ptr_min_max_vals(env, insn,
+                                                              src_reg, dst_reg);
                        }
                } else if (ptr_reg) {
                        /* pointer += scalar */
-                       rc = adjust_ptr_min_max_vals(env, insn,
-                                                    dst_reg, src_reg);
-                       if (rc == -EACCES && env->allow_ptr_leaks) {
-                               /* unknown scalar += scalar */
-                               __mark_reg_unknown(dst_reg);
-                               return adjust_scalar_min_max_vals(
-                                               env, insn, dst_reg, *src_reg);
-                       }
-                       return rc;
+                       return adjust_ptr_min_max_vals(env, insn,
+                                                      dst_reg, src_reg);
                }
        } else {
                /* Pretend the src is a reg with a known value, since we only
@@ -2283,17 +2338,9 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                off_reg.type = SCALAR_VALUE;
                __mark_reg_known(&off_reg, insn->imm);
                src_reg = &off_reg;
-               if (ptr_reg) { /* pointer += K */
-                       rc = adjust_ptr_min_max_vals(env, insn,
-                                                    ptr_reg, src_reg);
-                       if (rc == -EACCES && env->allow_ptr_leaks) {
-                               /* unknown scalar += K */
-                               __mark_reg_unknown(dst_reg);
-                               return adjust_scalar_min_max_vals(
-                                               env, insn, dst_reg, off_reg);
-                       }
-                       return rc;
-               }
+               if (ptr_reg) /* pointer += K */
+                       return adjust_ptr_min_max_vals(env, insn,
+                                                      ptr_reg, src_reg);
        }
 
        /* Got here implies adding two SCALAR_VALUEs */
@@ -2390,17 +2437,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                        return -EACCES;
                                }
                                mark_reg_unknown(env, regs, insn->dst_reg);
-                               /* high 32 bits are known zero. */
-                               regs[insn->dst_reg].var_off = tnum_cast(
-                                               regs[insn->dst_reg].var_off, 4);
-                               __update_reg_bounds(&regs[insn->dst_reg]);
+                               coerce_reg_to_size(&regs[insn->dst_reg], 4);
                        }
                } else {
                        /* case: R = imm
                         * remember the value we stored into this reg
                         */
                        regs[insn->dst_reg].type = SCALAR_VALUE;
-                       __mark_reg_known(regs + insn->dst_reg, insn->imm);
+                       if (BPF_CLASS(insn->code) == BPF_ALU64) {
+                               __mark_reg_known(regs + insn->dst_reg,
+                                                insn->imm);
+                       } else {
+                               __mark_reg_known(regs + insn->dst_reg,
+                                                (u32)insn->imm);
+                       }
                }
 
        } else if (opcode > BPF_END) {
@@ -3431,15 +3481,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
                        return range_within(rold, rcur) &&
                               tnum_in(rold->var_off, rcur->var_off);
                } else {
-                       /* if we knew anything about the old value, we're not
-                        * equal, because we can't know anything about the
-                        * scalar value of the pointer in the new value.
+                       /* We're trying to use a pointer in place of a scalar.
+                        * Even if the scalar was unbounded, this could lead to
+                        * pointer leaks because scalars are allowed to leak
+                        * while pointers are not. We could make this safe in
+                        * special cases if root is calling us, but it's
+                        * probably not worth the hassle.
                         */
-                       return rold->umin_value == 0 &&
-                              rold->umax_value == U64_MAX &&
-                              rold->smin_value == S64_MIN &&
-                              rold->smax_value == S64_MAX &&
-                              tnum_is_unknown(rold->var_off);
+                       return false;
                }
        case PTR_TO_MAP_VALUE:
                /* If the new min/max/var_off satisfy the old ones and
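The bounds handling in the coerce_reg_to_size() helper added earlier in this file is easy to sanity-check outside the verifier: the mask keeps the low size*8 bits, and the unsigned bounds survive truncation only when their high bits agree; otherwise the range collapses to [0, mask]. A small userspace sketch of just that computation (hypothetical struct, not bpf_reg_state):

    #include <stdio.h>
    #include <stdint.h>

    struct bounds { uint64_t umin, umax; };

    /* Mirror of the bounds part of coerce_reg_to_size() for size < 8 bytes. */
    static void coerce_to_size(struct bounds *b, int size)
    {
            uint64_t mask = ((uint64_t)1 << (size * 8)) - 1;

            if ((b->umin & ~mask) == (b->umax & ~mask)) {
                    /* High bits identical: truncation keeps the range exact. */
                    b->umin &= mask;
                    b->umax &= mask;
            } else {
                    /* Range straddles a wrap of the low bits: give up. */
                    b->umin = 0;
                    b->umax = mask;
            }
    }

    int main(void)
    {
            struct bounds a = { 0x100000010ULL, 0x100000020ULL };
            struct bounds b = { 0xfffffff0ULL,  0x100000020ULL };

            coerce_to_size(&a, 4);  /* exact: becomes [0x10, 0x20] */
            coerce_to_size(&b, 4);  /* straddles: becomes [0, 0xffffffff] */
            printf("a: [%#llx, %#llx]\n", (unsigned long long)a.umin,
                   (unsigned long long)a.umax);
            printf("b: [%#llx, %#llx]\n", (unsigned long long)b.umin,
                   (unsigned long long)b.umax);
            return 0;
    }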
index 5f780d8f6a9d787ed22cf30bd8be66a0ae78f069..9caeda6102491db1a32c34bc62ef75da2cd3f6df 100644 (file)
@@ -50,7 +50,7 @@ static int current_css_set_read(struct seq_file *seq, void *v)
 
        spin_lock_irq(&css_set_lock);
        rcu_read_lock();
-       cset = rcu_dereference(current->cgroups);
+       cset = task_css_set(current);
        refcnt = refcount_read(&cset->refcount);
        seq_printf(seq, "css_set %pK %d", cset, refcnt);
        if (refcnt > cset->nr_tasks)
@@ -96,7 +96,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
 
        spin_lock_irq(&css_set_lock);
        rcu_read_lock();
-       cset = rcu_dereference(current->cgroups);
+       cset = task_css_set(current);
        list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
                struct cgroup *c = link->cgrp;
 
index 133b465691d6fe82462c33215639b6b7f97f04b7..1e111dd455c49cd9f8e3e7619fb4f11eaffb3c71 100644 (file)
@@ -296,8 +296,12 @@ int cgroup_stat_init(struct cgroup *cgrp)
        }
 
        /* ->updated_children list is self terminated */
-       for_each_possible_cpu(cpu)
-               cgroup_cpu_stat(cgrp, cpu)->updated_children = cgrp;
+       for_each_possible_cpu(cpu) {
+               struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu);
+
+               cstat->updated_children = cgrp;
+               u64_stats_init(&cstat->sync);
+       }
 
        prev_cputime_init(&cgrp->stat.prev_cputime);
 
index 04892a82f6ac36c92324806b66a1c1855880c8f7..41376c3ac93b06c8163d6d764fc9a015bafdcdb4 100644 (file)
@@ -780,8 +780,8 @@ static int takedown_cpu(unsigned int cpu)
        BUG_ON(cpu_online(cpu));
 
        /*
-        * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
-        * runnable tasks from the cpu, there's only the idle task left now
+        * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
+        * all runnable tasks from the CPU, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
@@ -1289,11 +1289,6 @@ static struct cpuhp_step cpuhp_bp_states[] = {
                .teardown.single        = NULL,
                .cant_stop              = true,
        },
-       [CPUHP_AP_SMPCFD_DYING] = {
-               .name                   = "smpcfd:dying",
-               .startup.single         = NULL,
-               .teardown.single        = smpcfd_dying_cpu,
-       },
        /*
         * Handled on control processor until the plugged processor manages
         * this itself.
@@ -1335,6 +1330,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
                .startup.single         = NULL,
                .teardown.single        = rcutree_dying_cpu,
        },
+       [CPUHP_AP_SMPCFD_DYING] = {
+               .name                   = "smpcfd:dying",
+               .startup.single         = NULL,
+               .teardown.single        = smpcfd_dying_cpu,
+       },
        /* Entry state on starting. Interrupts enabled from here on. Transient
         * state for synchronization */
        [CPUHP_AP_ONLINE] = {
index e74be38245adf732f34c55c0a676004f59870ba4..ed5d34925ad0617a40aeed3774b0e393aec03e99 100644 (file)
@@ -350,7 +350,7 @@ poll_again:
                        }
                        kdb_printf("\n");
                        for (i = 0; i < count; i++) {
-                               if (kallsyms_symbol_next(p_tmp, i) < 0)
+                               if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
                                        break;
                                kdb_printf("%s ", p_tmp);
                                *(p_tmp + len) = '\0';
index 9404c631bd3f9be1d0f501acc353d181c1fdbd40..4df5b695bf0db1c035a22b914d87ad190d1a0f42 100644 (file)
@@ -6639,6 +6639,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
        struct perf_namespaces_event *namespaces_event = data;
        struct perf_output_handle handle;
        struct perf_sample_data sample;
+       u16 header_size = namespaces_event->event_id.header.size;
        int ret;
 
        if (!perf_event_namespaces_match(event))
@@ -6649,7 +6650,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
        ret = perf_output_begin(&handle, event,
                                namespaces_event->event_id.header.size);
        if (ret)
-               return;
+               goto out;
 
        namespaces_event->event_id.pid = perf_event_pid(event,
                                                        namespaces_event->task);
@@ -6661,6 +6662,8 @@ static void perf_event_namespaces_output(struct perf_event *event,
        perf_event__output_id_sample(event, &handle, &sample);
 
        perf_output_end(&handle);
+out:
+       namespaces_event->event_id.header.size = header_size;
 }
 
 static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
@@ -6676,6 +6679,7 @@ static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
                ns_inode = ns_path.dentry->d_inode;
                ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
                ns_link_info->ino = ns_inode->i_ino;
+               path_put(&ns_path);
        }
 }
 
@@ -7986,11 +7990,11 @@ static void bpf_overflow_handler(struct perf_event *event,
 {
        struct bpf_perf_event_data_kern ctx = {
                .data = data,
-               .regs = regs,
                .event = event,
        };
        int ret = 0;
 
+       ctx.regs = perf_arch_bpf_user_pt_regs(regs);
        preempt_disable();
        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
                goto out;
index 6b4298a41167c7f3f7ea7be1d85af9a08d9d44cb..df0c91d5606c2fdd80ea7bb42a2deea3c83eec4d 100644 (file)
@@ -1755,3 +1755,11 @@ Efault:
        return -EFAULT;
 }
 #endif
+
+__weak void abort(void)
+{
+       BUG();
+
+       /* if that doesn't kill us, halt */
+       panic("Oops failed to kill thread");
+}
index 76ed5921117a24cc347fa3aa79f59c2ba378897f..57d0b3657e16b90268fa3396668bb62e6e54d287 100644 (file)
@@ -1582,8 +1582,8 @@ static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
 {
        unsigned int op =         (encoded_op & 0x70000000) >> 28;
        unsigned int cmp =        (encoded_op & 0x0f000000) >> 24;
-       int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 12);
-       int cmparg = sign_extend32(encoded_op & 0x00000fff, 12);
+       int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
+       int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
        int oldval, ret;
 
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
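The futex change above is about the second argument of sign_extend32(), which is the 0-based index of the sign bit, not the field width; for the 12-bit oparg/cmparg fields that index is 11. A userspace copy of the helper's usual definition makes the off-by-one visible:

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace copy of the kernel's sign_extend32(): 'index' is the bit
     * position of the sign bit (0-based), not the field width. */
    static int32_t sign_extend32(uint32_t value, int index)
    {
            uint8_t shift = 31 - index;

            return (int32_t)(value << shift) >> shift;
    }

    int main(void)
    {
            uint32_t field = 0xfff;   /* 12-bit field holding -1 */

            printf("index 11: %d\n", sign_extend32(field, 11)); /* -1 */
            printf("index 12: %d\n", sign_extend32(field, 12)); /* 4095 */
            return 0;
    }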
index e357bc800111043ed8d477d758bd245b8aa4c566..daae2f2dc6d4f64565112f0d7fea9c984887ce26 100644 (file)
@@ -86,11 +86,12 @@ static int gid_cmp(const void *_a, const void *_b)
        return gid_gt(a, b) - gid_lt(a, b);
 }
 
-static void groups_sort(struct group_info *group_info)
+void groups_sort(struct group_info *group_info)
 {
        sort(group_info->gid, group_info->ngroups, sizeof(*group_info->gid),
             gid_cmp, NULL);
 }
+EXPORT_SYMBOL(groups_sort);
 
 /* a simple bsearch */
 int groups_search(const struct group_info *group_info, kgid_t grp)
@@ -122,7 +123,6 @@ int groups_search(const struct group_info *group_info, kgid_t grp)
 void set_groups(struct cred *new, struct group_info *group_info)
 {
        put_group_info(new->group_info);
-       groups_sort(group_info);
        get_group_info(group_info);
        new->group_info = group_info;
 }
@@ -206,6 +206,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
                return retval;
        }
 
+       groups_sort(group_info);
        retval = set_current_groups(group_info);
        put_group_info(group_info);
 
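Since set_groups() no longer sorts, any path that builds a group_info and installs it must call the now-exported groups_sort() itself before groups_search() can bsearch the array, as setgroups() does above. A hedged sketch of that calling pattern for an in-kernel user (illustrative function, not taken from the tree):

    #include <linux/cred.h>
    #include <linux/uidgid.h>
    #include <linux/errno.h>

    static int install_two_groups(void)
    {
            struct group_info *gi = groups_alloc(2);
            int err;

            if (!gi)
                    return -ENOMEM;

            gi->gid[0] = KGIDT_INIT(1000);
            gi->gid[1] = KGIDT_INIT(100);

            /* Sort before installing: groups_search() bsearches this array. */
            groups_sort(gi);
            err = set_current_groups(gi);
            put_group_info(gi);
            return err;
    }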
index 2ff1c0c82fc91a91e7a59edc1de72aab85d131c7..0f922729bab9b202d1d79054dee2ff56d8a523a2 100644 (file)
@@ -1246,7 +1246,18 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 * set the trigger type must match. Also all must
                 * agree on ONESHOT.
                 */
-               unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data);
+               unsigned int oldtype;
+
+               /*
+                * If nobody did set the configuration before, inherit
+                * the one provided by the requester.
+                */
+               if (irqd_trigger_type_was_set(&desc->irq_data)) {
+                       oldtype = irqd_get_trigger_type(&desc->irq_data);
+               } else {
+                       oldtype = new->flags & IRQF_TRIGGER_MASK;
+                       irqd_set_trigger_type(&desc->irq_data, oldtype);
+               }
 
                if (!((old->flags & new->flags) & IRQF_SHARED) ||
                    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
index a3cbbc8191c52da53a1b522db248aa79528bb13f..0ba0dd8863a779f5b318ef07f03c2915ea57cd57 100644 (file)
@@ -384,7 +384,9 @@ unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
 {
        struct cpumap *cm = this_cpu_ptr(m->maps);
 
-       return m->global_available - cpudown ? cm->available : 0;
+       if (!cpudown)
+               return m->global_available;
+       return m->global_available - cm->available;
 }
 
 /**
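The irq_matrix_available() fix is an operator-precedence repair: '-' binds tighter than '?:', so the old expression evaluated as (m->global_available - cpudown) ? cm->available : 0 and returned the per-CPU count whenever global_available was non-zero. A toy reproduction with made-up numbers:

    #include <stdio.h>
    #include <stdbool.h>

    int main(void)
    {
            unsigned int global_available = 100, cpu_available = 30;
            bool cpudown = false;

            /* Old, buggy reading: (global - cpudown) ? cpu : 0  ==> 30 */
            unsigned int buggy = global_available - cpudown ? cpu_available : 0;

            /* Intended result: all of global when the CPU stays up ==> 100 */
            unsigned int fixed = !cpudown ? global_available
                                          : global_available - cpu_available;

            printf("buggy=%u fixed=%u\n", buggy, fixed);
            return 0;
    }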
index 1215229d1c1281b2363092b86d9246336ee636e0..ef2a47e0eab6d3030af076cf62a0606ff4c6149f 100644 (file)
@@ -20,7 +20,7 @@
 static int irqfixup __read_mostly;
 
 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
-static void poll_spurious_irqs(unsigned long dummy);
+static void poll_spurious_irqs(struct timer_list *unused);
 static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs);
 static int irq_poll_cpu;
 static atomic_t irq_poll_active;
@@ -143,7 +143,7 @@ out:
        return ok;
 }
 
-static void poll_spurious_irqs(unsigned long dummy)
+static void poll_spurious_irqs(struct timer_list *unused)
 {
        struct irq_desc *desc;
        int i;
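poll_spurious_irqs() only needed its signature updated because poll_spurious_irq_timer is a static DEFINE_TIMER, but the same API change covers timers embedded in objects, where the callback recovers its container with from_timer(). A hedged sketch of that pattern (hypothetical struct and names):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct poller {
            struct timer_list timer;
            int hits;
    };

    static void poller_timeout(struct timer_list *t)
    {
            /* Recover the containing object from the timer_list pointer. */
            struct poller *p = from_timer(p, t, timer);

            p->hits++;
            mod_timer(&p->timer, jiffies + HZ / 10);
    }

    static void poller_start(struct poller *p)
    {
            timer_setup(&p->timer, poller_timeout, 0);
            mod_timer(&p->timer, jiffies + HZ / 10);
    }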
index 8ff4ca4665ff830014db9b9698726ac0b732da58..8594d24e4adc2245e0f7cd0f6f48d9eef755d551 100644 (file)
@@ -769,7 +769,7 @@ static __init int jump_label_test(void)
 
        return 0;
 }
-late_initcall(jump_label_test);
+early_initcall(jump_label_test);
 #endif /* STATIC_KEYS_SELFTEST */
 
 #endif /* HAVE_JUMP_LABEL */
index 531ffa984bc262716b3412f5cfc2476c448d379f..d5fa4116688aff6f9d5c3c58787b4e848be4dc0a 100644 (file)
@@ -614,14 +614,14 @@ static void s_stop(struct seq_file *m, void *p)
 
 static int s_show(struct seq_file *m, void *p)
 {
-       unsigned long value;
+       void *value;
        struct kallsym_iter *iter = m->private;
 
        /* Some debugging symbols have no name.  Ignore them. */
        if (!iter->name[0])
                return 0;
 
-       value = iter->show_value ? iter->value : 0;
+       value = iter->show_value ? (void *)iter->value : NULL;
 
        if (iter->module_name[0]) {
                char type;
@@ -632,10 +632,10 @@ static int s_show(struct seq_file *m, void *p)
                 */
                type = iter->exported ? toupper(iter->type) :
                                        tolower(iter->type);
-               seq_printf(m, KALLSYM_FMT " %c %s\t[%s]\n", value,
+               seq_printf(m, "%px %c %s\t[%s]\n", value,
                           type, iter->name, iter->module_name);
        } else
-               seq_printf(m, KALLSYM_FMT " %c %s\n", value,
+               seq_printf(m, "%px %c %s\n", value,
                           iter->type, iter->name);
        return 0;
 }
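The move from KALLSYM_FMT to %px matters because, as of this cycle, plain %p output is hashed before printing; %px prints the raw value, and the iter->show_value test above still decides whether an unprivileged reader gets NULL instead. A compact reminder of the three pointer specifiers (any kernel context with a pointer at hand):

    #include <linux/printk.h>

    static void show_ptr(const void *p)
    {
            pr_info("hashed:     %p\n", p);   /* hashed, safe for logs */
            pr_info("raw:        %px\n", p);  /* prints the value as-is */
            pr_info("restricted: %pK\n", p);  /* honours kptr_restrict */
    }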
index 15f33faf4013bdfea16baf8b0b31053456606620..7594c033d98a39f3f51632a8f6f77b4440079ec4 100644 (file)
@@ -157,7 +157,7 @@ void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);
 
-void notrace __sanitizer_cov_trace_cmp4(u16 arg1, u16 arg2)
+void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
 {
        write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
 }
@@ -183,7 +183,7 @@ void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);
 
-void notrace __sanitizer_cov_trace_const_cmp4(u16 arg1, u16 arg2)
+void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
 {
        write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
                        _RET_IP_);
index 8af313081b0d9a7f626f6b3b496119737e9e89a6..cd50e99202b011dfdb847dd2772f14e818d268bb 100644 (file)
@@ -843,7 +843,7 @@ void __kthread_queue_delayed_work(struct kthread_worker *worker,
        struct timer_list *timer = &dwork->timer;
        struct kthread_work *work = &dwork->work;
 
-       WARN_ON_ONCE(timer->function != (TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn);
+       WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
 
        /*
         * If @delay is 0, queue @dwork->work immediately.  This is for
index 9776da8db180d63c94f0a698bac844e91c24c0ce..5fa1324a4f29a57901bdcf0ca81874a3dfd9a66c 100644 (file)
 #define CREATE_TRACE_POINTS
 #include <trace/events/lock.h>
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#include <linux/slab.h>
-#endif
-
 #ifdef CONFIG_PROVE_LOCKING
 int prove_locking = 1;
 module_param(prove_locking, int, 0644);
@@ -75,19 +71,6 @@ module_param(lock_stat, int, 0644);
 #define lock_stat 0
 #endif
 
-#ifdef CONFIG_BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
-static int crossrelease_fullstack = 1;
-#else
-static int crossrelease_fullstack;
-#endif
-static int __init allow_crossrelease_fullstack(char *str)
-{
-       crossrelease_fullstack = 1;
-       return 0;
-}
-
-early_param("crossrelease_fullstack", allow_crossrelease_fullstack);
-
 /*
  * lockdep_lock: protects the lockdep graph, the hashes and the
  *               class/list/hash allocators.
@@ -740,18 +723,6 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
        return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
 }
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-static void cross_init(struct lockdep_map *lock, int cross);
-static int cross_lock(struct lockdep_map *lock);
-static int lock_acquire_crosslock(struct held_lock *hlock);
-static int lock_release_crosslock(struct lockdep_map *lock);
-#else
-static inline void cross_init(struct lockdep_map *lock, int cross) {}
-static inline int cross_lock(struct lockdep_map *lock) { return 0; }
-static inline int lock_acquire_crosslock(struct held_lock *hlock) { return 2; }
-static inline int lock_release_crosslock(struct lockdep_map *lock) { return 2; }
-#endif
-
 /*
  * Register a lock's class in the hash-table, if the class is not present
  * yet. Otherwise we look it up. We cache the result in the lock object
@@ -1151,41 +1122,22 @@ print_circular_lock_scenario(struct held_lock *src,
                printk(KERN_CONT "\n\n");
        }
 
-       if (cross_lock(tgt->instance)) {
-               printk(" Possible unsafe locking scenario by crosslock:\n\n");
-               printk("       CPU0                    CPU1\n");
-               printk("       ----                    ----\n");
-               printk("  lock(");
-               __print_lock_name(parent);
-               printk(KERN_CONT ");\n");
-               printk("  lock(");
-               __print_lock_name(target);
-               printk(KERN_CONT ");\n");
-               printk("                               lock(");
-               __print_lock_name(source);
-               printk(KERN_CONT ");\n");
-               printk("                               unlock(");
-               __print_lock_name(target);
-               printk(KERN_CONT ");\n");
-               printk("\n *** DEADLOCK ***\n\n");
-       } else {
-               printk(" Possible unsafe locking scenario:\n\n");
-               printk("       CPU0                    CPU1\n");
-               printk("       ----                    ----\n");
-               printk("  lock(");
-               __print_lock_name(target);
-               printk(KERN_CONT ");\n");
-               printk("                               lock(");
-               __print_lock_name(parent);
-               printk(KERN_CONT ");\n");
-               printk("                               lock(");
-               __print_lock_name(target);
-               printk(KERN_CONT ");\n");
-               printk("  lock(");
-               __print_lock_name(source);
-               printk(KERN_CONT ");\n");
-               printk("\n *** DEADLOCK ***\n\n");
-       }
+       printk(" Possible unsafe locking scenario:\n\n");
+       printk("       CPU0                    CPU1\n");
+       printk("       ----                    ----\n");
+       printk("  lock(");
+       __print_lock_name(target);
+       printk(KERN_CONT ");\n");
+       printk("                               lock(");
+       __print_lock_name(parent);
+       printk(KERN_CONT ");\n");
+       printk("                               lock(");
+       __print_lock_name(target);
+       printk(KERN_CONT ");\n");
+       printk("  lock(");
+       __print_lock_name(source);
+       printk(KERN_CONT ");\n");
+       printk("\n *** DEADLOCK ***\n\n");
 }
 
 /*
@@ -1211,10 +1163,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
                curr->comm, task_pid_nr(curr));
        print_lock(check_src);
 
-       if (cross_lock(check_tgt->instance))
-               pr_warn("\nbut now in release context of a crosslock acquired at the following:\n");
-       else
-               pr_warn("\nbut task is already holding lock:\n");
+       pr_warn("\nbut task is already holding lock:\n");
 
        print_lock(check_tgt);
        pr_warn("\nwhich lock already depends on the new lock.\n\n");
@@ -1244,9 +1193,7 @@ static noinline int print_circular_bug(struct lock_list *this,
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return 0;
 
-       if (cross_lock(check_tgt->instance))
-               this->trace = *trace;
-       else if (!save_trace(&this->trace))
+       if (!save_trace(&this->trace))
                return 0;
 
        depth = get_lock_depth(target);
@@ -1850,9 +1797,6 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
                if (nest)
                        return 2;
 
-               if (cross_lock(prev->instance))
-                       continue;
-
                return print_deadlock_bug(curr, prev, next);
        }
        return 1;
@@ -2018,31 +1962,26 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
        for (;;) {
                int distance = curr->lockdep_depth - depth + 1;
                hlock = curr->held_locks + depth - 1;
+
                /*
-                * Only non-crosslock entries get new dependencies added.
-                * Crosslock entries will be added by commit later:
+                * Only non-recursive-read entries get new dependencies
+                * added:
                 */
-               if (!cross_lock(hlock->instance)) {
+               if (hlock->read != 2 && hlock->check) {
+                       int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
+                       if (!ret)
+                               return 0;
+
                        /*
-                        * Only non-recursive-read entries get new dependencies
-                        * added:
+                        * Stop after the first non-trylock entry,
+                        * as non-trylock entries have added their
+                        * own direct dependencies already, so this
+                        * lock is connected to them indirectly:
                         */
-                       if (hlock->read != 2 && hlock->check) {
-                               int ret = check_prev_add(curr, hlock, next,
-                                                        distance, &trace, save_trace);
-                               if (!ret)
-                                       return 0;
-
-                               /*
-                                * Stop after the first non-trylock entry,
-                                * as non-trylock entries have added their
-                                * own direct dependencies already, so this
-                                * lock is connected to them indirectly:
-                                */
-                               if (!hlock->trylock)
-                                       break;
-                       }
+                       if (!hlock->trylock)
+                               break;
                }
+
                depth--;
                /*
                 * End of lock-stack?
@@ -3292,21 +3231,10 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass)
 {
-       cross_init(lock, 0);
        __lockdep_init_map(lock, name, key, subclass);
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-void lockdep_init_map_crosslock(struct lockdep_map *lock, const char *name,
-                     struct lock_class_key *key, int subclass)
-{
-       cross_init(lock, 1);
-       __lockdep_init_map(lock, name, key, subclass);
-}
-EXPORT_SYMBOL_GPL(lockdep_init_map_crosslock);
-#endif
-
 struct lock_class_key __lockdep_no_validate__;
 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
 
@@ -3362,7 +3290,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        int chain_head = 0;
        int class_idx;
        u64 chain_key;
-       int ret;
 
        if (unlikely(!debug_locks))
                return 0;
@@ -3411,8 +3338,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
        class_idx = class - lock_classes + 1;
 
-       /* TODO: nest_lock is not implemented for crosslock yet. */
-       if (depth && !cross_lock(lock)) {
+       if (depth) {
                hlock = curr->held_locks + depth - 1;
                if (hlock->class_idx == class_idx && nest_lock) {
                        if (hlock->references) {
@@ -3500,14 +3426,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
                return 0;
 
-       ret = lock_acquire_crosslock(hlock);
-       /*
-        * 2 means normal acquire operations are needed. Otherwise, it's
-        * ok just to return with '0:fail, 1:success'.
-        */
-       if (ret != 2)
-               return ret;
-
        curr->curr_chain_key = chain_key;
        curr->lockdep_depth++;
        check_chain_key(curr);
@@ -3745,19 +3663,11 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
        struct task_struct *curr = current;
        struct held_lock *hlock;
        unsigned int depth;
-       int ret, i;
+       int i;
 
        if (unlikely(!debug_locks))
                return 0;
 
-       ret = lock_release_crosslock(lock);
-       /*
-        * 2 means normal release operations are needed. Otherwise, it's
-        * ok just to return with '0:fail, 1:success'.
-        */
-       if (ret != 2)
-               return ret;
-
        depth = curr->lockdep_depth;
        /*
         * So we're all set to release this lock.. wait what lock? We don't
@@ -4675,494 +4585,3 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
        dump_stack();
 }
 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
-
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-
-/*
- * Crossrelease works by recording a lock history for each thread and
- * connecting those historic locks that were taken after the
- * wait_for_completion() in the complete() context.
- *
- * Task-A                              Task-B
- *
- *                                     mutex_lock(&A);
- *                                     mutex_unlock(&A);
- *
- * wait_for_completion(&C);
- *   lock_acquire_crosslock();
- *     atomic_inc_return(&cross_gen_id);
- *                                |
- *                               |     mutex_lock(&B);
- *                               |     mutex_unlock(&B);
- *                                |
- *                               |     complete(&C);
- *                               `--     lock_commit_crosslock();
- *
- * Which will then add a dependency between B and C.
- */
-
-#define xhlock(i)         (current->xhlocks[(i) % MAX_XHLOCKS_NR])
-
-/*
- * Whenever a crosslock is held, cross_gen_id will be increased.
- */
-static atomic_t cross_gen_id; /* Can be wrapped */
-
-/*
- * Make an entry of the ring buffer invalid.
- */
-static inline void invalidate_xhlock(struct hist_lock *xhlock)
-{
-       /*
-        * Normally, xhlock->hlock.instance must be !NULL.
-        */
-       xhlock->hlock.instance = NULL;
-}
-
-/*
- * Lock history stacks; we have 2 nested lock history stacks:
- *
- *   HARD(IRQ)
- *   SOFT(IRQ)
- *
- * The thing is that once we complete a HARD/SOFT IRQ the future task locks
- * should not depend on any of the locks observed while running the IRQ.  So
- * what we do is rewind the history buffer and erase all our knowledge of that
- * temporal event.
- */
-
-void crossrelease_hist_start(enum xhlock_context_t c)
-{
-       struct task_struct *cur = current;
-
-       if (!cur->xhlocks)
-               return;
-
-       cur->xhlock_idx_hist[c] = cur->xhlock_idx;
-       cur->hist_id_save[c]    = cur->hist_id;
-}
-
-void crossrelease_hist_end(enum xhlock_context_t c)
-{
-       struct task_struct *cur = current;
-
-       if (cur->xhlocks) {
-               unsigned int idx = cur->xhlock_idx_hist[c];
-               struct hist_lock *h = &xhlock(idx);
-
-               cur->xhlock_idx = idx;
-
-               /* Check if the ring was overwritten. */
-               if (h->hist_id != cur->hist_id_save[c])
-                       invalidate_xhlock(h);
-       }
-}
-
-/*
- * lockdep_invariant_state() is used to annotate independence inside a task, to
- * make one task look like multiple independent 'tasks'.
- *
- * Take for instance workqueues; each work is independent of the last. The
- * completion of a future work does not depend on the completion of a past work
- * (in general). Therefore we must not carry that (lock) dependency across
- * works.
- *
- * This is true for many things; pretty much all kthreads fall into this
- * pattern, where they have an invariant state and future completions do not
- * depend on past completions. Its just that since they all have the 'same'
- * form -- the kthread does the same over and over -- it doesn't typically
- * matter.
- *
- * The same is true for system-calls, once a system call is completed (we've
- * returned to userspace) the next system call does not depend on the lock
- * history of the previous system call.
- *
- * They key property for independence, this invariant state, is that it must be
- * a point where we hold no locks and have no history. Because if we were to
- * hold locks, the restore at _end() would not necessarily recover it's history
- * entry. Similarly, independence per-definition means it does not depend on
- * prior state.
- */
-void lockdep_invariant_state(bool force)
-{
-       /*
-        * We call this at an invariant point, no current state, no history.
-        * Verify the former, enforce the latter.
-        */
-       WARN_ON_ONCE(!force && current->lockdep_depth);
-       invalidate_xhlock(&xhlock(current->xhlock_idx));
-}
-
-static int cross_lock(struct lockdep_map *lock)
-{
-       return lock ? lock->cross : 0;
-}
-
-/*
- * This is needed to decide the relationship between wrapable variables.
- */
-static inline int before(unsigned int a, unsigned int b)
-{
-       return (int)(a - b) < 0;
-}
-
-static inline struct lock_class *xhlock_class(struct hist_lock *xhlock)
-{
-       return hlock_class(&xhlock->hlock);
-}
-
-static inline struct lock_class *xlock_class(struct cross_lock *xlock)
-{
-       return hlock_class(&xlock->hlock);
-}
-
-/*
- * Should we check a dependency with previous one?
- */
-static inline int depend_before(struct held_lock *hlock)
-{
-       return hlock->read != 2 && hlock->check && !hlock->trylock;
-}
-
-/*
- * Should we check a dependency with next one?
- */
-static inline int depend_after(struct held_lock *hlock)
-{
-       return hlock->read != 2 && hlock->check;
-}
-
-/*
- * Check if the xhlock is valid, which would be false if,
- *
- *    1. Has not used after initializaion yet.
- *    2. Got invalidated.
- *
- * Remind hist_lock is implemented as a ring buffer.
- */
-static inline int xhlock_valid(struct hist_lock *xhlock)
-{
-       /*
-        * xhlock->hlock.instance must be !NULL.
-        */
-       return !!xhlock->hlock.instance;
-}
-
-/*
- * Record a hist_lock entry.
- *
- * Irq disable is only required.
- */
-static void add_xhlock(struct held_lock *hlock)
-{
-       unsigned int idx = ++current->xhlock_idx;
-       struct hist_lock *xhlock = &xhlock(idx);
-
-#ifdef CONFIG_DEBUG_LOCKDEP
-       /*
-        * This can be done locklessly because they are all task-local
-        * state, we must however ensure IRQs are disabled.
-        */
-       WARN_ON_ONCE(!irqs_disabled());
-#endif
-
-       /* Initialize hist_lock's members */
-       xhlock->hlock = *hlock;
-       xhlock->hist_id = ++current->hist_id;
-
-       xhlock->trace.nr_entries = 0;
-       xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
-       xhlock->trace.entries = xhlock->trace_entries;
-
-       if (crossrelease_fullstack) {
-               xhlock->trace.skip = 3;
-               save_stack_trace(&xhlock->trace);
-       } else {
-               xhlock->trace.nr_entries = 1;
-               xhlock->trace.entries[0] = hlock->acquire_ip;
-       }
-}
-
-static inline int same_context_xhlock(struct hist_lock *xhlock)
-{
-       return xhlock->hlock.irq_context == task_irq_context(current);
-}
-
-/*
- * This should be lockless as far as possible because this would be
- * called very frequently.
- */
-static void check_add_xhlock(struct held_lock *hlock)
-{
-       /*
-        * Record a hist_lock, only in case that acquisitions ahead
-        * could depend on the held_lock. For example, if the held_lock
-        * is trylock then acquisitions ahead never depends on that.
-        * In that case, we don't need to record it. Just return.
-        */
-       if (!current->xhlocks || !depend_before(hlock))
-               return;
-
-       add_xhlock(hlock);
-}
-
-/*
- * For crosslock.
- */
-static int add_xlock(struct held_lock *hlock)
-{
-       struct cross_lock *xlock;
-       unsigned int gen_id;
-
-       if (!graph_lock())
-               return 0;
-
-       xlock = &((struct lockdep_map_cross *)hlock->instance)->xlock;
-
-       /*
-        * When acquisitions for a crosslock are overlapped, we use
-        * nr_acquire to perform commit for them, based on cross_gen_id
-        * of the first acquisition, which allows to add additional
-        * dependencies.
-        *
-        * Moreover, when no acquisition of a crosslock is in progress,
-        * we should not perform commit because the lock might not exist
-        * any more, which might cause incorrect memory access. So we
-        * have to track the number of acquisitions of a crosslock.
-        *
-        * depend_after() is necessary to initialize only the first
-        * valid xlock so that the xlock can be used on its commit.
-        */
-       if (xlock->nr_acquire++ && depend_after(&xlock->hlock))
-               goto unlock;
-
-       gen_id = (unsigned int)atomic_inc_return(&cross_gen_id);
-       xlock->hlock = *hlock;
-       xlock->hlock.gen_id = gen_id;
-unlock:
-       graph_unlock();
-       return 1;
-}
-
-/*
- * Called for both normal and crosslock acquires. Normal locks will be
- * pushed on the hist_lock queue. Cross locks will record state and
- * stop regular lock_acquire() to avoid being placed on the held_lock
- * stack.
- *
- * Return: 0 - failure;
- *         1 - crosslock, done;
- *         2 - normal lock, continue to held_lock[] ops.
- */
-static int lock_acquire_crosslock(struct held_lock *hlock)
-{
-       /*
-        *      CONTEXT 1               CONTEXT 2
-        *      ---------               ---------
-        *      lock A (cross)
-        *      X = atomic_inc_return(&cross_gen_id)
-        *      ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-        *                              Y = atomic_read_acquire(&cross_gen_id)
-        *                              lock B
-        *
-        * atomic_read_acquire() is for ordering between A and B,
-        * IOW, A happens before B, when CONTEXT 2 see Y >= X.
-        *
-        * Pairs with atomic_inc_return() in add_xlock().
-        */
-       hlock->gen_id = (unsigned int)atomic_read_acquire(&cross_gen_id);
-
-       if (cross_lock(hlock->instance))
-               return add_xlock(hlock);
-
-       check_add_xhlock(hlock);
-       return 2;
-}
-
-static int copy_trace(struct stack_trace *trace)
-{
-       unsigned long *buf = stack_trace + nr_stack_trace_entries;
-       unsigned int max_nr = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
-       unsigned int nr = min(max_nr, trace->nr_entries);
-
-       trace->nr_entries = nr;
-       memcpy(buf, trace->entries, nr * sizeof(trace->entries[0]));
-       trace->entries = buf;
-       nr_stack_trace_entries += nr;
-
-       if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
-               if (!debug_locks_off_graph_unlock())
-                       return 0;
-
-               print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
-               dump_stack();
-
-               return 0;
-       }
-
-       return 1;
-}
-
-static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock)
-{
-       unsigned int xid, pid;
-       u64 chain_key;
-
-       xid = xlock_class(xlock) - lock_classes;
-       chain_key = iterate_chain_key((u64)0, xid);
-       pid = xhlock_class(xhlock) - lock_classes;
-       chain_key = iterate_chain_key(chain_key, pid);
-
-       if (lookup_chain_cache(chain_key))
-               return 1;
-
-       if (!add_chain_cache_classes(xid, pid, xhlock->hlock.irq_context,
-                               chain_key))
-               return 0;
-
-       if (!check_prev_add(current, &xlock->hlock, &xhlock->hlock, 1,
-                           &xhlock->trace, copy_trace))
-               return 0;
-
-       return 1;
-}
-
-static void commit_xhlocks(struct cross_lock *xlock)
-{
-       unsigned int cur = current->xhlock_idx;
-       unsigned int prev_hist_id = xhlock(cur).hist_id;
-       unsigned int i;
-
-       if (!graph_lock())
-               return;
-
-       if (xlock->nr_acquire) {
-               for (i = 0; i < MAX_XHLOCKS_NR; i++) {
-                       struct hist_lock *xhlock = &xhlock(cur - i);
-
-                       if (!xhlock_valid(xhlock))
-                               break;
-
-                       if (before(xhlock->hlock.gen_id, xlock->hlock.gen_id))
-                               break;
-
-                       if (!same_context_xhlock(xhlock))
-                               break;
-
-                       /*
-                        * Filter out the cases where the ring buffer was
-                        * overwritten and the current entry has a bigger
-                        * hist_id than the previous one, which is impossible
-                        * otherwise:
-                        */
-                       if (unlikely(before(prev_hist_id, xhlock->hist_id)))
-                               break;
-
-                       prev_hist_id = xhlock->hist_id;
-
-                       /*
-                        * commit_xhlock() returns 0 with graph_lock already
-                        * released if fail.
-                        */
-                       if (!commit_xhlock(xlock, xhlock))
-                               return;
-               }
-       }
-
-       graph_unlock();
-}
-
-void lock_commit_crosslock(struct lockdep_map *lock)
-{
-       struct cross_lock *xlock;
-       unsigned long flags;
-
-       if (unlikely(!debug_locks || current->lockdep_recursion))
-               return;
-
-       if (!current->xhlocks)
-               return;
-
-       /*
-        * Do commit hist_locks with the cross_lock, only in case that
-        * the cross_lock could depend on acquisitions after that.
-        *
-        * For example, if the cross_lock does not have the 'check' flag
-        * then we don't need to check dependencies and commit for that.
-        * Just skip it. In that case, of course, the cross_lock does
-        * not depend on acquisitions ahead, either.
-        *
-        * WARNING: Don't do that in add_xlock() in advance. When an
-        * acquisition context is different from the commit context,
-        * invalid(skipped) cross_lock might be accessed.
-        */
-       if (!depend_after(&((struct lockdep_map_cross *)lock)->xlock.hlock))
-               return;
-
-       raw_local_irq_save(flags);
-       check_flags(flags);
-       current->lockdep_recursion = 1;
-       xlock = &((struct lockdep_map_cross *)lock)->xlock;
-       commit_xhlocks(xlock);
-       current->lockdep_recursion = 0;
-       raw_local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(lock_commit_crosslock);
-
-/*
- * Return: 0 - failure;
- *         1 - crosslock, done;
- *         2 - normal lock, continue to held_lock[] ops.
- */
-static int lock_release_crosslock(struct lockdep_map *lock)
-{
-       if (cross_lock(lock)) {
-               if (!graph_lock())
-                       return 0;
-               ((struct lockdep_map_cross *)lock)->xlock.nr_acquire--;
-               graph_unlock();
-               return 1;
-       }
-       return 2;
-}
-
-static void cross_init(struct lockdep_map *lock, int cross)
-{
-       if (cross)
-               ((struct lockdep_map_cross *)lock)->xlock.nr_acquire = 0;
-
-       lock->cross = cross;
-
-       /*
-        * Crossrelease assumes that the ring buffer size of xhlocks
-        * is aligned with power of 2. So force it on build.
-        */
-       BUILD_BUG_ON(MAX_XHLOCKS_NR & (MAX_XHLOCKS_NR - 1));
-}
-
-void lockdep_init_task(struct task_struct *task)
-{
-       int i;
-
-       task->xhlock_idx = UINT_MAX;
-       task->hist_id = 0;
-
-       for (i = 0; i < XHLOCK_CTX_NR; i++) {
-               task->xhlock_idx_hist[i] = UINT_MAX;
-               task->hist_id_save[i] = 0;
-       }
-
-       task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
-                               GFP_KERNEL);
-}
-
-void lockdep_free_task(struct task_struct *task)
-{
-       if (task->xhlocks) {
-               void *tmp = task->xhlocks;
-               /* Diable crossrelease for current */
-               task->xhlocks = NULL;
-               kfree(tmp);
-       }
-}
-#endif
index 1fd1a7543cdddf39197acaa3882a0f9de7ddb3ab..936f3d14dd6bfeda3ef7921266fef5dc5b36e2a8 100644 (file)
@@ -66,12 +66,8 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock)                        \
                        break;                                          \
                preempt_enable();                                       \
                                                                        \
-               if (!(lock)->break_lock)                                \
-                       (lock)->break_lock = 1;                         \
-               while ((lock)->break_lock)                              \
-                       arch_##op##_relax(&lock->raw_lock);             \
+               arch_##op##_relax(&lock->raw_lock);                     \
        }                                                               \
-       (lock)->break_lock = 0;                                         \
 }                                                                      \
                                                                        \
 unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
@@ -86,12 +82,9 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)       \
                local_irq_restore(flags);                               \
                preempt_enable();                                       \
                                                                        \
-               if (!(lock)->break_lock)                                \
-                       (lock)->break_lock = 1;                         \
-               while ((lock)->break_lock)                              \
-                       arch_##op##_relax(&lock->raw_lock);             \
+               arch_##op##_relax(&lock->raw_lock);                     \
        }                                                               \
-       (lock)->break_lock = 0;                                         \
+                                                                       \
        return flags;                                                   \
 }                                                                      \
                                                                        \
index f0411a27176552a2a172c534a84c37e10c76705d..dea01ac9cb74c4ef619c51b5eba4e869d9e4fdc8 100644 (file)
@@ -4157,7 +4157,7 @@ static int m_show(struct seq_file *m, void *p)
 {
        struct module *mod = list_entry(p, struct module, list);
        char buf[MODULE_FLAGS_BUF_SIZE];
-       unsigned long value;
+       void *value;
 
        /* We always ignore unformed modules. */
        if (mod->state == MODULE_STATE_UNFORMED)
@@ -4173,8 +4173,8 @@ static int m_show(struct seq_file *m, void *p)
                   mod->state == MODULE_STATE_COMING ? "Loading" :
                   "Live");
        /* Used by oprofile and other similar tools. */
-       value = m->private ? 0 : (unsigned long)mod->core_layout.base;
-       seq_printf(m, " 0x" KALLSYM_FMT, value);
+       value = m->private ? NULL : mod->core_layout.base;
+       seq_printf(m, " 0x%px", value);
 
        /* Taints info */
        if (mod->taints)
index f262c9a4e70ab76d5ee5e748542845cb99e8eaff..57c0074d50cc485b706579aaa616e4a004775a8e 100644 (file)
@@ -288,9 +288,9 @@ static void invoke_padata_reorder(struct work_struct *work)
        local_bh_enable();
 }
 
-static void padata_reorder_timer(unsigned long arg)
+static void padata_reorder_timer(struct timer_list *t)
 {
-       struct parallel_data *pd = (struct parallel_data *)arg;
+       struct parallel_data *pd = from_timer(pd, t, timer);
        unsigned int weight;
        int target_cpu, cpu;
 
@@ -485,7 +485,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
 
        padata_init_pqueues(pd);
        padata_init_squeues(pd);
-       setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
+       timer_setup(&pd->timer, padata_reorder_timer, 0);
        atomic_set(&pd->seq_nr, -1);
        atomic_set(&pd->reorder_objects, 0);
        atomic_set(&pd->refcnt, 0);
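The hunk above follows the generic timer conversion pattern; a sketch with made-up struct and field names (kernel context assumed):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo {
        struct timer_list timer;
        int pending;
};

/* New-style callback: gets the timer itself and recovers its container. */
static void foo_timeout(struct timer_list *t)
{
        struct foo *f = from_timer(f, t, timer);

        f->pending = 0;
}

/* At init time, replacing setup_timer(&f->timer, fn, (unsigned long)f): */
timer_setup(&f->timer, foo_timeout, 0);
mod_timer(&f->timer, jiffies + HZ);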
index 5d81206a572d721e7d96b129f160a4e16d2e2f2e..b9006617710f591bc659e8f522c6d73baa2a04d5 100644 (file)
@@ -3141,9 +3141,6 @@ void dump_stack_print_info(const char *log_lvl)
 void show_regs_print_info(const char *log_lvl)
 {
        dump_stack_print_info(log_lvl);
-
-       printk("%stask: %p task.stack: %p\n",
-              log_lvl, current, task_stack_page(current));
 }
 
 #endif
index 75554f366fd3aa20339a00a03d7897cf42bc7d97..644fa2e3d993b5ef1bd1c57e4286168daadf14ff 100644 (file)
@@ -5097,17 +5097,6 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
        return ret;
 }
 
-/**
- * sys_sched_rr_get_interval - return the default timeslice of a process.
- * @pid: pid of the process.
- * @interval: userspace pointer to the timeslice value.
- *
- * this syscall writes the default timeslice value of a given process
- * into the user-space timespec buffer. A value of '0' means infinity.
- *
- * Return: On success, 0 and the timeslice is in @interval. Otherwise,
- * an error code.
- */
 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
 {
        struct task_struct *p;
@@ -5144,6 +5133,17 @@ out_unlock:
        return retval;
 }
 
+/**
+ * sys_sched_rr_get_interval - return the default timeslice of a process.
+ * @pid: pid of the process.
+ * @interval: userspace pointer to the timeslice value.
+ *
+ * this syscall writes the default timeslice value of a given process
+ * into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
+ */
 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
                struct timespec __user *, interval)
 {
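The kerneldoc block now sits directly above the syscall definition it documents. From userspace the call looks like this (minimal sketch):

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        /* pid 0 refers to the calling process */
        if (sched_rr_get_interval(0, &ts) == 0)
                printf("RR timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}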
index 4037e19bbca25939f0dd57b05f8fb25de8a90908..2fe3aa853e4dbacef363b70246390f94cc67932c 100644 (file)
@@ -3413,9 +3413,9 @@ void set_task_rq_fair(struct sched_entity *se,
  * _IFF_ we look at the pure running and runnable sums. Because they
  * represent the very same entity, just at different points in the hierarchy.
  *
- *
- * Per the above update_tg_cfs_util() is trivial (and still 'wrong') and
- * simply copies the running sum over.
+ * Per the above update_tg_cfs_util() is trivial and simply copies the running
+ * sum over (but still wrong, because the group entity and group rq do not have
+ * their PELT windows aligned).
  *
  * However, update_tg_cfs_runnable() is more complex. So we have:
  *
@@ -3424,11 +3424,11 @@ void set_task_rq_fair(struct sched_entity *se,
  * And since, like util, the runnable part should be directly transferable,
  * the following would _appear_ to be the straight forward approach:
  *
- *   grq->avg.load_avg = grq->load.weight * grq->avg.running_avg       (3)
+ *   grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg      (3)
  *
  * And per (1) we have:
  *
- *   ge->avg.running_avg == grq->avg.running_avg
+ *   ge->avg.runnable_avg == grq->avg.runnable_avg
  *
  * Which gives:
  *
@@ -3447,27 +3447,28 @@ void set_task_rq_fair(struct sched_entity *se,
  * to (shortly) return to us. This only works by keeping the weights as
  * integral part of the sum. We therefore cannot decompose as per (3).
  *
- * OK, so what then?
+ * Another reason this doesn't work is that runnable isn't a 0-sum entity.
+ * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
+ * rq itself is runnable anywhere between 2/3 and 1 depending on how the
+ * runnable section of these tasks overlap (or not). If they were to perfectly
+ * align the rq as a whole would be runnable 2/3 of the time. If however we
+ * always have at least 1 runnable task, the rq as a whole is always runnable.
  *
+ * So we'll have to approximate.. :/
  *
- * Another way to look at things is:
+ * Given the constraint:
  *
- *   grq->avg.load_avg = \Sum se->avg.load_avg
+ *   ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
  *
- * Therefore, per (2):
+ * We can construct a rule that adds runnable to a rq by assuming minimal
+ * overlap.
  *
- *   grq->avg.load_avg = \Sum se->load.weight * se->avg.runnable_avg
+ * On removal, we'll assume each task is equally runnable; which yields:
  *
- * And the very thing we're propagating is a change in that sum (someone
- * joined/left). So we can easily know the runnable change, which would be, per
- * (2) the already tracked se->load_avg divided by the corresponding
- * se->weight.
+ *   grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
  *
- * Basically (4) but in differential form:
+ * XXX: only do this for the part of runnable > running ?
  *
- *   d(runnable_avg) += se->avg.load_avg / se->load.weight
- *                                                                (5)
- *   ge->avg.load_avg += ge->load.weight * d(runnable_avg)
  */
 
 static inline void
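A quick numeric check of the bound described above (illustrative numbers only): with two tasks that are each runnable for 2/3 of a PELT window, the group rq is runnable anywhere from max(2/3, 2/3) = 2/3 with perfect overlap up to min(2/3 + 2/3, 1) = 1 with no overlap. The add path below takes the pessimistic end by summing and clipping at LOAD_AVG_MAX, while the removal path assumes all tasks are equally runnable and divides the load sum by the queue weight.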
@@ -3479,6 +3480,14 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
        if (!delta)
                return;
 
+       /*
+        * The relation between sum and avg is:
+        *
+        *   LOAD_AVG_MAX - 1024 + sa->period_contrib
+        *
+        * however, the PELT windows are not aligned between grq and gse.
+        */
+
        /* Set new sched_entity's utilization */
        se->avg.util_avg = gcfs_rq->avg.util_avg;
        se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
@@ -3491,33 +3500,68 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 static inline void
 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-       long runnable_sum = gcfs_rq->prop_runnable_sum;
-       long runnable_load_avg, load_avg;
-       s64 runnable_load_sum, load_sum;
+       long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+       unsigned long runnable_load_avg, load_avg;
+       u64 runnable_load_sum, load_sum = 0;
+       s64 delta_sum;
 
        if (!runnable_sum)
                return;
 
        gcfs_rq->prop_runnable_sum = 0;
 
+       if (runnable_sum >= 0) {
+               /*
+                * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
+                * the CPU is saturated running == runnable.
+                */
+               runnable_sum += se->avg.load_sum;
+               runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX);
+       } else {
+               /*
+                * Estimate the new unweighted runnable_sum of the gcfs_rq by
+                * assuming all tasks are equally runnable.
+                */
+               if (scale_load_down(gcfs_rq->load.weight)) {
+                       load_sum = div_s64(gcfs_rq->avg.load_sum,
+                               scale_load_down(gcfs_rq->load.weight));
+               }
+
+               /* But make sure to not inflate se's runnable */
+               runnable_sum = min(se->avg.load_sum, load_sum);
+       }
+
+       /*
+        * runnable_sum can't be lower than running_sum.
+        * As the running sum is scaled with CPU capacity whereas the runnable
+        * sum is not, we rescale running_sum first.
+        */
+       running_sum = se->avg.util_sum /
+               arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
+       runnable_sum = max(runnable_sum, running_sum);
+
        load_sum = (s64)se_weight(se) * runnable_sum;
        load_avg = div_s64(load_sum, LOAD_AVG_MAX);
 
-       add_positive(&se->avg.load_sum, runnable_sum);
-       add_positive(&se->avg.load_avg, load_avg);
+       delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
+       delta_avg = load_avg - se->avg.load_avg;
 
-       add_positive(&cfs_rq->avg.load_avg, load_avg);
-       add_positive(&cfs_rq->avg.load_sum, load_sum);
+       se->avg.load_sum = runnable_sum;
+       se->avg.load_avg = load_avg;
+       add_positive(&cfs_rq->avg.load_avg, delta_avg);
+       add_positive(&cfs_rq->avg.load_sum, delta_sum);
 
        runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
        runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);
+       delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum;
+       delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
 
-       add_positive(&se->avg.runnable_load_sum, runnable_sum);
-       add_positive(&se->avg.runnable_load_avg, runnable_load_avg);
+       se->avg.runnable_load_sum = runnable_sum;
+       se->avg.runnable_load_avg = runnable_load_avg;
 
        if (se->on_rq) {
-               add_positive(&cfs_rq->avg.runnable_load_avg, runnable_load_avg);
-               add_positive(&cfs_rq->avg.runnable_load_sum, runnable_load_sum);
+               add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
+               add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
        }
 }
 
index 4056c19ca3f00efbc7592a1b4b071426fabf2124..665ace2fc55885e0a4c0621e4bdbff2a5870b61f 100644 (file)
@@ -2034,8 +2034,9 @@ static void pull_rt_task(struct rq *this_rq)
        bool resched = false;
        struct task_struct *p;
        struct rq *src_rq;
+       int rt_overload_count = rt_overloaded(this_rq);
 
-       if (likely(!rt_overloaded(this_rq)))
+       if (likely(!rt_overload_count))
                return;
 
        /*
@@ -2044,6 +2045,11 @@ static void pull_rt_task(struct rq *this_rq)
         */
        smp_rmb();
 
+       /* If we are the only overloaded CPU, do nothing */
+       if (rt_overload_count == 1 &&
+           cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
+               return;
+
 #ifdef HAVE_RT_PUSH_IPI
        if (sched_feat(RT_PUSH_IPI)) {
                tell_cpu_to_push(this_rq);
index 98feab7933c76a0d178cd7da0115376641e7bbad..929ecb7d6b78a70f4ec4548a582f78c749d7d3ee 100644 (file)
@@ -27,7 +27,7 @@ void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq
 
        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
-       __add_wait_queue_entry_tail(wq_head, wq_entry);
+       __add_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(add_wait_queue);
index d689a9557e170b9d89ddb7cb42e518a82d5851cb..e776fc8cc1df3bc6dd09e7722250f87f183fb5ef 100644 (file)
@@ -21,10 +21,6 @@ config CLOCKSOURCE_VALIDATE_LAST_CYCLE
 config GENERIC_TIME_VSYSCALL
        bool
 
-# Timekeeping vsyscall support
-config GENERIC_TIME_VSYSCALL_OLD
-       bool
-
 # Old style timekeeping
 config ARCH_USES_GETTIMEOFFSET
        bool
index 03918a19cf2da854bcefa9f8188daa53a7db82f4..65f9e3f24dde8bc8f908f4d68cda2fa4f39843ce 100644 (file)
@@ -171,7 +171,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
        spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
-static void clocksource_watchdog(unsigned long data)
+static void clocksource_watchdog(struct timer_list *unused)
 {
        struct clocksource *cs;
        u64 csnow, wdnow, cslast, wdlast, delta;
@@ -290,8 +290,7 @@ static inline void clocksource_start_watchdog(void)
 {
        if (watchdog_running || !watchdog || list_empty(&watchdog_list))
                return;
-       init_timer(&watchdog_timer);
-       watchdog_timer.function = clocksource_watchdog;
+       timer_setup(&watchdog_timer, clocksource_watchdog, 0);
        watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
        add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
        watchdog_running = 1;
index 13d6881f908b7f91a5669264a060f59d7990f801..ec999f32c84058a0624d55ff3ee26a4fac63eb57 100644 (file)
@@ -434,17 +434,22 @@ static struct pid *good_sigevent(sigevent_t * event)
 {
        struct task_struct *rtn = current->group_leader;
 
-       if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
-               (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
-                !same_thread_group(rtn, current) ||
-                (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
+       switch (event->sigev_notify) {
+       case SIGEV_SIGNAL | SIGEV_THREAD_ID:
+               rtn = find_task_by_vpid(event->sigev_notify_thread_id);
+               if (!rtn || !same_thread_group(rtn, current))
+                       return NULL;
+               /* FALLTHRU */
+       case SIGEV_SIGNAL:
+       case SIGEV_THREAD:
+               if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
+                       return NULL;
+               /* FALLTHRU */
+       case SIGEV_NONE:
+               return task_pid(rtn);
+       default:
                return NULL;
-
-       if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
-           ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
-               return NULL;
-
-       return task_pid(rtn);
+       }
 }
 
 static struct k_itimer * alloc_posix_timer(void)
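For reference, a sigevent that passes the stricter validation above can be built from userspace as follows (minimal sketch; older glibc needs -lrt for timer_create):

#include <signal.h>
#include <time.h>

int main(void)
{
        struct sigevent sev = {
                .sigev_notify = SIGEV_SIGNAL,
                .sigev_signo  = SIGRTMIN,       /* must be a valid signal number */
        };
        timer_t timerid;

        return timer_create(CLOCK_MONOTONIC, &sev, &timerid);
}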
@@ -669,7 +674,7 @@ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
        struct timespec64 ts64;
        bool sig_none;
 
-       sig_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
+       sig_none = timr->it_sigev_notify == SIGEV_NONE;
        iv = timr->it_interval;
 
        /* interval timer ? */
@@ -856,7 +861,7 @@ int common_timer_set(struct k_itimer *timr, int flags,
 
        timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
        expires = timespec64_to_ktime(new_setting->it_value);
-       sigev_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
+       sigev_none = timr->it_sigev_notify == SIGEV_NONE;
 
        kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
        timr->it_active = !sigev_none;
index 198afa78bf69e425b8cc1b0dfed538467df12bf7..cd03317e7b57deaec813644758080f605490f446 100644 (file)
@@ -557,45 +557,6 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
        update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 }
 
-#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
-#warning Please contact your maintainers, as GENERIC_TIME_VSYSCALL_OLD compatibity will disappear soon.
-
-static inline void update_vsyscall(struct timekeeper *tk)
-{
-       struct timespec xt, wm;
-
-       xt = timespec64_to_timespec(tk_xtime(tk));
-       wm = timespec64_to_timespec(tk->wall_to_monotonic);
-       update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
-                           tk->tkr_mono.cycle_last);
-}
-
-static inline void old_vsyscall_fixup(struct timekeeper *tk)
-{
-       s64 remainder;
-
-       /*
-       * Store only full nanoseconds into xtime_nsec after rounding
-       * it up and add the remainder to the error difference.
-       * XXX - This is necessary to avoid small 1ns inconsistnecies caused
-       * by truncating the remainder in vsyscalls. However, it causes
-       * additional work to be done in timekeeping_adjust(). Once
-       * the vsyscall implementations are converted to use xtime_nsec
-       * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
-       * users are removed, this can be killed.
-       */
-       remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
-       if (remainder != 0) {
-               tk->tkr_mono.xtime_nsec -= remainder;
-               tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
-               tk->ntp_error += remainder << tk->ntp_error_shift;
-               tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
-       }
-}
-#else
-#define old_vsyscall_fixup(tk)
-#endif
-
 static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
 
 static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
@@ -2163,12 +2124,6 @@ void update_wall_time(void)
        /* correct the clock when NTP error is too big */
        timekeeping_adjust(tk, offset);
 
-       /*
-        * XXX This can be killed once everyone converts
-        * to the new update_vsyscall.
-        */
-       old_vsyscall_fixup(tk);
-
        /*
         * Finally, make sure that after the rounding
         * xtime_nsec isn't larger than NSEC_PER_SEC
index af0b8bae45027042ff153172522b659fb605cf1d..ffebcf878fba5d5cf67f5e9abcece25c16259919 100644 (file)
@@ -707,14 +707,18 @@ static inline void debug_timer_assert_init(struct timer_list *timer)
        debug_object_assert_init(timer, &timer_debug_descr);
 }
 
-static void do_init_timer(struct timer_list *timer, unsigned int flags,
+static void do_init_timer(struct timer_list *timer,
+                         void (*func)(struct timer_list *),
+                         unsigned int flags,
                          const char *name, struct lock_class_key *key);
 
-void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
+void init_timer_on_stack_key(struct timer_list *timer,
+                            void (*func)(struct timer_list *),
+                            unsigned int flags,
                             const char *name, struct lock_class_key *key)
 {
        debug_object_init_on_stack(timer, &timer_debug_descr);
-       do_init_timer(timer, flags, name, key);
+       do_init_timer(timer, func, flags, name, key);
 }
 EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
 
@@ -755,10 +759,13 @@ static inline void debug_assert_init(struct timer_list *timer)
        debug_timer_assert_init(timer);
 }
 
-static void do_init_timer(struct timer_list *timer, unsigned int flags,
+static void do_init_timer(struct timer_list *timer,
+                         void (*func)(struct timer_list *),
+                         unsigned int flags,
                          const char *name, struct lock_class_key *key)
 {
        timer->entry.pprev = NULL;
+       timer->function = func;
        timer->flags = flags | raw_smp_processor_id();
        lockdep_init_map(&timer->lockdep_map, name, key, 0);
 }
@@ -766,6 +773,7 @@ static void do_init_timer(struct timer_list *timer, unsigned int flags,
 /**
  * init_timer_key - initialize a timer
  * @timer: the timer to be initialized
+ * @func: timer callback function
  * @flags: timer flags
  * @name: name of the timer
  * @key: lockdep class key of the fake lock used for tracking timer
@@ -774,11 +782,12 @@ static void do_init_timer(struct timer_list *timer, unsigned int flags,
  * init_timer_key() must be done to a timer prior calling *any* of the
  * other timer functions.
  */
-void init_timer_key(struct timer_list *timer, unsigned int flags,
+void init_timer_key(struct timer_list *timer,
+                   void (*func)(struct timer_list *), unsigned int flags,
                    const char *name, struct lock_class_key *key)
 {
        debug_init(timer);
-       do_init_timer(timer, flags, name, key);
+       do_init_timer(timer, func, flags, name, key);
 }
 EXPORT_SYMBOL(init_timer_key);
 
@@ -1107,12 +1116,12 @@ EXPORT_SYMBOL(timer_reduce);
  * add_timer - start a timer
  * @timer: the timer to be added
  *
- * The kernel will do a ->function(->data) callback from the
+ * The kernel will do a ->function(@timer) callback from the
  * timer interrupt at the ->expires point in the future. The
  * current time is 'jiffies'.
  *
- * The timer's ->expires, ->function (and if the handler uses it, ->data)
- * fields must be set prior calling this function.
+ * The timer's ->expires, ->function fields must be set prior calling this
+ * function.
  *
  * Timers with an ->expires field in the past will be executed in the next
  * timer tick.
@@ -1284,8 +1293,7 @@ int del_timer_sync(struct timer_list *timer)
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 
-static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
-                         unsigned long data)
+static void call_timer_fn(struct timer_list *timer, void (*fn)(struct timer_list *))
 {
        int count = preempt_count();
 
@@ -1309,7 +1317,7 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
        lock_map_acquire(&lockdep_map);
 
        trace_timer_expire_entry(timer);
-       fn(data);
+       fn(timer);
        trace_timer_expire_exit(timer);
 
        lock_map_release(&lockdep_map);
@@ -1331,8 +1339,7 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
 {
        while (!hlist_empty(head)) {
                struct timer_list *timer;
-               void (*fn)(unsigned long);
-               unsigned long data;
+               void (*fn)(struct timer_list *);
 
                timer = hlist_entry(head->first, struct timer_list, entry);
 
@@ -1340,15 +1347,14 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
                detach_timer(timer, true);
 
                fn = timer->function;
-               data = timer->data;
 
                if (timer->flags & TIMER_IRQSAFE) {
                        raw_spin_unlock(&base->lock);
-                       call_timer_fn(timer, fn, data);
+                       call_timer_fn(timer, fn);
                        raw_spin_lock(&base->lock);
                } else {
                        raw_spin_unlock_irq(&base->lock);
-                       call_timer_fn(timer, fn, data);
+                       call_timer_fn(timer, fn);
                        raw_spin_lock_irq(&base->lock);
                }
        }
index 0e7f5428a1484ed85215a72e115759eace1392fb..0ed768b56c6061c9da5669649a15590e6f720264 100644 (file)
@@ -389,7 +389,7 @@ static int __init init_timer_list_procfs(void)
 {
        struct proc_dir_entry *pe;
 
-       pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
+       pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
        if (!pe)
                return -ENOMEM;
        return 0;
index af7dad126c13cecbe73f5d797778f005b5838377..904c952ac3833bdfd0e017dff437d26f4c545e3d 100644 (file)
@@ -164,6 +164,7 @@ config PREEMPTIRQ_EVENTS
        bool "Enable trace events for preempt and irq disable/enable"
        select TRACE_IRQFLAGS
        depends on DEBUG_PREEMPT || !PROVE_LOCKING
+       depends on TRACING
        default n
        help
          Enable tracing of disable and enable events for preemption and irqs.
index 206e0e2ace53d18b77437d45d8baef24bce97187..987d9a9ae2839a2daefdd5c2d24ffe9c6d440bde 100644 (file)
@@ -591,7 +591,7 @@ static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                return ret;
 
        if (copy_to_user(arg, &buts, sizeof(buts))) {
-               blk_trace_remove(q);
+               __blk_trace_remove(q);
                return -EFAULT;
        }
        return 0;
@@ -637,7 +637,7 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
                return ret;
 
        if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
-               blk_trace_remove(q);
+               __blk_trace_remove(q);
                return -EFAULT;
        }
 
@@ -872,7 +872,7 @@ static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
  *
  **/
 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-                             u32 what, int error, union kernfs_node_id *cgid)
+                             u32 what, int error)
 {
        struct blk_trace *bt = q->blk_trace;
 
@@ -880,22 +880,21 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
                return;
 
        __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-                       bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid);
+                       bio_op(bio), bio->bi_opf, what, error, 0, NULL,
+                       blk_trace_bio_get_cgid(q, bio));
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
                                     struct request_queue *q, struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0,
-                         blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 }
 
 static void blk_add_trace_bio_complete(void *ignore,
                                       struct request_queue *q, struct bio *bio,
                                       int error)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error,
-                         blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 }
 
 static void blk_add_trace_bio_backmerge(void *ignore,
@@ -903,8 +902,7 @@ static void blk_add_trace_bio_backmerge(void *ignore,
                                        struct request *rq,
                                        struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0,
-                        blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
 }
 
 static void blk_add_trace_bio_frontmerge(void *ignore,
@@ -912,15 +910,13 @@ static void blk_add_trace_bio_frontmerge(void *ignore,
                                         struct request *rq,
                                         struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0,
-                         blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
 }
 
 static void blk_add_trace_bio_queue(void *ignore,
                                    struct request_queue *q, struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0,
-                         blk_trace_bio_get_cgid(q, bio));
+       blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
 }
 
 static void blk_add_trace_getrq(void *ignore,
@@ -928,8 +924,7 @@ static void blk_add_trace_getrq(void *ignore,
                                struct bio *bio, int rw)
 {
        if (bio)
-               blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0,
-                                 blk_trace_bio_get_cgid(q, bio));
+               blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
        else {
                struct blk_trace *bt = q->blk_trace;
 
@@ -945,8 +940,7 @@ static void blk_add_trace_sleeprq(void *ignore,
                                  struct bio *bio, int rw)
 {
        if (bio)
-               blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0,
-                                 blk_trace_bio_get_cgid(q, bio));
+               blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
        else {
                struct blk_trace *bt = q->blk_trace;
 
index 27d1f4ffa3def946525b2d248757fac3620504e5..40207c2a41134851d5016fe9cbdc678e42606868 100644 (file)
@@ -343,14 +343,13 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
        .arg4_type      = ARG_CONST_SIZE,
 };
 
-static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);
+static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
 
 static __always_inline u64
 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
-                       u64 flags, struct perf_raw_record *raw)
+                       u64 flags, struct perf_sample_data *sd)
 {
        struct bpf_array *array = container_of(map, struct bpf_array, map);
-       struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
        unsigned int cpu = smp_processor_id();
        u64 index = flags & BPF_F_INDEX_MASK;
        struct bpf_event_entry *ee;
@@ -373,8 +372,6 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
        if (unlikely(event->oncpu != cpu))
                return -EOPNOTSUPP;
 
-       perf_sample_data_init(sd, 0, 0);
-       sd->raw = raw;
        perf_event_output(event, sd, regs);
        return 0;
 }
@@ -382,6 +379,7 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
           u64, flags, void *, data, u64, size)
 {
+       struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
        struct perf_raw_record raw = {
                .frag = {
                        .size = size,
@@ -392,7 +390,10 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
        if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
                return -EINVAL;
 
-       return __bpf_perf_event_output(regs, map, flags, &raw);
+       perf_sample_data_init(sd, 0, 0);
+       sd->raw = &raw;
+
+       return __bpf_perf_event_output(regs, map, flags, sd);
 }
 
 static const struct bpf_func_proto bpf_perf_event_output_proto = {
@@ -407,10 +408,12 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
 };
 
 static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
+static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);
 
 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
                     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 {
+       struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
        struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
        struct perf_raw_frag frag = {
                .copy           = ctx_copy,
@@ -428,8 +431,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
        };
 
        perf_fetch_caller_regs(regs);
+       perf_sample_data_init(sd, 0, 0);
+       sd->raw = &raw;
 
-       return __bpf_perf_event_output(regs, map, flags, &raw);
+       return __bpf_perf_event_output(regs, map, flags, sd);
 }
 
 BPF_CALL_0(bpf_get_current_task)
@@ -759,6 +764,8 @@ const struct bpf_prog_ops perf_event_prog_ops = {
 
 static DEFINE_MUTEX(bpf_event_mutex);
 
+#define BPF_TRACE_MAX_PROGS 64
+
 int perf_event_attach_bpf_prog(struct perf_event *event,
                               struct bpf_prog *prog)
 {
@@ -772,6 +779,12 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
                goto unlock;
 
        old_array = event->tp_event->prog_array;
+       if (old_array &&
+           bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
+               ret = -E2BIG;
+               goto unlock;
+       }
+
        ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
        if (ret < 0)
                goto unlock;
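
The hunks above give each bpf output path its own per-CPU scratch perf_sample_data (bpf_trace_sd for bpf_perf_event_output(), bpf_misc_sd for bpf_event_output()) and move the initialisation into the callers, while the last hunk caps the number of BPF programs attachable to one perf event at BPF_TRACE_MAX_PROGS (64), returning -E2BIG beyond that. A minimal sketch of the per-CPU scratch pattern used here, with illustrative names and outside the bpf code proper:

    #include <linux/percpu.h>
    #include <linux/perf_event.h>

    static DEFINE_PER_CPU(struct perf_sample_data, my_sd);

    /* Caller runs with preemption disabled, so the per-CPU pointer is stable. */
    static void emit_raw_sample(struct perf_event *event, struct pt_regs *regs,
                                struct perf_raw_record *raw)
    {
            struct perf_sample_data *sd = this_cpu_ptr(&my_sd);

            perf_sample_data_init(sd, 0, 0);
            sd->raw = raw;
            perf_event_output(event, sd, regs);
    }
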
index 91874a95060de5de11aa47d3fbddb8c4980a0da8..c87766c1c20446de2d191edda885669d447adecc 100644 (file)
@@ -1799,12 +1799,6 @@ void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
 
-static __always_inline void *
-__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
-{
-       return bpage->data + index;
-}
-
 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
 {
        return bpage->page->data + index;
index 73e67b68c53b47d5b422970cd0dee1d0bec27002..59518b8126d04b4f1f62a526571490dda8398e3b 100644 (file)
@@ -362,7 +362,7 @@ trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct
 }
 
 /**
- * trace_pid_filter_add_remove - Add or remove a task from a pid_list
+ * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
  * @pid_list: The list to modify
  * @self: The current task for fork or NULL for exit
  * @task: The task to add or remove
@@ -925,7 +925,7 @@ static void tracing_snapshot_instance(struct trace_array *tr)
 }
 
 /**
- * trace_snapshot - take a snapshot of the current buffer.
+ * tracing_snapshot - take a snapshot of the current buffer.
  *
  * This causes a swap between the snapshot buffer and the current live
  * tracing buffer. You can use this to take snapshots of the live
@@ -1004,9 +1004,9 @@ int tracing_alloc_snapshot(void)
 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 
 /**
- * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
+ * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
  *
- * This is similar to trace_snapshot(), but it will allocate the
+ * This is similar to tracing_snapshot(), but it will allocate the
  * snapshot buffer if it isn't already allocated. Use this only
  * where it is safe to sleep, as the allocation may sleep.
  *
@@ -1303,7 +1303,7 @@ unsigned long __read_mostly       tracing_thresh;
 /*
  * Copy the new maximum trace into the separate maximum-trace
  * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
  */
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -2415,7 +2415,7 @@ trace_process_export(struct trace_export *export,
 
        entry = ring_buffer_event_data(event);
        size = ring_buffer_event_length(event);
-       export->write(entry, size);
+       export->write(export, entry, size);
 }
 
 static DEFINE_MUTEX(ftrace_export_lock);
@@ -4178,37 +4178,30 @@ static const struct file_operations show_traces_fops = {
        .llseek         = seq_lseek,
 };
 
-/*
- * The tracer itself will not take this lock, but still we want
- * to provide a consistent cpumask to user-space:
- */
-static DEFINE_MUTEX(tracing_cpumask_update_lock);
-
-/*
- * Temporary storage for the character representation of the
- * CPU bitmask (and one more byte for the newline):
- */
-static char mask_str[NR_CPUS + 1];
-
 static ssize_t
 tracing_cpumask_read(struct file *filp, char __user *ubuf,
                     size_t count, loff_t *ppos)
 {
        struct trace_array *tr = file_inode(filp)->i_private;
+       char *mask_str;
        int len;
 
-       mutex_lock(&tracing_cpumask_update_lock);
+       len = snprintf(NULL, 0, "%*pb\n",
+                      cpumask_pr_args(tr->tracing_cpumask)) + 1;
+       mask_str = kmalloc(len, GFP_KERNEL);
+       if (!mask_str)
+               return -ENOMEM;
 
-       len = snprintf(mask_str, count, "%*pb\n",
+       len = snprintf(mask_str, len, "%*pb\n",
                       cpumask_pr_args(tr->tracing_cpumask));
        if (len >= count) {
                count = -EINVAL;
                goto out_err;
        }
-       count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
+       count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
 
 out_err:
-       mutex_unlock(&tracing_cpumask_update_lock);
+       kfree(mask_str);
 
        return count;
 }
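
The rewritten tracing_cpumask_read() drops the static NR_CPUS-sized mask_str buffer and the mutex that only existed to protect it, sizing a per-call buffer with a first snprintf(NULL, 0, ...) pass instead. A small sketch of that sizing idiom, with illustrative names:

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    /* Returns a kmalloc'd "%*pb\n" rendering of @mask, or NULL; caller kfree()s. */
    static char *cpumask_to_string(const struct cpumask *mask)
    {
            int len = snprintf(NULL, 0, "%*pb\n", cpumask_pr_args(mask)) + 1;
            char *buf = kmalloc(len, GFP_KERNEL);

            if (buf)
                    snprintf(buf, len, "%*pb\n", cpumask_pr_args(mask));
            return buf;
    }
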
@@ -4228,8 +4221,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
        if (err)
                goto err_unlock;
 
-       mutex_lock(&tracing_cpumask_update_lock);
-
        local_irq_disable();
        arch_spin_lock(&tr->max_lock);
        for_each_tracing_cpu(cpu) {
@@ -4252,8 +4243,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
        local_irq_enable();
 
        cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
-
-       mutex_unlock(&tracing_cpumask_update_lock);
        free_cpumask_var(tracing_cpumask_new);
 
        return count;
index 734accc02418930280a5248013fa6fbd4a5868e2..3c7bfc4bf5e9981b687ca8dd4ed1cf890b38ee12 100644 (file)
@@ -209,6 +209,10 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
        if (__this_cpu_read(disable_stack_tracer) != 1)
                goto out;
 
+       /* If RCU is not watching, saving the stack trace can fail */
+       if (!rcu_is_watching())
+               goto out;
+
        ip += MCOUNT_INSN_SIZE;
 
        check_stack(ip, &stack);
index ce74a4901d2b058595af031cf6c408c48dc5e1f1..ef1da2a5f9bd00689e4f78adffb04c867d8395cb 100644 (file)
@@ -192,6 +192,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
                return retval;
        }
 
+       groups_sort(group_info);
        retval = set_current_groups(group_info);
        put_group_info(group_info);
 
index dde6298f6b221e136fc579a37adc80310f97a8f6..43d18cb46308385865d14ce40906c4646d378063 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
 #include <linux/freezer.h>
-#include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
@@ -48,6 +47,7 @@
 #include <linux/nodemask.h>
 #include <linux/moduleparam.h>
 #include <linux/uaccess.h>
+#include <linux/sched/isolation.h>
 
 #include "workqueue_internal.h"
 
@@ -1509,7 +1509,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
        struct work_struct *work = &dwork->work;
 
        WARN_ON_ONCE(!wq);
-       WARN_ON_ONCE(timer->function != (TIMER_FUNC_TYPE)delayed_work_timer_fn);
+       WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
        WARN_ON_ONCE(timer_pending(timer));
        WARN_ON_ONCE(!list_empty(&work->entry));
 
@@ -1634,7 +1634,7 @@ static void worker_enter_idle(struct worker *worker)
                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
        /*
-        * Sanity check nr_running.  Because wq_unbind_fn() releases
+        * Sanity check nr_running.  Because unbind_workers() releases
         * pool->lock between setting %WORKER_UNBOUND and zapping
         * nr_running, the warning may trigger spuriously.  Check iff
         * unbind is not in progress.
@@ -4510,9 +4510,8 @@ void show_workqueue_state(void)
  * cpu comes back online.
  */
 
-static void wq_unbind_fn(struct work_struct *work)
+static void unbind_workers(int cpu)
 {
-       int cpu = smp_processor_id();
        struct worker_pool *pool;
        struct worker *worker;
 
@@ -4589,16 +4588,6 @@ static void rebind_workers(struct worker_pool *pool)
 
        spin_lock_irq(&pool->lock);
 
-       /*
-        * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
-        * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
-        * being reworked and this can go away in time.
-        */
-       if (!(pool->flags & POOL_DISASSOCIATED)) {
-               spin_unlock_irq(&pool->lock);
-               return;
-       }
-
        pool->flags &= ~POOL_DISASSOCIATED;
 
        for_each_pool_worker(worker, pool) {
@@ -4709,12 +4698,13 @@ int workqueue_online_cpu(unsigned int cpu)
 
 int workqueue_offline_cpu(unsigned int cpu)
 {
-       struct work_struct unbind_work;
        struct workqueue_struct *wq;
 
        /* unbinding per-cpu workers should happen on the local CPU */
-       INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-       queue_work_on(cpu, system_highpri_wq, &unbind_work);
+       if (WARN_ON(cpu != smp_processor_id()))
+               return -1;
+
+       unbind_workers(cpu);
 
        /* update NUMA affinity of unbound workqueues */
        mutex_lock(&wq_pool_mutex);
@@ -4722,9 +4712,6 @@ int workqueue_offline_cpu(unsigned int cpu)
                wq_update_unbound_numa(wq, cpu, false);
        mutex_unlock(&wq_pool_mutex);
 
-       /* wait for per-cpu unbinding to finish */
-       flush_work(&unbind_work);
-       destroy_work_on_stack(&unbind_work);
        return 0;
 }
 
@@ -4957,6 +4944,10 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
        if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
                return -ENOMEM;
 
+       /*
+        * Not excluding isolated cpus on purpose.
+        * If the user wishes to include them, we allow that.
+        */
        cpumask_and(cpumask, cpumask, cpu_possible_mask);
        if (!cpumask_empty(cpumask)) {
                apply_wqattrs_lock();
@@ -5555,7 +5546,7 @@ int __init workqueue_init_early(void)
        WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
        BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
-       cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
+       cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN));
 
        pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
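
Two behavioural changes are folded into this file: workqueue_offline_cpu() now unbinds per-cpu workers synchronously on the dying CPU instead of bouncing through a highpri work item, and the default unbound-workqueue cpumask is initialised from the housekeeping mask so isolated CPUs are left alone by default (an administrator can still add them back through workqueue_set_unbound_cpumask(), as the comment above notes). A hedged sketch of applying the same housekeeping restriction elsewhere, with an illustrative helper name:

    #include <linux/cpumask.h>
    #include <linux/sched/isolation.h>

    /* Drop CPUs isolated from scheduler domains (e.g. via isolcpus=) from @mask. */
    static void restrict_to_housekeeping(struct cpumask *mask)
    {
            cpumask_and(mask, mask, housekeeping_cpumask(HK_FLAG_DOMAIN));
    }
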
 
index 947d3e2ed5c2f1a7fa1e001e4bf56b9c7a2b9d49..9d5b78aad4c5bcd59a927c654e8010e54269d750 100644 (file)
@@ -1099,8 +1099,6 @@ config PROVE_LOCKING
        select DEBUG_MUTEXES
        select DEBUG_RT_MUTEXES if RT_MUTEXES
        select DEBUG_LOCK_ALLOC
-       select LOCKDEP_CROSSRELEASE
-       select LOCKDEP_COMPLETIONS
        select TRACE_IRQFLAGS
        default n
        help
@@ -1170,37 +1168,6 @@ config LOCK_STAT
         CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
         (CONFIG_LOCKDEP defines "acquire" and "release" events.)
 
-config LOCKDEP_CROSSRELEASE
-       bool
-       help
-        This makes lockdep work for crosslock which is a lock allowed to
-        be released in a different context from the acquisition context.
-        Normally a lock must be released in the context acquiring the lock.
-        However, relexing this constraint helps synchronization primitives
-        such as page locks or completions can use the lock correctness
-        detector, lockdep.
-
-config LOCKDEP_COMPLETIONS
-       bool
-       help
-        A deadlock caused by wait_for_completion() and complete() can be
-        detected by lockdep using crossrelease feature.
-
-config BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
-       bool "Enable the boot parameter, crossrelease_fullstack"
-       depends on LOCKDEP_CROSSRELEASE
-       default n
-       help
-        The lockdep "cross-release" feature needs to record stack traces
-        (of calling functions) for all acquisitions, for eventual later
-        use during analysis. By default only a single caller is recorded,
-        because the unwind operation can be very expensive with deeper
-        stack chains.
-
-        However a boot parameter, crossrelease_fullstack, was
-        introduced since sometimes deeper traces are required for full
-        analysis. This option turns on the boot parameter.
-
 config DEBUG_LOCKDEP
        bool "Lock dependency engine debugging"
        depends on DEBUG_KERNEL && LOCKDEP
index 1b6087db95a54b665262ec8ee142a3a323850578..3ffc46e3bb6c84cee02bcb038b7739a52cbe8761 100644 (file)
@@ -16,7 +16,7 @@
 
 #include <linux/export.h>
 
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 long long notrace __ashldi3(long long u, word_type b)
 {
index 2e67c97ac65a98737d054898884a0b944c3cd6de..ea054550f0e800897652b1415d9baa2b356d376d 100644 (file)
@@ -16,7 +16,7 @@
 
 #include <linux/export.h>
 
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 long long notrace __ashrdi3(long long u, word_type b)
 {
index 1ef0cec38d7879332966f8095d0bfdc0c7580230..dc14beae2c9aac7df010cdc36e861e33d34a1103 100644 (file)
@@ -313,42 +313,47 @@ next_op:
 
        /* Decide how to handle the operation */
        switch (op) {
-       case ASN1_OP_MATCH_ANY_ACT:
-       case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
-       case ASN1_OP_COND_MATCH_ANY_ACT:
-       case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
-               ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
-               if (ret < 0)
-                       return ret;
-               goto skip_data;
-
-       case ASN1_OP_MATCH_ACT:
-       case ASN1_OP_MATCH_ACT_OR_SKIP:
-       case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
-               ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len);
-               if (ret < 0)
-                       return ret;
-               goto skip_data;
-
        case ASN1_OP_MATCH:
        case ASN1_OP_MATCH_OR_SKIP:
+       case ASN1_OP_MATCH_ACT:
+       case ASN1_OP_MATCH_ACT_OR_SKIP:
        case ASN1_OP_MATCH_ANY:
        case ASN1_OP_MATCH_ANY_OR_SKIP:
+       case ASN1_OP_MATCH_ANY_ACT:
+       case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
        case ASN1_OP_COND_MATCH_OR_SKIP:
+       case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
        case ASN1_OP_COND_MATCH_ANY:
        case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
-       skip_data:
+       case ASN1_OP_COND_MATCH_ANY_ACT:
+       case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
+
                if (!(flags & FLAG_CONS)) {
                        if (flags & FLAG_INDEFINITE_LENGTH) {
+                               size_t tmp = dp;
+
                                ret = asn1_find_indefinite_length(
-                                       data, datalen, &dp, &len, &errmsg);
+                                       data, datalen, &tmp, &len, &errmsg);
                                if (ret < 0)
                                        goto error;
-                       } else {
-                               dp += len;
                        }
                        pr_debug("- LEAF: %zu\n", len);
                }
+
+               if (op & ASN1_OP_MATCH__ACT) {
+                       unsigned char act;
+
+                       if (op & ASN1_OP_MATCH__ANY)
+                               act = machine[pc + 1];
+                       else
+                               act = machine[pc + 2];
+                       ret = actions[act](context, hdr, tag, data + dp, len);
+                       if (ret < 0)
+                               return ret;
+               }
+
+               if (!(flags & FLAG_CONS))
+                       dp += len;
                pc += asn1_op_lengths[op];
                goto next_op;
 
@@ -434,6 +439,8 @@ next_op:
                        else
                                act = machine[pc + 1];
                        ret = actions[act](context, hdr, 0, data + tdp, len);
+                       if (ret < 0)
+                               return ret;
                }
                pc += asn1_op_lengths[op];
                goto next_op;
index 6d7ebf6c2b862f32b9dd391c8f456769472ec62d..2250da7e503ebaebbdd86a967eb2b6c36560d3f6 100644 (file)
@@ -16,7 +16,7 @@
 
 #include <linux/export.h>
 
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 word_type notrace __cmpdi2(long long a, long long b)
 {
index 8e845f4bb65f48eaec6ae720fb2515d1e8de52b8..99cfa5721f2d2e0f042a214ccd40a7d63261d01c 100644 (file)
@@ -17,7 +17,7 @@
  */
 
 #include <linux/module.h>
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 long long notrace __lshrdi3(long long u, word_type b)
 {
index 88938543e10a626f183fcea3e9127af286967daf..54c8b3123376bc0a17c3dec743fd9c42a8231fab 100644 (file)
@@ -15,7 +15,7 @@
  */
 
 #include <linux/export.h>
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 #define W_TYPE_SIZE 32
 
index 8bf78b4b78f0a286e40941344ecd4b504738cc01..dfa55c873c1318643fdbcbe916b9c18a54edc4c9 100644 (file)
 #include <linux/types.h>
 #include <net/netlink.h>
 
-/* for these data types attribute length must be exactly given size */
+/* For these data types, attribute length should be exactly the given
+ * size. However, to maintain compatibility with broken commands, if the
+ * attribute length does not match the expected size a warning is emitted
+ * to the user that the command is sending invalid data and needs to be fixed.
+ */
 static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
        [NLA_U8]        = sizeof(u8),
        [NLA_U16]       = sizeof(u16),
@@ -28,8 +32,16 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
 };
 
 static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
+       [NLA_U8]        = sizeof(u8),
+       [NLA_U16]       = sizeof(u16),
+       [NLA_U32]       = sizeof(u32),
+       [NLA_U64]       = sizeof(u64),
        [NLA_MSECS]     = sizeof(u64),
        [NLA_NESTED]    = NLA_HDRLEN,
+       [NLA_S8]        = sizeof(s8),
+       [NLA_S16]       = sizeof(s16),
+       [NLA_S32]       = sizeof(s32),
+       [NLA_S64]       = sizeof(s64),
 };
 
 static int validate_nla_bitfield32(const struct nlattr *nla,
@@ -69,11 +81,9 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
 
        BUG_ON(pt->type > NLA_TYPE_MAX);
 
-       /* for data types NLA_U* and NLA_S* require exact length */
-       if (nla_attr_len[pt->type]) {
-               if (attrlen != nla_attr_len[pt->type])
-                       return -ERANGE;
-               return 0;
+       if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
+               pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
+                                   current->comm, type);
        }
 
        switch (pt->type) {
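
With this change an NLA_U*/NLA_S* attribute whose length is not exactly the type's size no longer fails validation with -ERANGE; the kernel emits a ratelimited warning naming the offending process and carries on, while the new entries in nla_attr_minlen keep enforcing the minimum length. A hedged sketch of a policy relying on this, with illustrative attribute names:

    enum {
            MY_ATTR_UNSPEC,
            MY_ATTR_PORT,           /* u16 */
            MY_ATTR_COOKIE,         /* u64 */
            __MY_ATTR_MAX,
    };
    #define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

    static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
            [MY_ATTR_PORT]   = { .type = NLA_U16 },
            [MY_ATTR_COOKIE] = { .type = NLA_U64 },
    };

An over-long MY_ATTR_PORT from a broken tool now only logs the warning, while an attribute shorter than sizeof(u16) is still rejected by the minimum-length check fed from nla_attr_minlen.
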
index 41b9e50711a72af31d7548d3aea5be1177a45a9d..0bcac6ccb1b2041369926eac64e4267076969e06 100644 (file)
@@ -116,14 +116,14 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
        int count;
 
        if (v >= end)
-               return -EBADMSG;
+               goto bad;
 
        n = *v++;
        ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40);
+       if (count >= bufsize)
+               return -ENOBUFS;
        buffer += count;
        bufsize -= count;
-       if (bufsize == 0)
-               return -ENOBUFS;
 
        while (v < end) {
                num = 0;
@@ -134,20 +134,24 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
                        num = n & 0x7f;
                        do {
                                if (v >= end)
-                                       return -EBADMSG;
+                                       goto bad;
                                n = *v++;
                                num <<= 7;
                                num |= n & 0x7f;
                        } while (n & 0x80);
                }
                ret += count = snprintf(buffer, bufsize, ".%lu", num);
-               buffer += count;
-               if (bufsize <= count)
+               if (count >= bufsize)
                        return -ENOBUFS;
+               buffer += count;
                bufsize -= count;
        }
 
        return ret;
+
+bad:
+       snprintf(buffer, bufsize, "(bad)");
+       return -EBADMSG;
 }
 EXPORT_SYMBOL_GPL(sprint_oid);
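
The reordered checks compare the snprintf() return value against the remaining space before the buffer pointer is advanced, and a malformed OID now leaves the string "(bad)" in the buffer alongside the -EBADMSG return. A hedged usage sketch, where oid_data and oid_len stand in for a real DER-encoded OID:

        char buf[128];
        int ret = sprint_oid(oid_data, oid_len, buf, sizeof(buf));

        if (ret == -EBADMSG)
                pr_warn("malformed OID: %s\n", buf);   /* buf now holds "(bad)" */
        else if (ret == -ENOBUFS)
                pr_warn("OID does not fit in %zu bytes\n", sizeof(buf));
        else
                pr_info("OID: %s\n", buf);
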
 
index 65cc018fef40d714272fb2e7948084e4082fe499..4aaa76404d561b86609bbd1c6eb3ca4620ca81bf 100644 (file)
@@ -213,11 +213,11 @@ static int __init prandom_init(void)
 }
 core_initcall(prandom_init);
 
-static void __prandom_timer(unsigned long dontcare);
+static void __prandom_timer(struct timer_list *unused);
 
 static DEFINE_TIMER(seed_timer, __prandom_timer);
 
-static void __prandom_timer(unsigned long dontcare)
+static void __prandom_timer(struct timer_list *unused)
 {
        u32 entropy;
        unsigned long expires;
index ba4a9d165f1bed3c39651387def0008fc793fbd6..d3ff682fd4b8dac865276f5fefc7a1108c1bdb37 100644 (file)
@@ -603,6 +603,16 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
 }
 EXPORT_SYMBOL(rb_replace_node);
 
+void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+                           struct rb_root_cached *root)
+{
+       rb_replace_node(victim, new, &root->rb_root);
+
+       if (root->rb_leftmost == victim)
+               root->rb_leftmost = new;
+}
+EXPORT_SYMBOL(rb_replace_node_cached);
+
 void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
                         struct rb_root *root)
 {
index aa8812ae6776ee31712fe88c58da4048ff9c31e4..9e97480892709957e127e9941710ce45f11ff724 100644 (file)
@@ -435,6 +435,41 @@ loop:
        return 0;
 }
 
+static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self)
+{
+       struct bpf_insn *insn;
+
+       insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       /* Due to func address being non-const, we need to
+        * assemble this here.
+        */
+       insn[0] = BPF_MOV64_REG(R6, R1);
+       insn[1] = BPF_LD_ABS(BPF_B, 0);
+       insn[2] = BPF_LD_ABS(BPF_H, 0);
+       insn[3] = BPF_LD_ABS(BPF_W, 0);
+       insn[4] = BPF_MOV64_REG(R7, R6);
+       insn[5] = BPF_MOV64_IMM(R6, 0);
+       insn[6] = BPF_MOV64_REG(R1, R7);
+       insn[7] = BPF_MOV64_IMM(R2, 1);
+       insn[8] = BPF_MOV64_IMM(R3, 2);
+       insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                              bpf_skb_vlan_push_proto.func - __bpf_call_base);
+       insn[10] = BPF_MOV64_REG(R6, R7);
+       insn[11] = BPF_LD_ABS(BPF_B, 0);
+       insn[12] = BPF_LD_ABS(BPF_H, 0);
+       insn[13] = BPF_LD_ABS(BPF_W, 0);
+       insn[14] = BPF_MOV64_IMM(R0, 42);
+       insn[15] = BPF_EXIT_INSN();
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = 16;
+
+       return 0;
+}
+
 static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
 {
        unsigned int len = BPF_MAXINSNS;
@@ -6066,6 +6101,14 @@ static struct bpf_test tests[] = {
                {},
                { {0x1, 0x42 } },
        },
+       {
+               "LD_ABS with helper changing skb data",
+               { },
+               INTERNAL,
+               { 0x34 },
+               { { ETH_HLEN, 42 } },
+               .fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
+       },
 };
 
 static struct net_device dev;
index 563f10e6876aecf6e3932e0a2e955305f271b249..71ebfa43ad05f2bdbbd011989dc92b5efac2cdd6 100644 (file)
 #define PAD_SIZE 16
 #define FILL_CHAR '$'
 
-#define PTR1 ((void*)0x01234567)
-#define PTR2 ((void*)(long)(int)0xfedcba98)
-
-#if BITS_PER_LONG == 64
-#define PTR1_ZEROES "000000000"
-#define PTR1_SPACES "         "
-#define PTR1_STR "1234567"
-#define PTR2_STR "fffffffffedcba98"
-#define PTR_WIDTH 16
-#else
-#define PTR1_ZEROES "0"
-#define PTR1_SPACES " "
-#define PTR1_STR "1234567"
-#define PTR2_STR "fedcba98"
-#define PTR_WIDTH 8
-#endif
-#define PTR_WIDTH_STR stringify(PTR_WIDTH)
-
 static unsigned total_tests __initdata;
 static unsigned failed_tests __initdata;
 static char *test_buffer __initdata;
@@ -217,30 +199,79 @@ test_string(void)
        test("a  |   |   ", "%-3.s|%-3.0s|%-3.*s", "a", "b", 0, "c");
 }
 
+#define PLAIN_BUF_SIZE 64      /* leave some space so we don't oops */
+
+#if BITS_PER_LONG == 64
+
+#define PTR_WIDTH 16
+#define PTR ((void *)0xffff0123456789ab)
+#define PTR_STR "ffff0123456789ab"
+#define ZEROS "00000000"       /* hex 32 zero bits */
+
+static int __init
+plain_format(void)
+{
+       char buf[PLAIN_BUF_SIZE];
+       int nchars;
+
+       nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
+
+       if (nchars != PTR_WIDTH || strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
+               return -1;
+
+       return 0;
+}
+
+#else
+
+#define PTR_WIDTH 8
+#define PTR ((void *)0x456789ab)
+#define PTR_STR "456789ab"
+
+static int __init
+plain_format(void)
+{
+       /* Format is implicitly tested for 32 bit machines by plain_hash() */
+       return 0;
+}
+
+#endif /* BITS_PER_LONG == 64 */
+
+static int __init
+plain_hash(void)
+{
+       char buf[PLAIN_BUF_SIZE];
+       int nchars;
+
+       nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
+
+       if (nchars != PTR_WIDTH || strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * We can't use test() to test %p because we don't know what output to expect
+ * after an address is hashed.
+ */
 static void __init
 plain(void)
 {
-       test(PTR1_ZEROES PTR1_STR " " PTR2_STR, "%p %p", PTR1, PTR2);
-       /*
-        * The field width is overloaded for some %p extensions to
-        * pass another piece of information. For plain pointers, the
-        * behaviour is slightly odd: One cannot pass either the 0
-        * flag nor a precision to %p without gcc complaining, and if
-        * one explicitly gives a field width, the number is no longer
-        * zero-padded.
-        */
-       test("|" PTR1_STR PTR1_SPACES "  |  " PTR1_SPACES PTR1_STR "|",
-            "|%-*p|%*p|", PTR_WIDTH+2, PTR1, PTR_WIDTH+2, PTR1);
-       test("|" PTR2_STR "  |  " PTR2_STR "|",
-            "|%-*p|%*p|", PTR_WIDTH+2, PTR2, PTR_WIDTH+2, PTR2);
+       int err;
 
-       /*
-        * Unrecognized %p extensions are treated as plain %p, but the
-        * alphanumeric suffix is ignored (that is, does not occur in
-        * the output.)
-        */
-       test("|"PTR1_ZEROES PTR1_STR"|", "|%p0y|", PTR1);
-       test("|"PTR2_STR"|", "|%p0y|", PTR2);
+       err = plain_hash();
+       if (err) {
+               pr_warn("plain 'p' does not appear to be hashed\n");
+               failed_tests++;
+               return;
+       }
+
+       err = plain_format();
+       if (err) {
+               pr_warn("hashing plain 'p' has unexpected format\n");
+               failed_tests++;
+       }
 }
 
 static void __init
@@ -251,6 +282,7 @@ symbol_ptr(void)
 static void __init
 kernel_ptr(void)
 {
+       /* We can't test this without access to kptr_restrict. */
 }
 
 static void __init
index 49a53505c8e3527959f0586de21ea6b774fc7fad..25ca2d4c1e19118b2c34bdee66ab1a64ed54fe3d 100644 (file)
@@ -15,7 +15,7 @@
  */
 
 #include <linux/module.h>
-#include <lib/libgcc.h>
+#include <linux/libgcc.h>
 
 word_type __ucmpdi2(unsigned long long a, unsigned long long b)
 {
index 1746bae94d416f6ce3311c569e5d99080173f998..01c3957b2de621ae21fed6162058c37f801ff526 100644 (file)
@@ -33,6 +33,8 @@
 #include <linux/uuid.h>
 #include <linux/of.h>
 #include <net/addrconf.h>
+#include <linux/siphash.h>
+#include <linux/compiler.h>
 #ifdef CONFIG_BLOCK
 #include <linux/blkdev.h>
 #endif
@@ -1343,6 +1345,59 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
        return string(buf, end, uuid, spec);
 }
 
+int kptr_restrict __read_mostly;
+
+static noinline_for_stack
+char *restricted_pointer(char *buf, char *end, const void *ptr,
+                        struct printf_spec spec)
+{
+       spec.base = 16;
+       spec.flags |= SMALL;
+       if (spec.field_width == -1) {
+               spec.field_width = 2 * sizeof(ptr);
+               spec.flags |= ZEROPAD;
+       }
+
+       switch (kptr_restrict) {
+       case 0:
+               /* Always print %pK values */
+               break;
+       case 1: {
+               const struct cred *cred;
+
+               /*
+                * kptr_restrict==1 cannot be used in IRQ context
+                * because its test for CAP_SYSLOG would be meaningless.
+                */
+               if (in_irq() || in_serving_softirq() || in_nmi())
+                       return string(buf, end, "pK-error", spec);
+
+               /*
+                * Only print the real pointer value if the current
+                * process has CAP_SYSLOG and is running with the
+                * same credentials it started with. This is because
+                * access to files is checked at open() time, but %pK
+                * checks permission at read() time. We don't want to
+                * leak pointer values if a binary opens a file using
+                * %pK and then elevates privileges before reading it.
+                */
+               cred = current_cred();
+               if (!has_capability_noaudit(current, CAP_SYSLOG) ||
+                   !uid_eq(cred->euid, cred->uid) ||
+                   !gid_eq(cred->egid, cred->gid))
+                       ptr = NULL;
+               break;
+       }
+       case 2:
+       default:
+               /* Always print 0's for %pK */
+               ptr = NULL;
+               break;
+       }
+
+       return number(buf, end, (unsigned long)ptr, spec);
+}
+
 static noinline_for_stack
 char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
 {
@@ -1591,7 +1646,86 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
        return widen_string(buf, buf - buf_start, end, spec);
 }
 
-int kptr_restrict __read_mostly;
+static noinline_for_stack
+char *pointer_string(char *buf, char *end, const void *ptr,
+                    struct printf_spec spec)
+{
+       spec.base = 16;
+       spec.flags |= SMALL;
+       if (spec.field_width == -1) {
+               spec.field_width = 2 * sizeof(ptr);
+               spec.flags |= ZEROPAD;
+       }
+
+       return number(buf, end, (unsigned long int)ptr, spec);
+}
+
+static bool have_filled_random_ptr_key __read_mostly;
+static siphash_key_t ptr_key __read_mostly;
+
+static void fill_random_ptr_key(struct random_ready_callback *unused)
+{
+       get_random_bytes(&ptr_key, sizeof(ptr_key));
+       /*
+        * have_filled_random_ptr_key==true is dependent on get_random_bytes().
+        * ptr_to_id() needs to see have_filled_random_ptr_key==true
+        * after get_random_bytes() returns.
+        */
+       smp_mb();
+       WRITE_ONCE(have_filled_random_ptr_key, true);
+}
+
+static struct random_ready_callback random_ready = {
+       .func = fill_random_ptr_key
+};
+
+static int __init initialize_ptr_random(void)
+{
+       int ret = add_random_ready_callback(&random_ready);
+
+       if (!ret) {
+               return 0;
+       } else if (ret == -EALREADY) {
+               fill_random_ptr_key(&random_ready);
+               return 0;
+       }
+
+       return ret;
+}
+early_initcall(initialize_ptr_random);
+
+/* Maps a pointer to a 32 bit unique identifier. */
+static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
+{
+       unsigned long hashval;
+       const int default_width = 2 * sizeof(ptr);
+
+       if (unlikely(!have_filled_random_ptr_key)) {
+               spec.field_width = default_width;
+               /* string length must be less than default_width */
+               return string(buf, end, "(ptrval)", spec);
+       }
+
+#ifdef CONFIG_64BIT
+       hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+       /*
+        * Mask off the first 32 bits, this makes explicit that we have
+        * modified the address (and 32 bits is plenty for a unique ID).
+        */
+       hashval = hashval & 0xffffffff;
+#else
+       hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
+#endif
+
+       spec.flags |= SMALL;
+       if (spec.field_width == -1) {
+               spec.field_width = default_width;
+               spec.flags |= ZEROPAD;
+       }
+       spec.base = 16;
+
+       return number(buf, end, hashval, spec);
+}
 
 /*
  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
@@ -1698,11 +1832,16 @@ int kptr_restrict __read_mostly;
  *                        c major compatible string
  *                        C full compatible string
  *
+ * - 'x' For printing the address. Equivalent to "%lx".
+ *
  * ** Please update also Documentation/printk-formats.txt when making changes **
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
  * pointer to the real address.
+ *
+ * Note: The default behaviour (unadorned %p) is to hash the address,
+ * rendering it useful as a unique identifier.
  */
 static noinline_for_stack
 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
@@ -1792,47 +1931,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                        return buf;
                }
        case 'K':
-               switch (kptr_restrict) {
-               case 0:
-                       /* Always print %pK values */
-                       break;
-               case 1: {
-                       const struct cred *cred;
-
-                       /*
-                        * kptr_restrict==1 cannot be used in IRQ context
-                        * because its test for CAP_SYSLOG would be meaningless.
-                        */
-                       if (in_irq() || in_serving_softirq() || in_nmi()) {
-                               if (spec.field_width == -1)
-                                       spec.field_width = default_width;
-                               return string(buf, end, "pK-error", spec);
-                       }
-
-                       /*
-                        * Only print the real pointer value if the current
-                        * process has CAP_SYSLOG and is running with the
-                        * same credentials it started with. This is because
-                        * access to files is checked at open() time, but %pK
-                        * checks permission at read() time. We don't want to
-                        * leak pointer values if a binary opens a file using
-                        * %pK and then elevates privileges before reading it.
-                        */
-                       cred = current_cred();
-                       if (!has_capability_noaudit(current, CAP_SYSLOG) ||
-                           !uid_eq(cred->euid, cred->uid) ||
-                           !gid_eq(cred->egid, cred->gid))
-                               ptr = NULL;
-                       break;
-               }
-               case 2:
-               default:
-                       /* Always print 0's for %pK */
-                       ptr = NULL;
+               if (!kptr_restrict)
                        break;
-               }
-               break;
-
+               return restricted_pointer(buf, end, ptr, spec);
        case 'N':
                return netdev_bits(buf, end, ptr, fmt);
        case 'a':
@@ -1857,15 +1958,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                case 'F':
                        return device_node_string(buf, end, ptr, spec, fmt + 1);
                }
+       case 'x':
+               return pointer_string(buf, end, ptr, spec);
        }
-       spec.flags |= SMALL;
-       if (spec.field_width == -1) {
-               spec.field_width = default_width;
-               spec.flags |= ZEROPAD;
-       }
-       spec.base = 16;
 
-       return number(buf, end, (unsigned long) ptr, spec);
+       /* default is to _not_ leak addresses, hash before printing */
+       return ptr_to_id(buf, end, ptr, spec);
 }
 
 /*
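
The net effect of the vsprintf changes: an unadorned %p now prints a siphash-based identifier (or "(ptrval)" until the random key is available), the new %px opts back in to the raw address, and %pK keeps its kptr_restrict behaviour via the restricted_pointer() helper split out above. A hedged illustration of the resulting output; the hash value shown is made up:

        void *p = kmalloc(32, GFP_KERNEL);

        pr_info("hashed:     %p\n", p);   /* e.g. "hashed:     00000000d8a9e2df" */
        pr_info("raw:        %px\n", p);  /* real address; only where leaking it is acceptable */
        pr_info("restricted: %pK\n", p);  /* honours the kptr_restrict sysctl */
        kfree(p);
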
index 74b52dfd5852da3e211ae949ce7d31133361c14b..b5f940ce0143ba061a183db0df3ef0dc17f57c72 100644 (file)
@@ -113,11 +113,23 @@ static const struct file_operations bdi_debug_stats_fops = {
        .release        = single_release,
 };
 
-static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
+static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 {
+       if (!bdi_debug_root)
+               return -ENOMEM;
+
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
+       if (!bdi->debug_dir)
+               return -ENOMEM;
+
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
+       if (!bdi->debug_stats) {
+               debugfs_remove(bdi->debug_dir);
+               return -ENOMEM;
+       }
+
+       return 0;
 }
 
 static void bdi_debug_unregister(struct backing_dev_info *bdi)
@@ -129,9 +141,10 @@ static void bdi_debug_unregister(struct backing_dev_info *bdi)
 static inline void bdi_debug_init(void)
 {
 }
-static inline void bdi_debug_register(struct backing_dev_info *bdi,
+static inline int bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
 {
+       return 0;
 }
 static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 {
index d04ac1ec05598d64c2acfd7a9b95b4b4a7a81c36..1826f191e72c836c59970006528e39bd791209bc 100644 (file)
@@ -111,7 +111,7 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
        enum fixed_addresses idx;
        int i, slot;
 
-       WARN_ON(system_state != SYSTEM_BOOTING);
+       WARN_ON(system_state >= SYSTEM_RUNNING);
 
        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
index 2f98df0d460eef41f80586544ad98abb66fae60c..c64dca6e27c28c915ad4ce662de764d6da5ef6f1 100644 (file)
@@ -53,6 +53,20 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
                ret = -EFAULT;
                goto out;
        }
+
+       /*
+        * While get_vaddr_frames() could be used for transient (kernel
+        * controlled lifetime) pinning of memory pages all current
+        * users establish long term (userspace controlled lifetime)
+        * page pinning. Treat get_vaddr_frames() like
+        * get_user_pages_longterm() and disallow it for filesystem-dax
+        * mappings.
+        */
+       if (vma_is_fsdax(vma)) {
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
                vec->got_ref = true;
                vec->is_pfns = false;
index dfcde13f289a76ddcb54919f900467aeab15609d..e0d82b6706d72d82637bca5eaef1e35e15a1abdf 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1095,6 +1095,70 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
 }
 EXPORT_SYMBOL(get_user_pages);
 
+#ifdef CONFIG_FS_DAX
+/*
+ * This is the same as get_user_pages() in that it assumes we are
+ * operating on the current task's mm, but it goes further to validate
+ * that the vmas associated with the address range are suitable for
+ * longterm elevated page reference counts. For example, filesystem-dax
+ * mappings are subject to the lifetime enforced by the filesystem and
+ * we need guarantees that longterm users like RDMA and V4L2 only
+ * establish mappings that have a kernel enforced revocation mechanism.
+ *
+ * "longterm" == userspace controlled elevated page count lifetime.
+ * Contrast this to iov_iter_get_pages() usages which are transient.
+ */
+long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
+               unsigned int gup_flags, struct page **pages,
+               struct vm_area_struct **vmas_arg)
+{
+       struct vm_area_struct **vmas = vmas_arg;
+       struct vm_area_struct *vma_prev = NULL;
+       long rc, i;
+
+       if (!pages)
+               return -EINVAL;
+
+       if (!vmas) {
+               vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
+                              GFP_KERNEL);
+               if (!vmas)
+                       return -ENOMEM;
+       }
+
+       rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+
+       for (i = 0; i < rc; i++) {
+               struct vm_area_struct *vma = vmas[i];
+
+               if (vma == vma_prev)
+                       continue;
+
+               vma_prev = vma;
+
+               if (vma_is_fsdax(vma))
+                       break;
+       }
+
+       /*
+        * Either get_user_pages() failed, or the vma validation
+        * succeeded, in either case we don't need to put_page() before
+        * returning.
+        */
+       if (i >= rc)
+               goto out;
+
+       for (i = 0; i < rc; i++)
+               put_page(pages[i]);
+       rc = -EOPNOTSUPP;
+out:
+       if (vmas != vmas_arg)
+               kfree(vmas);
+       return rc;
+}
+EXPORT_SYMBOL(get_user_pages_longterm);
+#endif /* CONFIG_FS_DAX */
+
 /**
  * populate_vma_page_range() -  populate a range of pages in the vma.
  * @vma:   target vma
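
The comment above states the contract: callers that hold page references for a userspace-controlled lifetime (RDMA, V4L2 and similar) must use this variant so ranges backed by filesystem-dax can be refused with -EOPNOTSUPP. A hedged caller sketch with illustrative names, assuming mmap_sem is taken as for get_user_pages():

    #include <linux/mm.h>
    #include <linux/sched.h>

    static long pin_user_range(unsigned long start, unsigned long nr_pages,
                               struct page **pages)
    {
            long pinned;

            down_read(&current->mm->mmap_sem);
            pinned = get_user_pages_longterm(start, nr_pages, FOLL_WRITE,
                                             pages, NULL);
            up_read(&current->mm->mmap_sem);

            /* -EOPNOTSUPP here means the range includes a filesystem-dax mapping */
            return pinned;
    }
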
index 86fe697e8bfb3c4e8393b23a09f177965450ad93..0e7ded98d114d184877d2fc9bd0f02c3187f2ed5 100644 (file)
@@ -842,20 +842,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-               pmd_t *pmd)
+               pmd_t *pmd, int flags)
 {
        pmd_t _pmd;
 
-       /*
-        * We should set the dirty bit only for FOLL_WRITE but for now
-        * the dirty bit in the pmd is meaningless.  And if the dirty
-        * bit will become meaningful and we'll only set it with
-        * FOLL_WRITE, an atomic set_bit will be required on the pmd to
-        * set the young bit, instead of the current set_pmd_at.
-        */
-       _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+       _pmd = pmd_mkyoung(*pmd);
+       if (flags & FOLL_WRITE)
+               _pmd = pmd_mkdirty(_pmd);
        if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
-                               pmd, _pmd,  1))
+                               pmd, _pmd, flags & FOLL_WRITE))
                update_mmu_cache_pmd(vma, addr, pmd);
 }
 
@@ -884,7 +879,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                return NULL;
 
        if (flags & FOLL_TOUCH)
-               touch_pmd(vma, addr, pmd);
+               touch_pmd(vma, addr, pmd, flags);
 
        /*
         * device mapped pages can only be returned if the
@@ -995,20 +990,15 @@ out:
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
-               pud_t *pud)
+               pud_t *pud, int flags)
 {
        pud_t _pud;
 
-       /*
-        * We should set the dirty bit only for FOLL_WRITE but for now
-        * the dirty bit in the pud is meaningless.  And if the dirty
-        * bit will become meaningful and we'll only set it with
-        * FOLL_WRITE, an atomic set_bit will be required on the pud to
-        * set the young bit, instead of the current set_pud_at.
-        */
-       _pud = pud_mkyoung(pud_mkdirty(*pud));
+       _pud = pud_mkyoung(*pud);
+       if (flags & FOLL_WRITE)
+               _pud = pud_mkdirty(_pud);
        if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
-                               pud, _pud,  1))
+                               pud, _pud, flags & FOLL_WRITE))
                update_mmu_cache_pud(vma, addr, pud);
 }
 
@@ -1031,7 +1021,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
                return NULL;
 
        if (flags & FOLL_TOUCH)
-               touch_pud(vma, addr, pud);
+               touch_pud(vma, addr, pud, flags);
 
        /*
         * device mapped pages can only be returned if the
@@ -1424,7 +1414,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
        page = pmd_page(*pmd);
        VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
        if (flags & FOLL_TOUCH)
-               touch_pmd(vma, addr, pmd);
+               touch_pmd(vma, addr, pmd, flags);
        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
                /*
                 * We don't mlock() pte-mapped THPs. This way we can avoid
index 681b300185c0c0383bb240d6a898849bf777f46b..9a334f5fb730873190a57648bc0f040f91ac0ed6 100644 (file)
@@ -3125,6 +3125,13 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
        }
 }
 
+static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
+{
+       if (addr & ~(huge_page_mask(hstate_vma(vma))))
+               return -EINVAL;
+       return 0;
+}
+
 /*
  * We cannot handle pagefaults against hugetlb pages at all.  They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -3141,6 +3148,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
        .fault = hugetlb_vm_op_fault,
        .open = hugetlb_vm_op_open,
        .close = hugetlb_vm_op_close,
+       .split = hugetlb_vm_op_split,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
@@ -4627,7 +4635,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
        pte_t *pte = NULL;
 
        pgd = pgd_offset(mm, addr);
-       p4d = p4d_offset(pgd, addr);
+       p4d = p4d_alloc(mm, pgd, addr);
+       if (!p4d)
+               return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (pud) {
                if (sz == PUD_SIZE) {
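
hugetlb_vm_op_split(), together with the mm/mmap.c hunk further below, turns the old hugetlbfs special case in __split_vma() into a generic vm_operations_struct ->split() hook, so any mapping with alignment constraints can refuse a split at an unsuitable address (the last hunk also fixes huge_pte_alloc() to allocate the p4d level instead of assuming it exists). A hedged sketch of a driver using the new hook, with illustrative names:

    #include <linux/mm.h>
    #include <linux/sizes.h>

    static int mydrv_vma_split(struct vm_area_struct *vma, unsigned long addr)
    {
            /* only allow splits on 2 MiB boundaries, mirroring the hugetlb check */
            if (addr & (SZ_2M - 1))
                    return -EINVAL;
            return 0;
    }

    static const struct vm_operations_struct mydrv_vm_ops = {
            .split = mydrv_vma_split,
            /* .fault and friends omitted in this sketch */
    };
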
index 6bcfb01ba0386e5bf2ec49512321e59a1fac0661..410c8235e671501ab6f6876a13b419b448cc624c 100644 (file)
@@ -134,7 +134,7 @@ static void print_error_description(struct kasan_access_info *info)
 
        pr_err("BUG: KASAN: %s in %pS\n",
                bug_type, (void *)info->ip);
-       pr_err("%s of size %zu at addr %p by task %s/%d\n",
+       pr_err("%s of size %zu at addr %px by task %s/%d\n",
                info->is_write ? "Write" : "Read", info->access_size,
                info->access_addr, current->comm, task_pid_nr(current));
 }
@@ -206,7 +206,7 @@ static void describe_object_addr(struct kmem_cache *cache, void *object,
        const char *rel_type;
        int rel_bytes;
 
-       pr_err("The buggy address belongs to the object at %p\n"
+       pr_err("The buggy address belongs to the object at %px\n"
               " which belongs to the cache %s of size %d\n",
                object, cache->name, cache->object_size);
 
@@ -225,7 +225,7 @@ static void describe_object_addr(struct kmem_cache *cache, void *object,
        }
 
        pr_err("The buggy address is located %d bytes %s of\n"
-              " %d-byte region [%p, %p)\n",
+              " %d-byte region [%px, %px)\n",
                rel_bytes, rel_type, cache->object_size, (void *)object_addr,
                (void *)(object_addr + cache->object_size));
 }
@@ -302,7 +302,7 @@ static void print_shadow_for_address(const void *addr)
                char shadow_buf[SHADOW_BYTES_PER_ROW];
 
                snprintf(buffer, sizeof(buffer),
-                       (i == 0) ? ">%p: " : " %p: ", kaddr);
+                       (i == 0) ? ">%px: " : " %px: ", kaddr);
                /*
                 * We should not pass a shadow pointer to generic
                 * function, because generic functions may try to
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
deleted file mode 100644 (file)
index cec5940..0000000
+++ /dev/null
@@ -1 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
index e4738d5e9b8c5214c106756b311e102eaf2cdad1..d73c14294f3a61c2385741b447aa31d203a2bc72 100644 (file)
@@ -1523,6 +1523,8 @@ static void kmemleak_scan(void)
                        if (page_count(page) == 0)
                                continue;
                        scan_block(page, page + 1, NULL);
+                       if (!(pfn & 63))
+                               cond_resched();
                }
        }
        put_online_mems();
index 375cf32087e4a2da0c42b251a1d5538ffaa1c857..751e97aa22106f9be73919033271ad9f98498fca 100644 (file)
@@ -276,15 +276,14 @@ static long madvise_willneed(struct vm_area_struct *vma,
 {
        struct file *file = vma->vm_file;
 
+       *prev = vma;
 #ifdef CONFIG_SWAP
        if (!file) {
-               *prev = vma;
                force_swapin_readahead(vma, start, end);
                return 0;
        }
 
        if (shmem_mapping(file->f_mapping)) {
-               *prev = vma;
                force_shm_swapin_readahead(vma, start, end,
                                        file->f_mapping);
                return 0;
@@ -299,7 +298,6 @@ static long madvise_willneed(struct vm_area_struct *vma,
                return 0;
        }
 
-       *prev = vma;
        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (end > vma->vm_end)
                end = vma->vm_end;
index 50e6906314f8d9c987181744c9a45b54872edfb6..ac2ffd5e02b914fb9564649c9475babc51119de6 100644 (file)
@@ -6044,7 +6044,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        memcg_check_events(memcg, page);
 
        if (!mem_cgroup_is_root(memcg))
-               css_put(&memcg->css);
+               css_put_many(&memcg->css, nr_entries);
 }
 
 /**
index 85e7a87da79fe4a5487e1f3f6216e61b9827515c..ca5674cbaff2b65c4e51086e5922fbbd274f2cfa 100644 (file)
@@ -3831,7 +3831,8 @@ static inline int create_huge_pmd(struct vm_fault *vmf)
        return VM_FAULT_FALLBACK;
 }
 
-static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
+/* `inline' is required to avoid gcc 4.1.2 build error */
+static inline int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
 {
        if (vma_is_anonymous(vmf->vma))
                return do_huge_pmd_wp_page(vmf, orig_pmd);
index 924839fac0e6421a77839825a99833a342d3153c..9efdc021ad2202fc9ebd7e55fe572813136d2f2c 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2555,9 +2555,11 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        struct vm_area_struct *new;
        int err;
 
-       if (is_vm_hugetlb_page(vma) && (addr &
-                                       ~(huge_page_mask(hstate_vma(vma)))))
-               return -EINVAL;
+       if (vma->vm_ops && vma->vm_ops->split) {
+               err = vma->vm_ops->split(vma, addr);
+               if (err)
+                       return err;
+       }
 
        new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!new)
@@ -3017,20 +3019,20 @@ void exit_mmap(struct mm_struct *mm)
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
        unmap_vmas(&tlb, vma, 0, -1);
 
-       set_bit(MMF_OOM_SKIP, &mm->flags);
-       if (unlikely(tsk_is_oom_victim(current))) {
+       if (unlikely(mm_is_oom_victim(mm))) {
                /*
                 * Wait for oom_reap_task() to stop working on this
                 * mm. Because MMF_OOM_SKIP is already set before
                 * calling down_read(), oom_reap_task() will not run
                 * on this "mm" post up_write().
                 *
-                * tsk_is_oom_victim() cannot be set from under us
-                * either because current->mm is already set to NULL
+                * mm_is_oom_victim() cannot be set from under us
+                * either because victim->mm is already set to NULL
                 * under task_lock before calling mmput and oom_mm is
-                * set not NULL by the OOM killer only if current->mm
+                * set not NULL by the OOM killer only if victim->mm
                 * is found not NULL while holding the task_lock.
                 */
+               set_bit(MMF_OOM_SKIP, &mm->flags);
                down_write(&mm->mmap_sem);
                up_write(&mm->mmap_sem);
        }
index c86fbd1b590ecda69741d4c1d9a9c0875d98ee69..29f855551efef89d6c251075828bc0cd79da1842 100644 (file)
@@ -550,7 +550,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
         */
        set_bit(MMF_UNSTABLE, &mm->flags);
 
-       tlb_gather_mmu(&tlb, mm, 0, -1);
        for (vma = mm->mmap ; vma; vma = vma->vm_next) {
                if (!can_madv_dontneed_vma(vma))
                        continue;
@@ -565,11 +564,13 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
                 * we do not want to block exit_mmap by keeping mm ref
                 * count elevated without a good reason.
                 */
-               if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
+               if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
+                       tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
                        unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
                                         NULL);
+                       tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
+               }
        }
-       tlb_finish_mmu(&tlb, 0, -1);
        pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
                        task_pid_nr(tsk), tsk->comm,
                        K(get_mm_counter(mm, MM_ANONPAGES)),
@@ -682,8 +683,10 @@ static void mark_oom_victim(struct task_struct *tsk)
                return;
 
        /* oom_mm is bound to the signal struct life time. */
-       if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
+       if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
                mmgrab(tsk->signal->oom_mm);
+               set_bit(MMF_OOM_VICTIM, &mm->flags);
+       }
 
        /*
         * Make sure that the task is woken up from uninterruptible sleep
index 8a1551154285d764207a641aed035f7e13c11b14..586f31261c8328e30106254e09e52fa6e93f410e 100644 (file)
@@ -433,11 +433,8 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
        else
                bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 
-       if (unlikely(bg_thresh >= thresh)) {
-               pr_warn("vm direct limit must be set greater than background limit.\n");
+       if (bg_thresh >= thresh)
                bg_thresh = thresh / 2;
-       }
-
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
@@ -1993,11 +1990,12 @@ int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_BLOCK
-void laptop_mode_timer_fn(unsigned long data)
+void laptop_mode_timer_fn(struct timer_list *t)
 {
-       struct request_queue *q = (struct request_queue *)data;
+       struct backing_dev_info *backing_dev_info =
+               from_timer(backing_dev_info, t, laptop_mode_wb_timer);
 
-       wakeup_flusher_threads_bdi(q->backing_dev_info, WB_REASON_LAPTOP_TIMER);
+       wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER);
 }
 
 /*
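
laptop_mode_timer_fn() is converted to the timer_list-based callback signature and recovers its backing_dev_info with from_timer() instead of casting an unsigned long cookie. A hedged sketch of the general conversion pattern, with illustrative names:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_dev {
            struct timer_list poll_timer;
            /* ... */
    };

    static void my_dev_poll(struct timer_list *t)
    {
            struct my_dev *dev = from_timer(dev, t, poll_timer);

            /* use dev, then rearm if needed */
            mod_timer(&dev->poll_timer, jiffies + HZ);
    }

    static void my_dev_start(struct my_dev *dev)
    {
            timer_setup(&dev->poll_timer, my_dev_poll, 0);
            mod_timer(&dev->poll_timer, jiffies + HZ);
    }
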
index d4096f4a5c1f75a276620b89cad5e2ed8878d0b0..7e5e775e97f400d8a050effc41f68a8261bd9a23 100644 (file)
@@ -2507,10 +2507,6 @@ void drain_all_pages(struct zone *zone)
        if (WARN_ON_ONCE(!mm_percpu_wq))
                return;
 
-       /* Workqueues cannot recurse */
-       if (current->flags & PF_WQ_WORKER)
-               return;
-
        /*
         * Do not drain if one is already in progress unless it's specific to
         * a zone. Such callers are primarily CMA and memory hotplug and need
@@ -2688,6 +2684,7 @@ void free_unref_page_list(struct list_head *list)
 {
        struct page *page, *next;
        unsigned long flags, pfn;
+       int batch_count = 0;
 
        /* Prepare pages for freeing */
        list_for_each_entry_safe(page, next, list, lru) {
@@ -2704,6 +2701,16 @@ void free_unref_page_list(struct list_head *list)
                set_page_private(page, 0);
                trace_mm_page_free_batched(page);
                free_unref_page_commit(page, pfn);
+
+               /*
+                * Guard against excessive IRQ disabled times when we get
+                * a large list of pages to free.
+                */
+               if (++batch_count == SWAP_CLUSTER_MAX) {
+                       local_irq_restore(flags);
+                       batch_count = 0;
+                       local_irq_save(flags);
+               }
        }
        local_irq_restore(flags);
 }
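
The hunk above bounds how long free_unref_page_list() keeps interrupts
disabled: every SWAP_CLUSTER_MAX pages it restores and immediately
re-saves the IRQ state so pending interrupts get a chance to run. A
minimal sketch of the same pattern, assuming a hypothetical work_item
list and process_one() helper (neither is part of this diff):

#include <linux/irqflags.h>
#include <linux/list.h>

#define BATCH_LIMIT 32			/* plays the role of SWAP_CLUSTER_MAX */

struct work_item {
	struct list_head node;
};

static void process_one(struct work_item *it)
{
	/* placeholder for the real per-item work */
}

static void drain_list_bounded(struct list_head *items)
{
	struct work_item *it, *next;
	unsigned long flags;
	int batch = 0;

	local_irq_save(flags);
	list_for_each_entry_safe(it, next, items, node) {
		process_one(it);

		/* briefly re-enable IRQs so a very long list cannot
		 * starve interrupt handling on this CPU
		 */
		if (++batch == BATCH_LIMIT) {
			local_irq_restore(flags);
			batch = 0;
			local_irq_save(flags);
		}
	}
	local_irq_restore(flags);
}
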
@@ -7656,11 +7663,18 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        /*
         * In case of -EBUSY, we'd like to know which page causes problem.
-        * So, just fall through. We will check it in test_pages_isolated().
+        * So, just fall through. test_pages_isolated() has a tracepoint
+        * which will report the busy page.
+        *
+        * It is possible that busy pages could become available before
+        * the call to test_pages_isolated(), and the range will actually be
+        * allocated.  So, if we fall through, be sure to clear ret so that
+        * -EBUSY is not accidentally used or returned to the caller.
         */
        ret = __alloc_contig_migrate_range(&cc, start, end);
        if (ret && ret != -EBUSY)
                goto done;
+       ret = 0;
 
        /*
         * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
index 79e3549cab0f40f30916499f2acca7622379fdc5..50e7fdf84055151d8c7e8bb220f7a73e96b7f3e4 100644 (file)
@@ -2719,7 +2719,11 @@ void __init setup_per_cpu_areas(void)
 
        if (pcpu_setup_first_chunk(ai, fc) < 0)
                panic("Failed to initialize percpu areas.");
+#ifdef CONFIG_CRIS
+#warning "the CRIS architecture has physical and virtual addresses confused"
+#else
        pcpu_free_alloc_info(ai);
+#endif
 }
 
 #endif /* CONFIG_SMP */
index 4aa9307feab0a020a488f50f63d9798b13789b8b..7fbe67be86fa816b13d06603c098ccf10cca4fe2 100644 (file)
@@ -3776,7 +3776,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
         * tmpfs instance, limiting inodes to one per page of lowmem;
         * but the internal instance is left unlimited.
         */
-       if (!(sb->s_flags & MS_KERNMOUNT)) {
+       if (!(sb->s_flags & SB_KERNMOUNT)) {
                sbinfo->max_blocks = shmem_default_max_blocks();
                sbinfo->max_inodes = shmem_default_max_inodes();
                if (shmem_parse_options(data, sbinfo, false)) {
@@ -3784,12 +3784,12 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
                        goto failed;
                }
        } else {
-               sb->s_flags |= MS_NOUSER;
+               sb->s_flags |= SB_NOUSER;
        }
        sb->s_export_op = &shmem_export_ops;
-       sb->s_flags |= MS_NOSEC;
+       sb->s_flags |= SB_NOSEC;
 #else
-       sb->s_flags |= MS_NOUSER;
+       sb->s_flags |= SB_NOUSER;
 #endif
 
        spin_lock_init(&sbinfo->stat_lock);
@@ -3809,7 +3809,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_xattr = shmem_xattr_handlers;
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 #endif
        uuid_gen(&sb->s_uuid);
 
index 183e996dde5ff37a8881e9c223a348de947bf890..4e51ef954026bd15e5bef41167459970976faab0 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1584,11 +1584,8 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
                       *dbg_redzone2(cachep, objp));
        }
 
-       if (cachep->flags & SLAB_STORE_USER) {
-               pr_err("Last user: [<%p>](%pSR)\n",
-                      *dbg_userword(cachep, objp),
-                      *dbg_userword(cachep, objp));
-       }
+       if (cachep->flags & SLAB_STORE_USER)
+               pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
        realobj = (char *)objp + obj_offset(cachep);
        size = cachep->object_size;
        for (i = 0; i < size && lines; i += 16, lines--) {
@@ -1621,7 +1618,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
                        /* Mismatch ! */
                        /* Print header */
                        if (lines == 0) {
-                               pr_err("Slab corruption (%s): %s start=%p, len=%d\n",
+                               pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
                                       print_tainted(), cachep->name,
                                       realobj, size);
                                print_objinfo(cachep, objp, 0);
@@ -1650,13 +1647,13 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
                if (objnr) {
                        objp = index_to_obj(cachep, page, objnr - 1);
                        realobj = (char *)objp + obj_offset(cachep);
-                       pr_err("Prev obj: start=%p, len=%d\n", realobj, size);
+                       pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
                        print_objinfo(cachep, objp, 2);
                }
                if (objnr + 1 < cachep->num) {
                        objp = index_to_obj(cachep, page, objnr + 1);
                        realobj = (char *)objp + obj_offset(cachep);
-                       pr_err("Next obj: start=%p, len=%d\n", realobj, size);
+                       pr_err("Next obj: start=%px, len=%d\n", realobj, size);
                        print_objinfo(cachep, objp, 2);
                }
        }
@@ -2608,7 +2605,7 @@ static void slab_put_obj(struct kmem_cache *cachep,
        /* Verify double free bug */
        for (i = page->active; i < cachep->num; i++) {
                if (get_free_obj(page, i) == objnr) {
-                       pr_err("slab: double free detected in cache '%s', objp %p\n",
+                       pr_err("slab: double free detected in cache '%s', objp %px\n",
                               cachep->name, objp);
                        BUG();
                }
@@ -2772,7 +2769,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
        else
                slab_error(cache, "memory outside object was overwritten");
 
-       pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
+       pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
               obj, redzone1, redzone2);
 }
 
@@ -3078,7 +3075,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
                                *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
                        slab_error(cachep, "double free, or memory outside object was overwritten");
-                       pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
+                       pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
                               objp, *dbg_redzone1(cachep, objp),
                               *dbg_redzone2(cachep, objp));
                }
@@ -3091,7 +3088,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                cachep->ctor(objp);
        if (ARCH_SLAB_MINALIGN &&
            ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
-               pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
+               pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
                       objp, (int)ARCH_SLAB_MINALIGN);
        }
        return objp;
@@ -4283,7 +4280,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
                return;
        }
 #endif
-       seq_printf(m, "%p", (void *)address);
+       seq_printf(m, "%px", (void *)address);
 }
 
 static int leaks_show(struct seq_file *m, void *p)
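
The slab.c hunks above switch the corruption and redzone reports from
%p to %px. %p now prints a hashed value, so debug paths that genuinely
need the raw pointer use the new %px specifier instead. A small
illustrative sketch, not taken from this diff:

#include <linux/printk.h>

static void dump_obj_addr(void *obj)
{
	pr_info("hashed:   %p\n", obj);		/* per-boot hashed value, safe for general logs */
	pr_info("unhashed: %px\n", obj);	/* raw kernel address, debug output only */
}
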
index 2dac647ff4201fc1dfcf98d05b398422fea7d6d5..7f50d47470bd450566b448b0a3e6ad6fe6d0a000 100644 (file)
@@ -401,9 +401,9 @@ static void garp_join_timer_arm(struct garp_applicant *app)
        mod_timer(&app->join_timer, jiffies + delay);
 }
 
-static void garp_join_timer(unsigned long data)
+static void garp_join_timer(struct timer_list *t)
 {
-       struct garp_applicant *app = (struct garp_applicant *)data;
+       struct garp_applicant *app = from_timer(app, t, join_timer);
 
        spin_lock(&app->lock);
        garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
@@ -584,7 +584,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
        spin_lock_init(&app->lock);
        skb_queue_head_init(&app->queue);
        rcu_assign_pointer(dev->garp_port->applicants[appl->type], app);
-       setup_timer(&app->join_timer, garp_join_timer, (unsigned long)app);
+       timer_setup(&app->join_timer, garp_join_timer, 0);
        garp_join_timer_arm(app);
        return 0;
 
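
The garp.c hunk above shows the timer-API conversion that repeats
through most of the networking changes in this pull: the callback now
takes a struct timer_list * and recovers its containing object with
from_timer() (a container_of() wrapper) instead of casting an unsigned
long cookie, and setup_timer() becomes timer_setup(). A minimal sketch
of the pattern, using a hypothetical my_session structure:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_session {			/* hypothetical container */
	struct timer_list idle_timer;
	int idle_count;
};

static void my_session_timeout(struct timer_list *t)
{
	/* recover the enclosing object from the timer_list pointer */
	struct my_session *s = from_timer(s, t, idle_timer);

	s->idle_count++;
}

static void my_session_init(struct my_session *s)
{
	timer_setup(&s->idle_timer, my_session_timeout, 0);
	mod_timer(&s->idle_timer, jiffies + HZ);
}

Statically defined timers follow the same shape, which is why the mpoa
hunk further down can pass mpc_cache_check straight to DEFINE_TIMER()
and drop the runtime .function assignment.
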
index be4dd31653474fbe806d17458773cc587aa56606..a808dd5bbb27a7ff84a1457315dad7d21e87e3c9 100644 (file)
@@ -586,9 +586,9 @@ static void mrp_join_timer_arm(struct mrp_applicant *app)
        mod_timer(&app->join_timer, jiffies + delay);
 }
 
-static void mrp_join_timer(unsigned long data)
+static void mrp_join_timer(struct timer_list *t)
 {
-       struct mrp_applicant *app = (struct mrp_applicant *)data;
+       struct mrp_applicant *app = from_timer(app, t, join_timer);
 
        spin_lock(&app->lock);
        mrp_mad_event(app, MRP_EVENT_TX);
@@ -605,9 +605,9 @@ static void mrp_periodic_timer_arm(struct mrp_applicant *app)
                  jiffies + msecs_to_jiffies(mrp_periodic_time));
 }
 
-static void mrp_periodic_timer(unsigned long data)
+static void mrp_periodic_timer(struct timer_list *t)
 {
-       struct mrp_applicant *app = (struct mrp_applicant *)data;
+       struct mrp_applicant *app = from_timer(app, t, periodic_timer);
 
        spin_lock(&app->lock);
        mrp_mad_event(app, MRP_EVENT_PERIODIC);
@@ -865,10 +865,9 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
        spin_lock_init(&app->lock);
        skb_queue_head_init(&app->queue);
        rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
-       setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
+       timer_setup(&app->join_timer, mrp_join_timer, 0);
        mrp_join_timer_arm(app);
-       setup_timer(&app->periodic_timer, mrp_periodic_timer,
-                   (unsigned long)app);
+       timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
        mrp_periodic_timer_arm(app);
        return 0;
 
index 985046ae42312e86505d6fded2fb56501c38536b..80f5c79053a4d2eac267f555fd732b7c1687ba36 100644 (file)
@@ -839,7 +839,6 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
        if (IS_ERR(file)) {
                pr_err("%s (%d): failed to map fd\n",
                       __func__, task_pid_nr(current));
-               sock_release(csocket);
                kfree(p);
                return PTR_ERR(file);
        }
index 8ad3ec2610b6499b92b2f3bc97ac02d2d043dd45..309d7dbb36e8476cff412b6be73f93926cf1ec95 100644 (file)
@@ -310,7 +310,7 @@ static void __aarp_expire_device(struct aarp_entry **n, struct net_device *dev)
 }
 
 /* Handle the timer event */
-static void aarp_expire_timeout(unsigned long unused)
+static void aarp_expire_timeout(struct timer_list *unused)
 {
        int ct;
 
@@ -884,7 +884,7 @@ void __init aarp_proto_init(void)
        aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv);
        if (!aarp_dl)
                printk(KERN_CRIT "Unable to register AARP with SNAP.\n");
-       setup_timer(&aarp_timer, aarp_expire_timeout, 0);
+       timer_setup(&aarp_timer, aarp_expire_timeout, 0);
        aarp_timer.expires  = jiffies + sysctl_aarp_expiry_time;
        add_timer(&aarp_timer);
        register_netdevice_notifier(&aarp_notifier);
index 5d035c1f1156e45540a6bf935341e5799b11ca85..03a9fc0771c084f04ad782c15a502bca13301e2b 100644 (file)
@@ -158,9 +158,9 @@ found:
        return s;
 }
 
-static void atalk_destroy_timer(unsigned long data)
+static void atalk_destroy_timer(struct timer_list *t)
 {
-       struct sock *sk = (struct sock *)data;
+       struct sock *sk = from_timer(sk, t, sk_timer);
 
        if (sk_has_allocations(sk)) {
                sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
@@ -175,8 +175,7 @@ static inline void atalk_destroy_socket(struct sock *sk)
        skb_queue_purge(&sk->sk_receive_queue);
 
        if (sk_has_allocations(sk)) {
-               setup_timer(&sk->sk_timer, atalk_destroy_timer,
-                               (unsigned long)sk);
+               timer_setup(&sk->sk_timer, atalk_destroy_timer, 0);
                sk->sk_timer.expires    = jiffies + SOCK_DESTROY_TIME;
                add_timer(&sk->sk_timer);
        } else
index c976196da3ea1b6a218d19c38475809c8dc3cd96..6676e34332616a1c867a4c8864919fd8bf11b43c 100644 (file)
@@ -1798,7 +1798,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
                else
                        send_to_lecd(priv, l_arp_xmt, mac_to_find, NULL, NULL);
                entry->timer.expires = jiffies + (1 * HZ);
-               entry->timer.function = (TIMER_FUNC_TYPE)lec_arp_expire_arp;
+               entry->timer.function = lec_arp_expire_arp;
                add_timer(&entry->timer);
                found = priv->mcast_vcc;
        }
@@ -1998,7 +1998,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
                entry->old_recv_push = old_push;
                entry->status = ESI_UNKNOWN;
                entry->timer.expires = jiffies + priv->vcc_timeout_period;
-               entry->timer.function = (TIMER_FUNC_TYPE)lec_arp_expire_vcc;
+               entry->timer.function = lec_arp_expire_vcc;
                hlist_add_head(&entry->next, &priv->lec_no_forward);
                add_timer(&entry->timer);
                dump_arp_table(priv);
@@ -2082,7 +2082,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
        entry->status = ESI_UNKNOWN;
        hlist_add_head(&entry->next, &priv->lec_arp_empty_ones);
        entry->timer.expires = jiffies + priv->vcc_timeout_period;
-       entry->timer.function = (TIMER_FUNC_TYPE)lec_arp_expire_vcc;
+       entry->timer.function = lec_arp_expire_vcc;
        add_timer(&entry->timer);
        pr_debug("After vcc was added\n");
        dump_arp_table(priv);
index e882d8b5db05e889be00fe26a0595458ead470a4..7c6a1cc760a2d075aade51a143cba668a52397dd 100644 (file)
@@ -121,7 +121,7 @@ static struct notifier_block mpoa_notifier = {
 
 struct mpoa_client *mpcs = NULL; /* FIXME */
 static struct atm_mpoa_qos *qos_head = NULL;
-static DEFINE_TIMER(mpc_timer, NULL);
+static DEFINE_TIMER(mpc_timer, mpc_cache_check);
 
 
 static struct mpoa_client *find_mpc_by_itfnum(int itf)
@@ -1413,7 +1413,6 @@ static void mpc_timer_refresh(void)
 {
        mpc_timer.expires = jiffies + (MPC_P2 * HZ);
        checking_time = mpc_timer.expires;
-       mpc_timer.function = (TIMER_FUNC_TYPE)mpc_cache_check;
        add_timer(&mpc_timer);
 }
 
index 1b659ab652fb0c70f964d0bb292e99527a4eda8f..bbe8414b6ee7d21f86e5ab9302ebfc875dbe33d3 100644 (file)
@@ -1214,7 +1214,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        orig_node->last_seen = jiffies;
 
        /* find packet count of corresponding one hop neighbor */
-       spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+       spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
        if_num = if_incoming->if_num;
        orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num];
        neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
@@ -1224,7 +1224,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        } else {
                neigh_rq_count = 0;
        }
-       spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+       spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
 
        /* pay attention to not get a value bigger than 100 % */
        if (orig_eq_count > neigh_rq_count)
index 341ceab8338d829d14a48831bef805bb04af0a8f..e0e2bfcd6b3efd73f2567d23ce304951c9e2c3f0 100644 (file)
@@ -814,7 +814,7 @@ static bool batadv_v_gw_is_eligible(struct batadv_priv *bat_priv,
        }
 
        orig_gw = batadv_gw_node_get(bat_priv, orig_node);
-       if (!orig_node)
+       if (!orig_gw)
                goto out;
 
        if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0)
index a98cf1104a30a30e66fb6018bef59dea83dc4b7a..ebe6e38934e46ed5de4d30204e791dbe40285fcc 100644 (file)
@@ -499,6 +499,8 @@ int batadv_frag_send_packet(struct sk_buff *skb,
         */
        if (skb->priority >= 256 && skb->priority <= 263)
                frag_header.priority = skb->priority - 256;
+       else
+               frag_header.priority = 0;
 
        ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
        ether_addr_copy(frag_header.dest, orig_node->orig);
index 4b90033f35a851eb33612a4e09d39de6fc8066ad..ebc4e2241c770d826fa8e731ba46ec043fe1992d 100644 (file)
@@ -482,15 +482,15 @@ static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
 
 /**
  * batadv_tp_sender_timeout - timer that fires in case of packet loss
- * @arg: address of the related tp_vars
+ * @t: address of the timer_list inside tp_vars
  *
  * If fired it means that there was packet loss.
  * Switch to Slow Start, set the ss_threshold to half of the current cwnd and
  * reset the cwnd to 3*MSS
  */
-static void batadv_tp_sender_timeout(unsigned long arg)
+static void batadv_tp_sender_timeout(struct timer_list *t)
 {
-       struct batadv_tp_vars *tp_vars = (struct batadv_tp_vars *)arg;
+       struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
        struct batadv_priv *bat_priv = tp_vars->bat_priv;
 
        if (atomic_read(&tp_vars->sending) == 0)
@@ -1020,8 +1020,7 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
        atomic64_set(&tp_vars->tot_sent, 0);
 
        kref_get(&tp_vars->refcount);
-       setup_timer(&tp_vars->timer, batadv_tp_sender_timeout,
-                   (unsigned long)tp_vars);
+       timer_setup(&tp_vars->timer, batadv_tp_sender_timeout, 0);
 
        tp_vars->bat_priv = bat_priv;
        tp_vars->start_time = jiffies;
@@ -1107,11 +1106,11 @@ static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
 /**
  * batadv_tp_receiver_shutdown - stop a tp meter receiver when timeout is
  *  reached without received ack
- * @arg: address of the related tp_vars
+ * @t: address of the timer_list inside tp_vars
  */
-static void batadv_tp_receiver_shutdown(unsigned long arg)
+static void batadv_tp_receiver_shutdown(struct timer_list *t)
 {
-       struct batadv_tp_vars *tp_vars = (struct batadv_tp_vars *)arg;
+       struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
        struct batadv_tp_unacked *un, *safe;
        struct batadv_priv *bat_priv;
 
@@ -1373,8 +1372,7 @@ batadv_tp_init_recv(struct batadv_priv *bat_priv,
        hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list);
 
        kref_get(&tp_vars->refcount);
-       setup_timer(&tp_vars->timer, batadv_tp_receiver_shutdown,
-                   (unsigned long)tp_vars);
+       timer_setup(&tp_vars->timer, batadv_tp_receiver_shutdown, 0);
 
        batadv_tp_reset_receiver_timer(tp_vars);
 
index 8112893037bdc0afee1247dad7ac6433ac0168bb..f2cec70d520cc2b29606f51a5c2b3c19a7fdc838 100644 (file)
@@ -398,9 +398,9 @@ static int hidp_raw_request(struct hid_device *hid, unsigned char reportnum,
        }
 }
 
-static void hidp_idle_timeout(unsigned long arg)
+static void hidp_idle_timeout(struct timer_list *t)
 {
-       struct hidp_session *session = (struct hidp_session *) arg;
+       struct hidp_session *session = from_timer(session, t, timer);
 
        /* The HIDP user-space API only contains calls to add and remove
         * devices. There is no way to forward events of any kind. Therefore,
@@ -944,8 +944,7 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
 
        /* device management */
        INIT_WORK(&session->dev_init, hidp_session_dev_work);
-       setup_timer(&session->timer, hidp_idle_timeout,
-                   (unsigned long)session);
+       timer_setup(&session->timer, hidp_idle_timeout, 0);
 
        /* session data */
        mutex_init(&session->report_mutex);
index 4a0b41d75c84833c89fdcd8a4387597cb99df0d2..b98225d65e87a34de2773c41a29ffc15a19db471 100644 (file)
@@ -233,9 +233,9 @@ static int rfcomm_check_security(struct rfcomm_dlc *d)
                                 d->out);
 }
 
-static void rfcomm_session_timeout(unsigned long arg)
+static void rfcomm_session_timeout(struct timer_list *t)
 {
-       struct rfcomm_session *s = (void *) arg;
+       struct rfcomm_session *s = from_timer(s, t, timer);
 
        BT_DBG("session %p state %ld", s, s->state);
 
@@ -258,9 +258,9 @@ static void rfcomm_session_clear_timer(struct rfcomm_session *s)
 }
 
 /* ---- RFCOMM DLCs ---- */
-static void rfcomm_dlc_timeout(unsigned long arg)
+static void rfcomm_dlc_timeout(struct timer_list *t)
 {
-       struct rfcomm_dlc *d = (void *) arg;
+       struct rfcomm_dlc *d = from_timer(d, t, timer);
 
        BT_DBG("dlc %p state %ld", d, d->state);
 
@@ -307,7 +307,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
        if (!d)
                return NULL;
 
-       setup_timer(&d->timer, rfcomm_dlc_timeout, (unsigned long)d);
+       timer_setup(&d->timer, rfcomm_dlc_timeout, 0);
 
        skb_queue_head_init(&d->tx_queue);
        mutex_init(&d->lock);
@@ -650,7 +650,7 @@ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state)
 
        BT_DBG("session %p sock %p", s, sock);
 
-       setup_timer(&s->timer, rfcomm_session_timeout, (unsigned long) s);
+       timer_setup(&s->timer, rfcomm_session_timeout, 0);
 
        INIT_LIST_HEAD(&s->dlcs);
        s->state = state;
index 795e920a3281939f8f84e76f9f3fc6161146a558..08df57665e1ff62fd3714598c5911833fe24a55b 100644 (file)
@@ -73,9 +73,9 @@ struct sco_pinfo {
 #define SCO_CONN_TIMEOUT       (HZ * 40)
 #define SCO_DISCONN_TIMEOUT    (HZ * 2)
 
-static void sco_sock_timeout(unsigned long arg)
+static void sco_sock_timeout(struct timer_list *t)
 {
-       struct sock *sk = (struct sock *)arg;
+       struct sock *sk = from_timer(sk, t, sk_timer);
 
        BT_DBG("sock %p state %d", sk, sk->sk_state);
 
@@ -487,7 +487,7 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock,
 
        sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
 
-       setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk);
+       timer_setup(&sk->sk_timer, sco_sock_timeout, 0);
 
        bt_sock_link(&sco_sk_list, sk);
        return sk;
index d0ef0a8e8831920cb86fc767eb0d756bf89feb46..015f465c514b28564c9e91eec40dc041b765fe25 100644 (file)
@@ -1262,19 +1262,20 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
        struct net_bridge *br = netdev_priv(dev);
        int err;
 
+       err = register_netdevice(dev);
+       if (err)
+               return err;
+
        if (tb[IFLA_ADDRESS]) {
                spin_lock_bh(&br->lock);
                br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
                spin_unlock_bh(&br->lock);
        }
 
-       err = register_netdevice(dev);
-       if (err)
-               return err;
-
        err = br_changelink(dev, tb, data, extack);
        if (err)
-               unregister_netdevice(dev);
+               br_dev_delete(dev, NULL);
+
        return err;
 }
 
index d979b3dc49a6b9a2ef9e1ee9c262c9bb9bd9f253..0c59f876fe6f0c48bcf06adf7178d1d3e1528c77 100644 (file)
@@ -221,7 +221,7 @@ static int can_stats_proc_show(struct seq_file *m, void *v)
 
        seq_putc(m, '\n');
 
-       if (net->can.can_stattimer.function == (TIMER_FUNC_TYPE)can_stat_update) {
+       if (net->can.can_stattimer.function == can_stat_update) {
                seq_printf(m, " %8ld %% total match ratio (RXMR)\n",
                                can_stats->total_rx_match_ratio);
 
@@ -291,7 +291,7 @@ static int can_reset_stats_proc_show(struct seq_file *m, void *v)
 
        user_reset = 1;
 
-       if (net->can.can_stattimer.function == (TIMER_FUNC_TYPE)can_stat_update) {
+       if (net->can.can_stattimer.function == can_stat_update) {
                seq_printf(m, "Scheduled statistic reset #%ld.\n",
                                can_pstats->stats_reset + 1);
        } else {
index 07ed21d64f92b39da9b683aa432efde6a14afdf0..01ee854454a8089cdd49e2c8964a99f6a2d74730 100644 (file)
@@ -1106,7 +1106,7 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
-       return p ? -ENFILE : -EEXIST;
+       return -ENFILE;
 }
 
 static int dev_alloc_name_ns(struct net *net,
@@ -3904,7 +3904,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
                                     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
                                     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
                        goto do_drop;
-               if (troom > 0 && __skb_linearize(skb))
+               if (skb_linearize(skb))
                        goto do_drop;
        }
 
index 70ccda233bd1f1aab18535e6d9d0419bb9a1a23b..c7785efeea577594b8e5ed0a74fb7b8b924ebae2 100644 (file)
@@ -144,9 +144,9 @@ static void send_dm_alert(struct work_struct *work)
  * in the event that more drops will arrive during the
  * hysteresis period.
  */
-static void sched_send_work(unsigned long _data)
+static void sched_send_work(struct timer_list *t)
 {
-       struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
+       struct per_cpu_dm_data *data = from_timer(data, t, send_timer);
 
        schedule_work(&data->dm_alert_work);
 }
@@ -412,8 +412,7 @@ static int __init init_net_drop_monitor(void)
        for_each_possible_cpu(cpu) {
                data = &per_cpu(dm_cpu_data, cpu);
                INIT_WORK(&data->dm_alert_work, send_dm_alert);
-               setup_timer(&data->send_timer, sched_send_work,
-                           (unsigned long)data);
+               timer_setup(&data->send_timer, sched_send_work, 0);
                spin_lock_init(&data->lock);
                reset_per_cpu_data(data);
        }
index 7c1ffd6f950172c1915d8e5fa2b5e3f77e4f4c78..9834cfa21b21168a7654290dc2a999e41937b534 100644 (file)
@@ -76,9 +76,9 @@ static void est_fetch_counters(struct net_rate_estimator *e,
 
 }
 
-static void est_timer(unsigned long arg)
+static void est_timer(struct timer_list *t)
 {
-       struct net_rate_estimator *est = (struct net_rate_estimator *)arg;
+       struct net_rate_estimator *est = from_timer(est, t, timer);
        struct gnet_stats_basic_packed b;
        u64 rate, brate;
 
@@ -170,7 +170,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
        }
 
        est->next_jiffies = jiffies + ((HZ/4) << intvl_log);
-       setup_timer(&est->timer, est_timer, (unsigned long)est);
+       timer_setup(&est->timer, est_timer, 0);
        mod_timer(&est->timer, est->next_jiffies);
 
        rcu_assign_pointer(*rate_est, est);
index 6ea3a1a7f36a2e2d35ee170756aca0e0d6fc5120..d1f5fe986edda5ff886575be0eea0b361e2be7ff 100644 (file)
@@ -51,7 +51,7 @@ do {                                          \
 
 #define PNEIGH_HASHMASK                0xF
 
-static void neigh_timer_handler(unsigned long arg);
+static void neigh_timer_handler(struct timer_list *t);
 static void __neigh_notify(struct neighbour *n, int type, int flags,
                           u32 pid);
 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
@@ -331,7 +331,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
        n->output         = neigh_blackhole;
        seqlock_init(&n->hh.hh_lock);
        n->parms          = neigh_parms_clone(&tbl->parms);
-       setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
+       timer_setup(&n->timer, neigh_timer_handler, 0);
 
        NEIGH_CACHE_STAT_INC(tbl, allocs);
        n->tbl            = tbl;
@@ -903,10 +903,10 @@ static void neigh_probe(struct neighbour *neigh)
 
 /* Called when a timer expires for a neighbour entry. */
 
-static void neigh_timer_handler(unsigned long arg)
+static void neigh_timer_handler(struct timer_list *t)
 {
        unsigned long now, next;
-       struct neighbour *neigh = (struct neighbour *)arg;
+       struct neighbour *neigh = from_timer(neigh, t, timer);
        unsigned int state;
        int notify = 0;
 
@@ -1391,9 +1391,9 @@ int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(neigh_direct_output);
 
-static void neigh_proxy_process(unsigned long arg)
+static void neigh_proxy_process(struct timer_list *t)
 {
-       struct neigh_table *tbl = (struct neigh_table *)arg;
+       struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
        long sched_next = 0;
        unsigned long now = jiffies;
        struct sk_buff *skb, *n;
@@ -1573,7 +1573,7 @@ void neigh_table_init(int index, struct neigh_table *tbl)
        INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
        queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
                        tbl->parms.reachable_time);
-       setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
+       timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
        skb_queue_head_init_class(&tbl->proxy_queue,
                        &neigh_table_proxy_queue_class);
 
index b797832565d34ccefb374ee87f9cbc460779a484..60a71be75aea063b418a48ade2a1e1c7804ab35c 100644 (file)
@@ -267,7 +267,7 @@ struct net *get_net_ns_by_id(struct net *net, int id)
        spin_lock_bh(&net->nsid_lock);
        peer = idr_find(&net->netns_ids, id);
        if (peer)
-               get_net(peer);
+               peer = maybe_get_net(peer);
        spin_unlock_bh(&net->nsid_lock);
        rcu_read_unlock();
 
index 1c4810919a0a35900d45a659de0cd780b7e500d3..b9057478d69c8ad02ea7b4ba8d6f612e7792a738 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/module.h>
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
index 6b0ff396fa9dc58fed483597d459c66243be4cd2..a3cb0be4c6f3b5b519b60ac8dde73c33e902763a 100644 (file)
@@ -1178,7 +1178,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
        u32 d_off;
 
        if (!num_frags)
-               return 0;
+               goto release;
 
        if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
                return -EINVAL;
@@ -1238,6 +1238,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
        __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
        skb_shinfo(skb)->nr_frags = new_frags;
 
+release:
        skb_zcopy_clear(skb, false);
        return 0;
 }
@@ -3654,8 +3655,6 @@ normal:
 
                skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
                                              SKBTX_SHARED_FRAG;
-               if (skb_zerocopy_clone(nskb, head_skb, GFP_ATOMIC))
-                       goto err;
 
                while (pos < offset + len) {
                        if (i >= nfrags) {
@@ -3681,6 +3680,8 @@ normal:
 
                        if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
                                goto err;
+                       if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
+                               goto err;
 
                        *nskb_frag = *frag;
                        __skb_frag_ref(nskb_frag);
@@ -4293,7 +4294,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
        struct sock *sk = skb->sk;
 
        if (!skb_may_tx_timestamp(sk, false))
-               return;
+               goto err;
 
        /* Take a reference to prevent skb_orphan() from freeing the socket,
         * but only if the socket refcount is not zero.
@@ -4302,7 +4303,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
                *skb_hwtstamps(skb) = *hwtstamps;
                __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
                sock_put(sk);
+               return;
        }
+
+err:
+       kfree_skb(skb);
 }
 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
 
index abd07a443219853b022bef41cb072e90ff8f07f0..178bb9833311f83205317b07fe64cb2e45a9f734 100644 (file)
@@ -57,10 +57,16 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
                if (state == DCCP_TIME_WAIT)
                        timeo = DCCP_TIMEWAIT_LEN;
 
+               /* tw_timer is pinned, so we need to make sure BH are disabled
+                * in the following section, otherwise the timer handler could
+                * run before we complete the initialization.
+                */
+               local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
                inet_twsk_put(tw);
+               local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
index b68168fcc06aa1981258eca4857511329af62f9a..9d43c1f4027408f3a2176767da0dd425938ba652 100644 (file)
@@ -259,6 +259,7 @@ int dccp_disconnect(struct sock *sk, int flags)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
+       struct dccp_sock *dp = dccp_sk(sk);
        int err = 0;
        const int old_state = sk->sk_state;
 
@@ -278,6 +279,10 @@ int dccp_disconnect(struct sock *sk, int flags)
                sk->sk_err = ECONNRESET;
 
        dccp_clear_xmit_timers(sk);
+       ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
+       ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
+       dp->dccps_hc_rx_ccid = NULL;
+       dp->dccps_hc_tx_ccid = NULL;
 
        __skb_queue_purge(&sk->sk_receive_queue);
        __skb_queue_purge(&sk->sk_write_queue);
index b36dceab0dc12000a73e6fec63e28ffa98691f59..324cb9f2f55146a46ef78528fa5ac768e22736b9 100644 (file)
@@ -125,7 +125,7 @@ static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
                                             struct sk_buff *skb,
                                             const void *daddr);
 static int dn_route_input(struct sk_buff *);
-static void dn_run_flush(unsigned long dummy);
+static void dn_run_flush(struct timer_list *unused);
 
 static struct dn_rt_hash_bucket *dn_rt_hash_table;
 static unsigned int dn_rt_hash_mask;
@@ -183,7 +183,7 @@ static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
        return dn_rt_hash_mask & (unsigned int)tmp;
 }
 
-static void dn_dst_check_expire(unsigned long dummy)
+static void dn_dst_check_expire(struct timer_list *unused)
 {
        int i;
        struct dn_route *rt;
@@ -357,7 +357,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
        return 0;
 }
 
-static void dn_run_flush(unsigned long dummy)
+static void dn_run_flush(struct timer_list *unused)
 {
        int i;
        struct dn_route *rt, *next;
@@ -1875,7 +1875,7 @@ void __init dn_route_init(void)
                kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
                                  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
        dst_entries_init(&dn_dst_ops);
-       setup_timer(&dn_route_timer, dn_dst_check_expire, 0);
+       timer_setup(&dn_route_timer, dn_dst_check_expire, 0);
        dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
        add_timer(&dn_route_timer);
 
index f430daed24a0d97a97fe625456fbe8fc7fa22ce3..aa4155875ca84eabb75ab445c4f1adf2612eeac2 100644 (file)
 
 #define SLOW_INTERVAL (HZ/2)
 
-static void dn_slow_timer(unsigned long arg);
+static void dn_slow_timer(struct timer_list *t);
 
 void dn_start_slow_timer(struct sock *sk)
 {
-       setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+       timer_setup(&sk->sk_timer, dn_slow_timer, 0);
        sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
 }
 
@@ -47,9 +47,9 @@ void dn_stop_slow_timer(struct sock *sk)
        sk_stop_timer(sk, &sk->sk_timer);
 }
 
-static void dn_slow_timer(unsigned long arg)
+static void dn_slow_timer(struct timer_list *t)
 {
-       struct sock *sk = (struct sock *)arg;
+       struct sock *sk = from_timer(sk, t, sk_timer);
        struct dn_scp *scp = DN_SK(sk);
 
        bh_lock_sock(sk);
index d6e7a642493b03223ab9890247983d9d1499cea0..a95a55f7913746bab3aa7a993265885ece25f35a 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/of_net.h>
 #include <linux/of_mdio.h>
 #include <linux/mdio.h>
-#include <linux/list.h>
 #include <net/rtnetlink.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_mirred.h>
index a4573bccd6da7b6763016d4f07d3032d44483d99..7a93359fbc7229389fc7bec67889ca1115f47a69 100644 (file)
@@ -1428,7 +1428,7 @@ skip:
 
 static bool inetdev_valid_mtu(unsigned int mtu)
 {
-       return mtu >= 68;
+       return mtu >= IPV4_MIN_MTU;
 }
 
 static void inetdev_send_gratuitous_arp(struct net_device *dev,
index f52d27a422c37298b2ad0c1dbba0e5307f1a46b6..08259d078b1ca821c581aeb34251c79a9aba8c8d 100644 (file)
@@ -1298,14 +1298,19 @@ err_table_hash_alloc:
 
 static void ip_fib_net_exit(struct net *net)
 {
-       unsigned int i;
+       int i;
 
        rtnl_lock();
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
        RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
 #endif
-       for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
+       /* Destroy the tables in reverse order to guarantee that the
+        * local table, ID 255, is destroyed before the main table, ID
+        * 254. This is necessary as the local table may contain
+        * references to data contained in the main table.
+        */
+       for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
                struct hlist_head *head = &net->ipv4.fib_table_hash[i];
                struct hlist_node *tmp;
                struct fib_table *tb;
index f04d944f8abe0bfbb840837bb35d28fe6d8d25d0..c586597da20dbb0e46eb0f693fd65bccfc8f3633 100644 (file)
@@ -698,7 +698,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
 
        nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
                int type = nla_type(nla);
-               u32 val;
+               u32 fi_val, val;
 
                if (!type)
                        continue;
@@ -715,7 +715,11 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
                        val = nla_get_u32(nla);
                }
 
-               if (fi->fib_metrics->metrics[type - 1] != val)
+               fi_val = fi->fib_metrics->metrics[type - 1];
+               if (type == RTAX_FEATURES)
+                       fi_val &= ~DST_FEATURE_ECN_CA;
+
+               if (fi_val != val)
                        return false;
        }
 
index ab183af0b5b6a8f9b7fd02b32b56d32487518f7a..726f6b6082748896686ae603546fa189348f9142 100644 (file)
@@ -89,6 +89,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/times.h>
 #include <linux/pkt_sched.h>
+#include <linux/byteorder/generic.h>
 
 #include <net/net_namespace.h>
 #include <net/arp.h>
@@ -321,6 +322,23 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
        return scount;
 }
 
+/* source address selection per RFC 3376 section 4.2.13 */
+static __be32 igmpv3_get_srcaddr(struct net_device *dev,
+                                const struct flowi4 *fl4)
+{
+       struct in_device *in_dev = __in_dev_get_rcu(dev);
+
+       if (!in_dev)
+               return htonl(INADDR_ANY);
+
+       for_ifa(in_dev) {
+               if (inet_ifa_match(fl4->saddr, ifa))
+                       return fl4->saddr;
+       } endfor_ifa(in_dev);
+
+       return htonl(INADDR_ANY);
+}
+
 static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
 {
        struct sk_buff *skb;
@@ -368,7 +386,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
        pip->frag_off = htons(IP_DF);
        pip->ttl      = 1;
        pip->daddr    = fl4.daddr;
-       pip->saddr    = fl4.saddr;
+       pip->saddr    = igmpv3_get_srcaddr(dev, &fl4);
        pip->protocol = IPPROTO_IGMP;
        pip->tot_len  = 0;      /* filled in later */
        ip_select_ident(net, skb, NULL);
@@ -404,16 +422,17 @@ static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
 }
 
 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
-       int type, struct igmpv3_grec **ppgr)
+       int type, struct igmpv3_grec **ppgr, unsigned int mtu)
 {
        struct net_device *dev = pmc->interface->dev;
        struct igmpv3_report *pih;
        struct igmpv3_grec *pgr;
 
-       if (!skb)
-               skb = igmpv3_newpack(dev, dev->mtu);
-       if (!skb)
-               return NULL;
+       if (!skb) {
+               skb = igmpv3_newpack(dev, mtu);
+               if (!skb)
+                       return NULL;
+       }
        pgr = skb_put(skb, sizeof(struct igmpv3_grec));
        pgr->grec_type = type;
        pgr->grec_auxwords = 0;
@@ -436,12 +455,17 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
        struct igmpv3_grec *pgr = NULL;
        struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
        int scount, stotal, first, isquery, truncate;
+       unsigned int mtu;
 
        if (pmc->multiaddr == IGMP_ALL_HOSTS)
                return skb;
        if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
                return skb;
 
+       mtu = READ_ONCE(dev->mtu);
+       if (mtu < IPV4_MIN_MTU)
+               return skb;
+
        isquery = type == IGMPV3_MODE_IS_INCLUDE ||
                  type == IGMPV3_MODE_IS_EXCLUDE;
        truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
@@ -462,7 +486,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
                    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
                        if (skb)
                                igmpv3_sendpack(skb);
-                       skb = igmpv3_newpack(dev, dev->mtu);
+                       skb = igmpv3_newpack(dev, mtu);
                }
        }
        first = 1;
@@ -498,12 +522,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
                                pgr->grec_nsrcs = htons(scount);
                        if (skb)
                                igmpv3_sendpack(skb);
-                       skb = igmpv3_newpack(dev, dev->mtu);
+                       skb = igmpv3_newpack(dev, mtu);
                        first = 1;
                        scount = 0;
                }
                if (first) {
-                       skb = add_grhead(skb, pmc, type, &pgr);
+                       skb = add_grhead(skb, pmc, type, &pgr, mtu);
                        first = 0;
                }
                if (!skb)
@@ -538,7 +562,7 @@ empty_source:
                                igmpv3_sendpack(skb);
                                skb = NULL; /* add_grhead will get a new one */
                        }
-                       skb = add_grhead(skb, pmc, type, &pgr);
+                       skb = add_grhead(skb, pmc, type, &pgr, mtu);
                }
        }
        if (pgr)
@@ -752,18 +776,18 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
        return ip_local_out(net, skb->sk, skb);
 }
 
-static void igmp_gq_timer_expire(unsigned long data)
+static void igmp_gq_timer_expire(struct timer_list *t)
 {
-       struct in_device *in_dev = (struct in_device *)data;
+       struct in_device *in_dev = from_timer(in_dev, t, mr_gq_timer);
 
        in_dev->mr_gq_running = 0;
        igmpv3_send_report(in_dev, NULL);
        in_dev_put(in_dev);
 }
 
-static void igmp_ifc_timer_expire(unsigned long data)
+static void igmp_ifc_timer_expire(struct timer_list *t)
 {
-       struct in_device *in_dev = (struct in_device *)data;
+       struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
 
        igmpv3_send_cr(in_dev);
        if (in_dev->mr_ifc_count) {
@@ -784,9 +808,9 @@ static void igmp_ifc_event(struct in_device *in_dev)
 }
 
 
-static void igmp_timer_expire(unsigned long data)
+static void igmp_timer_expire(struct timer_list *t)
 {
-       struct ip_mc_list *im = (struct ip_mc_list *)data;
+       struct ip_mc_list *im = from_timer(im, t, timer);
        struct in_device *in_dev = im->interface;
 
        spin_lock(&im->lock);
@@ -1385,7 +1409,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
        refcount_set(&im->refcnt, 1);
        spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
-       setup_timer(&im->timer, igmp_timer_expire, (unsigned long)im);
+       timer_setup(&im->timer, igmp_timer_expire, 0);
        im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
 #endif
 
@@ -1695,10 +1719,8 @@ void ip_mc_init_dev(struct in_device *in_dev)
        ASSERT_RTNL();
 
 #ifdef CONFIG_IP_MULTICAST
-       setup_timer(&in_dev->mr_gq_timer, igmp_gq_timer_expire,
-                       (unsigned long)in_dev);
-       setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire,
-                       (unsigned long)in_dev);
+       timer_setup(&in_dev->mr_gq_timer, igmp_gq_timer_expire, 0);
+       timer_setup(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, 0);
        in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
 #endif
 
index c690cd0d9b3f0af53c23b9a1ecc87be4098ae059..b563e0c46bac2362acccf38495546a8b6b726384 100644 (file)
@@ -93,7 +93,7 @@ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
 }
 
 /*
- * Enter the time wait state.
+ * Enter the time wait state. This is called with BH disabled locally.
  * Essentially we whip up a timewait bucket, copy the relevant info into it
  * from the SK, and mess with hash chains and list linkage.
  */
@@ -111,7 +111,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
                        hashinfo->bhash_size)];
-       spin_lock_bh(&bhead->lock);
+       spin_lock(&bhead->lock);
        tw->tw_tb = icsk->icsk_bind_hash;
        WARN_ON(!icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
@@ -137,7 +137,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
        if (__sk_nulls_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 
-       spin_unlock_bh(lock);
+       spin_unlock(lock);
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
 
index bb6239169b1ab943a6418494dab5018843bcde3c..45ffd3d045d240cad8e4d0ed8dd0dd7da997bf9e 100644 (file)
@@ -266,7 +266,7 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
        len = gre_hdr_len + sizeof(*ershdr);
 
        if (unlikely(!pskb_may_pull(skb, len)))
-               return -ENOMEM;
+               return PACKET_REJECT;
 
        iph = ip_hdr(skb);
        ershdr = (struct erspanhdr *)(skb->data + gre_hdr_len);
@@ -1310,6 +1310,7 @@ static const struct net_device_ops erspan_netdev_ops = {
 static void ipgre_tap_setup(struct net_device *dev)
 {
        ether_setup(dev);
+       dev->max_mtu = 0;
        dev->netdev_ops = &gre_tap_netdev_ops;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
index fe6fee728ce49d01b55aa478698e1a3bcf9a3bdb..5ddb1cb52bd405ed10cce43195a25607d136efbf 100644 (file)
@@ -349,8 +349,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
        dev->needed_headroom = t_hlen + hlen;
        mtu -= (dev->hard_header_len + t_hlen);
 
-       if (mtu < 68)
-               mtu = 68;
+       if (mtu < IPV4_MIN_MTU)
+               mtu = IPV4_MIN_MTU;
 
        return mtu;
 }
index 40a43ad294cb3751839cc1dbfc02a360e101d401..fd5f19c988e48a00e5447f5504bd670326ef7939 100644 (file)
@@ -112,7 +112,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
                                 int cmd);
 static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
 static void mroute_clean_tables(struct mr_table *mrt, bool all);
-static void ipmr_expire_process(unsigned long arg);
+static void ipmr_expire_process(struct timer_list *t);
 
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
 #define ipmr_for_each_table(mrt, net) \
@@ -375,8 +375,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
        INIT_LIST_HEAD(&mrt->mfc_cache_list);
        INIT_LIST_HEAD(&mrt->mfc_unres_queue);
 
-       setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
-                   (unsigned long)mrt);
+       timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);
 
        mrt->mroute_reg_vif_num = -1;
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -804,9 +803,9 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
 }
 
 /* Timer process for the unresolved queue. */
-static void ipmr_expire_process(unsigned long arg)
+static void ipmr_expire_process(struct timer_list *t)
 {
-       struct mr_table *mrt = (struct mr_table *)arg;
+       struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
        unsigned long now;
        unsigned long expires;
        struct mfc_cache *c, *next;
index f88221aebc9d7b61cf2c09f2b3d2351c4095f64f..0c3c944a7b7201f74dace535c8a26d58b8039a1f 100644 (file)
@@ -373,7 +373,6 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
                                        if (!xt_find_jump_offset(offsets, newpos,
                                                                 newinfo->number))
                                                return 0;
-                                       e = entry0 + newpos;
                                } else {
                                        /* ... this is a fallthru */
                                        newpos = pos + e->next_offset;
index 4cbe5e80f3bf079755cd08f33c24a4077c6c4a63..2e0d339028bbcb6766f92e5b87d70866a419b893 100644 (file)
@@ -439,7 +439,6 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                        if (!xt_find_jump_offset(offsets, newpos,
                                                                 newinfo->number))
                                                return 0;
-                                       e = entry0 + newpos;
                                } else {
                                        /* ... this is a fallthru */
                                        newpos = pos + e->next_offset;
index 17b4ca562944c35b50015bbc40aa580ca62ddfb7..69060e3abe8598b350e6bfe5815a702c4b3d2ade 100644 (file)
@@ -813,12 +813,13 @@ static int clusterip_net_init(struct net *net)
 
 static void clusterip_net_exit(struct net *net)
 {
-#ifdef CONFIG_PROC_FS
        struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+#ifdef CONFIG_PROC_FS
        proc_remove(cn->procdir);
        cn->procdir = NULL;
 #endif
        nf_unregister_net_hook(net, &cip_arp_ops);
+       WARN_ON_ONCE(!list_empty(&cn->configs));
 }
 
 static struct pernet_operations clusterip_net_ops = {
index 33b70bfd1122f08f4897ea6a68eb51e3a74bb1e5..125c1eab3eaa6d894804c3aa8918aa7fcc736ca0 100644 (file)
@@ -513,11 +513,16 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        int err;
        struct ip_options_data opt_copy;
        struct raw_frag_vec rfv;
+       int hdrincl;
 
        err = -EMSGSIZE;
        if (len > 0xFFFF)
                goto out;
 
+       /* hdrincl should be READ_ONCE(inet->hdrincl)
+        * but READ_ONCE() doesn't work with bit fields
+        */
+       hdrincl = inet->hdrincl;
        /*
         *      Check the flags.
         */
@@ -593,7 +598,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                /* Linux does not mangle headers on raw sockets,
                 * so that IP options + IP_HDRINCL is non-sense.
                 */
-               if (inet->hdrincl)
+               if (hdrincl)
                        goto done;
                if (ipc.opt->opt.srr) {
                        if (!daddr)
@@ -615,12 +620,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
        flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
                           RT_SCOPE_UNIVERSE,
-                          inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+                          hdrincl ? IPPROTO_RAW : sk->sk_protocol,
                           inet_sk_flowi_flags(sk) |
-                           (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
+                           (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
                           daddr, saddr, 0, 0, sk->sk_uid);
 
-       if (!inet->hdrincl) {
+       if (!hdrincl) {
                rfv.msg = msg;
                rfv.hlen = 0;
 
@@ -645,7 +650,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                goto do_confirm;
 back_from_confirm:
 
-       if (inet->hdrincl)
+       if (hdrincl)
                err = raw_send_hdrinc(sk, &fl4, msg, len,
                                      &rt, msg->msg_flags, &ipc.sockc);
 
index bf97317e6c974285a652664d02e8b2c48a8cfe96..f08eebe60446e2e99d19bd0ea51e5fafb96aca63 100644 (file)
@@ -2412,6 +2412,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->snd_cwnd_cnt = 0;
        tp->window_clamp = 0;
        tcp_set_ca_state(sk, TCP_CA_Open);
+       tp->is_sack_reneg = 0;
        tcp_clear_retrans(tp);
        inet_csk_delack_init(sk);
        /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
index 69ee877574d08b36bc990f890899037108eafe05..8322f26e770e4406fe9accb386a99659941cc874 100644 (file)
@@ -110,7 +110,8 @@ struct bbr {
        u32     lt_last_lost;        /* LT intvl start: tp->lost */
        u32     pacing_gain:10, /* current gain for setting pacing rate */
                cwnd_gain:10,   /* current gain for setting cwnd */
-               full_bw_cnt:3,  /* number of rounds without large bw gains */
+               full_bw_reached:1,   /* reached full bw in Startup? */
+               full_bw_cnt:2,  /* number of rounds without large bw gains */
                cycle_idx:3,    /* current index in pacing_gain cycle array */
                has_seen_rtt:1, /* have we seen an RTT sample yet? */
                unused_b:5;
@@ -180,7 +181,7 @@ static bool bbr_full_bw_reached(const struct sock *sk)
 {
        const struct bbr *bbr = inet_csk_ca(sk);
 
-       return bbr->full_bw_cnt >= bbr_full_bw_cnt;
+       return bbr->full_bw_reached;
 }
 
 /* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
@@ -717,6 +718,7 @@ static void bbr_check_full_bw_reached(struct sock *sk,
                return;
        }
        ++bbr->full_bw_cnt;
+       bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
 }
 
 /* If pipe is probably full, drain the queue and then enter steady-state. */
@@ -850,6 +852,7 @@ static void bbr_init(struct sock *sk)
        bbr->restore_cwnd = 0;
        bbr->round_start = 0;
        bbr->idle_restart = 0;
+       bbr->full_bw_reached = 0;
        bbr->full_bw = 0;
        bbr->full_bw_cnt = 0;
        bbr->cycle_mstamp = 0;
@@ -871,6 +874,11 @@ static u32 bbr_sndbuf_expand(struct sock *sk)
  */
 static u32 bbr_undo_cwnd(struct sock *sk)
 {
+       struct bbr *bbr = inet_csk_ca(sk);
+
+       bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
+       bbr->full_bw_cnt = 0;
+       bbr_reset_lt_bw_sampling(sk);
        return tcp_sk(sk)->snd_cwnd;
 }
 
index 734cfc8ff76edf3453921b50620be2986bfcfdb9..45f750e85714da11f569ae0c6522f1cc56c6d2a2 100644 (file)
@@ -508,9 +508,6 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
        u32 new_sample = tp->rcv_rtt_est.rtt_us;
        long m = sample;
 
-       if (m == 0)
-               m = 1;
-
        if (new_sample != 0) {
                /* If we sample in larger samples in the non-timestamp
                 * case, we could grossly overestimate the RTT especially
@@ -547,6 +544,8 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
        if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
                return;
        delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
+       if (!delta_us)
+               delta_us = 1;
        tcp_rcv_rtt_update(tp, delta_us, 1);
 
 new_measure:
@@ -563,8 +562,11 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
            (TCP_SKB_CB(skb)->end_seq -
             TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
                u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
-               u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+               u32 delta_us;
 
+               if (!delta)
+                       delta = 1;
+               delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
                tcp_rcv_rtt_update(tp, delta_us, 0);
        }
 }
@@ -579,6 +581,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
        int time;
        int copied;
 
+       tcp_mstamp_refresh(tp);
        time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
        if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
                return;
@@ -1941,6 +1944,8 @@ void tcp_enter_loss(struct sock *sk)
        if (is_reneg) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
                tp->sacked_out = 0;
+               /* Mark SACK reneging until we recover from this loss event. */
+               tp->is_sack_reneg = 1;
        }
        tcp_clear_all_retrans_hints(tp);
 
@@ -2326,6 +2331,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
        }
        tp->snd_cwnd_stamp = tcp_jiffies32;
        tp->undo_marker = 0;
+       tp->rack.advanced = 1; /* Force RACK to re-examine losses */
 }
 
 static inline bool tcp_may_undo(const struct tcp_sock *tp)
@@ -2364,6 +2370,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
                return true;
        }
        tcp_set_ca_state(sk, TCP_CA_Open);
+       tp->is_sack_reneg = 0;
        return false;
 }
 
@@ -2397,8 +2404,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
                        NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPSPURIOUSRTOS);
                inet_csk(sk)->icsk_retransmits = 0;
-               if (frto_undo || tcp_is_sack(tp))
+               if (frto_undo || tcp_is_sack(tp)) {
                        tcp_set_ca_state(sk, TCP_CA_Open);
+                       tp->is_sack_reneg = 0;
+               }
                return true;
        }
        return false;
@@ -3495,6 +3504,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        struct tcp_sacktag_state sack_state;
        struct rate_sample rs = { .prior_delivered = 0 };
        u32 prior_snd_una = tp->snd_una;
+       bool is_sack_reneg = tp->is_sack_reneg;
        u32 ack_seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
        bool is_dupack = false;
@@ -3611,7 +3621,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
        delivered = tp->delivered - delivered;  /* freshly ACKed or SACKed */
        lost = tp->lost - lost;                 /* freshly marked lost */
-       tcp_rate_gen(sk, delivered, lost, sack_state.rate);
+       tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
        tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
        tcp_xmit_recovery(sk, rexmit);
        return 1;
index c6bc0c4d19c624888b0d0b5a4246c7183edf63f5..94e28350f4205b1a57809d5471d7e2ade51f5196 100644 (file)
@@ -848,7 +848,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                        tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
                        req->ts_recent,
                        0,
-                       tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
+                       tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
                                          AF_INET),
                        inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        ip_hdr(skb)->tos);
@@ -1591,6 +1591,34 @@ int tcp_filter(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_filter);
 
+static void tcp_v4_restore_cb(struct sk_buff *skb)
+{
+       memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
+               sizeof(struct inet_skb_parm));
+}
+
+static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
+                          const struct tcphdr *th)
+{
+       /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
+        * barrier() makes sure compiler wont play fool^Waliasing games.
+        */
+       memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
+               sizeof(struct inet_skb_parm));
+       barrier();
+
+       TCP_SKB_CB(skb)->seq = ntohl(th->seq);
+       TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+                                   skb->len - th->doff * 4);
+       TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
+       TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
+       TCP_SKB_CB(skb)->tcp_tw_isn = 0;
+       TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
+       TCP_SKB_CB(skb)->sacked  = 0;
+       TCP_SKB_CB(skb)->has_rxtstamp =
+                       skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
+}
+
 /*
  *     From tcp_input.c
  */
@@ -1631,24 +1659,6 @@ int tcp_v4_rcv(struct sk_buff *skb)
 
        th = (const struct tcphdr *)skb->data;
        iph = ip_hdr(skb);
-       /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
-        * barrier() makes sure compiler wont play fool^Waliasing games.
-        */
-       memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
-               sizeof(struct inet_skb_parm));
-       barrier();
-
-       TCP_SKB_CB(skb)->seq = ntohl(th->seq);
-       TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
-                                   skb->len - th->doff * 4);
-       TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
-       TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
-       TCP_SKB_CB(skb)->tcp_tw_isn = 0;
-       TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
-       TCP_SKB_CB(skb)->sacked  = 0;
-       TCP_SKB_CB(skb)->has_rxtstamp =
-                       skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
-
 lookup:
        sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
                               th->dest, sdif, &refcounted);
@@ -1679,14 +1689,19 @@ process:
                sock_hold(sk);
                refcounted = true;
                nsk = NULL;
-               if (!tcp_filter(sk, skb))
+               if (!tcp_filter(sk, skb)) {
+                       th = (const struct tcphdr *)skb->data;
+                       iph = ip_hdr(skb);
+                       tcp_v4_fill_cb(skb, iph, th);
                        nsk = tcp_check_req(sk, skb, req, false);
+               }
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_and_relse;
                }
                if (nsk == sk) {
                        reqsk_put(req);
+                       tcp_v4_restore_cb(skb);
                } else if (tcp_child_process(sk, nsk, skb)) {
                        tcp_v4_send_reset(nsk, skb);
                        goto discard_and_relse;
@@ -1712,6 +1727,7 @@ process:
                goto discard_and_relse;
        th = (const struct tcphdr *)skb->data;
        iph = ip_hdr(skb);
+       tcp_v4_fill_cb(skb, iph, th);
 
        skb->dev = NULL;
 
@@ -1742,6 +1758,8 @@ no_tcp_socket:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;
 
+       tcp_v4_fill_cb(skb, iph, th);
+
        if (tcp_checksum_complete(skb)) {
 csum_error:
                __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
@@ -1768,6 +1786,8 @@ do_time_wait:
                goto discard_it;
        }
 
+       tcp_v4_fill_cb(skb, iph, th);
+
        if (tcp_checksum_complete(skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto csum_error;
@@ -1784,6 +1804,7 @@ do_time_wait:
                if (sk2) {
                        inet_twsk_deschedule_put(inet_twsk(sk));
                        sk = sk2;
+                       tcp_v4_restore_cb(skb);
                        refcounted = false;
                        goto process;
                }
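Aside, not part of the patch: the tcp_v4_fill_cb()/tcp_v4_restore_cb() pair above defers overwriting the IP control block. The copy of IPCB into TCP_SKB_CB now happens only after socket lookup and tcp_filter(), and it is undone where the skb falls back into the common path (the nsk == sk request case) or loops back to the lookup (the time-wait rehash), so code running before that point still sees a valid struct inet_skb_parm in skb->cb. The pair is symmetric; a sketch using only lines from the hunks above:

        th = (const struct tcphdr *)skb->data;
        iph = ip_hdr(skb);
        tcp_v4_fill_cb(skb, iph, th);       /* IPCB -> TCP_SKB_CB            */
        ...
        tcp_v4_restore_cb(skb);             /* TCP_SKB_CB -> IPCB, before the */
                                            /* skb is handed back for lookup  */
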
index e36eff0403f4e80c4f7291a70614f40125652133..b079b619b60ca577d5ef20a5065fce87acecd96c 100644 (file)
@@ -310,10 +310,16 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                if (state == TCP_TIME_WAIT)
                        timeo = TCP_TIMEWAIT_LEN;
 
+               /* tw_timer is pinned, so we need to make sure BH are disabled
+                * in the following section, otherwise the timer handler could run before
+                * we complete the initialization.
+                */
+               local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
                inet_twsk_put(tw);
+               local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
index 3330a370d3061edd7cda90e1f50713ed0e7868a1..c61240e43923d6dd6a5d6215074e2da2c2bc71f4 100644 (file)
@@ -106,7 +106,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 
 /* Update the connection delivery information and generate a rate sample. */
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
-                 struct rate_sample *rs)
+                 bool is_sack_reneg, struct rate_sample *rs)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        u32 snd_us, ack_us;
@@ -124,8 +124,12 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
 
        rs->acked_sacked = delivered;   /* freshly ACKed or SACKed */
        rs->losses = lost;              /* freshly marked lost */
-       /* Return an invalid sample if no timing information is available. */
-       if (!rs->prior_mstamp) {
+       /* Return an invalid sample if no timing information is available or
+        * in recovery from loss with SACK reneging. Rate samples taken during
+        * a SACK reneging event may overestimate bw by including packets that
+        * were SACKed before the reneg.
+        */
+       if (!rs->prior_mstamp || is_sack_reneg) {
                rs->delivered = -1;
                rs->interval_us = -1;
                return;
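Aside, not part of the patch: tp->is_sack_reneg is set in tcp_enter_loss() when the peer discards SACKed data and cleared once the connection returns to TCP_CA_Open; tcp_ack() snapshots it before ACK processing (which may clear it) and passes the snapshot here, so the sample for this ACK reflects the state its data was delivered under. Caller-side sketch, using only code from this series:

        bool is_sack_reneg = tp->is_sack_reneg;   /* snapshot before processing */
        ...
        tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
        /* when set, the sample is marked invalid (rs->delivered = -1,
         * rs->interval_us = -1), so rate-based CC such as BBR skips it */
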
index d3ea89020c69c17189f6a5eefb28e92bd97ac2e1..3a81720ac0c40877386e37c99f4f321ab4127fa4 100644 (file)
@@ -55,7 +55,8 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
         * to queuing or delayed ACKs.
         */
        reo_wnd = 1000;
-       if ((tp->rack.reord || !tp->lost_out) && min_rtt != ~0U) {
+       if ((tp->rack.reord || inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery) &&
+           min_rtt != ~0U) {
                reo_wnd = max((min_rtt >> 2) * tp->rack.reo_wnd_steps, reo_wnd);
                reo_wnd = min(reo_wnd, tp->srtt_us >> 3);
        }
@@ -79,12 +80,12 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
                 */
                remaining = tp->rack.rtt_us + reo_wnd -
                            tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
-               if (remaining < 0) {
+               if (remaining <= 0) {
                        tcp_rack_mark_skb_lost(sk, skb);
                        list_del_init(&skb->tcp_tsorted_anchor);
                } else {
-                       /* Record maximum wait time (+1 to avoid 0) */
-                       *reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);
+                       /* Record maximum wait time */
+                       *reo_timeout = max_t(u32, *reo_timeout, remaining);
                }
        }
 }
@@ -116,13 +117,8 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
 {
        u32 rtt_us;
 
-       if (tp->rack.mstamp &&
-           !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
-                                end_seq, tp->rack.end_seq))
-               return;
-
        rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
-       if (sacked & TCPCB_RETRANS) {
+       if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
                /* If the sacked packet was retransmitted, it's ambiguous
                 * whether the retransmission or the original (or the prior
                 * retransmission) was sacked.
@@ -133,13 +129,15 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                 * so it's at least one RTT (i.e., retransmission is at least
                 * an RTT later).
                 */
-               if (rtt_us < tcp_min_rtt(tp))
-                       return;
+               return;
        }
-       tp->rack.rtt_us = rtt_us;
-       tp->rack.mstamp = xmit_time;
-       tp->rack.end_seq = end_seq;
        tp->rack.advanced = 1;
+       tp->rack.rtt_us = rtt_us;
+       if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
+                               end_seq, tp->rack.end_seq)) {
+               tp->rack.mstamp = xmit_time;
+               tp->rack.end_seq = end_seq;
+       }
 }
 
 /* We have waited long enough to accommodate reordering. Mark the expired
index 16df6dd44b988a128d97df3a7953437499a216e8..968fda1983762e6d7c078a28ccfcbd9066788daf 100644 (file)
@@ -264,6 +264,7 @@ void tcp_delack_timer_handler(struct sock *sk)
                        icsk->icsk_ack.pingpong = 0;
                        icsk->icsk_ack.ato      = TCP_ATO_MIN;
                }
+               tcp_mstamp_refresh(tcp_sk(sk));
                tcp_send_ack(sk);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }
@@ -632,6 +633,7 @@ static void tcp_keepalive_timer (struct timer_list *t)
                goto out;
        }
 
+       tcp_mstamp_refresh(tp);
        if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
                if (tp->linger2 >= 0) {
                        const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
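Aside, not part of the patch: tp->tcp_mstamp is a cached microsecond clock that is normally refreshed on the receive path; the delayed-ACK and keepalive handlers above run from timers, so they now refresh it themselves before any tcp_stamp_us_delta() arithmetic can see a stale value. The same one-line fix appears in tcp_rcv_space_adjust() earlier in this series:

        tcp_mstamp_refresh(tp);                      /* update the cached clock */
        time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
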
index a0ae1c9d37dfc9712da564a084e6191d56ab48a1..f49bd7897e95f15a381e4700660991f2d3c3fed4 100644 (file)
@@ -188,7 +188,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp);
 static void addrconf_dad_work(struct work_struct *w);
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
 static void addrconf_dad_run(struct inet6_dev *idev);
-static void addrconf_rs_timer(unsigned long data);
+static void addrconf_rs_timer(struct timer_list *t);
 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
 
@@ -388,8 +388,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
        rwlock_init(&ndev->lock);
        ndev->dev = dev;
        INIT_LIST_HEAD(&ndev->addr_list);
-       setup_timer(&ndev->rs_timer, addrconf_rs_timer,
-                   (unsigned long)ndev);
+       timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
        memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
 
        if (ndev->cnf.stable_secret.initialized)
@@ -3741,9 +3740,9 @@ restart:
        return 0;
 }
 
-static void addrconf_rs_timer(unsigned long data)
+static void addrconf_rs_timer(struct timer_list *t)
 {
-       struct inet6_dev *idev = (struct inet6_dev *)data;
+       struct inet6_dev *idev = from_timer(idev, t, rs_timer);
        struct net_device *dev = idev->dev;
        struct in6_addr lladdr;
 
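Aside, not part of the patch: this is the first of several identical timer-API conversions in this series (ip6_fib, ip6_flowlabel, ip6mr, mcast, ncsi and nf_conntrack_expect all follow). The callback now receives the timer itself and recovers its containing structure with from_timer() instead of being handed an opaque unsigned long. Generic shape of the conversion, with foo/my_timer/foo_timeout as placeholder names:

        /* before */
        setup_timer(&foo->my_timer, foo_timeout, (unsigned long)foo);
        static void foo_timeout(unsigned long data)
        {
                struct foo *foo = (struct foo *)data;
                ...
        }

        /* after */
        timer_setup(&foo->my_timer, foo_timeout, 0);
        static void foo_timeout(struct timer_list *t)
        {
                struct foo *foo = from_timer(foo, t, my_timer);
                ...
        }
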
index c26f71234b9c01a82ec9d40423ee28957468ed46..c9441ca4539936486291147a47a84ef1b2ecf095 100644 (file)
@@ -210,7 +210,6 @@ lookup_protocol:
        np->mcast_hops  = IPV6_DEFAULT_MCASTHOPS;
        np->mc_loop     = 1;
        np->pmtudisc    = IPV6_PMTUDISC_WANT;
-       np->autoflowlabel = ip6_default_np_autolabel(net);
        np->repflow     = net->ipv6.sysctl.flowlabel_reflect;
        sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
 
index 2e2804f5823e4ee0baeb42e0d7fdfce310397950..f5285f4e1d08acb60d42fb6fd10a0c38a239324f 100644 (file)
@@ -70,7 +70,7 @@ static int fib6_walk_continue(struct fib6_walker *w);
  *     result of redirects, path MTU changes, etc.
  */
 
-static void fib6_gc_timer_cb(unsigned long arg);
+static void fib6_gc_timer_cb(struct timer_list *t);
 
 #define FOR_WALKERS(net, w) \
        list_for_each_entry(w, &(net)->ipv6.fib6_walkers, lh)
@@ -2026,9 +2026,11 @@ void fib6_run_gc(unsigned long expires, struct net *net, bool force)
        spin_unlock_bh(&net->ipv6.fib6_gc_lock);
 }
 
-static void fib6_gc_timer_cb(unsigned long arg)
+static void fib6_gc_timer_cb(struct timer_list *t)
 {
-       fib6_run_gc(0, (struct net *)arg, true);
+       struct net *arg = from_timer(arg, t, ipv6.ip6_fib_timer);
+
+       fib6_run_gc(0, arg, true);
 }
 
 static int __net_init fib6_net_init(struct net *net)
@@ -2043,7 +2045,7 @@ static int __net_init fib6_net_init(struct net *net)
        spin_lock_init(&net->ipv6.fib6_gc_lock);
        rwlock_init(&net->ipv6.fib6_walker_lock);
        INIT_LIST_HEAD(&net->ipv6.fib6_walkers);
-       setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);
+       timer_setup(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, 0);
 
        net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
        if (!net->ipv6.rt6_stats)
index 9f2e73c71768d917ff3ca0cb0e22aed28f710ce5..7f59c8fabeeb95e10e8315e7bb3363300469b77e 100644 (file)
@@ -46,7 +46,7 @@
 static atomic_t fl_size = ATOMIC_INIT(0);
 static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];
 
-static void ip6_fl_gc(unsigned long dummy);
+static void ip6_fl_gc(struct timer_list *unused);
 static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc);
 
 /* FL hash table lock: it protects only of GC */
@@ -127,7 +127,7 @@ static void fl_release(struct ip6_flowlabel *fl)
        spin_unlock_bh(&ip6_fl_lock);
 }
 
-static void ip6_fl_gc(unsigned long dummy)
+static void ip6_fl_gc(struct timer_list *unused)
 {
        int i;
        unsigned long now = jiffies;
index 4cfd8e0696fe77f6d7af7ca3579a2418aef972f6..772695960890893f9ab7862cf64728b783f5bb96 100644 (file)
@@ -1014,6 +1014,36 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
        eth_random_addr(dev->perm_addr);
 }
 
+#define GRE6_FEATURES (NETIF_F_SG |            \
+                      NETIF_F_FRAGLIST |       \
+                      NETIF_F_HIGHDMA |        \
+                      NETIF_F_HW_CSUM)
+
+static void ip6gre_tnl_init_features(struct net_device *dev)
+{
+       struct ip6_tnl *nt = netdev_priv(dev);
+
+       dev->features           |= GRE6_FEATURES;
+       dev->hw_features        |= GRE6_FEATURES;
+
+       if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
+               /* TCP offload with GRE SEQ is not supported, nor
+                * can we support 2 levels of outer headers requiring
+                * an update.
+                */
+               if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
+                   nt->encap.type == TUNNEL_ENCAP_NONE) {
+                       dev->features    |= NETIF_F_GSO_SOFTWARE;
+                       dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+               }
+
+               /* Can use a lockless transmit, unless we generate
+                * output sequences
+                */
+               dev->features |= NETIF_F_LLTX;
+       }
+}
+
 static int ip6gre_tunnel_init_common(struct net_device *dev)
 {
        struct ip6_tnl *tunnel;
@@ -1048,6 +1078,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
        if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                dev->mtu -= 8;
 
+       ip6gre_tnl_init_features(dev);
+
        return 0;
 }
 
@@ -1298,16 +1330,12 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
        .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
-#define GRE6_FEATURES (NETIF_F_SG |            \
-                      NETIF_F_FRAGLIST |       \
-                      NETIF_F_HIGHDMA |                \
-                      NETIF_F_HW_CSUM)
-
 static void ip6gre_tap_setup(struct net_device *dev)
 {
 
        ether_setup(dev);
 
+       dev->max_mtu = 0;
        dev->netdev_ops = &ip6gre_tap_netdev_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;
@@ -1382,26 +1410,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
        nt->net = dev_net(dev);
        ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
 
-       dev->features           |= GRE6_FEATURES;
-       dev->hw_features        |= GRE6_FEATURES;
-
-       if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
-               /* TCP offload with GRE SEQ is not supported, nor
-                * can we support 2 levels of outer headers requiring
-                * an update.
-                */
-               if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
-                   (nt->encap.type == TUNNEL_ENCAP_NONE)) {
-                       dev->features    |= NETIF_F_GSO_SOFTWARE;
-                       dev->hw_features |= NETIF_F_GSO_SOFTWARE;
-               }
-
-               /* Can use a lockless transmit, unless we generate
-                * output sequences
-                */
-               dev->features |= NETIF_F_LLTX;
-       }
-
        err = register_netdevice(dev);
        if (err)
                goto out;
index 5110a418cc4d0c1040506394460cb482698d8c15..f7dd51c4231415fd1321fd431194d896ea2d1689 100644 (file)
@@ -166,6 +166,14 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
+static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
+{
+       if (!np->autoflowlabel_set)
+               return ip6_default_np_autolabel(net);
+       else
+               return np->autoflowlabel;
+}
+
 /*
  * xmit an sk_buff (used by TCP, SCTP and DCCP)
  * Note : socket lock is not held for SYNACK packets, but might be modified
@@ -230,7 +238,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
                hlimit = ip6_dst_hoplimit(dst);
 
        ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
-                                                    np->autoflowlabel, fl6));
+                               ip6_autoflowlabel(net, np), fl6));
 
        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
@@ -1626,7 +1634,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
 
        ip6_flow_hdr(hdr, v6_cork->tclass,
                     ip6_make_flowlabel(net, skb, fl6->flowlabel,
-                                       np->autoflowlabel, fl6));
+                                       ip6_autoflowlabel(net, np), fl6));
        hdr->hop_limit = v6_cork->hop_limit;
        hdr->nexthdr = proto;
        hdr->saddr = fl6->saddr;
index 3d3092adf1d2d5962b5fc87bdf08419762d1b1ee..931c38f6ff4a42fb17cf129cf6035706a24176dc 100644 (file)
@@ -904,7 +904,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
                if (t->parms.collect_md) {
                        tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
                        if (!tun_dst)
-                               return 0;
+                               goto drop;
                }
                ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
                                    log_ecn_error);
@@ -1123,8 +1123,13 @@ route_lookup:
                max_headroom += 8;
                mtu -= 8;
        }
-       if (mtu < IPV6_MIN_MTU)
-               mtu = IPV6_MIN_MTU;
+       if (skb->protocol == htons(ETH_P_IPV6)) {
+               if (mtu < IPV6_MIN_MTU)
+                       mtu = IPV6_MIN_MTU;
+       } else if (mtu < 576) {
+               mtu = 576;
+       }
+
        if (skb_dst(skb) && !t->parms.collect_md)
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
        if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
index 9c24b85949c1060011774d9ff743b112206f186d..a2e1a864eb4695ee4323ce2f85f2a560efd73ee4 100644 (file)
@@ -120,7 +120,7 @@ static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt);
 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
                               struct netlink_callback *cb);
 static void mroute_clean_tables(struct mr6_table *mrt, bool all);
-static void ipmr_expire_process(unsigned long arg);
+static void ipmr_expire_process(struct timer_list *t);
 
 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
 #define ip6mr_for_each_table(mrt, net) \
@@ -320,8 +320,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
 
        INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
 
-       setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
-                   (unsigned long)mrt);
+       timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);
 
 #ifdef CONFIG_IPV6_PIMSM_V2
        mrt->mroute_reg_vif_num = -1;
@@ -888,9 +887,9 @@ static void ipmr_do_expire_process(struct mr6_table *mrt)
                mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
 }
 
-static void ipmr_expire_process(unsigned long arg)
+static void ipmr_expire_process(struct timer_list *t)
 {
-       struct mr6_table *mrt = (struct mr6_table *)arg;
+       struct mr6_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
 
        if (!spin_trylock(&mfc_unres_lock)) {
                mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
index b9404feabd7857fe0873fbc4f346d281f3600807..2d4680e0376f41deee6c999eadaf9409353e0b4a 100644 (file)
@@ -886,6 +886,7 @@ pref_skip_coa:
                break;
        case IPV6_AUTOFLOWLABEL:
                np->autoflowlabel = valbool;
+               np->autoflowlabel_set = 1;
                retv = 0;
                break;
        case IPV6_RECVFRAGSIZE:
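Aside, not part of the patch: autoflowlabel_set records that the application chose a value explicitly. Together with the ip6_autoflowlabel() helper added in ip6_output.c above, sockets that never set the option keep following the per-namespace default (ip6_default_np_autolabel()), while sockets that did set it keep their own value even if the sysctl changes later. Minimal userspace sketch (fd is an existing IPv6 socket; IPV6_AUTOFLOWLABEL may need <linux/in6.h> on older libcs):

        int on = 1;   /* or 0 to opt out regardless of the sysctl default */
        if (setsockopt(fd, IPPROTO_IPV6, IPV6_AUTOFLOWLABEL, &on, sizeof(on)) < 0)
                perror("setsockopt(IPV6_AUTOFLOWLABEL)");
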
index 12b7c27ce5ce917bfb49ec8a56502c02ee02edf6..844642682b8363c4c32d329ed92474f834a59618 100644 (file)
@@ -75,10 +75,10 @@ static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
 
 static void igmp6_join_group(struct ifmcaddr6 *ma);
 static void igmp6_leave_group(struct ifmcaddr6 *ma);
-static void igmp6_timer_handler(unsigned long data);
+static void igmp6_timer_handler(struct timer_list *t);
 
-static void mld_gq_timer_expire(unsigned long data);
-static void mld_ifc_timer_expire(unsigned long data);
+static void mld_gq_timer_expire(struct timer_list *t);
+static void mld_ifc_timer_expire(struct timer_list *t);
 static void mld_ifc_event(struct inet6_dev *idev);
 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
 static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
@@ -839,7 +839,7 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
        if (!mc)
                return NULL;
 
-       setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
+       timer_setup(&mc->mca_timer, igmp6_timer_handler, 0);
 
        mc->mca_addr = *addr;
        mc->idev = idev; /* reference taken by caller */
@@ -1682,16 +1682,16 @@ static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
 }
 
 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
-       int type, struct mld2_grec **ppgr)
+       int type, struct mld2_grec **ppgr, unsigned int mtu)
 {
-       struct net_device *dev = pmc->idev->dev;
        struct mld2_report *pmr;
        struct mld2_grec *pgr;
 
-       if (!skb)
-               skb = mld_newpack(pmc->idev, dev->mtu);
-       if (!skb)
-               return NULL;
+       if (!skb) {
+               skb = mld_newpack(pmc->idev, mtu);
+               if (!skb)
+                       return NULL;
+       }
        pgr = skb_put(skb, sizeof(struct mld2_grec));
        pgr->grec_type = type;
        pgr->grec_auxwords = 0;
@@ -1714,10 +1714,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
        struct mld2_grec *pgr = NULL;
        struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
        int scount, stotal, first, isquery, truncate;
+       unsigned int mtu;
 
        if (pmc->mca_flags & MAF_NOREPORT)
                return skb;
 
+       mtu = READ_ONCE(dev->mtu);
+       if (mtu < IPV6_MIN_MTU)
+               return skb;
+
        isquery = type == MLD2_MODE_IS_INCLUDE ||
                  type == MLD2_MODE_IS_EXCLUDE;
        truncate = type == MLD2_MODE_IS_EXCLUDE ||
@@ -1738,7 +1743,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
                    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
                        if (skb)
                                mld_sendpack(skb);
-                       skb = mld_newpack(idev, dev->mtu);
+                       skb = mld_newpack(idev, mtu);
                }
        }
        first = 1;
@@ -1774,12 +1779,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
                                pgr->grec_nsrcs = htons(scount);
                        if (skb)
                                mld_sendpack(skb);
-                       skb = mld_newpack(idev, dev->mtu);
+                       skb = mld_newpack(idev, mtu);
                        first = 1;
                        scount = 0;
                }
                if (first) {
-                       skb = add_grhead(skb, pmc, type, &pgr);
+                       skb = add_grhead(skb, pmc, type, &pgr, mtu);
                        first = 0;
                }
                if (!skb)
@@ -1814,7 +1819,7 @@ empty_source:
                                mld_sendpack(skb);
                                skb = NULL; /* add_grhead will get a new one */
                        }
-                       skb = add_grhead(skb, pmc, type, &pgr);
+                       skb = add_grhead(skb, pmc, type, &pgr, mtu);
                }
        }
        if (pgr)
@@ -2083,9 +2088,9 @@ void ipv6_mc_dad_complete(struct inet6_dev *idev)
        }
 }
 
-static void mld_dad_timer_expire(unsigned long data)
+static void mld_dad_timer_expire(struct timer_list *t)
 {
-       struct inet6_dev *idev = (struct inet6_dev *)data;
+       struct inet6_dev *idev = from_timer(idev, t, mc_dad_timer);
 
        mld_send_initial_cr(idev);
        if (idev->mc_dad_count) {
@@ -2432,18 +2437,18 @@ static void igmp6_leave_group(struct ifmcaddr6 *ma)
        }
 }
 
-static void mld_gq_timer_expire(unsigned long data)
+static void mld_gq_timer_expire(struct timer_list *t)
 {
-       struct inet6_dev *idev = (struct inet6_dev *)data;
+       struct inet6_dev *idev = from_timer(idev, t, mc_gq_timer);
 
        idev->mc_gq_running = 0;
        mld_send_report(idev, NULL);
        in6_dev_put(idev);
 }
 
-static void mld_ifc_timer_expire(unsigned long data)
+static void mld_ifc_timer_expire(struct timer_list *t)
 {
-       struct inet6_dev *idev = (struct inet6_dev *)data;
+       struct inet6_dev *idev = from_timer(idev, t, mc_ifc_timer);
 
        mld_send_cr(idev);
        if (idev->mc_ifc_count) {
@@ -2462,9 +2467,9 @@ static void mld_ifc_event(struct inet6_dev *idev)
        mld_ifc_start_timer(idev, 1);
 }
 
-static void igmp6_timer_handler(unsigned long data)
+static void igmp6_timer_handler(struct timer_list *t)
 {
-       struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;
+       struct ifmcaddr6 *ma = from_timer(ma, t, mca_timer);
 
        if (mld_in_v1_mode(ma->idev))
                igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
@@ -2552,14 +2557,11 @@ void ipv6_mc_init_dev(struct inet6_dev *idev)
        write_lock_bh(&idev->lock);
        spin_lock_init(&idev->mc_lock);
        idev->mc_gq_running = 0;
-       setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
-                       (unsigned long)idev);
+       timer_setup(&idev->mc_gq_timer, mld_gq_timer_expire, 0);
        idev->mc_tomb = NULL;
        idev->mc_ifc_count = 0;
-       setup_timer(&idev->mc_ifc_timer, mld_ifc_timer_expire,
-                       (unsigned long)idev);
-       setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire,
-                   (unsigned long)idev);
+       timer_setup(&idev->mc_ifc_timer, mld_ifc_timer_expire, 0);
+       timer_setup(&idev->mc_dad_timer, mld_dad_timer_expire, 0);
        ipv6_mc_reset(idev);
        write_unlock_bh(&idev->lock);
 }
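Aside, not part of the patch: dev->mtu can change while a report is being assembled, so add_grec()/add_grhead() now take a single READ_ONCE() snapshot, bail out if it is below IPV6_MIN_MTU (1280), and hand the same value to every mld_newpack() call instead of re-reading dev->mtu per packet:

        unsigned int mtu = READ_ONCE(dev->mtu);   /* one consistent snapshot */
        if (mtu < IPV6_MIN_MTU)
                return skb;
        ...
        skb = mld_newpack(idev, mtu);             /* same mtu for every packet */
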
index f06e25065a342e361d7ae68ae1d60304b3f43f39..1d7ae93663351297395208f2c9a65bd5fba236e5 100644 (file)
@@ -458,7 +458,6 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                        if (!xt_find_jump_offset(offsets, newpos,
                                                                 newinfo->number))
                                                return 0;
-                                       e = entry0 + newpos;
                                } else {
                                        /* ... this is a fallthru */
                                        newpos = pos + e->next_offset;
index 2b1a15846f9ac1f40d45aef52af6aab92d515408..92c0047e7e33dc5925054c41143fe200db06f125 100644 (file)
@@ -33,13 +33,19 @@ static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par)
 
        if (range->flags & NF_NAT_RANGE_MAP_IPS)
                return -EINVAL;
-       return 0;
+       return nf_ct_netns_get(par->net, par->family);
+}
+
+static void masquerade_tg6_destroy(const struct xt_tgdtor_param *par)
+{
+       nf_ct_netns_put(par->net, par->family);
 }
 
 static struct xt_target masquerade_tg6_reg __read_mostly = {
        .name           = "MASQUERADE",
        .family         = NFPROTO_IPV6,
        .checkentry     = masquerade_tg6_checkentry,
+       .destroy        = masquerade_tg6_destroy,
        .target         = masquerade_tg6,
        .targetsize     = sizeof(struct nf_nat_range),
        .table          = "nat",
index 7a8d1500d374b4089e623ed2b20d68110cff498e..0458b761f3c56ce765841e0a3a7e5e78f90b95eb 100644 (file)
@@ -2336,6 +2336,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        }
 
        rt->dst.flags |= DST_HOST;
+       rt->dst.input = ip6_input;
        rt->dst.output  = ip6_output;
        rt->rt6i_gateway  = fl6->daddr;
        rt->rt6i_dst.addr = fl6->daddr;
@@ -4297,19 +4298,13 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                if (!ipv6_addr_any(&fl6.saddr))
                        flags |= RT6_LOOKUP_F_HAS_SADDR;
 
-               if (!fibmatch)
-                       dst = ip6_route_input_lookup(net, dev, &fl6, flags);
-               else
-                       dst = ip6_route_lookup(net, &fl6, 0);
+               dst = ip6_route_input_lookup(net, dev, &fl6, flags);
 
                rcu_read_unlock();
        } else {
                fl6.flowi6_oif = oif;
 
-               if (!fibmatch)
-                       dst = ip6_route_output(net, NULL, &fl6);
-               else
-                       dst = ip6_route_lookup(net, &fl6, 0);
+               dst = ip6_route_output(net, NULL, &fl6);
        }
 
 
@@ -4326,6 +4321,15 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                goto errout;
        }
 
+       if (fibmatch && rt->dst.from) {
+               struct rt6_info *ort = container_of(rt->dst.from,
+                                                   struct rt6_info, dst);
+
+               dst_hold(&ort->dst);
+               ip6_rt_put(rt);
+               rt = ort;
+       }
+
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb) {
                ip6_rt_put(rt);
index d60ddcb0bfe240d5351089ed43464683e68c1db8..d7dc23c1b2ca32fb554cccf1fbf50f736a7f6f4c 100644 (file)
@@ -1098,6 +1098,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
        ipip6_tunnel_link(sitn, t);
        t->parms.iph.ttl = p->iph.ttl;
        t->parms.iph.tos = p->iph.tos;
+       t->parms.iph.frag_off = p->iph.frag_off;
        if (t->parms.link != p->link || t->fwmark != fwmark) {
                t->parms.link = p->link;
                t->fwmark = fwmark;
index 6bb98c93edfe2ed2f16fe5229605f8108cfc7f9a..7178476b3d2f64f01832fe3292c7dec849ec2265 100644 (file)
@@ -994,7 +994,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                        req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
                        tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
                        req->ts_recent, sk->sk_bound_dev_if,
-                       tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
+                       tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
                        0, 0);
 }
 
@@ -1454,7 +1454,6 @@ process:
                struct sock *nsk;
 
                sk = req->rsk_listener;
-               tcp_v6_fill_cb(skb, hdr, th);
                if (tcp_v6_inbound_md5_hash(sk, skb)) {
                        sk_drops_add(sk, skb);
                        reqsk_put(req);
@@ -1467,8 +1466,12 @@ process:
                sock_hold(sk);
                refcounted = true;
                nsk = NULL;
-               if (!tcp_filter(sk, skb))
+               if (!tcp_filter(sk, skb)) {
+                       th = (const struct tcphdr *)skb->data;
+                       hdr = ipv6_hdr(skb);
+                       tcp_v6_fill_cb(skb, hdr, th);
                        nsk = tcp_check_req(sk, skb, req, false);
+               }
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_and_relse;
@@ -1492,8 +1495,6 @@ process:
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
 
-       tcp_v6_fill_cb(skb, hdr, th);
-
        if (tcp_v6_inbound_md5_hash(sk, skb))
                goto discard_and_relse;
 
@@ -1501,6 +1502,7 @@ process:
                goto discard_and_relse;
        th = (const struct tcphdr *)skb->data;
        hdr = ipv6_hdr(skb);
+       tcp_v6_fill_cb(skb, hdr, th);
 
        skb->dev = NULL;
 
@@ -1590,7 +1592,6 @@ do_time_wait:
                tcp_v6_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
-               tcp_v6_restore_cb(skb);
                tcp_v6_send_reset(sk, skb);
                inet_twsk_deschedule_put(inet_twsk(sk));
                goto discard_it;
index 0b750a22c4b9bf92e079fcd4a694fccf81f00a8e..d4e98f20fc2ac1c55a1f1db67498af900e0842ea 100644 (file)
@@ -1625,60 +1625,30 @@ static struct proto kcm_proto = {
 };
 
 /* Clone a kcm socket. */
-static int kcm_clone(struct socket *osock, struct kcm_clone *info,
-                    struct socket **newsockp)
+static struct file *kcm_clone(struct socket *osock)
 {
        struct socket *newsock;
        struct sock *newsk;
-       struct file *newfile;
-       int err, newfd;
 
-       err = -ENFILE;
        newsock = sock_alloc();
        if (!newsock)
-               goto out;
+               return ERR_PTR(-ENFILE);
 
        newsock->type = osock->type;
        newsock->ops = osock->ops;
 
        __module_get(newsock->ops->owner);
 
-       newfd = get_unused_fd_flags(0);
-       if (unlikely(newfd < 0)) {
-               err = newfd;
-               goto out_fd_fail;
-       }
-
-       newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
-       if (IS_ERR(newfile)) {
-               err = PTR_ERR(newfile);
-               goto out_sock_alloc_fail;
-       }
-
        newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
                         &kcm_proto, true);
        if (!newsk) {
-               err = -ENOMEM;
-               goto out_sk_alloc_fail;
+               sock_release(newsock);
+               return ERR_PTR(-ENOMEM);
        }
-
        sock_init_data(newsock, newsk);
        init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
 
-       fd_install(newfd, newfile);
-       *newsockp = newsock;
-       info->fd = newfd;
-
-       return 0;
-
-out_sk_alloc_fail:
-       fput(newfile);
-out_sock_alloc_fail:
-       put_unused_fd(newfd);
-out_fd_fail:
-       sock_release(newsock);
-out:
-       return err;
+       return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
 }
 
 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
@@ -1708,17 +1678,25 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        }
        case SIOCKCMCLONE: {
                struct kcm_clone info;
-               struct socket *newsock = NULL;
-
-               err = kcm_clone(sock, &info, &newsock);
-               if (!err) {
-                       if (copy_to_user((void __user *)arg, &info,
-                                        sizeof(info))) {
-                               err = -EFAULT;
-                               sys_close(info.fd);
-                       }
-               }
+               struct file *file;
+
+               info.fd = get_unused_fd_flags(0);
+               if (unlikely(info.fd < 0))
+                       return info.fd;
 
+               file = kcm_clone(sock);
+               if (IS_ERR(file)) {
+                       put_unused_fd(info.fd);
+                       return PTR_ERR(file);
+               }
+               if (copy_to_user((void __user *)arg, &info,
+                                sizeof(info))) {
+                       put_unused_fd(info.fd);
+                       fput(file);
+                       return -EFAULT;
+               }
+               fd_install(info.fd, file);
+               err = 0;
                break;
        }
        default:
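Aside, not part of the patch: the rewritten SIOCKCMCLONE path keeps the new descriptor invisible to userspace until nothing can fail anymore. The fd number is only reserved at first; creating the file and the copy_to_user() can still fail and are unwound with put_unused_fd() (plus fput() once the file exists); fd_install() runs last because an installed fd cannot be taken back. Ordering sketch, using only calls from the hunk above:

        info.fd = get_unused_fd_flags(0);      /* 1. reserve a number           */
        file = kcm_clone(sock);                /* 2. may fail: put_unused_fd()  */
        copy_to_user((void __user *)arg, &info, sizeof(info));
                                               /* 3. may fail: put_unused_fd()
                                                *    and fput()                 */
        fd_install(info.fd, file);             /* 4. commit: fd becomes live    */
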
index 8bb469cb3abeb239a9b89e997dd1104a38c455aa..5d4ae01951b562c602fed13787719edfaad3c635 100644 (file)
@@ -42,7 +42,7 @@ void lapb_start_t1timer(struct lapb_cb *lapb)
 {
        del_timer(&lapb->t1timer);
 
-       lapb->t1timer.function = (TIMER_FUNC_TYPE)lapb_t1timer_expiry;
+       lapb->t1timer.function = lapb_t1timer_expiry;
        lapb->t1timer.expires  = jiffies + lapb->t1;
 
        add_timer(&lapb->t1timer);
@@ -52,7 +52,7 @@ void lapb_start_t2timer(struct lapb_cb *lapb)
 {
        del_timer(&lapb->t2timer);
 
-       lapb->t2timer.function = (TIMER_FUNC_TYPE)lapb_t2timer_expiry;
+       lapb->t2timer.function = lapb_t2timer_expiry;
        lapb->t2timer.expires  = jiffies + lapb->t2;
 
        add_timer(&lapb->t2timer);
index 167f83b853e6bd391256e15ef99439b792e18cdc..1621b6ab17ba45e63f79e85a42563781b5536dc2 100644 (file)
@@ -291,16 +291,15 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
        int i;
 
        mutex_lock(&sta->ampdu_mlme.mtx);
-       for (i = 0; i <  IEEE80211_NUM_TIDS; i++) {
+       for (i = 0; i <  IEEE80211_NUM_TIDS; i++)
                ___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
                                                WLAN_REASON_QSTA_LEAVE_QBSS,
                                                reason != AGG_STOP_DESTROY_STA &&
                                                reason != AGG_STOP_PEER_REQUEST);
-       }
-       mutex_unlock(&sta->ampdu_mlme.mtx);
 
        for (i = 0; i <  IEEE80211_NUM_TIDS; i++)
                ___ieee80211_stop_tx_ba_session(sta, i, reason);
+       mutex_unlock(&sta->ampdu_mlme.mtx);
 
        /* stopping might queue the work again - so cancel only afterwards */
        cancel_work_sync(&sta->ampdu_mlme.work);
index a2b904a718c6124d133aa94a59b86448f666b739..c989211bbabc6475d805f6adf1957157b2e6727c 100644 (file)
@@ -184,9 +184,9 @@ report:
        nd->handler(nd);
 }
 
-static void ncsi_channel_monitor(unsigned long data)
+static void ncsi_channel_monitor(struct timer_list *t)
 {
-       struct ncsi_channel *nc = (struct ncsi_channel *)data;
+       struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
        struct ncsi_package *np = nc->package;
        struct ncsi_dev_priv *ndp = np->ndp;
        struct ncsi_channel_mode *ncm;
@@ -313,8 +313,7 @@ struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
        nc->package = np;
        nc->state = NCSI_CHANNEL_INACTIVE;
        nc->monitor.enabled = false;
-       setup_timer(&nc->monitor.timer,
-                   ncsi_channel_monitor, (unsigned long)nc);
+       timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
        spin_lock_init(&nc->lock);
        INIT_LIST_HEAD(&nc->link);
        for (index = 0; index < NCSI_CAP_MAX; index++)
@@ -529,9 +528,9 @@ struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
        return NULL;
 }
 
-static void ncsi_request_timeout(unsigned long data)
+static void ncsi_request_timeout(struct timer_list *t)
 {
-       struct ncsi_request *nr = (struct ncsi_request *)data;
+       struct ncsi_request *nr = from_timer(nr, t, timer);
        struct ncsi_dev_priv *ndp = nr->ndp;
        unsigned long flags;
 
@@ -1577,9 +1576,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
        for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
                ndp->requests[i].id = i;
                ndp->requests[i].ndp = ndp;
-               setup_timer(&ndp->requests[i].timer,
-                           ncsi_request_timeout,
-                           (unsigned long)&ndp->requests[i]);
+               timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
        }
 
        spin_lock_irqsave(&ncsi_dev_lock, flags);
index 64778f9a85481fd69faff2fcd0eaa64031ab06d9..d6748a8a79c5666b11d963a8755df557d8d77e59 100644 (file)
@@ -67,9 +67,9 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
 }
 EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
 
-static void nf_ct_expectation_timed_out(unsigned long ul_expect)
+static void nf_ct_expectation_timed_out(struct timer_list *t)
 {
-       struct nf_conntrack_expect *exp = (void *)ul_expect;
+       struct nf_conntrack_expect *exp = from_timer(exp, t, timeout);
 
        spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_unlink_expect(exp);
@@ -368,8 +368,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
        /* two references : one for hash insert, one for the timer */
        refcount_add(2, &exp->use);
 
-       setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
-                   (unsigned long)exp);
+       timer_setup(&exp->timeout, nf_ct_expectation_timed_out, 0);
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
index cf1bf2605c1027207a86889f93da667d8b2313b9..dc6347342e34c499eaef5403f63034b137ad14e3 100644 (file)
@@ -103,7 +103,6 @@ struct bitstr {
 #define INC_BIT(bs) if((++(bs)->bit)>7){(bs)->cur++;(bs)->bit=0;}
 #define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;}
 #define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;}
-#define CHECK_BOUND(bs,n) if((bs)->cur+(n)>(bs)->end)return(H323_ERROR_BOUND)
 static unsigned int get_len(struct bitstr *bs);
 static unsigned int get_bit(struct bitstr *bs);
 static unsigned int get_bits(struct bitstr *bs, unsigned int b);
@@ -165,6 +164,19 @@ static unsigned int get_len(struct bitstr *bs)
        return v;
 }
 
+static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits)
+{
+       bits += bs->bit;
+       bytes += bits / BITS_PER_BYTE;
+       if (bits % BITS_PER_BYTE > 0)
+               bytes++;
+
+       if (*bs->cur + bytes > *bs->end)
+               return 1;
+
+       return 0;
+}
+
 /****************************************************************************/
 static unsigned int get_bit(struct bitstr *bs)
 {
@@ -279,8 +291,8 @@ static int decode_bool(struct bitstr *bs, const struct field_t *f,
        PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
 
        INC_BIT(bs);
-
-       CHECK_BOUND(bs, 0);
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
        return H323_ERROR_NONE;
 }
 
@@ -293,11 +305,14 @@ static int decode_oid(struct bitstr *bs, const struct field_t *f,
        PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
 
        BYTE_ALIGN(bs);
-       CHECK_BOUND(bs, 1);
+       if (nf_h323_error_boundary(bs, 1, 0))
+               return H323_ERROR_BOUND;
+
        len = *bs->cur++;
        bs->cur += len;
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
 
-       CHECK_BOUND(bs, 0);
        return H323_ERROR_NONE;
 }
 
@@ -319,6 +334,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
                bs->cur += 2;
                break;
        case CONS:              /* 64K < Range < 4G */
+               if (nf_h323_error_boundary(bs, 0, 2))
+                       return H323_ERROR_BOUND;
                len = get_bits(bs, 2) + 1;
                BYTE_ALIGN(bs);
                if (base && (f->attr & DECODE)) {       /* timeToLive */
@@ -330,7 +347,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
                break;
        case UNCO:
                BYTE_ALIGN(bs);
-               CHECK_BOUND(bs, 2);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                len = get_len(bs);
                bs->cur += len;
                break;
@@ -341,7 +359,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
 
        PRINT("\n");
 
-       CHECK_BOUND(bs, 0);
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
        return H323_ERROR_NONE;
 }
 
@@ -357,7 +376,8 @@ static int decode_enum(struct bitstr *bs, const struct field_t *f,
                INC_BITS(bs, f->sz);
        }
 
-       CHECK_BOUND(bs, 0);
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
        return H323_ERROR_NONE;
 }
 
@@ -375,12 +395,14 @@ static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
                len = f->lb;
                break;
        case WORD:              /* 2-byte length */
-               CHECK_BOUND(bs, 2);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                len = (*bs->cur++) << 8;
                len += (*bs->cur++) + f->lb;
                break;
        case SEMI:
-               CHECK_BOUND(bs, 2);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                len = get_len(bs);
                break;
        default:
@@ -391,7 +413,8 @@ static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
        bs->cur += len >> 3;
        bs->bit = len & 7;
 
-       CHECK_BOUND(bs, 0);
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
        return H323_ERROR_NONE;
 }
 
@@ -404,12 +427,15 @@ static int decode_numstr(struct bitstr *bs, const struct field_t *f,
        PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
 
        /* 2 <= Range <= 255 */
+       if (nf_h323_error_boundary(bs, 0, f->sz))
+               return H323_ERROR_BOUND;
        len = get_bits(bs, f->sz) + f->lb;
 
        BYTE_ALIGN(bs);
        INC_BITS(bs, (len << 2));
 
-       CHECK_BOUND(bs, 0);
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
        return H323_ERROR_NONE;
 }
 
@@ -440,15 +466,19 @@ static int decode_octstr(struct bitstr *bs, const struct field_t *f,
                break;
        case BYTE:              /* Range == 256 */
                BYTE_ALIGN(bs);
-               CHECK_BOUND(bs, 1);
+               if (nf_h323_error_boundary(bs, 1, 0))
+                       return H323_ERROR_BOUND;
                len = (*bs->cur++) + f->lb;
                break;
        case SEMI:
                BYTE_ALIGN(bs);
-               CHECK_BOUND(bs, 2);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                len = get_len(bs) + f->lb;
                break;
        default:                /* 2 <= Range <= 255 */
+               if (nf_h323_error_boundary(bs, 0, f->sz))
+                       return H323_ERROR_BOUND;
                len = get_bits(bs, f->sz) + f->lb;
                BYTE_ALIGN(bs);
                break;
@@ -458,7 +488,8 @@ static int decode_octstr(struct bitstr *bs, const struct field_t *f,
 
        PRINT("\n");
 
-       CHECK_BOUND(bs, 0);
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
        return H323_ERROR_NONE;
 }
 
@@ -473,10 +504,13 @@ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
        switch (f->sz) {
        case BYTE:              /* Range == 256 */
                BYTE_ALIGN(bs);
-               CHECK_BOUND(bs, 1);
+               if (nf_h323_error_boundary(bs, 1, 0))
+                       return H323_ERROR_BOUND;
                len = (*bs->cur++) + f->lb;
                break;
        default:                /* 2 <= Range <= 255 */
+               if (nf_h323_error_boundary(bs, 0, f->sz))
+                       return H323_ERROR_BOUND;
                len = get_bits(bs, f->sz) + f->lb;
                BYTE_ALIGN(bs);
                break;
@@ -484,7 +518,8 @@ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
 
        bs->cur += len << 1;
 
-       CHECK_BOUND(bs, 0);
+       if (nf_h323_error_boundary(bs, 0, 0))
+               return H323_ERROR_BOUND;
        return H323_ERROR_NONE;
 }
 
@@ -503,9 +538,13 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
        base = (base && (f->attr & DECODE)) ? base + f->offset : NULL;
 
        /* Extensible? */
+       if (nf_h323_error_boundary(bs, 0, 1))
+               return H323_ERROR_BOUND;
        ext = (f->attr & EXT) ? get_bit(bs) : 0;
 
        /* Get fields bitmap */
+       if (nf_h323_error_boundary(bs, 0, f->sz))
+               return H323_ERROR_BOUND;
        bmp = get_bitmap(bs, f->sz);
        if (base)
                *(unsigned int *)base = bmp;
@@ -525,9 +564,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
 
                /* Decode */
                if (son->attr & OPEN) { /* Open field */
-                       CHECK_BOUND(bs, 2);
+                       if (nf_h323_error_boundary(bs, 2, 0))
+                               return H323_ERROR_BOUND;
                        len = get_len(bs);
-                       CHECK_BOUND(bs, len);
+                       if (nf_h323_error_boundary(bs, len, 0))
+                               return H323_ERROR_BOUND;
                        if (!base || !(son->attr & DECODE)) {
                                PRINT("%*.s%s\n", (level + 1) * TAB_SIZE,
                                      " ", son->name);
@@ -555,8 +596,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
                return H323_ERROR_NONE;
 
        /* Get the extension bitmap */
+       if (nf_h323_error_boundary(bs, 0, 7))
+               return H323_ERROR_BOUND;
        bmp2_len = get_bits(bs, 7) + 1;
-       CHECK_BOUND(bs, (bmp2_len + 7) >> 3);
+       if (nf_h323_error_boundary(bs, 0, bmp2_len))
+               return H323_ERROR_BOUND;
        bmp2 = get_bitmap(bs, bmp2_len);
        bmp |= bmp2 >> f->sz;
        if (base)
@@ -567,9 +611,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
        for (opt = 0; opt < bmp2_len; opt++, i++, son++) {
                /* Check Range */
                if (i >= f->ub) {       /* Newer Version? */
-                       CHECK_BOUND(bs, 2);
+                       if (nf_h323_error_boundary(bs, 2, 0))
+                               return H323_ERROR_BOUND;
                        len = get_len(bs);
-                       CHECK_BOUND(bs, len);
+                       if (nf_h323_error_boundary(bs, len, 0))
+                               return H323_ERROR_BOUND;
                        bs->cur += len;
                        continue;
                }
@@ -583,9 +629,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
                if (!((0x80000000 >> opt) & bmp2))      /* Not present */
                        continue;
 
-               CHECK_BOUND(bs, 2);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                len = get_len(bs);
-               CHECK_BOUND(bs, len);
+               if (nf_h323_error_boundary(bs, len, 0))
+                       return H323_ERROR_BOUND;
                if (!base || !(son->attr & DECODE)) {
                        PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
                              son->name);
@@ -623,22 +671,27 @@ static int decode_seqof(struct bitstr *bs, const struct field_t *f,
        switch (f->sz) {
        case BYTE:
                BYTE_ALIGN(bs);
-               CHECK_BOUND(bs, 1);
+               if (nf_h323_error_boundary(bs, 1, 0))
+                       return H323_ERROR_BOUND;
                count = *bs->cur++;
                break;
        case WORD:
                BYTE_ALIGN(bs);
-               CHECK_BOUND(bs, 2);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                count = *bs->cur++;
                count <<= 8;
                count += *bs->cur++;
                break;
        case SEMI:
                BYTE_ALIGN(bs);
-               CHECK_BOUND(bs, 2);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                count = get_len(bs);
                break;
        default:
+               if (nf_h323_error_boundary(bs, 0, f->sz))
+                       return H323_ERROR_BOUND;
                count = get_bits(bs, f->sz);
                break;
        }
@@ -658,8 +711,11 @@ static int decode_seqof(struct bitstr *bs, const struct field_t *f,
        for (i = 0; i < count; i++) {
                if (son->attr & OPEN) {
                        BYTE_ALIGN(bs);
+                       if (nf_h323_error_boundary(bs, 2, 0))
+                               return H323_ERROR_BOUND;
                        len = get_len(bs);
-                       CHECK_BOUND(bs, len);
+                       if (nf_h323_error_boundary(bs, len, 0))
+                               return H323_ERROR_BOUND;
                        if (!base || !(son->attr & DECODE)) {
                                PRINT("%*.s%s\n", (level + 1) * TAB_SIZE,
                                      " ", son->name);
@@ -710,11 +766,17 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
        base = (base && (f->attr & DECODE)) ? base + f->offset : NULL;
 
        /* Decode the choice index number */
+       if (nf_h323_error_boundary(bs, 0, 1))
+               return H323_ERROR_BOUND;
        if ((f->attr & EXT) && get_bit(bs)) {
                ext = 1;
+               if (nf_h323_error_boundary(bs, 0, 7))
+                       return H323_ERROR_BOUND;
                type = get_bits(bs, 7) + f->lb;
        } else {
                ext = 0;
+               if (nf_h323_error_boundary(bs, 0, f->sz))
+                       return H323_ERROR_BOUND;
                type = get_bits(bs, f->sz);
                if (type >= f->lb)
                        return H323_ERROR_RANGE;
@@ -727,8 +789,11 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
        /* Check Range */
        if (type >= f->ub) {    /* Newer version? */
                BYTE_ALIGN(bs);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                len = get_len(bs);
-               CHECK_BOUND(bs, len);
+               if (nf_h323_error_boundary(bs, len, 0))
+                       return H323_ERROR_BOUND;
                bs->cur += len;
                return H323_ERROR_NONE;
        }
@@ -742,8 +807,11 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
 
        if (ext || (son->attr & OPEN)) {
                BYTE_ALIGN(bs);
+               if (nf_h323_error_boundary(bs, 2, 0))
+                       return H323_ERROR_BOUND;
                len = get_len(bs);
-               CHECK_BOUND(bs, len);
+               if (nf_h323_error_boundary(bs, len, 0))
+                       return H323_ERROR_BOUND;
                if (!base || !(son->attr & DECODE)) {
                        PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
                              son->name);
index 59c08997bfdfdb9c16aa7e9cc1d33f62a46a1769..382d49792f428099a1fa78ebc1f50224ba8b7d97 100644 (file)
@@ -45,7 +45,6 @@
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/nf_conntrack_timestamp.h>
 #include <net/netfilter/nf_conntrack_labels.h>
-#include <net/netfilter/nf_conntrack_seqadj.h>
 #include <net/netfilter/nf_conntrack_synproxy.h>
 #ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_core.h>
@@ -1566,9 +1565,11 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
 static int ctnetlink_change_timeout(struct nf_conn *ct,
                                    const struct nlattr * const cda[])
 {
-       u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
+       u64 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
 
-       ct->timeout = nfct_time_stamp + timeout * HZ;
+       if (timeout > INT_MAX)
+               timeout = INT_MAX;
+       ct->timeout = nfct_time_stamp + (u32)timeout;
 
        if (test_bit(IPS_DYING_BIT, &ct->status))
                return -ETIME;
@@ -1768,6 +1769,7 @@ ctnetlink_create_conntrack(struct net *net,
        int err = -EINVAL;
        struct nf_conntrack_helper *helper;
        struct nf_conn_tstamp *tstamp;
+       u64 timeout;
 
        ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
        if (IS_ERR(ct))
@@ -1776,7 +1778,10 @@ ctnetlink_create_conntrack(struct net *net,
        if (!cda[CTA_TIMEOUT])
                goto err1;
 
-       ct->timeout = nfct_time_stamp + ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
+       timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
+       if (timeout > INT_MAX)
+               timeout = INT_MAX;
+       ct->timeout = (u32)timeout + nfct_time_stamp;
 
        rcu_read_lock();
        if (cda[CTA_HELP]) {
index b12fc07111d0847b014410291df947bddc32d46a..37ef35b861f24365c843a4eec5ecc5ad8292cd22 100644 (file)
@@ -1039,6 +1039,9 @@ static int tcp_packet(struct nf_conn *ct,
                 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
                 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
                timeout = timeouts[TCP_CONNTRACK_UNACK];
+       else if (ct->proto.tcp.last_win == 0 &&
+                timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
+               timeout = timeouts[TCP_CONNTRACK_RETRANS];
        else
                timeout = timeouts[new_state];
        spin_unlock_bh(&ct->lock);
index d8327b43e4dce64593573178a397b160488a6355..10798b35748180746266aa0a84187dfd7f0f1ceb 100644 (file)
@@ -5847,6 +5847,12 @@ static int __net_init nf_tables_init_net(struct net *net)
        return 0;
 }
 
+static void __net_exit nf_tables_exit_net(struct net *net)
+{
+       WARN_ON_ONCE(!list_empty(&net->nft.af_info));
+       WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
+}
+
 int __nft_release_basechain(struct nft_ctx *ctx)
 {
        struct nft_rule *rule, *nr;
@@ -5917,6 +5923,7 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
 
 static struct pernet_operations nf_tables_net_ops = {
        .init   = nf_tables_init_net,
+       .exit   = nf_tables_exit_net,
 };
 
 static int __init nf_tables_module_init(void)
index 41628b3936731b77885717c27cf4dfa8e62b3c0f..d33ce6d5ebce92db2fab30cb4286c11ffd8c321a 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/errno.h>
+#include <linux/capability.h>
 #include <net/netlink.h>
 #include <net/sock.h>
 
@@ -407,6 +408,9 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
        struct nfnl_cthelper *nlcth;
        int ret = 0;
 
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
        if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
                return -EINVAL;
 
@@ -611,6 +615,9 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
        struct nfnl_cthelper *nlcth;
        bool tuple_set = false;
 
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
                        .dump = nfnl_cthelper_dump_table,
@@ -678,6 +685,9 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
        struct nfnl_cthelper *nlcth, *n;
        int j = 0, ret;
 
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
        if (tb[NFCTH_NAME])
                helper_name = nla_data(tb[NFCTH_NAME]);
 
index cad6498f10b03fca0e873eb8718734b471a65f9c..e955bec0acc6a949a32ac8d1e6ea383b31983c73 100644 (file)
@@ -151,7 +151,7 @@ instance_put(struct nfulnl_instance *inst)
                call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
 }
 
-static void nfulnl_timer(unsigned long data);
+static void nfulnl_timer(struct timer_list *t);
 
 static struct nfulnl_instance *
 instance_create(struct net *net, u_int16_t group_num,
@@ -184,7 +184,7 @@ instance_create(struct net *net, u_int16_t group_num,
        /* needs to be two, since we _put() after creation */
        refcount_set(&inst->use, 2);
 
-       setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
+       timer_setup(&inst->timer, nfulnl_timer, 0);
 
        inst->net = get_net(net);
        inst->peer_user_ns = user_ns;
@@ -377,9 +377,9 @@ __nfulnl_flush(struct nfulnl_instance *inst)
 }
 
 static void
-nfulnl_timer(unsigned long data)
+nfulnl_timer(struct timer_list *t)
 {
-       struct nfulnl_instance *inst = (struct nfulnl_instance *)data;
+       struct nfulnl_instance *inst = from_timer(inst, t, timer);
 
        spin_lock_bh(&inst->lock);
        if (inst->skb)
@@ -1093,10 +1093,15 @@ static int __net_init nfnl_log_net_init(struct net *net)
 
 static void __net_exit nfnl_log_net_exit(struct net *net)
 {
+       struct nfnl_log_net *log = nfnl_log_pernet(net);
+       unsigned int i;
+
 #ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
 #endif
        nf_log_unset(net, &nfulnl_logger);
+       for (i = 0; i < INSTANCE_BUCKETS; i++)
+               WARN_ON_ONCE(!hlist_empty(&log->instance_table[i]));
 }
 
 static struct pernet_operations nfnl_log_net_ops = {
index a16356cacec3646a9b70a0d0b443db28a696a0a0..c09b36755ed721f45be12523c4c328c97fd0e166 100644 (file)
@@ -1512,10 +1512,15 @@ static int __net_init nfnl_queue_net_init(struct net *net)
 
 static void __net_exit nfnl_queue_net_exit(struct net *net)
 {
+       struct nfnl_queue_net *q = nfnl_queue_pernet(net);
+       unsigned int i;
+
        nf_unregister_queue_handler(net);
 #ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
 #endif
+       for (i = 0; i < INSTANCE_BUCKETS; i++)
+               WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
 }
 
 static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
index a0a93d987a3bd440dc19bafe9e65b023c6baf217..47ec1046ad11536e337f709d1f41a267a77cf1d3 100644 (file)
@@ -214,6 +214,8 @@ static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
        [NFTA_EXTHDR_OFFSET]            = { .type = NLA_U32 },
        [NFTA_EXTHDR_LEN]               = { .type = NLA_U32 },
        [NFTA_EXTHDR_FLAGS]             = { .type = NLA_U32 },
+       [NFTA_EXTHDR_OP]                = { .type = NLA_U32 },
+       [NFTA_EXTHDR_SREG]              = { .type = NLA_U32 },
 };
 
 static int nft_exthdr_init(const struct nft_ctx *ctx,
index a77dd514297c9627d6103dbcb6428bb6bdd165ad..55802e97f906d1987ed78b4296584deb38e5f876 100644 (file)
@@ -1729,8 +1729,17 @@ static int __net_init xt_net_init(struct net *net)
        return 0;
 }
 
+static void __net_exit xt_net_exit(struct net *net)
+{
+       int i;
+
+       for (i = 0; i < NFPROTO_NUMPROTO; i++)
+               WARN_ON_ONCE(!list_empty(&net->xt.tables[i]));
+}
+
 static struct pernet_operations xt_net_ops = {
        .init = xt_net_init,
+       .exit = xt_net_exit,
 };
 
 static int __init xt_init(void)
index daf45da448fab4406cf4b5727404c88c1f0759be..ee3421ad108da72bd769de43e5ff5540212b11fe 100644 (file)
@@ -107,9 +107,9 @@ static void idletimer_tg_work(struct work_struct *work)
        sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
 }
 
-static void idletimer_tg_expired(unsigned long data)
+static void idletimer_tg_expired(struct timer_list *t)
 {
-       struct idletimer_tg *timer = (struct idletimer_tg *) data;
+       struct idletimer_tg *timer = from_timer(timer, t, timer);
 
        pr_debug("timer %s expired\n", timer->attr.attr.name);
 
@@ -143,8 +143,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
 
        list_add(&info->timer->entry, &idletimer_tg_list);
 
-       setup_timer(&info->timer->timer, idletimer_tg_expired,
-                   (unsigned long) info->timer);
+       timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
        info->timer->refcnt = 1;
 
        mod_timer(&info->timer->timer,
index 3ba31c194ccec3101aa0979bb18a693d30d1c68b..0971634e5444559cb5380a9f9927dc3294db3452 100644 (file)
@@ -85,9 +85,10 @@ led_tg(struct sk_buff *skb, const struct xt_action_param *par)
        return XT_CONTINUE;
 }
 
-static void led_timeout_callback(unsigned long data)
+static void led_timeout_callback(struct timer_list *t)
 {
-       struct xt_led_info_internal *ledinternal = (struct xt_led_info_internal *)data;
+       struct xt_led_info_internal *ledinternal = from_timer(ledinternal, t,
+                                                             timer);
 
        led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
 }
@@ -143,8 +144,7 @@ static int led_tg_check(const struct xt_tgchk_param *par)
 
        /* See if we need to set up a timer */
        if (ledinfo->delay > 0)
-               setup_timer(&ledinternal->timer, led_timeout_callback,
-                           (unsigned long)ledinternal);
+               timer_setup(&ledinternal->timer, led_timeout_callback, 0);
 
        list_add_tail(&ledinternal->list, &xt_led_triggers);
 
index 041da0d9c06f2b1c2ecb31851932ac5a350122a9..1f7fbd3c7e5a6de0cbe7e0bb5c847a98df371aff 100644 (file)
@@ -27,6 +27,9 @@ static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len,
 {
        struct sock_fprog_kern program;
 
+       if (len > XT_BPF_MAX_NUM_INSTR)
+               return -EINVAL;
+
        program.len = len;
        program.filter = insns;
 
@@ -55,6 +58,9 @@ static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
        mm_segment_t oldfs = get_fs();
        int retval, fd;
 
+       if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
+               return -EINVAL;
+
        set_fs(KERNEL_DS);
        fd = bpf_obj_get_user(path, 0);
        set_fs(oldfs);
index 36e14b1f061ddf7eb77327a3e717e15b6af83bfb..a34f314a8c2380e6b6a223dd6d38dcc88ca2c1ac 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 
+#include <linux/capability.h>
 #include <linux/if.h>
 #include <linux/inetdevice.h>
 #include <linux/ip.h>
@@ -70,6 +71,9 @@ static int xt_osf_add_callback(struct net *net, struct sock *ctnl,
        struct xt_osf_finger *kf = NULL, *sf;
        int err = 0;
 
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
        if (!osf_attrs[OSF_ATTR_FINGER])
                return -EINVAL;
 
@@ -115,6 +119,9 @@ static int xt_osf_remove_callback(struct net *net, struct sock *ctnl,
        struct xt_osf_finger *sf;
        int err = -ENOENT;
 
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
        if (!osf_attrs[OSF_ATTR_FINGER])
                return -EINVAL;
 
index b9e0ee4e22f57066d0ac0bc5f64181fbb65e6acc..79cc1bf36e4af7d2c70575e56203a482ba2dca97 100644 (file)
@@ -253,6 +253,9 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
        struct sock *sk = skb->sk;
        int ret = -ENOMEM;
 
+       if (!net_eq(dev_net(dev), sock_net(sk)))
+               return 0;
+
        dev_hold(dev);
 
        if (is_vmalloc_addr(skb->head))
index 2dec3583c97d00df654de19752db89c256f7de6d..7ed9d4422a73decbe395eb603d7d106436d9f3a4 100644 (file)
@@ -284,7 +284,7 @@ void nr_destroy_socket(struct sock *sk)
 
        if (sk_has_allocations(sk)) {
                /* Defer: outstanding buffers */
-               sk->sk_timer.function = (TIMER_FUNC_TYPE)nr_destroy_timer;
+               sk->sk_timer.function = nr_destroy_timer;
                sk->sk_timer.expires  = jiffies + 2 * HZ;
                add_timer(&sk->sk_timer);
        } else
index 989ae647825ef4568c0bd5717c8721e85cde46c6..215ad22a96476ebb9d30919e99d67bda8e1ce88f 100644 (file)
@@ -15,7 +15,7 @@
 #include <net/netrom.h>
 #include <linux/init.h>
 
-static void nr_loopback_timer(unsigned long);
+static void nr_loopback_timer(struct timer_list *);
 
 static struct sk_buff_head loopback_queue;
 static DEFINE_TIMER(loopback_timer, nr_loopback_timer);
@@ -48,7 +48,7 @@ int nr_loopback_queue(struct sk_buff *skb)
        return 1;
 }
 
-static void nr_loopback_timer(unsigned long param)
+static void nr_loopback_timer(struct timer_list *unused)
 {
        struct sk_buff *skb;
        ax25_address *nr_dest;
index 43569aea0f5e2d9bcd941247e2df3be0d79913a8..cbd51ed5a2d7bef7540ea8a8a2e0fea9ff8fc02f 100644 (file)
@@ -45,7 +45,7 @@ void nr_init_timers(struct sock *sk)
        timer_setup(&nr->idletimer, nr_idletimer_expiry, 0);
 
        /* initialized by sock_init_data */
-       sk->sk_timer.function = (TIMER_FUNC_TYPE)nr_heartbeat_expiry;
+       sk->sk_timer.function = nr_heartbeat_expiry;
 }
 
 void nr_start_t1timer(struct sock *sk)
index c25e9b4179c34b571c6adbb65f3cff9f60dad06d..074960154993fef5061cece1009b014c10668737 100644 (file)
@@ -591,18 +591,18 @@ static int nci_close_device(struct nci_dev *ndev)
 }
 
 /* NCI command timer function */
-static void nci_cmd_timer(unsigned long arg)
+static void nci_cmd_timer(struct timer_list *t)
 {
-       struct nci_dev *ndev = (void *) arg;
+       struct nci_dev *ndev = from_timer(ndev, t, cmd_timer);
 
        atomic_set(&ndev->cmd_cnt, 1);
        queue_work(ndev->cmd_wq, &ndev->cmd_work);
 }
 
 /* NCI data exchange timer function */
-static void nci_data_timer(unsigned long arg)
+static void nci_data_timer(struct timer_list *t)
 {
-       struct nci_dev *ndev = (void *) arg;
+       struct nci_dev *ndev = from_timer(ndev, t, data_timer);
 
        set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
        queue_work(ndev->rx_wq, &ndev->rx_work);
@@ -1232,10 +1232,8 @@ int nci_register_device(struct nci_dev *ndev)
        skb_queue_head_init(&ndev->rx_q);
        skb_queue_head_init(&ndev->tx_q);
 
-       setup_timer(&ndev->cmd_timer, nci_cmd_timer,
-                   (unsigned long) ndev);
-       setup_timer(&ndev->data_timer, nci_data_timer,
-                   (unsigned long) ndev);
+       timer_setup(&ndev->cmd_timer, nci_cmd_timer, 0);
+       timer_setup(&ndev->data_timer, nci_data_timer, 0);
 
        mutex_init(&ndev->req_lock);
        INIT_LIST_HEAD(&ndev->conn_info_list);
index dbe2379329c5517fb164b6024d40fabebe7855c8..f039064ce922f3aac8419dcda65ad875f89e966b 100644 (file)
@@ -579,6 +579,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
                        return -EINVAL;
 
                skb_reset_network_header(skb);
+               key->eth.type = skb->protocol;
        } else {
                eth = eth_hdr(skb);
                ether_addr_copy(key->eth.src, eth->h_source);
@@ -592,15 +593,23 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
                if (unlikely(parse_vlan(skb, key)))
                        return -ENOMEM;
 
-               skb->protocol = parse_ethertype(skb);
-               if (unlikely(skb->protocol == htons(0)))
+               key->eth.type = parse_ethertype(skb);
+               if (unlikely(key->eth.type == htons(0)))
                        return -ENOMEM;
 
+               /* Multiple tagged packets need to retain TPID to satisfy
+                * skb_vlan_pop(), which will later shift the ethertype into
+                * skb->protocol.
+                */
+               if (key->eth.cvlan.tci & htons(VLAN_TAG_PRESENT))
+                       skb->protocol = key->eth.cvlan.tpid;
+               else
+                       skb->protocol = key->eth.type;
+
                skb_reset_network_header(skb);
                __skb_push(skb, skb->data - skb_mac_header(skb));
        }
        skb_reset_mac_len(skb);
-       key->eth.type = skb->protocol;
 
        /* Network layer. */
        if (key->eth.type == htons(ETH_P_IP)) {
index 8886f15abe90e16005d7e02f3514476c68d2b0db..bc2f1e0977d657ec09f176cfcecf28839eb1fab0 100644 (file)
@@ -183,7 +183,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
        long i;
        int ret;
 
-       if (rs->rs_bound_addr == 0) {
+       if (rs->rs_bound_addr == 0 || !rs->rs_transport) {
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }
index b52cdc8ae428819a5853509a06852ec2ebc43a5a..f72466c63f0c5657a2f44e11ae432dd1457991cf 100644 (file)
@@ -1009,6 +1009,9 @@ static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
                        continue;
 
                if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
+                       if (cmsg->cmsg_len <
+                           CMSG_LEN(sizeof(struct rds_rdma_args)))
+                               return -EINVAL;
                        args = CMSG_DATA(cmsg);
                        *rdma_bytes += args->remote_vec.bytes;
                }
index cda4c6678ef16708c185b220f24647ee308f9a93..62055d3069d2a6849d6bc37df8f1d2b98ab420ac 100644 (file)
@@ -37,7 +37,7 @@ void rose_start_ftimer(struct rose_neigh *neigh)
 {
        del_timer(&neigh->ftimer);
 
-       neigh->ftimer.function = (TIMER_FUNC_TYPE)rose_ftimer_expiry;
+       neigh->ftimer.function = rose_ftimer_expiry;
        neigh->ftimer.expires  =
                jiffies + msecs_to_jiffies(sysctl_rose_link_fail_timeout);
 
@@ -48,7 +48,7 @@ static void rose_start_t0timer(struct rose_neigh *neigh)
 {
        del_timer(&neigh->t0timer);
 
-       neigh->t0timer.function = (TIMER_FUNC_TYPE)rose_t0timer_expiry;
+       neigh->t0timer.function = rose_t0timer_expiry;
        neigh->t0timer.expires  =
                jiffies + msecs_to_jiffies(sysctl_rose_restart_request_timeout);
 
index ea613b2a97358a9c60a21b7744e7d564f1eb7d4c..74555fb9561547a2e01e188c38304b629c3bf8ec 100644 (file)
@@ -36,7 +36,7 @@ void rose_start_heartbeat(struct sock *sk)
 {
        del_timer(&sk->sk_timer);
 
-       sk->sk_timer.function = (TIMER_FUNC_TYPE)rose_heartbeat_expiry;
+       sk->sk_timer.function = rose_heartbeat_expiry;
        sk->sk_timer.expires  = jiffies + 5 * HZ;
 
        add_timer(&sk->sk_timer);
@@ -48,7 +48,7 @@ void rose_start_t1timer(struct sock *sk)
 
        del_timer(&rose->timer);
 
-       rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+       rose->timer.function = rose_timer_expiry;
        rose->timer.expires  = jiffies + rose->t1;
 
        add_timer(&rose->timer);
@@ -60,7 +60,7 @@ void rose_start_t2timer(struct sock *sk)
 
        del_timer(&rose->timer);
 
-       rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+       rose->timer.function = rose_timer_expiry;
        rose->timer.expires  = jiffies + rose->t2;
 
        add_timer(&rose->timer);
@@ -72,7 +72,7 @@ void rose_start_t3timer(struct sock *sk)
 
        del_timer(&rose->timer);
 
-       rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+       rose->timer.function = rose_timer_expiry;
        rose->timer.expires  = jiffies + rose->t3;
 
        add_timer(&rose->timer);
@@ -84,7 +84,7 @@ void rose_start_hbtimer(struct sock *sk)
 
        del_timer(&rose->timer);
 
-       rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+       rose->timer.function = rose_timer_expiry;
        rose->timer.expires  = jiffies + rose->hb;
 
        add_timer(&rose->timer);
@@ -97,7 +97,7 @@ void rose_start_idletimer(struct sock *sk)
        del_timer(&rose->idletimer);
 
        if (rose->idle > 0) {
-               rose->idletimer.function = (TIMER_FUNC_TYPE)rose_idletimer_expiry;
+               rose->idletimer.function = rose_idletimer_expiry;
                rose->idletimer.expires  = jiffies + rose->idle;
 
                add_timer(&rose->idletimer);
index 8f7cf4c042be2b9b4379968655bea594a2928546..dcd818fa837e0af91978d6f1128085f93eb80f15 100644 (file)
@@ -860,6 +860,7 @@ static void rxrpc_sock_destructor(struct sock *sk)
 static int rxrpc_release_sock(struct sock *sk)
 {
        struct rxrpc_sock *rx = rxrpc_sk(sk);
+       struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
 
        _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
 
@@ -895,8 +896,8 @@ static int rxrpc_release_sock(struct sock *sk)
        rxrpc_release_calls_on_socket(rx);
        flush_workqueue(rxrpc_workqueue);
        rxrpc_purge_queue(&sk->sk_receive_queue);
-       rxrpc_queue_work(&rx->local->rxnet->service_conn_reaper);
-       rxrpc_queue_work(&rx->local->rxnet->client_conn_reaper);
+       rxrpc_queue_work(&rxnet->service_conn_reaper);
+       rxrpc_queue_work(&rxnet->client_conn_reaper);
 
        rxrpc_put_local(rx->local);
        rx->local = NULL;
index bda952ffe6a6eab394e39220a6fe6a6af19c8e08..ad2ab11031899fd0d1b398622deee579b6fa9f42 100644 (file)
@@ -123,7 +123,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                else
                        ack_at = expiry;
 
-               ack_at = jiffies + expiry;
+               ack_at += now;
                if (time_before(ack_at, call->ack_at)) {
                        WRITE_ONCE(call->ack_at, ack_at);
                        rxrpc_reduce_call_timer(call, ack_at, now,
@@ -426,7 +426,7 @@ recheck_state:
        next = call->expect_rx_by;
 
 #define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
-       
+
        set(call->expect_req_by);
        set(call->expect_term_by);
        set(call->ack_at);
index 7ee3d6ce5aa2b681fae6d3133adddbf1732ec8d6..0b2db38dd32d4c2418827236faf5219bc70cc97f 100644 (file)
@@ -45,9 +45,9 @@ const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
 
 struct kmem_cache *rxrpc_call_jar;
 
-static void rxrpc_call_timer_expired(unsigned long _call)
+static void rxrpc_call_timer_expired(struct timer_list *t)
 {
-       struct rxrpc_call *call = (struct rxrpc_call *)_call;
+       struct rxrpc_call *call = from_timer(call, t, timer);
 
        _enter("%d", call->debug_id);
 
@@ -126,8 +126,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
                lockdep_set_class(&call->user_mutex,
                                  &rxrpc_call_user_mutex_lock_class_key);
 
-       setup_timer(&call->timer, rxrpc_call_timer_expired,
-                   (unsigned long)call);
+       timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
        INIT_WORK(&call->processor, &rxrpc_process_call);
        INIT_LIST_HEAD(&call->link);
        INIT_LIST_HEAD(&call->chan_wait_link);
index 9e9a8db1bc9cd0f1afd3efd7e9e26c4d2890a7d3..4ca11be6be3cadcfda93eab7892292ca1ec127b5 100644 (file)
@@ -30,22 +30,18 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
        struct rxrpc_channel *chan;
        struct msghdr msg;
-       struct kvec iov;
+       struct kvec iov[3];
        struct {
                struct rxrpc_wire_header whdr;
                union {
-                       struct {
-                               __be32 code;
-                       } abort;
-                       struct {
-                               struct rxrpc_ackpacket ack;
-                               u8 padding[3];
-                               struct rxrpc_ackinfo info;
-                       };
+                       __be32 abort_code;
+                       struct rxrpc_ackpacket ack;
                };
        } __attribute__((packed)) pkt;
+       struct rxrpc_ackinfo ack_info;
        size_t len;
-       u32 serial, mtu, call_id;
+       int ioc;
+       u32 serial, mtu, call_id, padding;
 
        _enter("%d", conn->debug_id);
 
@@ -66,6 +62,13 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        msg.msg_controllen = 0;
        msg.msg_flags   = 0;
 
+       iov[0].iov_base = &pkt;
+       iov[0].iov_len  = sizeof(pkt.whdr);
+       iov[1].iov_base = &padding;
+       iov[1].iov_len  = 3;
+       iov[2].iov_base = &ack_info;
+       iov[2].iov_len  = sizeof(ack_info);
+
        pkt.whdr.epoch          = htonl(conn->proto.epoch);
        pkt.whdr.cid            = htonl(conn->proto.cid);
        pkt.whdr.callNumber     = htonl(call_id);
@@ -80,8 +83,10 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        len = sizeof(pkt.whdr);
        switch (chan->last_type) {
        case RXRPC_PACKET_TYPE_ABORT:
-               pkt.abort.code  = htonl(chan->last_abort);
-               len += sizeof(pkt.abort);
+               pkt.abort_code  = htonl(chan->last_abort);
+               iov[0].iov_len += sizeof(pkt.abort_code);
+               len += sizeof(pkt.abort_code);
+               ioc = 1;
                break;
 
        case RXRPC_PACKET_TYPE_ACK:
@@ -94,13 +99,19 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
                pkt.ack.serial          = htonl(skb ? sp->hdr.serial : 0);
                pkt.ack.reason          = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
                pkt.ack.nAcks           = 0;
-               pkt.info.rxMTU          = htonl(rxrpc_rx_mtu);
-               pkt.info.maxMTU         = htonl(mtu);
-               pkt.info.rwind          = htonl(rxrpc_rx_window_size);
-               pkt.info.jumbo_max      = htonl(rxrpc_rx_jumbo_max);
+               ack_info.rxMTU          = htonl(rxrpc_rx_mtu);
+               ack_info.maxMTU         = htonl(mtu);
+               ack_info.rwind          = htonl(rxrpc_rx_window_size);
+               ack_info.jumbo_max      = htonl(rxrpc_rx_jumbo_max);
                pkt.whdr.flags          |= RXRPC_SLOW_START_OK;
-               len += sizeof(pkt.ack) + sizeof(pkt.info);
+               padding                 = 0;
+               iov[0].iov_len += sizeof(pkt.ack);
+               len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
+               ioc = 3;
                break;
+
+       default:
+               return;
        }
 
        /* Resync with __rxrpc_disconnect_call() and check that the last call
@@ -110,9 +121,6 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        if (READ_ONCE(chan->last_call) != call_id)
                return;
 
-       iov.iov_base    = &pkt;
-       iov.iov_len     = len;
-
        serial = atomic_inc_return(&conn->serial);
        pkt.whdr.serial = htonl(serial);
 
@@ -127,7 +135,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
                break;
        }
 
-       kernel_sendmsg(conn->params.local->socket, &msg, &iov, 1, len);
+       kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
        _leave("");
        return;
 }
index 1aad04a32d5e203ab17928e2247275c8b01c954d..c628351eb9008da7059102f48dad7f605343de5b 100644 (file)
@@ -424,7 +424,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
        if (earliest != now + MAX_JIFFY_OFFSET) {
                _debug("reschedule reaper %ld", (long)earliest - (long)now);
                ASSERT(time_after(earliest, now));
-               rxrpc_set_service_reap_timer(rxnet, earliest);          
+               rxrpc_set_service_reap_timer(rxnet, earliest);
        }
 
        while (!list_empty(&graveyard)) {
index 23a5e61d8f79a01622c29de07061fcff94a14f3c..6fc61400337fb3e8a96658ed685efa9a8280f70e 100644 (file)
@@ -976,7 +976,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
                rxrpc_reduce_call_timer(call, expect_rx_by, now,
                                        rxrpc_timer_set_for_normal);
        }
-       
+
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_DATA:
                rxrpc_input_data(call, skb, skew);
@@ -1213,7 +1213,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
                                goto reupgrade;
                        conn->service_id = sp->hdr.serviceId;
                }
-               
+
                if (sp->hdr.callNumber == 0) {
                        /* Connection-level packet */
                        _debug("CONN %p {%d}", conn, conn->debug_id);
index a1c53ac066a10bda169b0222b6d6177066c6dca9..09f2a3e0522163e0e5ae900555c56b74ace26b7a 100644 (file)
@@ -233,7 +233,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
                if (resend_at < 1)
                        resend_at = 1;
 
-               resend_at = now + rxrpc_resend_timeout;
+               resend_at += now;
                WRITE_ONCE(call->resend_at, resend_at);
                rxrpc_reduce_call_timer(call, resend_at, now,
                                        rxrpc_timer_set_for_send);
index 1e3f10e5da996a868b8315c53cfac964842bbec3..6445184b2759a783f05a48578af330e0872f5d11 100644 (file)
@@ -22,7 +22,6 @@
 #include <net/pkt_sched.h>
 #include <uapi/linux/tc_act/tc_ife.h>
 #include <net/tc_act/tc_ife.h>
-#include <linux/rtnetlink.h>
 
 static int skbmark_encode(struct sk_buff *skb, void *skbdata,
                          struct tcf_meta_info *e)
index 2ea1f26c9e966b26076f48757c5351287d9f0943..7221437ca3a6fadad5ebc3f286cc1fc5d35d89e1 100644 (file)
@@ -22,7 +22,6 @@
 #include <net/pkt_sched.h>
 #include <uapi/linux/tc_act/tc_ife.h>
 #include <net/tc_act/tc_ife.h>
-#include <linux/rtnetlink.h>
 
 static int skbtcindex_encode(struct sk_buff *skb, void *skbdata,
                             struct tcf_meta_info *e)
index 8b5abcd2f32faeaa2a283bcc8fb388201f7a86e2..9438969290a6147c16c971558aeef3d01d21dde5 100644 (file)
@@ -96,23 +96,16 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        return ret;
 }
 
-static void tcf_sample_cleanup_rcu(struct rcu_head *rcu)
+static void tcf_sample_cleanup(struct tc_action *a, int bind)
 {
-       struct tcf_sample *s = container_of(rcu, struct tcf_sample, rcu);
+       struct tcf_sample *s = to_sample(a);
        struct psample_group *psample_group;
 
-       psample_group = rcu_dereference_protected(s->psample_group, 1);
+       psample_group = rtnl_dereference(s->psample_group);
        RCU_INIT_POINTER(s->psample_group, NULL);
        psample_group_put(psample_group);
 }
 
-static void tcf_sample_cleanup(struct tc_action *a, int bind)
-{
-       struct tcf_sample *s = to_sample(a);
-
-       call_rcu(&s->rcu, tcf_sample_cleanup_rcu);
-}
-
 static bool tcf_sample_dev_ok_push(struct net_device *dev)
 {
        switch (dev->type) {
@@ -264,7 +257,6 @@ static int __init sample_init_module(void)
 
 static void __exit sample_cleanup_module(void)
 {
-       rcu_barrier();
        tcf_unregister_action(&act_sample_ops, &sample_net_ops);
 }
 
index ddcf04b4ab43732c001869f70d63ea193768ebc3..b9d63d2246e667329c30606165dd485b9dc777aa 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/skbuff.h>
 #include <linux/init.h>
 #include <linux/kmod.h>
-#include <linux/err.h>
 #include <linux/slab.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -352,6 +351,8 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 {
        struct tcf_chain *chain;
 
+       if (!block)
+               return;
        /* Hold a refcnt for all chains, except 0, so that they don't disappear
         * while we are iterating.
         */
index 6fe798c2df1a5303cd61cd3ad53cd2f9385d16de..8d78e7f4ecc33082517aaab5767a30c119f49dc0 100644 (file)
@@ -42,7 +42,6 @@ struct cls_bpf_prog {
        struct list_head link;
        struct tcf_result res;
        bool exts_integrated;
-       bool offloaded;
        u32 gen_flags;
        struct tcf_exts exts;
        u32 handle;
@@ -148,33 +147,37 @@ static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
 }
 
 static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
-                              enum tc_clsbpf_command cmd)
+                              struct cls_bpf_prog *oldprog)
 {
-       bool addorrep = cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE;
        struct tcf_block *block = tp->chain->block;
-       bool skip_sw = tc_skip_sw(prog->gen_flags);
        struct tc_cls_bpf_offload cls_bpf = {};
+       struct cls_bpf_prog *obj;
+       bool skip_sw;
        int err;
 
+       skip_sw = prog && tc_skip_sw(prog->gen_flags);
+       obj = prog ?: oldprog;
+
        tc_cls_common_offload_init(&cls_bpf.common, tp);
-       cls_bpf.command = cmd;
-       cls_bpf.exts = &prog->exts;
-       cls_bpf.prog = prog->filter;
-       cls_bpf.name = prog->bpf_name;
-       cls_bpf.exts_integrated = prog->exts_integrated;
-       cls_bpf.gen_flags = prog->gen_flags;
+       cls_bpf.command = TC_CLSBPF_OFFLOAD;
+       cls_bpf.exts = &obj->exts;
+       cls_bpf.prog = prog ? prog->filter : NULL;
+       cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
+       cls_bpf.name = obj->bpf_name;
+       cls_bpf.exts_integrated = obj->exts_integrated;
+       cls_bpf.gen_flags = obj->gen_flags;
 
        err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
-       if (addorrep) {
+       if (prog) {
                if (err < 0) {
-                       cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
+                       cls_bpf_offload_cmd(tp, oldprog, prog);
                        return err;
                } else if (err > 0) {
                        prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
                }
        }
 
-       if (addorrep && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
+       if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
                return -EINVAL;
 
        return 0;
@@ -183,38 +186,17 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                           struct cls_bpf_prog *oldprog)
 {
-       struct cls_bpf_prog *obj = prog;
-       enum tc_clsbpf_command cmd;
-       bool skip_sw;
-       int ret;
-
-       skip_sw = tc_skip_sw(prog->gen_flags) ||
-               (oldprog && tc_skip_sw(oldprog->gen_flags));
-
-       if (oldprog && oldprog->offloaded) {
-               if (!tc_skip_hw(prog->gen_flags)) {
-                       cmd = TC_CLSBPF_REPLACE;
-               } else if (!tc_skip_sw(prog->gen_flags)) {
-                       obj = oldprog;
-                       cmd = TC_CLSBPF_DESTROY;
-               } else {
-                       return -EINVAL;
-               }
-       } else {
-               if (tc_skip_hw(prog->gen_flags))
-                       return skip_sw ? -EINVAL : 0;
-               cmd = TC_CLSBPF_ADD;
-       }
-
-       ret = cls_bpf_offload_cmd(tp, obj, cmd);
-       if (ret)
-               return ret;
+       if (prog && oldprog && prog->gen_flags != oldprog->gen_flags)
+               return -EINVAL;
 
-       obj->offloaded = true;
-       if (oldprog)
-               oldprog->offloaded = false;
+       if (prog && tc_skip_hw(prog->gen_flags))
+               prog = NULL;
+       if (oldprog && tc_skip_hw(oldprog->gen_flags))
+               oldprog = NULL;
+       if (!prog && !oldprog)
+               return 0;
 
-       return 0;
+       return cls_bpf_offload_cmd(tp, prog, oldprog);
 }
 
 static void cls_bpf_stop_offload(struct tcf_proto *tp,
@@ -222,25 +204,26 @@ static void cls_bpf_stop_offload(struct tcf_proto *tp,
 {
        int err;
 
-       if (!prog->offloaded)
-               return;
-
-       err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
-       if (err) {
+       err = cls_bpf_offload_cmd(tp, NULL, prog);
+       if (err)
                pr_err("Stopping hardware offload failed: %d\n", err);
-               return;
-       }
-
-       prog->offloaded = false;
 }
 
 static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
                                         struct cls_bpf_prog *prog)
 {
-       if (!prog->offloaded)
-               return;
+       struct tcf_block *block = tp->chain->block;
+       struct tc_cls_bpf_offload cls_bpf = {};
+
+       tc_cls_common_offload_init(&cls_bpf.common, tp);
+       cls_bpf.command = TC_CLSBPF_STATS;
+       cls_bpf.exts = &prog->exts;
+       cls_bpf.prog = prog->filter;
+       cls_bpf.name = prog->bpf_name;
+       cls_bpf.exts_integrated = prog->exts_integrated;
+       cls_bpf.gen_flags = prog->gen_flags;
 
-       cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
+       tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
 }
 
 static int cls_bpf_init(struct tcf_proto *tp)
index ac152b4f4247d61d1761886352a93cb03b585cbf..507859cdd1cb1e7d97751ebad5cd688cb02b14ea 100644 (file)
@@ -45,7 +45,6 @@
 #include <net/netlink.h>
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
-#include <linux/netdevice.h>
 #include <linux/idr.h>
 
 struct tc_u_knode {
index b6c4f536876b70b0ad24fee686129a396e07f573..0f1eab99ff4edb6e7e27f4b4b34552b5ee996cbf 100644 (file)
@@ -795,6 +795,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
        tcm->tcm_info = refcount_read(&q->refcnt);
        if (nla_put_string(skb, TCA_KIND, q->ops->id))
                goto nla_put_failure;
+       if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
+               goto nla_put_failure;
        if (q->ops->dump && q->ops->dump(q, skb) < 0)
                goto nla_put_failure;
        qlen = q->q.qlen;
index b30a2c70bd489b36a2295a11707b6a36d4eb9ac0..531250fceb9e5a75d6a8b843e5e5fd9d481fddf2 100644 (file)
@@ -369,6 +369,9 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 
        ctl = nla_data(tb[TCA_CHOKE_PARMS]);
 
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+               return -EINVAL;
+
        if (ctl->limit > CHOKE_MAX_QUEUE)
                return -EINVAL;
 
index 3839cbbdc32b1eadd2cae6a42a6b8c998ca88a15..661c7144b53af048b3a65484777910e2d60f25aa 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/if_vlan.h>
+#include <linux/if_macvlan.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
@@ -277,6 +278,8 @@ unsigned long dev_trans_start(struct net_device *dev)
 
        if (is_vlan_dev(dev))
                dev = vlan_dev_real_dev(dev);
+       else if (netif_is_macvlan(dev))
+               dev = macvlan_dev_real_dev(dev);
        res = netdev_get_tx_queue(dev, 0)->trans_start;
        for (i = 1; i < dev->num_tx_queues; i++) {
                val = netdev_get_tx_queue(dev, i)->trans_start;
@@ -1037,6 +1040,8 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
 
        if (!tp_head) {
                RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
+               /* Wait for flying RCU callback before it is freed. */
+               rcu_barrier_bh();
                return;
        }
 
@@ -1052,7 +1057,7 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
        rcu_assign_pointer(*miniqp->p_miniq, miniq);
 
        if (miniq_old)
-               /* This is counterpart of the rcu barrier above. We need to
+               /* This is counterpart of the rcu barriers above. We need to
                 * block potential new user of miniq_old until all readers
                 * are not seeing it.
                 */
index 17c7130454bd90e8af1d17e95f477ea558fb481d..bc30f9186ac67cd7b1c21d4d2b0035d3a6b886af 100644 (file)
@@ -356,6 +356,9 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q = table->tab[dp];
 
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+               return -EINVAL;
+
        if (!q) {
                table->tab[dp] = q = *prealloc;
                *prealloc = NULL;
index 5ecc38f35d4774fdfa402d9a4c4a0e655e1c91c2..fc1286f499c1462ab29c5054f734237788974e0e 100644 (file)
@@ -68,6 +68,8 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
        struct net_device *dev = qdisc_dev(sch);
        int err;
 
+       net_inc_ingress_queue();
+
        mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
 
        q->block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
@@ -78,7 +80,6 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
        if (err)
                return err;
 
-       net_inc_ingress_queue();
        sch->flags |= TCQ_F_CPUSTATS;
 
        return 0;
@@ -172,6 +173,9 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
        struct net_device *dev = qdisc_dev(sch);
        int err;
 
+       net_inc_ingress_queue();
+       net_inc_egress_queue();
+
        mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);
 
        q->ingress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
@@ -190,18 +194,11 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
 
        err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info);
        if (err)
-               goto err_egress_block_get;
-
-       net_inc_ingress_queue();
-       net_inc_egress_queue();
+               return err;
 
        sch->flags |= TCQ_F_CPUSTATS;
 
        return 0;
-
-err_egress_block_get:
-       tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
-       return err;
 }
 
 static void clsact_destroy(struct Qdisc *sch)
index 7f8ea9e297c36acd0969b0330ab479e0199f47ac..f0747eb87dc4784e67e0b5872dcf37effaaa4060 100644 (file)
@@ -157,6 +157,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
                .handle = sch->handle,
                .parent = sch->parent,
        };
+       int err;
 
        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return -EOPNOTSUPP;
@@ -171,7 +172,14 @@ static int red_offload(struct Qdisc *sch, bool enable)
                opt.command = TC_RED_DESTROY;
        }
 
-       return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
+       err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
+
+       if (!err && enable)
+               sch->flags |= TCQ_F_OFFLOADED;
+       else
+               sch->flags &= ~TCQ_F_OFFLOADED;
+
+       return err;
 }
 
 static void red_destroy(struct Qdisc *sch)
@@ -212,6 +220,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
        max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
 
        ctl = nla_data(tb[TCA_RED_PARMS]);
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+               return -EINVAL;
 
        if (ctl->limit > 0) {
                child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
@@ -272,7 +282,7 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt)
        return red_change(sch, opt);
 }
 
-static int red_dump_offload(struct Qdisc *sch, struct tc_red_qopt *opt)
+static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct tc_red_qopt_offload hw_stats = {
@@ -284,21 +294,12 @@ static int red_dump_offload(struct Qdisc *sch, struct tc_red_qopt *opt)
                        .stats.qstats = &sch->qstats,
                },
        };
-       int err;
 
-       opt->flags &= ~TC_RED_OFFLOADED;
-       if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
-               return 0;
-
-       err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
-                                           &hw_stats);
-       if (err == -EOPNOTSUPP)
+       if (!(sch->flags & TCQ_F_OFFLOADED))
                return 0;
 
-       if (!err)
-               opt->flags |= TC_RED_OFFLOADED;
-
-       return err;
+       return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
+                                            &hw_stats);
 }
 
 static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -317,7 +318,7 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
        int err;
 
        sch->qstats.backlog = q->qdisc->qstats.backlog;
-       err = red_dump_offload(sch, &opt);
+       err = red_dump_offload_stats(sch, &opt);
        if (err)
                goto nla_put_failure;
 
@@ -345,7 +346,7 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
                .marked = q->stats.prob_mark + q->stats.forced_mark,
        };
 
-       if (tc_can_offload(dev) &&  dev->netdev_ops->ndo_setup_tc) {
+       if (sch->flags & TCQ_F_OFFLOADED) {
                struct red_stats hw_stats = {0};
                struct tc_red_qopt_offload hw_stats_request = {
                        .command = TC_RED_XSTATS,
index 09c1203c17119829d183fbdd0dfe9757460b863e..930e5bd26d3d7650a41b9472463c3fc39732495b 100644 (file)
@@ -639,6 +639,9 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        if (ctl->divisor &&
            (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
                return -EINVAL;
+       if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
+                                       ctl_v1->Wlog))
+               return -EINVAL;
        if (ctl_v1 && ctl_v1->qth_min) {
                p = kmalloc(sizeof(*p), GFP_KERNEL);
                if (!p)
index 7b261afc47b9d709fdd780a93aaba874f35d79be..7f8baa48e7c2a834aea292106fd319c2489432a3 100644 (file)
@@ -53,6 +53,7 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg)
        msg->send_failed = 0;
        msg->send_error = 0;
        msg->can_delay = 1;
+       msg->abandoned = 0;
        msg->expires_at = 0;
        INIT_LIST_HEAD(&msg->chunks);
 }
@@ -304,6 +305,13 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
        if (!chunk->asoc->peer.prsctp_capable)
                return 0;
 
+       if (chunk->msg->abandoned)
+               return 1;
+
+       if (!chunk->has_tsn &&
+           !(chunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG))
+               return 0;
+
        if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
            time_after(jiffies, chunk->msg->expires_at)) {
                struct sctp_stream_out *streamout =
@@ -316,6 +324,7 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
                        chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
                        streamout->ext->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
                }
+               chunk->msg->abandoned = 1;
                return 1;
        } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
                   chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
@@ -324,10 +333,12 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
 
                chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
                streamout->ext->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
+               chunk->msg->abandoned = 1;
                return 1;
        } else if (!SCTP_PR_POLICY(chunk->sinfo.sinfo_flags) &&
                   chunk->msg->expires_at &&
                   time_after(jiffies, chunk->msg->expires_at)) {
+               chunk->msg->abandoned = 1;
                return 1;
        }
        /* PRIO policy is processed by sendmsg, not here */
index 3f619fdcbf0a0b4a6f35ece8021c011f874a2d79..291c97b07058218635fcfcd06214aa79d74ec80d 100644 (file)
@@ -78,6 +78,9 @@ const char *sctp_cname(const union sctp_subtype cid)
        case SCTP_CID_AUTH:
                return "AUTH";
 
+       case SCTP_CID_RECONF:
+               return "RECONF";
+
        default:
                break;
        }
index 4db012aa25f7a042f063bc17b56270effebc6cc6..7d67feeeffc1e758ae4be4ef1ddaea23276d1f5e 100644 (file)
@@ -364,10 +364,12 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
        list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
                struct sctp_stream_out *streamout;
 
-               if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
-                   chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
+               if (!chk->msg->abandoned &&
+                   (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
+                    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
                        continue;
 
+               chk->msg->abandoned = 1;
                list_del_init(&chk->transmitted_list);
                sctp_insert_list(&asoc->outqueue.abandoned,
                                 &chk->transmitted_list);
@@ -377,7 +379,8 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
                asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
                streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
 
-               if (!chk->tsn_gap_acked) {
+               if (queue != &asoc->outqueue.retransmit &&
+                   !chk->tsn_gap_acked) {
                        if (chk->transport)
                                chk->transport->flight_size -=
                                                sctp_data_size(chk);
@@ -403,10 +406,13 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
        q->sched->unsched_all(&asoc->stream);
 
        list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
-               if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
-                   chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
+               if (!chk->msg->abandoned &&
+                   (!(chk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG) ||
+                    !SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
+                    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
                        continue;
 
+               chk->msg->abandoned = 1;
                sctp_sched_dequeue_common(q, chk);
                asoc->sent_cnt_removable--;
                asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
@@ -1434,7 +1440,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                        /* If this chunk has not been acked, stop
                         * considering it as 'outstanding'.
                         */
-                       if (!tchunk->tsn_gap_acked) {
+                       if (transmitted_queue != &q->retransmit &&
+                           !tchunk->tsn_gap_acked) {
                                if (tchunk->transport)
                                        tchunk->transport->flight_size -=
                                                        sctp_data_size(tchunk);
index 014847e25648182dbf99d8fb095e094af76264bb..3253f724a995256084dcb1f6610de3384b475e79 100644 (file)
@@ -3891,13 +3891,17 @@ static int sctp_setsockopt_reset_streams(struct sock *sk,
        struct sctp_association *asoc;
        int retval = -EINVAL;
 
-       if (optlen < sizeof(struct sctp_reset_streams))
+       if (optlen < sizeof(*params))
                return -EINVAL;
 
        params = memdup_user(optval, optlen);
        if (IS_ERR(params))
                return PTR_ERR(params);
 
+       if (params->srs_number_streams * sizeof(__u16) >
+           optlen - sizeof(*params))
+               goto out;
+
        asoc = sctp_id2assoc(sk, params->srs_assoc_id);
        if (!asoc)
                goto out;
@@ -5080,7 +5084,6 @@ static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *p
        *newfile = sock_alloc_file(newsock, 0, NULL);
        if (IS_ERR(*newfile)) {
                put_unused_fd(retval);
-               sock_release(newsock);
                retval = PTR_ERR(*newfile);
                *newfile = NULL;
                return retval;
index a71be33f3afeb0aaaef174ee082c4c547aab1e2d..e36ec5dd64c6ff969fc30aae893d1d5ca8c221bf 100644 (file)
@@ -1084,29 +1084,21 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                      gfp_t gfp)
 {
-       struct sctp_association *asoc;
-       __u16 needed, freed;
-
-       asoc = ulpq->asoc;
+       struct sctp_association *asoc = ulpq->asoc;
+       __u32 freed = 0;
+       __u16 needed;
 
-       if (chunk) {
-               needed = ntohs(chunk->chunk_hdr->length);
-               needed -= sizeof(struct sctp_data_chunk);
-       } else
-               needed = SCTP_DEFAULT_MAXWINDOW;
-
-       freed = 0;
+       needed = ntohs(chunk->chunk_hdr->length) -
+                sizeof(struct sctp_data_chunk);
 
        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
-               if (freed < needed) {
+               if (freed < needed)
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
-               }
        }
        /* If able to free enough room, accept this chunk. */
-       if (chunk && (freed >= needed)) {
-               int retval;
-               retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+       if (freed >= needed) {
+               int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
                /*
                 * Enter partial delivery if chunk has not been
                 * delivered; otherwise, drain the reassembly queue.
index 42d8e9c9ccd5028793ebeb27fb319911a0f4ce35..05f361faec451cdd69168dd5e2cd7c2ca7c8f7fb 100644 (file)
@@ -406,8 +406,10 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
                name.len = strlen(name.name);
        }
        path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
-       if (unlikely(!path.dentry))
+       if (unlikely(!path.dentry)) {
+               sock_release(sock);
                return ERR_PTR(-ENOMEM);
+       }
        path.mnt = mntget(sock_mnt);
 
        d_instantiate(path.dentry, SOCK_INODE(sock));
@@ -415,9 +417,11 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
        file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
                  &socket_file_ops);
        if (IS_ERR(file)) {
-               /* drop dentry, keep inode */
+               /* drop dentry, keep inode for a bit */
                ihold(d_inode(path.dentry));
                path_put(&path);
+               /* ... and now kill it properly */
+               sock_release(sock);
                return file;
        }
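
With these two hunks sock_alloc_file() takes ownership of the socket: on any failure it releases the socket itself, so the callers touched elsewhere in this patch (the SCTP peeloff path above, and socket(), socketpair() and accept4() below) drop their now-redundant sock_release() calls. A condensed, hypothetical caller following the new contract:

#include <linux/net.h>
#include <linux/file.h>
#include <linux/err.h>

/* Hypothetical wrapper: note there is no sock_release() after a failed
 * sock_alloc_file(), because the socket has already been freed there.
 */
static int example_sock_fd(int family, int type, int protocol, int flags)
{
        struct socket *sock;
        struct file *file;
        int err, fd;

        err = sock_create(family, type, protocol, &sock);
        if (err < 0)
                return err;

        fd = get_unused_fd_flags(flags);
        if (fd < 0) {
                sock_release(sock);     /* we still own the socket here */
                return fd;
        }

        file = sock_alloc_file(sock, flags, NULL);
        if (IS_ERR(file)) {
                put_unused_fd(fd);
                return PTR_ERR(file);   /* socket already released */
        }

        fd_install(fd, file);
        return fd;
}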
 
@@ -1330,19 +1334,9 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
 
        retval = sock_create(family, type, protocol, &sock);
        if (retval < 0)
-               goto out;
-
-       retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
-       if (retval < 0)
-               goto out_release;
-
-out:
-       /* It may be already another descriptor 8) Not kernel problem. */
-       return retval;
+               return retval;
 
-out_release:
-       sock_release(sock);
-       return retval;
+       return sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
 }
 
 /*
@@ -1365,88 +1359,73 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
        if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
                flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
 
+       /*
+        * reserve descriptors and make sure we won't fail
+        * to return them to userland.
+        */
+       fd1 = get_unused_fd_flags(flags);
+       if (unlikely(fd1 < 0))
+               return fd1;
+
+       fd2 = get_unused_fd_flags(flags);
+       if (unlikely(fd2 < 0)) {
+               put_unused_fd(fd1);
+               return fd2;
+       }
+
+       err = put_user(fd1, &usockvec[0]);
+       if (err)
+               goto out;
+
+       err = put_user(fd2, &usockvec[1]);
+       if (err)
+               goto out;
+
        /*
         * Obtain the first socket and check if the underlying protocol
         * supports the socketpair call.
         */
 
        err = sock_create(family, type, protocol, &sock1);
-       if (err < 0)
+       if (unlikely(err < 0))
                goto out;
 
        err = sock_create(family, type, protocol, &sock2);
-       if (err < 0)
-               goto out_release_1;
-
-       err = sock1->ops->socketpair(sock1, sock2);
-       if (err < 0)
-               goto out_release_both;
-
-       fd1 = get_unused_fd_flags(flags);
-       if (unlikely(fd1 < 0)) {
-               err = fd1;
-               goto out_release_both;
+       if (unlikely(err < 0)) {
+               sock_release(sock1);
+               goto out;
        }
 
-       fd2 = get_unused_fd_flags(flags);
-       if (unlikely(fd2 < 0)) {
-               err = fd2;
-               goto out_put_unused_1;
+       err = sock1->ops->socketpair(sock1, sock2);
+       if (unlikely(err < 0)) {
+               sock_release(sock2);
+               sock_release(sock1);
+               goto out;
        }
 
        newfile1 = sock_alloc_file(sock1, flags, NULL);
        if (IS_ERR(newfile1)) {
                err = PTR_ERR(newfile1);
-               goto out_put_unused_both;
+               sock_release(sock2);
+               goto out;
        }
 
        newfile2 = sock_alloc_file(sock2, flags, NULL);
        if (IS_ERR(newfile2)) {
                err = PTR_ERR(newfile2);
-               goto out_fput_1;
+               fput(newfile1);
+               goto out;
        }
 
-       err = put_user(fd1, &usockvec[0]);
-       if (err)
-               goto out_fput_both;
-
-       err = put_user(fd2, &usockvec[1]);
-       if (err)
-               goto out_fput_both;
-
        audit_fd_pair(fd1, fd2);
 
        fd_install(fd1, newfile1);
        fd_install(fd2, newfile2);
-       /* fd1 and fd2 may be already another descriptors.
-        * Not kernel problem.
-        */
-
        return 0;
 
-out_fput_both:
-       fput(newfile2);
-       fput(newfile1);
-       put_unused_fd(fd2);
-       put_unused_fd(fd1);
-       goto out;
-
-out_fput_1:
-       fput(newfile1);
-       put_unused_fd(fd2);
-       put_unused_fd(fd1);
-       sock_release(sock2);
-       goto out;
-
-out_put_unused_both:
+out:
        put_unused_fd(fd2);
-out_put_unused_1:
        put_unused_fd(fd1);
-out_release_both:
-       sock_release(sock2);
-out_release_1:
-       sock_release(sock1);
-out:
        return err;
 }
 
@@ -1562,7 +1541,6 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
        if (IS_ERR(newfile)) {
                err = PTR_ERR(newfile);
                put_unused_fd(newfd);
-               sock_release(newsock);
                goto out_put;
        }
 
index c4778cae58ef12c191958261a50e58e5f67082e8..444380f968f1158660f6a01a10cd8223c9db6081 100644 (file)
@@ -231,6 +231,7 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
                        goto out_free_groups;
                creds->cr_group_info->gid[i] = kgid;
        }
+       groups_sort(creds->cr_group_info);
 
        return 0;
 out_free_groups:
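
groups_sort() is added here, and in the svcauth_gss, unix_gid and svcauth_unix parsers further down, because group membership checks (groups_search(), in_group_p()) binary-search the gid array and therefore need it sorted before the group_info is used. A minimal sketch of the fill-then-sort pattern, with a hypothetical id list:

#include <linux/cred.h>
#include <linux/uidgid.h>

/* Fill every slot first, sort once, then hand out the group_info;
 * ids[] and the wrapper itself are hypothetical.
 */
static struct group_info *example_build_groups(const gid_t *ids, int n)
{
        struct group_info *gi;
        int i;

        gi = groups_alloc(n);
        if (!gi)
                return NULL;

        for (i = 0; i < n; i++)
                gi->gid[i] = make_kgid(&init_user_ns, ids[i]);

        groups_sort(gi);        /* must precede any membership lookup */
        return gi;              /* caller releases with put_group_info() */
}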
index 73165e9ca5bfd2c2a928f7d1c16569cd1b59e65a..26531193fce4d07f4b6d513093544b4a760f96ab 100644 (file)
@@ -264,7 +264,7 @@ out:
        return status;
 }
 
-static struct cache_detail rsi_cache_template = {
+static const struct cache_detail rsi_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = RSI_HASHMAX,
        .name           = "auth.rpcsec.init",
@@ -481,6 +481,7 @@ static int rsc_parse(struct cache_detail *cd,
                                goto out;
                        rsci.cred.cr_group_info->gid[i] = kgid;
                }
+               groups_sort(rsci.cred.cr_group_info);
 
                /* mech name */
                len = qword_get(&mesg, buf, mlen);
@@ -524,7 +525,7 @@ out:
        return status;
 }
 
-static struct cache_detail rsc_cache_template = {
+static const struct cache_detail rsc_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = RSC_HASHMAX,
        .name           = "auth.rpcsec.context",
index 79d55d949d9a794a1501aee45f4807e76c7bfa1d..e68943895be48e36f6225280b5f7510abb541dec 100644 (file)
@@ -1674,7 +1674,7 @@ void cache_unregister_net(struct cache_detail *cd, struct net *net)
 }
 EXPORT_SYMBOL_GPL(cache_unregister_net);
 
-struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
+struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
 {
        struct cache_detail *cd;
        int i;
index a801da812f8660246df1ac0949cf66c99e991907..e2a4184f3c5df94b953bbbfe8782edd185959f72 100644 (file)
@@ -1841,6 +1841,7 @@ call_bind_status(struct rpc_task *task)
        case -ECONNABORTED:
        case -ENOTCONN:
        case -EHOSTDOWN:
+       case -ENETDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
        case -ENOBUFS:
@@ -1917,6 +1918,7 @@ call_connect_status(struct rpc_task *task)
                /* fall through */
        case -ECONNRESET:
        case -ECONNABORTED:
+       case -ENETDOWN:
        case -ENETUNREACH:
        case -EHOSTUNREACH:
        case -EADDRINUSE:
@@ -2022,6 +2024,7 @@ call_transmit_status(struct rpc_task *task)
                 */
        case -ECONNREFUSED:
        case -EHOSTDOWN:
+       case -ENETDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
        case -EPERM:
@@ -2071,6 +2074,7 @@ call_bc_transmit(struct rpc_task *task)
        switch (task->tk_status) {
        case 0:
                /* Success */
+       case -ENETDOWN:
        case -EHOSTDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
@@ -2139,6 +2143,7 @@ call_status(struct rpc_task *task)
        task->tk_status = 0;
        switch(status) {
        case -EHOSTDOWN:
+       case -ENETDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
        case -EPERM:
index e8e0831229cfcce48b2d6802493e80a429aa108b..f9307bd6644b704ad4e038dcd850ea13c062146f 100644 (file)
@@ -745,7 +745,7 @@ static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt
        serv->sv_tmpcnt++;
        if (serv->sv_temptimer.function == NULL) {
                /* setup timer to age temp transports */
-               serv->sv_temptimer.function = (TIMER_FUNC_TYPE)svc_age_temp_xprts;
+               serv->sv_temptimer.function = svc_age_temp_xprts;
                mod_timer(&serv->sv_temptimer,
                          jiffies + svc_conn_age_period * HZ);
        }
index f81eaa8e08888a1a16041548521a5908bf8a9a50..af7f28fb8102e4313f5ced6aa585e30f3911ca6c 100644 (file)
@@ -520,6 +520,7 @@ static int unix_gid_parse(struct cache_detail *cd,
                ug.gi->gid[i] = kgid;
        }
 
+       groups_sort(ug.gi);
        ugp = unix_gid_lookup(cd, uid);
        if (ugp) {
                struct cache_head *ch;
@@ -569,7 +570,7 @@ static int unix_gid_show(struct seq_file *m,
        return 0;
 }
 
-static struct cache_detail unix_gid_cache_template = {
+static const struct cache_detail unix_gid_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = GID_HASHMAX,
        .name           = "auth.unix.gid",
@@ -819,6 +820,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
                kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
                cred->cr_group_info->gid[i] = kgid;
        }
+       groups_sort(cred->cr_group_info);
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
                *authp = rpc_autherr_badverf;
                return SVC_DENIED;
@@ -862,7 +864,7 @@ struct auth_ops svcauth_unix = {
        .set_client     = svcauth_unix_set_client,
 };
 
-static struct cache_detail ip_map_cache_template = {
+static const struct cache_detail ip_map_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = IP_HASHMAX,
        .name           = "auth.unix.ip",
index 333b9d697ae5373d00c6001b9c7f75f3d6c0ed91..33b74fd8405185d906d07e315c9b5a83775e747d 100644 (file)
@@ -1001,6 +1001,7 @@ void xprt_transmit(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
+       unsigned int connect_cookie;
        int status, numreqs;
 
        dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
@@ -1024,6 +1025,7 @@ void xprt_transmit(struct rpc_task *task)
        } else if (!req->rq_bytes_sent)
                return;
 
+       connect_cookie = xprt->connect_cookie;
        req->rq_xtime = ktime_get();
        status = xprt->ops->send_request(task);
        trace_xprt_transmit(xprt, req->rq_xid, status);
@@ -1047,20 +1049,28 @@ void xprt_transmit(struct rpc_task *task)
        xprt->stat.bklog_u += xprt->backlog.qlen;
        xprt->stat.sending_u += xprt->sending.qlen;
        xprt->stat.pending_u += xprt->pending.qlen;
+       spin_unlock_bh(&xprt->transport_lock);
 
-       /* Don't race with disconnect */
-       if (!xprt_connected(xprt))
-               task->tk_status = -ENOTCONN;
-       else {
+       req->rq_connect_cookie = connect_cookie;
+       if (rpc_reply_expected(task) && !READ_ONCE(req->rq_reply_bytes_recvd)) {
                /*
-                * Sleep on the pending queue since
-                * we're expecting a reply.
+                * Sleep on the pending queue if we're expecting a reply.
+                * The spinlock ensures atomicity between the test of
+                * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
                 */
-               if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
+               spin_lock(&xprt->recv_lock);
+               if (!req->rq_reply_bytes_recvd) {
                        rpc_sleep_on(&xprt->pending, task, xprt_timer);
-               req->rq_connect_cookie = xprt->connect_cookie;
+                       /*
+                        * Send an extra queue wakeup call if the
+                        * connection was dropped in case the call to
+                        * rpc_sleep_on() raced.
+                        */
+                       if (!xprt_connected(xprt))
+                               xprt_wake_pending_tasks(xprt, -ENOTCONN);
+               }
+               spin_unlock(&xprt->recv_lock);
        }
-       spin_unlock_bh(&xprt->transport_lock);
 }
 
 static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
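
The reworked tail of xprt_transmit() snapshots connect_cookie before the send, drops transport_lock after updating the stats, and re-tests rq_reply_bytes_recvd under recv_lock (the lock the receive path takes) before sleeping, waking the queue with -ENOTCONN if the transport dropped in between. The underlying discipline is the usual lost-wakeup rule; a small user-space analogue with hypothetical names:

#include <pthread.h>
#include <stdbool.h>

/* The waiter re-checks "reply already here?" under the same lock the
 * producer takes; otherwise a reply arriving between the check and the
 * sleep is never noticed.
 */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool reply_received;
static bool disconnected;

static void wait_for_reply(void)
{
        pthread_mutex_lock(&lock);
        while (!reply_received && !disconnected)
                pthread_cond_wait(&cond, &lock); /* drops the lock atomically */
        pthread_mutex_unlock(&lock);
}

static void deliver_reply(void)
{
        pthread_mutex_lock(&lock);
        reply_received = true;
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
}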
index ed34dc0f144cce537fce51dcba5bb12fe0b6df1c..a3f2ab283aeba38b26514dd9eb0e948c71a9ee7e 100644 (file)
@@ -1408,11 +1408,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
        dprintk("RPC:       %s: reply %p completes request %p (xid 0x%08x)\n",
                __func__, rep, req, be32_to_cpu(rep->rr_xid));
 
-       if (list_empty(&req->rl_registered) &&
-           !test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags))
-               rpcrdma_complete_rqst(rep);
-       else
-               queue_work(rpcrdma_receive_wq, &rep->rr_work);
+       queue_work_on(req->rl_cpu, rpcrdma_receive_wq, &rep->rr_work);
        return;
 
 out_badstatus:
index 646c24494ea7eba7fb2a2296ba6339e8dbf8f31e..6ee1ad8978f3b2977de2798d1a76ded1af6f78c4 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/sunrpc/addr.h>
+#include <linux/smp.h>
 
 #include "xprt_rdma.h"
 
@@ -656,6 +657,7 @@ xprt_rdma_allocate(struct rpc_task *task)
                task->tk_pid, __func__, rqst->rq_callsize,
                rqst->rq_rcvsize, req);
 
+       req->rl_cpu = smp_processor_id();
        req->rl_connect_cookie = 0;     /* our reserved value */
        rpcrdma_set_xprtdata(rqst, req);
        rqst->rq_buffer = req->rl_sendbuf->rg_base;
index 710b3f77db82869cd23abb90ea308ca67beef2bf..8607c029c0dd820250f4547c68bda41b7daca313 100644 (file)
@@ -83,7 +83,7 @@ rpcrdma_alloc_wq(void)
        struct workqueue_struct *recv_wq;
 
        recv_wq = alloc_workqueue("xprtrdma_receive",
-                                 WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI,
+                                 WQ_MEM_RECLAIM | WQ_HIGHPRI,
                                  0);
        if (!recv_wq)
                return -ENOMEM;
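
Dropping WQ_UNBOUND works together with the new rl_cpu field and the queue_work_on() call in the two hunks above: the reply work is queued on the CPU recorded when the request was allocated, keeping completion processing close to the submitter's cache. A sketch of that pattern, with hypothetical names:

#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/errno.h>

struct my_req {
        int                     cpu;
        struct work_struct      work;   /* INIT_WORK()ed elsewhere */
};

static struct workqueue_struct *reply_wq;

static int example_wq_init(void)
{
        reply_wq = alloc_workqueue("example_reply",
                                   WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        return reply_wq ? 0 : -ENOMEM;
}

static void example_submit(struct my_req *req)
{
        req->cpu = smp_processor_id();  /* remember the submitting CPU */
        /* ... hand the request to the transport ... */
}

static void example_complete(struct my_req *req)
{
        queue_work_on(req->cpu, reply_wq, &req->work);
}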
index 51686d9eac5f992d9d23d674f94df0e77f58bb72..1342f743f1c41acae0145a49962825aa1574311c 100644 (file)
@@ -342,6 +342,7 @@ enum {
 struct rpcrdma_buffer;
 struct rpcrdma_req {
        struct list_head        rl_list;
+       int                     rl_cpu;
        unsigned int            rl_connect_cookie;
        struct rpcrdma_buffer   *rl_buffer;
        struct rpcrdma_rep      *rl_reply;
index 9cc850c2719e7da3216f3fdba66b752e99152b76..6d0cc3b8f932c35f47adb2b08a6a34a755ef8900 100644 (file)
@@ -2440,7 +2440,9 @@ static void xs_tcp_setup_socket(struct work_struct *work)
                 */
        case -ECONNREFUSED:
        case -ECONNRESET:
+       case -ENETDOWN:
        case -ENETUNREACH:
+       case -EHOSTUNREACH:
        case -EADDRINUSE:
        case -ENOBUFS:
                /*
index 47ec121574ce4ef95850f688d85b50eff766a710..c8001471da6c3c53be6c63dde1311302b093f415 100644 (file)
@@ -324,6 +324,7 @@ restart:
        if (res) {
                pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
                        name, -res);
+               kfree(b);
                return -EINVAL;
        }
 
@@ -347,8 +348,10 @@ restart:
        if (skb)
                tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
 
-       if (tipc_mon_create(net, bearer_id))
+       if (tipc_mon_create(net, bearer_id)) {
+               bearer_disable(net, b);
                return -ENOMEM;
+       }
 
        pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
                name,
index 95fec2c057d6ebdb223e19ef83bf9c383cb2156e..8e12ab55346b0cf45b9244c470b36c5f81db30d2 100644 (file)
@@ -351,8 +351,7 @@ void tipc_group_update_member(struct tipc_member *m, int len)
        if (m->window >= ADV_IDLE)
                return;
 
-       if (!list_empty(&m->congested))
-               return;
+       list_del_init(&m->congested);
 
        /* Sort member into congested members' list */
        list_for_each_entry_safe(_m, tmp, &grp->congested, congested) {
@@ -369,18 +368,20 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
        u16 prev = grp->bc_snd_nxt - 1;
        struct tipc_member *m;
        struct rb_node *n;
+       u16 ackers = 0;
 
        for (n = rb_first(&grp->members); n; n = rb_next(n)) {
                m = container_of(n, struct tipc_member, tree_node);
                if (tipc_group_is_enabled(m)) {
                        tipc_group_update_member(m, len);
                        m->bc_acked = prev;
+                       ackers++;
                }
        }
 
        /* Mark number of acknowledges to expect, if any */
        if (ack)
-               grp->bc_ackers = grp->member_cnt;
+               grp->bc_ackers = ackers;
        grp->bc_snd_nxt++;
 }
 
@@ -648,6 +649,7 @@ static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
        } else if (mtyp == GRP_REMIT_MSG) {
                msg_set_grp_remitted(hdr, m->window);
        }
+       msg_set_dest_droppable(hdr, true);
        __skb_queue_tail(xmitq, skb);
 }
 
@@ -689,15 +691,16 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
                        msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
                        __skb_queue_tail(inputq, m->event_msg);
                }
-               if (m->window < ADV_IDLE)
-                       tipc_group_update_member(m, 0);
-               else
-                       list_del_init(&m->congested);
+               list_del_init(&m->congested);
+               tipc_group_update_member(m, 0);
                return;
        case GRP_LEAVE_MSG:
                if (!m)
                        return;
                m->bc_syncpt = msg_grp_bc_syncpt(hdr);
+               list_del_init(&m->list);
+               list_del_init(&m->congested);
+               *usr_wakeup = true;
 
                /* Wait until WITHDRAW event is received */
                if (m->state != MBR_LEAVING) {
@@ -709,8 +712,6 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
                ehdr = buf_msg(m->event_msg);
                msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
                __skb_queue_tail(inputq, m->event_msg);
-               *usr_wakeup = true;
-               list_del_init(&m->congested);
                return;
        case GRP_ADV_MSG:
                if (!m)
@@ -849,19 +850,29 @@ void tipc_group_member_evt(struct tipc_group *grp,
                *usr_wakeup = true;
                m->usr_pending = false;
                node_up = tipc_node_is_up(net, node);
-
-               /* Hold back event if more messages might be expected */
-               if (m->state != MBR_LEAVING && node_up) {
-                       m->event_msg = skb;
-                       tipc_group_decr_active(grp, m);
-                       m->state = MBR_LEAVING;
-               } else {
-                       if (node_up)
+               m->event_msg = NULL;
+
+               if (node_up) {
+                       /* Hold back event if a LEAVE msg should be expected */
+                       if (m->state != MBR_LEAVING) {
+                               m->event_msg = skb;
+                               tipc_group_decr_active(grp, m);
+                               m->state = MBR_LEAVING;
+                       } else {
                                msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
-                       else
+                               __skb_queue_tail(inputq, skb);
+                       }
+               } else {
+                       if (m->state != MBR_LEAVING) {
+                               tipc_group_decr_active(grp, m);
+                               m->state = MBR_LEAVING;
                                msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt);
+                       } else {
+                               msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
+                       }
                        __skb_queue_tail(inputq, skb);
                }
+               list_del_init(&m->list);
                list_del_init(&m->congested);
        }
        *sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
index 8e884ed06d4b13d9751c27a51959b216b7f48b18..32dc33a94bc714f762a066389a4907e558244cd7 100644 (file)
@@ -642,9 +642,13 @@ void tipc_mon_delete(struct net *net, int bearer_id)
 {
        struct tipc_net *tn = tipc_net(net);
        struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
-       struct tipc_peer *self = get_self(net, bearer_id);
+       struct tipc_peer *self;
        struct tipc_peer *peer, *tmp;
 
+       if (!mon)
+               return;
+
+       self = get_self(net, bearer_id);
        write_lock_bh(&mon->lock);
        tn->monitors[bearer_id] = NULL;
        list_for_each_entry_safe(peer, tmp, &self->list, list) {
index acaef80fb88cfca4ea569a003f52ca04a3e2f577..d60c303423275db5c0c7c45c9bf1a7d61987bf27 100644 (file)
@@ -314,6 +314,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
        newcon->usr_data = s->tipc_conn_new(newcon->conid);
        if (!newcon->usr_data) {
                sock_release(newsock);
+               conn_put(newcon);
                return -ENOMEM;
        }
 
@@ -511,7 +512,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
        s = con->server;
        scbr = s->tipc_conn_new(*conid);
        if (!scbr) {
-               tipc_close_conn(con);
+               conn_put(con);
                return false;
        }
 
index 5d18c0caa92b213740e5c6e3152ec8ce37717dc2..41127d0b925ea4d515e7c7bbe6739dee99a442f2 100644 (file)
@@ -1140,7 +1140,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                                __skb_dequeue(arrvq);
                                __skb_queue_tail(inputq, skb);
                        }
-                       refcount_dec(&skb->users);
+                       kfree_skb(skb);
                        spin_unlock_bh(&inputq->lock);
                        continue;
                }
index ecca64fc6a6f223bf8c0e09e3a299fe4fd62509d..3deabcab4882165b668f65319a3555027bf3b292 100644 (file)
@@ -371,10 +371,6 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
                        goto rcu_out;
        }
 
-       tipc_rcv(sock_net(sk), skb, b);
-       rcu_read_unlock();
-       return 0;
-
 rcu_out:
        rcu_read_unlock();
 out:
index 5583df708b8cfedb61ca2e6fec93a79789d6f949..a827547aa102be4f3cf46a267c8c684e815e02d6 100644 (file)
@@ -487,7 +487,7 @@ static void hvs_release(struct vsock_sock *vsk)
 
        lock_sock(sk);
 
-       sk->sk_state = SS_DISCONNECTING;
+       sk->sk_state = TCP_CLOSING;
        vsock_remove_sock(vsk);
 
        release_sock(sk);
index 278d979c211a7e1f3581e2b33895f6ecfd160994..1d84f91bbfb0c8c9087e309821eb687325733358 100644 (file)
@@ -23,19 +23,36 @@ ifneq ($(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR),)
 cfg80211-y += extra-certs.o
 endif
 
-$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.x509)
+$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
        @$(kecho) "  GEN     $@"
-       @echo '#include "reg.h"' > $@
-       @echo 'const u8 shipped_regdb_certs[] = {' >> $@
-       @for f in $^ ; do hexdump -v -e '1/1 "0x%.2x," "\n"' < $$f >> $@ ; done
-       @echo '};' >> $@
-       @echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);' >> $@
+       @(echo '#include "reg.h"'; \
+         echo 'const u8 shipped_regdb_certs[] = {'; \
+         cat $^ ; \
+         echo '};'; \
+         echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
+        ) > $@
 
 $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \
                      $(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509)
        @$(kecho) "  GEN     $@"
-       @echo '#include "reg.h"' > $@
-       @echo 'const u8 extra_regdb_certs[] = {' >> $@
-       @for f in $^ ; do test -f $$f && hexdump -v -e '1/1 "0x%.2x," "\n"' < $$f >> $@ || true ; done
-       @echo '};' >> $@
-       @echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);' >> $@
+       @(set -e; \
+         allf=""; \
+         for f in $^ ; do \
+             # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \
+             thisf=$$(od -An -v -tx1 < $$f | \
+                          sed -e 's/ /\n/g' | \
+                          sed -e 's/^[0-9a-f]\+$$/\0/;t;d' | \
+                          sed -e 's/^/0x/;s/$$/,/'); \
+             # file should not be empty - maybe command substitution failed? \
+             test ! -z "$$thisf";\
+             allf=$$allf$$thisf;\
+         done; \
+         ( \
+             echo '#include "reg.h"'; \
+             echo 'const u8 extra_regdb_certs[] = {'; \
+             echo "$$allf"; \
+             echo '};'; \
+             echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);'; \
+         ) > $@)
+
+clean-files += shipped-certs.c extra-certs.c
diff --git a/net/wireless/certs/sforshee.hex b/net/wireless/certs/sforshee.hex
new file mode 100644 (file)
index 0000000..14ea666
--- /dev/null
@@ -0,0 +1,86 @@
+/* Seth Forshee's regdb certificate */
+0x30, 0x82, 0x02, 0xa4, 0x30, 0x82, 0x01, 0x8c,
+0x02, 0x09, 0x00, 0xb2, 0x8d, 0xdf, 0x47, 0xae,
+0xf9, 0xce, 0xa7, 0x30, 0x0d, 0x06, 0x09, 0x2a,
+0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b,
+0x05, 0x00, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f,
+0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73,
+0x66, 0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30,
+0x20, 0x17, 0x0d, 0x31, 0x37, 0x31, 0x30, 0x30,
+0x36, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35, 0x5a,
+0x18, 0x0f, 0x32, 0x31, 0x31, 0x37, 0x30, 0x39,
+0x31, 0x32, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35,
+0x5a, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f, 0x06,
+0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73, 0x66,
+0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30, 0x82,
+0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86,
+0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05,
+0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82,
+0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xb5,
+0x40, 0xe3, 0x9c, 0x28, 0x84, 0x39, 0x03, 0xf2,
+0x39, 0xd7, 0x66, 0x2c, 0x41, 0x38, 0x15, 0xac,
+0x7e, 0xa5, 0x83, 0x71, 0x25, 0x7e, 0x90, 0x7c,
+0x68, 0xdd, 0x6f, 0x3f, 0xd9, 0xd7, 0x59, 0x38,
+0x9f, 0x7c, 0x6a, 0x52, 0xc2, 0x03, 0x2a, 0x2d,
+0x7e, 0x66, 0xf4, 0x1e, 0xb3, 0x12, 0x70, 0x20,
+0x5b, 0xd4, 0x97, 0x32, 0x3d, 0x71, 0x8b, 0x3b,
+0x1b, 0x08, 0x17, 0x14, 0x6b, 0x61, 0xc4, 0x57,
+0x8b, 0x96, 0x16, 0x1c, 0xfd, 0x24, 0xd5, 0x0b,
+0x09, 0xf9, 0x68, 0x11, 0x84, 0xfb, 0xca, 0x51,
+0x0c, 0xd1, 0x45, 0x19, 0xda, 0x10, 0x44, 0x8a,
+0xd9, 0xfe, 0x76, 0xa9, 0xfd, 0x60, 0x2d, 0x18,
+0x0b, 0x28, 0x95, 0xb2, 0x2d, 0xea, 0x88, 0x98,
+0xb8, 0xd1, 0x56, 0x21, 0xf0, 0x53, 0x1f, 0xf1,
+0x02, 0x6f, 0xe9, 0x46, 0x9b, 0x93, 0x5f, 0x28,
+0x90, 0x0f, 0xac, 0x36, 0xfa, 0x68, 0x23, 0x71,
+0x57, 0x56, 0xf6, 0xcc, 0xd3, 0xdf, 0x7d, 0x2a,
+0xd9, 0x1b, 0x73, 0x45, 0xeb, 0xba, 0x27, 0x85,
+0xef, 0x7a, 0x7f, 0xa5, 0xcb, 0x80, 0xc7, 0x30,
+0x36, 0xd2, 0x53, 0xee, 0xec, 0xac, 0x1e, 0xe7,
+0x31, 0xf1, 0x36, 0xa2, 0x9c, 0x63, 0xc6, 0x65,
+0x5b, 0x7f, 0x25, 0x75, 0x68, 0xa1, 0xea, 0xd3,
+0x7e, 0x00, 0x5c, 0x9a, 0x5e, 0xd8, 0x20, 0x18,
+0x32, 0x77, 0x07, 0x29, 0x12, 0x66, 0x1e, 0x36,
+0x73, 0xe7, 0x97, 0x04, 0x41, 0x37, 0xb1, 0xb1,
+0x72, 0x2b, 0xf4, 0xa1, 0x29, 0x20, 0x7c, 0x96,
+0x79, 0x0b, 0x2b, 0xd0, 0xd8, 0xde, 0xc8, 0x6c,
+0x3f, 0x93, 0xfb, 0xc5, 0xee, 0x78, 0x52, 0x11,
+0x15, 0x1b, 0x7a, 0xf6, 0xe2, 0x68, 0x99, 0xe7,
+0xfb, 0x46, 0x16, 0x84, 0xe3, 0xc7, 0xa1, 0xe6,
+0xe0, 0xd2, 0x46, 0xd5, 0xe1, 0xc4, 0x5f, 0xa0,
+0x66, 0xf4, 0xda, 0xc4, 0xff, 0x95, 0x1d, 0x02,
+0x03, 0x01, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09,
+0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
+0x0b, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00,
+0x87, 0x03, 0xda, 0xf2, 0x82, 0xc2, 0xdd, 0xaf,
+0x7c, 0x44, 0x2f, 0x86, 0xd3, 0x5f, 0x4c, 0x93,
+0x48, 0xb9, 0xfe, 0x07, 0x17, 0xbb, 0x21, 0xf7,
+0x25, 0x23, 0x4e, 0xaa, 0x22, 0x0c, 0x16, 0xb9,
+0x73, 0xae, 0x9d, 0x46, 0x7c, 0x75, 0xd9, 0xc3,
+0x49, 0x57, 0x47, 0xbf, 0x33, 0xb7, 0x97, 0xec,
+0xf5, 0x40, 0x75, 0xc0, 0x46, 0x22, 0xf0, 0xa0,
+0x5d, 0x9c, 0x79, 0x13, 0xa1, 0xff, 0xb8, 0xa3,
+0x2f, 0x7b, 0x8e, 0x06, 0x3f, 0xc8, 0xb6, 0xe4,
+0x6a, 0x28, 0xf2, 0x34, 0x5c, 0x23, 0x3f, 0x32,
+0xc0, 0xe6, 0xad, 0x0f, 0xac, 0xcf, 0x55, 0x74,
+0x47, 0x73, 0xd3, 0x01, 0x85, 0xb7, 0x0b, 0x22,
+0x56, 0x24, 0x7d, 0x9f, 0x09, 0xa9, 0x0e, 0x86,
+0x9e, 0x37, 0x5b, 0x9c, 0x6d, 0x02, 0xd9, 0x8c,
+0xc8, 0x50, 0x6a, 0xe2, 0x59, 0xf3, 0x16, 0x06,
+0xea, 0xb2, 0x42, 0xb5, 0x58, 0xfe, 0xba, 0xd1,
+0x81, 0x57, 0x1a, 0xef, 0xb2, 0x38, 0x88, 0x58,
+0xf6, 0xaa, 0xc4, 0x2e, 0x8b, 0x5a, 0x27, 0xe4,
+0xa5, 0xe8, 0xa4, 0xca, 0x67, 0x5c, 0xac, 0x72,
+0x67, 0xc3, 0x6f, 0x13, 0xc3, 0x2d, 0x35, 0x79,
+0xd7, 0x8a, 0xe7, 0xf5, 0xd4, 0x21, 0x30, 0x4a,
+0xd5, 0xf6, 0xa3, 0xd9, 0x79, 0x56, 0xf2, 0x0f,
+0x10, 0xf7, 0x7d, 0xd0, 0x51, 0x93, 0x2f, 0x47,
+0xf8, 0x7d, 0x4b, 0x0a, 0x84, 0x55, 0x12, 0x0a,
+0x7d, 0x4e, 0x3b, 0x1f, 0x2b, 0x2f, 0xfc, 0x28,
+0xb3, 0x69, 0x34, 0xe1, 0x80, 0x80, 0xbb, 0xe2,
+0xaf, 0xb9, 0xd6, 0x30, 0xf1, 0x1d, 0x54, 0x87,
+0x23, 0x99, 0x9f, 0x51, 0x03, 0x4c, 0x45, 0x7d,
+0x02, 0x65, 0x73, 0xab, 0xfd, 0xcf, 0x94, 0xcc,
+0x0d, 0x3a, 0x60, 0xfd, 0x3c, 0x14, 0x2f, 0x16,
+0x33, 0xa9, 0x21, 0x1f, 0xcb, 0x50, 0xb1, 0x8f,
+0x03, 0xee, 0xa0, 0x66, 0xa9, 0x16, 0x79, 0x14,
diff --git a/net/wireless/certs/sforshee.x509 b/net/wireless/certs/sforshee.x509
deleted file mode 100644 (file)
index c6f8f9d..0000000
Binary files a/net/wireless/certs/sforshee.x509 and /dev/null differ
index 459611577d3dfa29f72442dfe1dcdfe4f2c6a502..801d4781a73b6724ce06f95c3be709bb9cd53174 100644 (file)
@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(lib80211_crypto_lock);
 static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info,
                                          int force);
 static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info);
-static void lib80211_crypt_deinit_handler(unsigned long data);
+static void lib80211_crypt_deinit_handler(struct timer_list *t);
 
 int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name,
                                spinlock_t *lock)
@@ -55,8 +55,8 @@ int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name,
        info->lock = lock;
 
        INIT_LIST_HEAD(&info->crypt_deinit_list);
-       setup_timer(&info->crypt_deinit_timer, lib80211_crypt_deinit_handler,
-                       (unsigned long)info);
+       timer_setup(&info->crypt_deinit_timer, lib80211_crypt_deinit_handler,
+                   0);
 
        return 0;
 }
@@ -116,9 +116,10 @@ static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
        spin_unlock_irqrestore(info->lock, flags);
 }
 
-static void lib80211_crypt_deinit_handler(unsigned long data)
+static void lib80211_crypt_deinit_handler(struct timer_list *t)
 {
-       struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data;
+       struct lib80211_crypt_info *info = from_timer(info, t,
+                                                     crypt_deinit_timer);
        unsigned long flags;
 
        lib80211_crypt_deinit_entries(info, 0);
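
This is the timer API conversion that also appears in the x25 and xfrm hunks below: the callback now receives the struct timer_list pointer, the containing object is recovered with from_timer(), and timer_setup() replaces setup_timer() with its cast-to-unsigned-long data cookie. The general shape, on a hypothetical structure:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
        struct timer_list watchdog;
        /* ... */
};

static void my_watchdog_fn(struct timer_list *t)
{
        struct my_dev *dev = from_timer(dev, t, watchdog);

        /* previously: struct my_dev *dev = (struct my_dev *)data; */
        mod_timer(&dev->watchdog, jiffies + HZ);
}

static void my_dev_start(struct my_dev *dev)
{
        timer_setup(&dev->watchdog, my_watchdog_fn, 0);
        mod_timer(&dev->watchdog, jiffies + HZ);
}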
index b1ac23ca20c86be0af71e9a1ba92cc99d8d5a967..213d0c498c97d78b17c81d1fd8b850c8768f7057 100644 (file)
@@ -2610,7 +2610,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
        case NL80211_IFTYPE_AP:
                if (wdev->ssid_len &&
                    nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
-                       goto nla_put_failure;
+                       goto nla_put_failure_locked;
                break;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
@@ -2623,7 +2623,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
                if (!ssid_ie)
                        break;
                if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
-                       goto nla_put_failure;
+                       goto nla_put_failure_locked;
                break;
                }
        default:
@@ -2635,6 +2635,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
        genlmsg_end(msg, hdr);
        return 0;
 
+ nla_put_failure_locked:
+       wdev_unlock(wdev);
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
        return -EMSGSIZE;
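
The new nla_put_failure_locked label is needed because these nla_put() calls run with the wdev lock held, while the existing nla_put_failure path assumes the lock has already been released; bailing out through the old label from inside the locked region would leave the lock held. The two-label shape in isolation, with hypothetical helpers:

#include <linux/mutex.h>

struct thing {
        struct mutex lock;
};

int step_needing_lock(struct thing *t);         /* hypothetical */
int step_without_lock(struct thing *t);         /* hypothetical */

static int example_fill(struct thing *t)
{
        int err;

        mutex_lock(&t->lock);
        err = step_needing_lock(t);
        if (err)
                goto err_unlock;        /* lock still held */
        mutex_unlock(&t->lock);

        err = step_without_lock(t);
        if (err)
                goto err;               /* lock already dropped */
        return 0;

err_unlock:
        mutex_unlock(&t->lock);
err:
        return err;
}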
index ea87143314f3048f8f08ae6974f7f9938e306350..562cc11131f6c8ba37cb5e4e09b1587dca3c8a2c 100644 (file)
@@ -415,7 +415,7 @@ static void __x25_destroy_socket(struct sock *sk)
        if (sk_has_allocations(sk)) {
                /* Defer: outstanding buffers */
                sk->sk_timer.expires  = jiffies + 10 * HZ;
-               sk->sk_timer.function = (TIMER_FUNC_TYPE)x25_destroy_timer;
+               sk->sk_timer.function = x25_destroy_timer;
                add_timer(&sk->sk_timer);
        } else {
                /* drop last reference so sock_put will free */
index e0cd04d283527cde1c32d430b35cec56ebb02860..a6a8ab09b914660fcd600255cf6ff3b2dc46bc32 100644 (file)
@@ -36,7 +36,7 @@
 LIST_HEAD(x25_neigh_list);
 DEFINE_RWLOCK(x25_neigh_list_lock);
 
-static void x25_t20timer_expiry(unsigned long);
+static void x25_t20timer_expiry(struct timer_list *);
 
 static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
 static void x25_transmit_restart_request(struct x25_neigh *nb);
@@ -49,9 +49,9 @@ static inline void x25_start_t20timer(struct x25_neigh *nb)
        mod_timer(&nb->t20timer, jiffies + nb->t20);
 }
 
-static void x25_t20timer_expiry(unsigned long param)
+static void x25_t20timer_expiry(struct timer_list *t)
 {
-       struct x25_neigh *nb = (struct x25_neigh *)param;
+       struct x25_neigh *nb = from_timer(nb, t, t20timer);
 
        x25_transmit_restart_request(nb);
 
@@ -252,7 +252,7 @@ void x25_link_device_up(struct net_device *dev)
                return;
 
        skb_queue_head_init(&nb->queue);
-       setup_timer(&nb->t20timer, x25_t20timer_expiry, (unsigned long)nb);
+       timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);
 
        dev_hold(dev);
        nb->dev      = dev;
index 1dfba3c23459e9f00904592857e74e730fd49216..fa3461002b3ea3bcdbabc4d195aef237b56a13f5 100644 (file)
@@ -36,7 +36,7 @@ void x25_init_timers(struct sock *sk)
        timer_setup(&x25->timer, x25_timer_expiry, 0);
 
        /* initialized by sock_init_data */
-       sk->sk_timer.function = (TIMER_FUNC_TYPE)x25_heartbeat_expiry;
+       sk->sk_timer.function = x25_heartbeat_expiry;
 }
 
 void x25_start_heartbeat(struct sock *sk)
index 88d0a563e1413a12339eba6703c40eac3f119ac3..500b3391f474b96fe273060ff8eae16f1e23f3c2 100644 (file)
@@ -556,7 +556,7 @@ out:
        return HRTIMER_NORESTART;
 }
 
-static void xfrm_replay_timer_handler(unsigned long data);
+static void xfrm_replay_timer_handler(struct timer_list *t);
 
 struct xfrm_state *xfrm_state_alloc(struct net *net)
 {
@@ -574,8 +574,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
                INIT_HLIST_NODE(&x->byspi);
                tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
                                        CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
-               setup_timer(&x->rtimer, xfrm_replay_timer_handler,
-                               (unsigned long)x);
+               timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
                x->curlft.add_time = get_seconds();
                x->lft.soft_byte_limit = XFRM_INF;
                x->lft.soft_packet_limit = XFRM_INF;
@@ -1880,9 +1879,9 @@ void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
 }
 EXPORT_SYMBOL(xfrm_state_walk_done);
 
-static void xfrm_replay_timer_handler(unsigned long data)
+static void xfrm_replay_timer_handler(struct timer_list *t)
 {
-       struct xfrm_state *x = (struct xfrm_state *)data;
+       struct xfrm_state *x = from_timer(x, t, rtimer);
 
        spin_lock(&x->lock);
 
index 3b4945c1eab06aec48c326f6b1d822cb158461ff..adeaa1302f346a2c2af0843584d2e1803e8f66a6 100644 (file)
@@ -1,7 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
 # List of programs to build
 hostprogs-y := test_lru_dist
 hostprogs-y += sock_example
index 522ca9252d6cd41f5272b4755da876ff93cffdbb..242631aa4ea2366081711ba96284f189c9755569 100644 (file)
@@ -193,8 +193,18 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
                return -1;
        }
        event_fd[prog_cnt - 1] = efd;
-       ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
-       ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
+       err = ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
+       if (err < 0) {
+               printf("ioctl PERF_EVENT_IOC_ENABLE failed err %s\n",
+                      strerror(errno));
+               return -1;
+       }
+       err = ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
+       if (err < 0) {
+               printf("ioctl PERF_EVENT_IOC_SET_BPF failed err %s\n",
+                      strerror(errno));
+               return -1;
+       }
 
        return 0;
 }
index f5c3012ffa795b676b7e4ff0bb63626844f1ad2e..dec1b22adf54afff9651f92eddb65a9aaa1e4d76 100644 (file)
@@ -1,7 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
 # List of programs to build
 hostprogs-y := hid-example
 
index 19a870eed82b398307404431947e86d727db1674..0e349b80686e76a421759b931ec04d194e446768 100644 (file)
@@ -1,7 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
 hostprogs-$(CONFIG_SAMPLE_SECCOMP) := bpf-fancy dropper bpf-direct
 
 HOSTCFLAGS_bpf-fancy.o += -I$(objtree)/usr/include
index 9291ab8e0f8c5a089425ae65f9ee20e718e0a9ff..73f1da4d116cf9a78a01cb3293d11f8a27a589cc 100644 (file)
@@ -1,6 +1,3 @@
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
 # List of programs to build
 hostprogs-y := sockmap
 
index 1f80a3d8cf45ca913e97ba5094443af07a12b6ae..59df7c25a9d1589caa4ad444768b33055c9f99f8 100644 (file)
@@ -1,6 +1,3 @@
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
 # List of programs to build
 hostprogs-$(CONFIG_SAMPLE_STATX) := test-statx
 
index c95a696560a7de59f3a7839ef85ec57d90c3aa29..8d7fd6190ac4e9f07cd2de96f9e144a3b73fdc5b 100644 (file)
@@ -1,6 +1,3 @@
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
 # List of programs to build
 hostprogs-y := uhid-example
 
index 65ea1e6aaaf6cdc30f0700c4f5d44138737ae84f..cb8997ed01497ccebdfded3aef8ccdd401fa8482 100644 (file)
@@ -76,7 +76,7 @@ lib-target := $(obj)/lib.a
 obj-y += $(obj)/lib-ksyms.o
 endif
 
-ifneq ($(strip $(obj-y) $(obj-m) $(obj-) $(subdir-m) $(lib-target)),)
+ifneq ($(strip $(obj-y) $(need-builtin)),)
 builtin-target := $(obj)/built-in.o
 endif
 
@@ -566,7 +566,7 @@ targets := $(filter-out $(PHONY), $(targets))
 
 PHONY += $(subdir-ym)
 $(subdir-ym):
-       $(Q)$(MAKE) $(build)=$@
+       $(Q)$(MAKE) $(build)=$@ need-builtin=$(if $(findstring $@,$(subdir-obj-y)),1)
 
 # Add FORCE to the prequisites of a target to force it to be always rebuilt.
 # ---------------------------------------------------------------------------
index 08eb40a7729f35c25dbbca6c6f5bb7c5b1c0fe3b..1ca4dcd2d5005146a5c58b2385ac5f4c5ea0b8c1 100644 (file)
@@ -57,7 +57,7 @@ multi-objs-m := $(foreach m, $(multi-used-m), $($(m:.o=-objs)) $($(m:.o=-y)))
 subdir-obj-y := $(filter %/built-in.o, $(obj-y))
 
 # Replace multi-part objects by their individual parts, look at local dir only
-real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) $(extra-y)
+real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m)))
 real-objs-m := $(foreach m, $(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))),$($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m)),$(m)))
 
 # DTB
index 6f099f915dcfe15a46541af65d39327e6494f21f..94b664817ad91e2e48c8fef6361a20ab2a632763 100755 (executable)
@@ -83,8 +83,11 @@ def print_result(symboltype, symbolformat, argc):
     for d, n in delta:
         if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
 
-    print("Total: Before=%d, After=%d, chg %+.2f%%" % \
-        (otot, ntot, (ntot - otot)*100.0/otot))
+    if otot:
+        percent = (ntot - otot) * 100.0 / otot
+    else:
+        percent = 0
+    print("Total: Before=%d, After=%d, chg %+.2f%%" % (otot, ntot, percent))
 
 if sys.argv[1] == "-c":
     print_result("Function", "tT", 3)
index 95cda3ecc66b859ec1c86762810bfbd200687ef0..31031f10fe56b0b15ec44916ca7996e174fd5090 100755 (executable)
@@ -5753,7 +5753,7 @@ sub process {
                        for (my $count = $linenr; $count <= $lc; $count++) {
                                my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
                                $fmt =~ s/%%//g;
-                               if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGNO]).)/) {
+                               if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGNOx]).)/) {
                                        $bad_extension = $1;
                                        last;
                                }
@@ -6233,28 +6233,6 @@ sub process {
                        }
                }
 
-# whine about ACCESS_ONCE
-               if ($^V && $^V ge 5.10.0 &&
-                   $line =~ /\bACCESS_ONCE\s*$balanced_parens\s*(=(?!=))?\s*($FuncArg)?/) {
-                       my $par = $1;
-                       my $eq = $2;
-                       my $fun = $3;
-                       $par =~ s/^\(\s*(.*)\s*\)$/$1/;
-                       if (defined($eq)) {
-                               if (WARN("PREFER_WRITE_ONCE",
-                                        "Prefer WRITE_ONCE(<FOO>, <BAR>) over ACCESS_ONCE(<FOO>) = <BAR>\n" . $herecurr) &&
-                                   $fix) {
-                                       $fixed[$fixlinenr] =~ s/\bACCESS_ONCE\s*\(\s*\Q$par\E\s*\)\s*$eq\s*\Q$fun\E/WRITE_ONCE($par, $fun)/;
-                               }
-                       } else {
-                               if (WARN("PREFER_READ_ONCE",
-                                        "Prefer READ_ONCE(<FOO>) over ACCESS_ONCE(<FOO>)\n" . $herecurr) &&
-                                   $fix) {
-                                       $fixed[$fixlinenr] =~ s/\bACCESS_ONCE\s*\(\s*\Q$par\E\s*\)/READ_ONCE($par)/;
-                               }
-                       }
-               }
-
 # check for mutex_trylock_recursive usage
                if ($line =~ /mutex_trylock_recursive/) {
                        ERROR("LOCKING",
index d5f28d5044e74e996a66311b691d9b5455b88389..ecfac64b39fe0c403d90e8700f92ca3aff400ead 100755 (executable)
@@ -30,12 +30,6 @@ else
        VERBOSE=0
 fi
 
-if [ -z "$J" ]; then
-       NPROC=$(getconf _NPROCESSORS_ONLN)
-else
-       NPROC="$J"
-fi
-
 FLAGS="--very-quiet"
 
 # You can use SPFLAGS to append extra arguments to coccicheck or override any
@@ -70,6 +64,9 @@ if [ "$C" = "1" -o "$C" = "2" ]; then
     # Take only the last argument, which is the C file to test
     shift $(( $# - 1 ))
     OPTIONS="$COCCIINCLUDE $1"
+
+    # No need to parallelize Coccinelle since this mode takes one input file.
+    NPROC=1
 else
     ONLINE=0
     if [ "$KBUILD_EXTMOD" = "" ] ; then
@@ -77,6 +74,12 @@ else
     else
         OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE"
     fi
+
+    if [ -z "$J" ]; then
+        NPROC=$(getconf _NPROCESSORS_ONLN)
+    else
+        NPROC="$J"
+    fi
 fi
 
 if [ "$KBUILD_EXTMOD" != "" ] ; then
diff --git a/scripts/coccinelle/api/setup_timer.cocci b/scripts/coccinelle/api/setup_timer.cocci
deleted file mode 100644 (file)
index e457708..0000000
+++ /dev/null
@@ -1,277 +0,0 @@
-/// Use setup_timer function instead of initializing timer with the function
-/// and data fields
-// Confidence: High
-// Copyright: (C) 2016 Vaishali Thakkar, Oracle. GPLv2
-// Copyright: (C) 2017 Kees Cook, Google. GPLv2
-// Options: --no-includes --include-headers
-// Keywords: init_timer, setup_timer
-
-virtual patch
-virtual context
-virtual org
-virtual report
-
-// Match the common cases first to avoid Coccinelle parsing loops with
-// "... when" clauses.
-
-@match_immediate_function_data_after_init_timer
-depends on patch && !context && !org && !report@
-expression e, func, da;
-@@
-
--init_timer
-+setup_timer
- ( \(&e\|e\)
-+, func, da
- );
-(
--\(e.function\|e->function\) = func;
--\(e.data\|e->data\) = da;
-|
--\(e.data\|e->data\) = da;
--\(e.function\|e->function\) = func;
-)
-
-@match_immediate_function_data_before_init_timer
-depends on patch && !context && !org && !report@
-expression e, func, da;
-@@
-
-(
--\(e.function\|e->function\) = func;
--\(e.data\|e->data\) = da;
-|
--\(e.data\|e->data\) = da;
--\(e.function\|e->function\) = func;
-)
--init_timer
-+setup_timer
- ( \(&e\|e\)
-+, func, da
- );
-
-@match_function_and_data_after_init_timer
-depends on patch && !context && !org && !report@
-expression e, e2, e3, e4, e5, func, da;
-@@
-
--init_timer
-+setup_timer
- ( \(&e\|e\)
-+, func, da
- );
- ... when != func = e2
-     when != da = e3
-(
--e.function = func;
-... when != da = e4
--e.data = da;
-|
--e->function = func;
-... when != da = e4
--e->data = da;
-|
--e.data = da;
-... when != func = e5
--e.function = func;
-|
--e->data = da;
-... when != func = e5
--e->function = func;
-)
-
-@match_function_and_data_before_init_timer
-depends on patch && !context && !org && !report@
-expression e, e2, e3, e4, e5, func, da;
-@@
-(
--e.function = func;
-... when != da = e4
--e.data = da;
-|
--e->function = func;
-... when != da = e4
--e->data = da;
-|
--e.data = da;
-... when != func = e5
--e.function = func;
-|
--e->data = da;
-... when != func = e5
--e->function = func;
-)
-... when != func = e2
-    when != da = e3
--init_timer
-+setup_timer
- ( \(&e\|e\)
-+, func, da
- );
-
-@r1 exists@
-expression t;
-identifier f;
-position p;
-@@
-
-f(...) { ... when any
-  init_timer@p(\(&t\|t\))
-  ... when any
-}
-
-@r2 exists@
-expression r1.t;
-identifier g != r1.f;
-expression e8;
-@@
-
-g(...) { ... when any
-  \(t.data\|t->data\) = e8
-  ... when any
-}
-
-// It is dangerous to use setup_timer if data field is initialized
-// in another function.
-
-@script:python depends on r2@
-p << r1.p;
-@@
-
-cocci.include_match(False)
-
-@r3 depends on patch && !context && !org && !report@
-expression r1.t, func, e7;
-position r1.p;
-@@
-
-(
--init_timer@p(&t);
-+setup_timer(&t, func, 0UL);
-... when != func = e7
--t.function = func;
-|
--t.function = func;
-... when != func = e7
--init_timer@p(&t);
-+setup_timer(&t, func, 0UL);
-|
--init_timer@p(t);
-+setup_timer(t, func, 0UL);
-... when != func = e7
--t->function = func;
-|
--t->function = func;
-... when != func = e7
--init_timer@p(t);
-+setup_timer(t, func, 0UL);
-)
-
-// ----------------------------------------------------------------------------
-
-@match_immediate_function_data_after_init_timer_context
-depends on !patch && (context || org || report)@
-expression da, e, func;
-position j0, j1, j2;
-@@
-
-* init_timer@j0 (&e);
-(
-* e@j1.function = func;
-* e@j2.data = da;
-|
-* e@j1.data = da;
-* e@j2.function = func;
-)
-
-@match_function_and_data_after_init_timer_context
-depends on !patch && (context || org || report)@
-expression a, b, e1, e2, e3, e4, e5;
-position j0 != match_immediate_function_data_after_init_timer_context.j0,j1,j2;
-@@
-
-* init_timer@j0 (&e1);
-... when != a = e2
-    when != b = e3
-(
-* e1@j1.function = a;
-... when != b = e4
-* e1@j2.data = b;
-|
-* e1@j1.data = b;
-... when != a = e5
-* e1@j2.function = a;
-)
-
-@r3_context depends on !patch && (context || org || report)@
-expression c, e6, e7;
-position r1.p;
-position j0 !=
-  {match_immediate_function_data_after_init_timer_context.j0,
-   match_function_and_data_after_init_timer_context.j0}, j1;
-@@
-
-* init_timer@j0@p (&e6);
-... when != c = e7
-* e6@j1.function = c;
-
-// ----------------------------------------------------------------------------
-
-@script:python match_immediate_function_data_after_init_timer_org
-depends on org@
-j0 << match_immediate_function_data_after_init_timer_context.j0;
-j1 << match_immediate_function_data_after_init_timer_context.j1;
-j2 << match_immediate_function_data_after_init_timer_context.j2;
-@@
-
-msg = "Use setup_timer function."
-coccilib.org.print_todo(j0[0], msg)
-coccilib.org.print_link(j1[0], "")
-coccilib.org.print_link(j2[0], "")
-
-@script:python match_function_and_data_after_init_timer_org depends on org@
-j0 << match_function_and_data_after_init_timer_context.j0;
-j1 << match_function_and_data_after_init_timer_context.j1;
-j2 << match_function_and_data_after_init_timer_context.j2;
-@@
-
-msg = "Use setup_timer function."
-coccilib.org.print_todo(j0[0], msg)
-coccilib.org.print_link(j1[0], "")
-coccilib.org.print_link(j2[0], "")
-
-@script:python r3_org depends on org@
-j0 << r3_context.j0;
-j1 << r3_context.j1;
-@@
-
-msg = "Use setup_timer function."
-coccilib.org.print_todo(j0[0], msg)
-coccilib.org.print_link(j1[0], "")
-
-// ----------------------------------------------------------------------------
-
-@script:python match_immediate_function_data_after_init_timer_report
-depends on report@
-j0 << match_immediate_function_data_after_init_timer_context.j0;
-j1 << match_immediate_function_data_after_init_timer_context.j1;
-@@
-
-msg = "Use setup_timer function for function on line %s." % (j1[0].line)
-coccilib.report.print_report(j0[0], msg)
-
-@script:python match_function_and_data_after_init_timer_report depends on report@
-j0 << match_function_and_data_after_init_timer_context.j0;
-j1 << match_function_and_data_after_init_timer_context.j1;
-@@
-
-msg = "Use setup_timer function for function on line %s." % (j1[0].line)
-coccilib.report.print_report(j0[0], msg)
-
-@script:python r3_report depends on report@
-j0 << r3_context.j0;
-j1 << r3_context.j1;
-@@
-
-msg = "Use setup_timer function for function on line %s." % (j1[0].line)
-coccilib.report.print_report(j0[0], msg)
index 1f5ce959f5965b0249f70786e44f472581625955..7721d5b2b0c04ee923b3c14216214db68fcb7dd0 100755 (executable)
 set -o errexit
 set -o nounset
 
+READELF="${CROSS_COMPILE:-}readelf"
+ADDR2LINE="${CROSS_COMPILE:-}addr2line"
+SIZE="${CROSS_COMPILE:-}size"
+NM="${CROSS_COMPILE:-}nm"
+
 command -v awk >/dev/null 2>&1 || die "awk isn't installed"
-command -v readelf >/dev/null 2>&1 || die "readelf isn't installed"
-command -v addr2line >/dev/null 2>&1 || die "addr2line isn't installed"
+command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed"
+command -v ${ADDR2LINE} >/dev/null 2>&1 || die "addr2line isn't installed"
+command -v ${SIZE} >/dev/null 2>&1 || die "size isn't installed"
+command -v ${NM} >/dev/null 2>&1 || die "nm isn't installed"
 
 usage() {
        echo "usage: faddr2line <object file> <func+offset> <func+offset>..." >&2
@@ -69,10 +76,10 @@ die() {
 find_dir_prefix() {
        local objfile=$1
 
-       local start_kernel_addr=$(readelf -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
+       local start_kernel_addr=$(${READELF} -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
        [[ -z $start_kernel_addr ]] && return
 
-       local file_line=$(addr2line -e $objfile $start_kernel_addr)
+       local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr)
        [[ -z $file_line ]] && return
 
        local prefix=${file_line%init/main.c:*}
@@ -104,7 +111,7 @@ __faddr2line() {
 
        # Go through each of the object's symbols which match the func name.
        # In rare cases there might be duplicates.
-       file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}')
+       file_end=$(${SIZE} -Ax $objfile | awk '$1 == ".text" {print $2}')
        while read symbol; do
                local fields=($symbol)
                local sym_base=0x${fields[0]}
@@ -156,10 +163,10 @@ __faddr2line() {
 
                # pass real address to addr2line
                echo "$func+$offset/$sym_size:"
-               addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
+               ${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
                DONE=1
 
-       done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
+       done < <(${NM} -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
 }
 
 [[ $# -lt 2 ]] && usage
index 20136ffefb23b814fa8b300b70a07b821b3ec71f..3c8bd9bb4267a874cd1fa112d4a21b569bd56296 100644 (file)
@@ -1061,7 +1061,7 @@ struct symbol **sym_re_search(const char *pattern)
        }
        if (sym_match_arr) {
                qsort(sym_match_arr, cnt, sizeof(struct sym_match), sym_rel_comp);
-               sym_arr = malloc((cnt+1) * sizeof(struct symbol));
+               sym_arr = malloc((cnt+1) * sizeof(struct symbol *));
                if (!sym_arr)
                        goto sym_re_search_free;
                for (i = 0; i < cnt; i++)
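
The one-liner above is the classic sizeof pitfall: sym_arr holds struct symbol pointers, so each slot is sizeof(struct symbol *), not sizeof(struct symbol); the old code merely over-allocated, but the mismatch is still wrong. In isolation:

#include <stdlib.h>

struct symbol;                          /* opaque for this sketch */

/* Size a NULL-terminated pointer array by the pointer type -- or, more
 * robustly, by sizeof(*arr) so the element type can never drift.
 */
static struct symbol **alloc_sym_array(size_t cnt)
{
        struct symbol **arr = malloc((cnt + 1) * sizeof(*arr));

        if (arr)
                arr[cnt] = NULL;        /* terminator slot */
        return arr;
}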
index bd29a92b4b48aa1648f8c892c980f1a72c2ad67e..df0f045a9a89401e28b6ca36d6b38218f425ad85 100755 (executable)
@@ -3248,4 +3248,4 @@ if ($verbose && $warnings) {
   print STDERR "$warnings warnings\n";
 }
 
-exit($errors);
+exit($output_mode eq "none" ? 0 : $errors);
index 9ed96aefc72da9fcbe1f476727feea423aa23802..c23534925b38173d8d5adeeee99b298bd6053960 100644 (file)
@@ -39,14 +39,13 @@ if test "$(objtree)" != "$(srctree)"; then \
        false; \
 fi ; \
 $(srctree)/scripts/setlocalversion --save-scmversion; \
-ln -sf $(srctree) $(2); \
 tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \
-       $(addprefix $(2)/,$(TAR_CONTENT) $(3)); \
-rm -f $(2) $(objtree)/.scmversion
+       --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \
+rm -f $(objtree)/.scmversion
 
 # rpm-pkg
 # ---------------------------------------------------------------------------
-rpm-pkg rpm: FORCE
+rpm-pkg: FORCE
        $(MAKE) clean
        $(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec
        $(call cmd,src_tar,$(KERNELPATH),kernel.spec)
index 8542e9a55e1b8c718eca5f267a1bb71195599774..d4fa04d914395393289eaea5c26b344d7668325c 100644 (file)
@@ -2451,7 +2451,7 @@ static int __init aa_create_aafs(void)
        aafs_mnt = kern_mount(&aafs_ops);
        if (IS_ERR(aafs_mnt))
                panic("can't set apparmorfs up\n");
-       aafs_mnt->mnt_sb->s_flags &= ~MS_NOUSER;
+       aafs_mnt->mnt_sb->s_flags &= ~SB_NOUSER;
 
        /* Populate fs tree. */
        error = entry_create_dir(&aa_sfs_entry, NULL);
index 620e811696592ddec6fb221e2bec07d436f258b4..4ac095118717022cfb3ea564b5ec37deaad91a90 100644 (file)
@@ -121,17 +121,19 @@ struct apparmor_audit_data {
                /* these entries require a custom callback fn */
                struct {
                        struct aa_label *peer;
-                       struct {
-                               const char *target;
-                               kuid_t ouid;
-                       } fs;
+                       union {
+                               struct {
+                                       const char *target;
+                                       kuid_t ouid;
+                               } fs;
+                               int signal;
+                       };
                };
                struct {
                        struct aa_profile *profile;
                        const char *ns;
                        long pos;
                } iface;
-               int signal;
                struct {
                        int rlim;
                        unsigned long max;
index f546707a2bbbe1c96a52c215baf492d70e23c00a..6505e1ad9e230605885f20f1e6f8df2029866cf2 100644 (file)
@@ -86,7 +86,7 @@ static inline unsigned int aa_dfa_null_transition(struct aa_dfa *dfa,
 
 static inline bool path_mediated_fs(struct dentry *dentry)
 {
-       return !(dentry->d_sb->s_flags & MS_NOUSER);
+       return !(dentry->d_sb->s_flags & SB_NOUSER);
 }
 
 
index 6713fee893fb50ff1436a197202bed8e1b05a7cc..7207e6094dc1622c9a51beedc92288cdec244de1 100644 (file)
@@ -29,7 +29,7 @@ DECLARE_WORK(key_gc_work, key_garbage_collector);
 /*
  * Reaper for links from keyrings to dead keys.
  */
-static void key_gc_timer_func(unsigned long);
+static void key_gc_timer_func(struct timer_list *);
 static DEFINE_TIMER(key_gc_timer, key_gc_timer_func);
 
 static time64_t key_gc_next_run = TIME64_MAX;
@@ -84,7 +84,7 @@ void key_schedule_gc_links(void)
  * Some key's cleanup time was met after it expired, so we need to get the
  * reaper to go through a cycle finding expired keys.
  */
-static void key_gc_timer_func(unsigned long data)
+static void key_gc_timer_func(struct timer_list *unused)
 {
        kenter("");
        key_gc_next_run = TIME64_MAX;
index 66049183ad8961554cbf0a4ea0ccf403bd0b7579..d97c9394b5dd4f7479e9211a2dbb236503e05d2b 100644 (file)
@@ -833,7 +833,6 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
 
        key_check(keyring);
 
-       key_ref = ERR_PTR(-EPERM);
        if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION))
                restrict_link = keyring->restrict_link;
 
index 76d22f726ae49d7e112c648c59e9c0d3124063f1..1ffe60bb2845f97638157b01ed7fcc4f45714312 100644 (file)
@@ -1588,9 +1588,8 @@ error_keyring:
  * The caller must have Setattr permission to change keyring restrictions.
  *
  * The requested type name may be a NULL pointer to reject all attempts
- * to link to the keyring. If _type is non-NULL, _restriction can be
- * NULL or a pointer to a string describing the restriction. If _type is
- * NULL, _restriction must also be NULL.
+ * to link to the keyring.  In this case, _restriction must also be NULL.
+ * Otherwise, both _type and _restriction must be non-NULL.
  *
  * Returns 0 if successful.
  */
@@ -1598,7 +1597,6 @@ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
                             const char __user *_restriction)
 {
        key_ref_t key_ref;
-       bool link_reject = !_type;
        char type[32];
        char *restriction = NULL;
        long ret;
@@ -1607,31 +1605,29 @@ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
        if (IS_ERR(key_ref))
                return PTR_ERR(key_ref);
 
+       ret = -EINVAL;
        if (_type) {
-               ret = key_get_type_from_user(type, _type, sizeof(type));
-               if (ret < 0)
+               if (!_restriction)
                        goto error;
-       }
 
-       if (_restriction) {
-               if (!_type) {
-                       ret = -EINVAL;
+               ret = key_get_type_from_user(type, _type, sizeof(type));
+               if (ret < 0)
                        goto error;
-               }
 
                restriction = strndup_user(_restriction, PAGE_SIZE);
                if (IS_ERR(restriction)) {
                        ret = PTR_ERR(restriction);
                        goto error;
                }
+       } else {
+               if (_restriction)
+                       goto error;
        }
 
-       ret = keyring_restrict(key_ref, link_reject ? NULL : type, restriction);
+       ret = keyring_restrict(key_ref, _type ? type : NULL, restriction);
        kfree(restriction);
-
 error:
        key_ref_put(key_ref);
-
        return ret;
 }
 
index e8036cd0ad5430a87ec2e2ea1496e921ae941b3d..114f7408feee620b868801cbb53b578e7b44f615 100644 (file)
@@ -251,11 +251,12 @@ static int construct_key(struct key *key, const void *callout_info,
  * The keyring selected is returned with an extra reference upon it which the
  * caller must release.
  */
-static void construct_get_dest_keyring(struct key **_dest_keyring)
+static int construct_get_dest_keyring(struct key **_dest_keyring)
 {
        struct request_key_auth *rka;
        const struct cred *cred = current_cred();
        struct key *dest_keyring = *_dest_keyring, *authkey;
+       int ret;
 
        kenter("%p", dest_keyring);
 
@@ -264,6 +265,8 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
                /* the caller supplied one */
                key_get(dest_keyring);
        } else {
+               bool do_perm_check = true;
+
                /* use a default keyring; falling through the cases until we
                 * find one that we actually have */
                switch (cred->jit_keyring) {
@@ -278,8 +281,10 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
                                        dest_keyring =
                                                key_get(rka->dest_keyring);
                                up_read(&authkey->sem);
-                               if (dest_keyring)
+                               if (dest_keyring) {
+                                       do_perm_check = false;
                                        break;
+                               }
                        }
 
                case KEY_REQKEY_DEFL_THREAD_KEYRING:
@@ -314,11 +319,29 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
                default:
                        BUG();
                }
+
+               /*
+                * Require Write permission on the keyring.  This is essential
+                * because the default keyring may be the session keyring, and
+                * joining a keyring only requires Search permission.
+                *
+                * However, this check is skipped for the "requestor keyring" so
+                * that /sbin/request-key can itself use request_key() to add
+                * keys to the original requestor's destination keyring.
+                */
+               if (dest_keyring && do_perm_check) {
+                       ret = key_permission(make_key_ref(dest_keyring, 1),
+                                            KEY_NEED_WRITE);
+                       if (ret) {
+                               key_put(dest_keyring);
+                               return ret;
+                       }
+               }
        }
 
        *_dest_keyring = dest_keyring;
        kleave(" [dk %d]", key_serial(dest_keyring));
-       return;
+       return 0;
 }
 
 /*
@@ -444,11 +467,15 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
        if (ctx->index_key.type == &key_type_keyring)
                return ERR_PTR(-EPERM);
 
-       user = key_user_lookup(current_fsuid());
-       if (!user)
-               return ERR_PTR(-ENOMEM);
+       ret = construct_get_dest_keyring(&dest_keyring);
+       if (ret)
+               goto error;
 
-       construct_get_dest_keyring(&dest_keyring);
+       user = key_user_lookup(current_fsuid());
+       if (!user) {
+               ret = -ENOMEM;
+               goto error_put_dest_keyring;
+       }
 
        ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
        key_user_put(user);
@@ -463,7 +490,7 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
        } else if (ret == -EINPROGRESS) {
                ret = 0;
        } else {
-               goto couldnt_alloc_key;
+               goto error_put_dest_keyring;
        }
 
        key_put(dest_keyring);
@@ -473,8 +500,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
 construction_failed:
        key_negate_and_link(key, key_negative_timeout, NULL, NULL);
        key_put(key);
-couldnt_alloc_key:
+error_put_dest_keyring:
        key_put(dest_keyring);
+error:
        kleave(" = %d", ret);
        return ERR_PTR(ret);
 }
@@ -546,9 +574,7 @@ struct key *request_key_and_link(struct key_type *type,
        if (!IS_ERR(key_ref)) {
                key = key_ref_to_ptr(key_ref);
                if (dest_keyring) {
-                       construct_get_dest_keyring(&dest_keyring);
                        ret = key_link(dest_keyring, key);
-                       key_put(dest_keyring);
                        if (ret < 0) {
                                key_put(key);
                                key = ERR_PTR(ret);
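The construct_get_dest_keyring() changes above follow a recurring C error-handling shape: a helper that used to return void now reports failure, and its caller unwinds whatever it has already taken through ordered goto labels. A minimal, self-contained sketch of that shape, using hypothetical acquire()/release() helpers rather than the key APIs:

#include <stdlib.h>
#include <errno.h>

struct res { int dummy; };

/* Stand-ins for a reference-counted get/put pair; release(NULL) is a no-op. */
static struct res *acquire(void) { return calloc(1, sizeof(struct res)); }
static void release(struct res *r) { free(r); }

static int check_permission(struct res *r) { return r ? 0 : -EPERM; }
static int do_work(struct res *a, struct res *b) { (void)a; (void)b; return 0; }

static int build_and_link(void)
{
	struct res *dest, *user;
	int ret;

	dest = acquire();
	ret = check_permission(dest);
	if (ret)
		goto error_put_dest;		/* only dest is held so far */

	user = acquire();
	if (!user) {
		ret = -ENOMEM;
		goto error_put_dest;
	}

	ret = do_work(dest, user);
	release(user);
	if (ret)
		goto error_put_dest;

	release(dest);
	return 0;

error_put_dest:
	release(dest);
	return ret;
}

int main(void) { return build_and_link() ? 1 : 0; }

Each label releases exactly what is held at its failure point, which is why the patch above adds a plain error label after error_put_dest_keyring for the exit where no keyring reference has been taken yet.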
index 9070f277f8db99421eadf7b55ef2c5c675cb50b3..09ee8c6b9f75e3828663c6b8874e93506d450ac9 100644 (file)
@@ -153,7 +153,9 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
                                err = -ENXIO;
                                goto _error;
                        }
+                       mutex_lock(&pcm->open_mutex);
                        err = snd_pcm_info_user(substream, info);
+                       mutex_unlock(&pcm->open_mutex);
                _error:
                        mutex_unlock(&register_mutex);
                        return err;
index b3b353d7252724e10f23a9288cd24aab3ef34007..f055ca10bbc1d33c9c1cee1fd913b7c930984ac1 100644 (file)
@@ -579,15 +579,14 @@ static int snd_rawmidi_info_user(struct snd_rawmidi_substream *substream,
        return 0;
 }
 
-int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+static int __snd_rawmidi_info_select(struct snd_card *card,
+                                    struct snd_rawmidi_info *info)
 {
        struct snd_rawmidi *rmidi;
        struct snd_rawmidi_str *pstr;
        struct snd_rawmidi_substream *substream;
 
-       mutex_lock(&register_mutex);
        rmidi = snd_rawmidi_search(card, info->device);
-       mutex_unlock(&register_mutex);
        if (!rmidi)
                return -ENXIO;
        if (info->stream < 0 || info->stream > 1)
@@ -603,6 +602,16 @@ int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info
        }
        return -ENXIO;
 }
+
+int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+{
+       int ret;
+
+       mutex_lock(&register_mutex);
+       ret = __snd_rawmidi_info_select(card, info);
+       mutex_unlock(&register_mutex);
+       return ret;
+}
 EXPORT_SYMBOL(snd_rawmidi_info_select);
 
 static int snd_rawmidi_info_select_user(struct snd_card *card,
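The rawmidi fix above also illustrates a common refactor for widening a lock's scope: the body moves into a __prefixed helper that assumes the lock is held, and the exported function becomes a thin wrapper that takes and drops the lock around it, so the lookup and every use of the looked-up object sit in one critical section. A hedged userspace sketch of the same shape with pthreads (hypothetical names, not the ALSA API):

#include <pthread.h>
#include <errno.h>

struct registry {
	pthread_mutex_t lock;
	int device_count;
};

/* Caller must hold reg->lock: lookup and use share one critical section. */
static int __registry_query(struct registry *reg, int device)
{
	if (device < 0 || device >= reg->device_count)
		return -ENXIO;
	/* ... copy caller-visible info out of the protected state ... */
	return 0;
}

int registry_query(struct registry *reg, int device)
{
	int ret;

	pthread_mutex_lock(&reg->lock);
	ret = __registry_query(reg, device);
	pthread_mutex_unlock(&reg->lock);
	return ret;
}

int main(void)
{
	struct registry reg = { PTHREAD_MUTEX_INITIALIZER, 4 };
	return registry_query(&reg, 2) ? 1 : 0;
}

Keeping the unlocked helper static confines it to one file, which makes it harder to call without the lock by accident.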
index 37d9cfbc29f9c829facd29ffdc2224ada7a63efd..b80985fbc334cc6598d7ca2953d1ce79edb9f68b 100644 (file)
@@ -355,7 +355,7 @@ static int initialize_timer(struct snd_seq_timer *tmr)
        unsigned long freq;
 
        t = tmr->timeri->timer;
-       if (snd_BUG_ON(!t))
+       if (!t)
                return -EINVAL;
 
        freq = tmr->preferred_resolution;
index c19c81d230bd7423b4153d2266a45e09333f8714..b4f1b6e88305496f91d028ceb82fe9b8a6a60ccb 100644 (file)
@@ -55,10 +55,11 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
 #define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b)
 #define is_geminilake(codec) (((codec)->core.vendor_id == 0x8086280d) || \
                                ((codec)->core.vendor_id == 0x80862800))
+#define is_cannonlake(codec) ((codec)->core.vendor_id == 0x8086280c)
 #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
                                || is_skylake(codec) || is_broxton(codec) \
-                               || is_kabylake(codec)) || is_geminilake(codec)
-
+                               || is_kabylake(codec)) || is_geminilake(codec) \
+                               || is_cannonlake(codec)
 #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
 #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
 #define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec))
@@ -3841,6 +3842,7 @@ HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI",     patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI",    patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI",    patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI",   patch_i915_hsw_hdmi),
+HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
index 921a10eff43a36ad76bb1631d6de41f52df32c17..6a4db00511ab14593e8a0d33500c547a2e9656ae 100644 (file)
@@ -330,6 +330,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0236:
        case 0x10ec0255:
        case 0x10ec0256:
+       case 0x10ec0257:
        case 0x10ec0282:
        case 0x10ec0283:
        case 0x10ec0286:
@@ -2772,6 +2773,7 @@ enum {
        ALC269_TYPE_ALC298,
        ALC269_TYPE_ALC255,
        ALC269_TYPE_ALC256,
+       ALC269_TYPE_ALC257,
        ALC269_TYPE_ALC215,
        ALC269_TYPE_ALC225,
        ALC269_TYPE_ALC294,
@@ -2805,6 +2807,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        case ALC269_TYPE_ALC298:
        case ALC269_TYPE_ALC255:
        case ALC269_TYPE_ALC256:
+       case ALC269_TYPE_ALC257:
        case ALC269_TYPE_ALC215:
        case ALC269_TYPE_ALC225:
        case ALC269_TYPE_ALC294:
@@ -5182,6 +5185,22 @@ static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec,
        }
 }
 
+/* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */
+static void alc274_fixup_bind_dacs(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+       static hda_nid_t preferred_pairs[] = {
+               0x21, 0x03, 0x1b, 0x03, 0x16, 0x02,
+               0
+       };
+
+       if (action != HDA_FIXUP_ACT_PRE_PROBE)
+               return;
+
+       spec->gen.preferred_dacs = preferred_pairs;
+}
+
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 
@@ -5299,6 +5318,8 @@ enum {
        ALC233_FIXUP_LENOVO_MULTI_CODECS,
        ALC294_FIXUP_LENOVO_MIC_LOCATION,
        ALC700_FIXUP_INTEL_REFERENCE,
+       ALC274_FIXUP_DELL_BIND_DACS,
+       ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6109,6 +6130,21 @@ static const struct hda_fixup alc269_fixups[] = {
                        {}
                }
        },
+       [ALC274_FIXUP_DELL_BIND_DACS] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc274_fixup_bind_dacs,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+       },
+       [ALC274_FIXUP_DELL_AIO_LINEOUT_VERB] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x0401102f },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC274_FIXUP_DELL_BIND_DACS
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6575,7 +6611,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x1b, 0x90a70130},
                {0x21, 0x03211020}),
-       SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+       SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
                {0x12, 0xb7a60130},
                {0x13, 0xb8a61140},
                {0x16, 0x90170110},
@@ -6867,6 +6903,10 @@ static int patch_alc269(struct hda_codec *codec)
                spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
                break;
+       case 0x10ec0257:
+               spec->codec_variant = ALC269_TYPE_ALC257;
+               spec->gen.mixer_nid = 0;
+               break;
        case 0x10ec0215:
        case 0x10ec0285:
        case 0x10ec0289:
@@ -7914,6 +7954,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0257, "ALC257", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),
        HDA_CODEC_ENTRY(0x10ec0262, "ALC262", patch_alc262),
        HDA_CODEC_ENTRY(0x10ec0267, "ALC267", patch_alc268),
index 4f9613e5fc9ec2a967456d5d84e67bdc5347f239..c1376bfdc90b2add14a7f9e2804b4e6ac6c9e534 100644 (file)
@@ -201,7 +201,7 @@ static int line6_send_raw_message_async_part(struct message *msg,
 void line6_start_timer(struct timer_list *timer, unsigned long msecs,
                       void (*function)(struct timer_list *t))
 {
-       timer->function = (TIMER_FUNC_TYPE)function;
+       timer->function = function;
        mod_timer(timer, jiffies + msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL_GPL(line6_start_timer);
index 0537c632299082eb1c40617491a77c964fb9c011..2b4ceda36291c01c6cca69d3a1cacd6c23014f40 100644 (file)
@@ -204,6 +204,10 @@ static int snd_usb_copy_string_desc(struct mixer_build *state,
                                    int index, char *buf, int maxlen)
 {
        int len = usb_string(state->chip->dev, index, buf, maxlen - 1);
+
+       if (len < 0)
+               return 0;
+
        buf[len] = 0;
        return len;
 }
@@ -1476,9 +1480,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
                        return -EINVAL;
                }
                csize = hdr->bControlSize;
-               if (csize <= 1) {
+               if (!csize) {
                        usb_audio_dbg(state->chip,
-                                     "unit %u: invalid bControlSize <= 1\n",
+                                     "unit %u: invalid bControlSize == 0\n",
                                      unitid);
                        return -EINVAL;
                }
@@ -2169,19 +2173,25 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
        kctl->private_value = (unsigned long)namelist;
        kctl->private_free = usb_mixer_selector_elem_free;
 
-       nameid = uac_selector_unit_iSelector(desc);
+       /* check the static mapping table at first */
        len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
-       if (len)
-               ;
-       else if (nameid)
-               snd_usb_copy_string_desc(state, nameid, kctl->id.name,
-                                        sizeof(kctl->id.name));
-       else {
-               len = get_term_name(state, &state->oterm,
+       if (!len) {
+               /* no mapping ? */
+               /* if iSelector is given, use it */
+               nameid = uac_selector_unit_iSelector(desc);
+               if (nameid)
+                       len = snd_usb_copy_string_desc(state, nameid,
+                                                      kctl->id.name,
+                                                      sizeof(kctl->id.name));
+               /* ... or pick up the terminal name at next */
+               if (!len)
+                       len = get_term_name(state, &state->oterm,
                                    kctl->id.name, sizeof(kctl->id.name), 0);
+               /* ... or use the fixed string "USB" as the last resort */
                if (!len)
                        strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
 
+               /* and add the proper suffix */
                if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
                        append_ctl_name(kctl, " Clock Source");
                else if ((state->oterm.type & 0xff00) == 0x0100)
index 77eecaa4db1f32c9b7af87273c599181bf307443..a66ef5777887a78d7416e64c049c73b26477c7f7 100644 (file)
@@ -1166,10 +1166,11 @@ static bool is_marantz_denon_dac(unsigned int id)
 /* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
  * between PCM/DOP and native DSD mode
  */
-static bool is_teac_50X_dac(unsigned int id)
+static bool is_teac_dsd_dac(unsigned int id)
 {
        switch (id) {
        case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
+       case USB_ID(0x0644, 0x8044): /* Esoteric D-05X */
                return true;
        }
        return false;
@@ -1202,7 +1203,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
                        break;
                }
                mdelay(20);
-       } else if (is_teac_50X_dac(subs->stream->chip->usb_id)) {
+       } else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) {
                /* Vendor mode switch cmd is required. */
                switch (fmt->altsetting) {
                case 3: /* DSD mode (DSD_U32) requested */
@@ -1392,7 +1393,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        }
 
        /* TEAC devices with USB DAC functionality */
-       if (is_teac_50X_dac(chip->usb_id)) {
+       if (is_teac_dsd_dac(chip->usb_id)) {
                if (fp->altsetting == 3)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
        }
index 1f57bbe82b6fb8582c2a3a1617345266c22e33e8..6edd177bb1c7c66e0ec32caf7ec8d2c3680ed2f3 100644 (file)
@@ -152,6 +152,12 @@ struct kvm_arch_memory_slot {
        (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
 #define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
 
+/* PL1 Physical Timer Registers */
+#define KVM_REG_ARM_PTIMER_CTL         ARM_CP15_REG32(0, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CNT         ARM_CP15_REG64(0, 14)
+#define KVM_REG_ARM_PTIMER_CVAL                ARM_CP15_REG64(2, 14)
+
+/* Virtual Timer Registers */
 #define KVM_REG_ARM_TIMER_CTL          ARM_CP15_REG32(0, 14, 3, 1)
 #define KVM_REG_ARM_TIMER_CNT          ARM_CP15_REG64(1, 14)
 #define KVM_REG_ARM_TIMER_CVAL         ARM_CP15_REG64(3, 14)
@@ -216,6 +222,7 @@ struct kvm_arch_memory_slot {
 #define   KVM_DEV_ARM_ITS_SAVE_TABLES          1
 #define   KVM_DEV_ARM_ITS_RESTORE_TABLES       2
 #define   KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
+#define   KVM_DEV_ARM_ITS_CTRL_RESET           4
 
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT         24
diff --git a/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h b/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..b551b74
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
index 51149ec75fe480b324fd74d2697579a936438fdc..9abbf30446545a0668083b0891461f015563bcb1 100644 (file)
@@ -196,6 +196,12 @@ struct kvm_arch_memory_slot {
 
 #define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
 
+/* Physical Timer EL0 Registers */
+#define KVM_REG_ARM_PTIMER_CTL         ARM64_SYS_REG(3, 3, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CVAL                ARM64_SYS_REG(3, 3, 14, 2, 2)
+#define KVM_REG_ARM_PTIMER_CNT         ARM64_SYS_REG(3, 3, 14, 0, 1)
+
+/* EL0 Virtual Timer Registers */
 #define KVM_REG_ARM_TIMER_CTL          ARM64_SYS_REG(3, 3, 14, 3, 1)
 #define KVM_REG_ARM_TIMER_CNT          ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL         ARM64_SYS_REG(3, 3, 14, 0, 2)
@@ -228,6 +234,7 @@ struct kvm_arch_memory_slot {
 #define   KVM_DEV_ARM_ITS_SAVE_TABLES           1
 #define   KVM_DEV_ARM_ITS_RESTORE_TABLES        2
 #define   KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
+#define   KVM_DEV_ARM_ITS_CTRL_RESET           4
 
 /* Device Control API on vcpu fd */
 #define KVM_ARM_VCPU_PMU_V3_CTRL       0
diff --git a/tools/arch/s390/include/uapi/asm/bpf_perf_event.h b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..0a8e37a
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include "ptrace.h"
+
+typedef user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
index 9ad172dcd912d5763b0bf954617c9e398ad31aa8..38535a57fef8327c3b08bf20e1f8621fd93c776a 100644 (file)
@@ -6,10 +6,6 @@
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
index c36c97ffdc6fa24f246c41bfe993790410e4e032..84606b8cc49e47c794fb3b16ef5681de098bfc6f 100644 (file)
@@ -4,10 +4,6 @@
  *
  * Copyright 2014 IBM Corp.
  * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #ifndef __LINUX_KVM_PERF_S390_H
diff --git a/tools/arch/s390/include/uapi/asm/ptrace.h b/tools/arch/s390/include/uapi/asm/ptrace.h
new file mode 100644 (file)
index 0000000..543dd70
--- /dev/null
@@ -0,0 +1,457 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *  S390 version
+ *    Copyright IBM Corp. 1999, 2000
+ *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ */
+
+#ifndef _UAPI_S390_PTRACE_H
+#define _UAPI_S390_PTRACE_H
+
+/*
+ * Offsets in the user_regs_struct. They are used for the ptrace
+ * system call and in entry.S
+ */
+#ifndef __s390x__
+
+#define PT_PSWMASK  0x00
+#define PT_PSWADDR  0x04
+#define PT_GPR0     0x08
+#define PT_GPR1     0x0C
+#define PT_GPR2     0x10
+#define PT_GPR3     0x14
+#define PT_GPR4     0x18
+#define PT_GPR5     0x1C
+#define PT_GPR6     0x20
+#define PT_GPR7     0x24
+#define PT_GPR8     0x28
+#define PT_GPR9     0x2C
+#define PT_GPR10    0x30
+#define PT_GPR11    0x34
+#define PT_GPR12    0x38
+#define PT_GPR13    0x3C
+#define PT_GPR14    0x40
+#define PT_GPR15    0x44
+#define PT_ACR0     0x48
+#define PT_ACR1     0x4C
+#define PT_ACR2     0x50
+#define PT_ACR3     0x54
+#define PT_ACR4            0x58
+#define PT_ACR5            0x5C
+#define PT_ACR6            0x60
+#define PT_ACR7            0x64
+#define PT_ACR8            0x68
+#define PT_ACR9            0x6C
+#define PT_ACR10    0x70
+#define PT_ACR11    0x74
+#define PT_ACR12    0x78
+#define PT_ACR13    0x7C
+#define PT_ACR14    0x80
+#define PT_ACR15    0x84
+#define PT_ORIGGPR2 0x88
+#define PT_FPC     0x90
+/*
+ * A nasty fact of life that the ptrace api
+ * only supports passing of longs.
+ */
+#define PT_FPR0_HI  0x98
+#define PT_FPR0_LO  0x9C
+#define PT_FPR1_HI  0xA0
+#define PT_FPR1_LO  0xA4
+#define PT_FPR2_HI  0xA8
+#define PT_FPR2_LO  0xAC
+#define PT_FPR3_HI  0xB0
+#define PT_FPR3_LO  0xB4
+#define PT_FPR4_HI  0xB8
+#define PT_FPR4_LO  0xBC
+#define PT_FPR5_HI  0xC0
+#define PT_FPR5_LO  0xC4
+#define PT_FPR6_HI  0xC8
+#define PT_FPR6_LO  0xCC
+#define PT_FPR7_HI  0xD0
+#define PT_FPR7_LO  0xD4
+#define PT_FPR8_HI  0xD8
+#define PT_FPR8_LO  0XDC
+#define PT_FPR9_HI  0xE0
+#define PT_FPR9_LO  0xE4
+#define PT_FPR10_HI 0xE8
+#define PT_FPR10_LO 0xEC
+#define PT_FPR11_HI 0xF0
+#define PT_FPR11_LO 0xF4
+#define PT_FPR12_HI 0xF8
+#define PT_FPR12_LO 0xFC
+#define PT_FPR13_HI 0x100
+#define PT_FPR13_LO 0x104
+#define PT_FPR14_HI 0x108
+#define PT_FPR14_LO 0x10C
+#define PT_FPR15_HI 0x110
+#define PT_FPR15_LO 0x114
+#define PT_CR_9            0x118
+#define PT_CR_10    0x11C
+#define PT_CR_11    0x120
+#define PT_IEEE_IP  0x13C
+#define PT_LASTOFF  PT_IEEE_IP
+#define PT_ENDREGS  0x140-1
+
+#define GPR_SIZE       4
+#define CR_SIZE                4
+
+#define STACK_FRAME_OVERHEAD   96      /* size of minimum stack frame */
+
+#else /* __s390x__ */
+
+#define PT_PSWMASK  0x00
+#define PT_PSWADDR  0x08
+#define PT_GPR0     0x10
+#define PT_GPR1     0x18
+#define PT_GPR2     0x20
+#define PT_GPR3     0x28
+#define PT_GPR4     0x30
+#define PT_GPR5     0x38
+#define PT_GPR6     0x40
+#define PT_GPR7     0x48
+#define PT_GPR8     0x50
+#define PT_GPR9     0x58
+#define PT_GPR10    0x60
+#define PT_GPR11    0x68
+#define PT_GPR12    0x70
+#define PT_GPR13    0x78
+#define PT_GPR14    0x80
+#define PT_GPR15    0x88
+#define PT_ACR0     0x90
+#define PT_ACR1     0x94
+#define PT_ACR2     0x98
+#define PT_ACR3     0x9C
+#define PT_ACR4            0xA0
+#define PT_ACR5            0xA4
+#define PT_ACR6            0xA8
+#define PT_ACR7            0xAC
+#define PT_ACR8            0xB0
+#define PT_ACR9            0xB4
+#define PT_ACR10    0xB8
+#define PT_ACR11    0xBC
+#define PT_ACR12    0xC0
+#define PT_ACR13    0xC4
+#define PT_ACR14    0xC8
+#define PT_ACR15    0xCC
+#define PT_ORIGGPR2 0xD0
+#define PT_FPC     0xD8
+#define PT_FPR0     0xE0
+#define PT_FPR1     0xE8
+#define PT_FPR2     0xF0
+#define PT_FPR3     0xF8
+#define PT_FPR4     0x100
+#define PT_FPR5     0x108
+#define PT_FPR6     0x110
+#define PT_FPR7     0x118
+#define PT_FPR8     0x120
+#define PT_FPR9     0x128
+#define PT_FPR10    0x130
+#define PT_FPR11    0x138
+#define PT_FPR12    0x140
+#define PT_FPR13    0x148
+#define PT_FPR14    0x150
+#define PT_FPR15    0x158
+#define PT_CR_9     0x160
+#define PT_CR_10    0x168
+#define PT_CR_11    0x170
+#define PT_IEEE_IP  0x1A8
+#define PT_LASTOFF  PT_IEEE_IP
+#define PT_ENDREGS  0x1B0-1
+
+#define GPR_SIZE       8
+#define CR_SIZE                8
+
+#define STACK_FRAME_OVERHEAD   160      /* size of minimum stack frame */
+
+#endif /* __s390x__ */
+
+#define NUM_GPRS       16
+#define NUM_FPRS       16
+#define NUM_CRS                16
+#define NUM_ACRS       16
+
+#define NUM_CR_WORDS   3
+
+#define FPR_SIZE       8
+#define FPC_SIZE       4
+#define FPC_PAD_SIZE   4 /* gcc insists on aligning the fpregs */
+#define ACR_SIZE       4
+
+
+#define PTRACE_OLDSETOPTIONS        21
+
+#ifndef __ASSEMBLY__
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+typedef union {
+       float   f;
+       double  d;
+       __u64   ui;
+       struct
+       {
+               __u32 hi;
+               __u32 lo;
+       } fp;
+} freg_t;
+
+typedef struct {
+       __u32   fpc;
+       __u32   pad;
+       freg_t  fprs[NUM_FPRS];
+} s390_fp_regs;
+
+#define FPC_EXCEPTION_MASK     0xF8000000
+#define FPC_FLAGS_MASK         0x00F80000
+#define FPC_DXC_MASK           0x0000FF00
+#define FPC_RM_MASK            0x00000003
+
+/* this typedef defines how a Program Status Word looks like */
+typedef struct {
+       unsigned long mask;
+       unsigned long addr;
+} __attribute__ ((aligned(8))) psw_t;
+
+#ifndef __s390x__
+
+#define PSW_MASK_PER           0x40000000UL
+#define PSW_MASK_DAT           0x04000000UL
+#define PSW_MASK_IO            0x02000000UL
+#define PSW_MASK_EXT           0x01000000UL
+#define PSW_MASK_KEY           0x00F00000UL
+#define PSW_MASK_BASE          0x00080000UL    /* always one */
+#define PSW_MASK_MCHECK                0x00040000UL
+#define PSW_MASK_WAIT          0x00020000UL
+#define PSW_MASK_PSTATE                0x00010000UL
+#define PSW_MASK_ASC           0x0000C000UL
+#define PSW_MASK_CC            0x00003000UL
+#define PSW_MASK_PM            0x00000F00UL
+#define PSW_MASK_RI            0x00000000UL
+#define PSW_MASK_EA            0x00000000UL
+#define PSW_MASK_BA            0x00000000UL
+
+#define PSW_MASK_USER          0x0000FF00UL
+
+#define PSW_ADDR_AMODE         0x80000000UL
+#define PSW_ADDR_INSN          0x7FFFFFFFUL
+
+#define PSW_DEFAULT_KEY                (((unsigned long) PAGE_DEFAULT_ACC) << 20)
+
+#define PSW_ASC_PRIMARY                0x00000000UL
+#define PSW_ASC_ACCREG         0x00004000UL
+#define PSW_ASC_SECONDARY      0x00008000UL
+#define PSW_ASC_HOME           0x0000C000UL
+
+#else /* __s390x__ */
+
+#define PSW_MASK_PER           0x4000000000000000UL
+#define PSW_MASK_DAT           0x0400000000000000UL
+#define PSW_MASK_IO            0x0200000000000000UL
+#define PSW_MASK_EXT           0x0100000000000000UL
+#define PSW_MASK_BASE          0x0000000000000000UL
+#define PSW_MASK_KEY           0x00F0000000000000UL
+#define PSW_MASK_MCHECK                0x0004000000000000UL
+#define PSW_MASK_WAIT          0x0002000000000000UL
+#define PSW_MASK_PSTATE                0x0001000000000000UL
+#define PSW_MASK_ASC           0x0000C00000000000UL
+#define PSW_MASK_CC            0x0000300000000000UL
+#define PSW_MASK_PM            0x00000F0000000000UL
+#define PSW_MASK_RI            0x0000008000000000UL
+#define PSW_MASK_EA            0x0000000100000000UL
+#define PSW_MASK_BA            0x0000000080000000UL
+
+#define PSW_MASK_USER          0x0000FF0180000000UL
+
+#define PSW_ADDR_AMODE         0x0000000000000000UL
+#define PSW_ADDR_INSN          0xFFFFFFFFFFFFFFFFUL
+
+#define PSW_DEFAULT_KEY                (((unsigned long) PAGE_DEFAULT_ACC) << 52)
+
+#define PSW_ASC_PRIMARY                0x0000000000000000UL
+#define PSW_ASC_ACCREG         0x0000400000000000UL
+#define PSW_ASC_SECONDARY      0x0000800000000000UL
+#define PSW_ASC_HOME           0x0000C00000000000UL
+
+#endif /* __s390x__ */
+
+
+/*
+ * The s390_regs structure is used to define the elf_gregset_t.
+ */
+typedef struct {
+       psw_t psw;
+       unsigned long gprs[NUM_GPRS];
+       unsigned int  acrs[NUM_ACRS];
+       unsigned long orig_gpr2;
+} s390_regs;
+
+/*
+ * The user_pt_regs structure exports the beginning of
+ * the in-kernel pt_regs structure to user space.
+ */
+typedef struct {
+       unsigned long args[1];
+       psw_t psw;
+       unsigned long gprs[NUM_GPRS];
+} user_pt_regs;
+
+/*
+ * Now for the user space program event recording (trace) definitions.
+ * The following structures are used only for the ptrace interface, don't
+ * touch or even look at it if you don't want to modify the user-space
+ * ptrace interface. In particular stay away from it for in-kernel PER.
+ */
+typedef struct {
+       unsigned long cr[NUM_CR_WORDS];
+} per_cr_words;
+
+#define PER_EM_MASK 0xE8000000UL
+
+typedef struct {
+#ifdef __s390x__
+       unsigned                       : 32;
+#endif /* __s390x__ */
+       unsigned em_branching          : 1;
+       unsigned em_instruction_fetch  : 1;
+       /*
+        * Switching on storage alteration automatically fixes
+        * the storage alteration event bit in the users std.
+        */
+       unsigned em_storage_alteration : 1;
+       unsigned em_gpr_alt_unused     : 1;
+       unsigned em_store_real_address : 1;
+       unsigned                       : 3;
+       unsigned branch_addr_ctl       : 1;
+       unsigned                       : 1;
+       unsigned storage_alt_space_ctl : 1;
+       unsigned                       : 21;
+       unsigned long starting_addr;
+       unsigned long ending_addr;
+} per_cr_bits;
+
+typedef struct {
+       unsigned short perc_atmid;
+       unsigned long address;
+       unsigned char access_id;
+} per_lowcore_words;
+
+typedef struct {
+       unsigned perc_branching          : 1;
+       unsigned perc_instruction_fetch  : 1;
+       unsigned perc_storage_alteration : 1;
+       unsigned perc_gpr_alt_unused     : 1;
+       unsigned perc_store_real_address : 1;
+       unsigned                         : 3;
+       unsigned atmid_psw_bit_31        : 1;
+       unsigned atmid_validity_bit      : 1;
+       unsigned atmid_psw_bit_32        : 1;
+       unsigned atmid_psw_bit_5         : 1;
+       unsigned atmid_psw_bit_16        : 1;
+       unsigned atmid_psw_bit_17        : 1;
+       unsigned si                      : 2;
+       unsigned long address;
+       unsigned                         : 4;
+       unsigned access_id               : 4;
+} per_lowcore_bits;
+
+typedef struct {
+       union {
+               per_cr_words   words;
+               per_cr_bits    bits;
+       } control_regs;
+       /*
+        * The single_step and instruction_fetch bits are obsolete,
+        * the kernel always sets them to zero. To enable single
+        * stepping use ptrace(PTRACE_SINGLESTEP) instead.
+        */
+       unsigned  single_step       : 1;
+       unsigned  instruction_fetch : 1;
+       unsigned                    : 30;
+       /*
+        * These addresses are copied into cr10 & cr11 if single
+        * stepping is switched off
+        */
+       unsigned long starting_addr;
+       unsigned long ending_addr;
+       union {
+               per_lowcore_words words;
+               per_lowcore_bits  bits;
+       } lowcore;
+} per_struct;
+
+typedef struct {
+       unsigned int  len;
+       unsigned long kernel_addr;
+       unsigned long process_addr;
+} ptrace_area;
+
+/*
+ * S/390 specific non posix ptrace requests. I chose unusual values so
+ * they are unlikely to clash with future ptrace definitions.
+ */
+#define PTRACE_PEEKUSR_AREA          0x5000
+#define PTRACE_POKEUSR_AREA          0x5001
+#define PTRACE_PEEKTEXT_AREA         0x5002
+#define PTRACE_PEEKDATA_AREA         0x5003
+#define PTRACE_POKETEXT_AREA         0x5004
+#define PTRACE_POKEDATA_AREA         0x5005
+#define PTRACE_GET_LAST_BREAK        0x5006
+#define PTRACE_PEEK_SYSTEM_CALL       0x5007
+#define PTRACE_POKE_SYSTEM_CALL              0x5008
+#define PTRACE_ENABLE_TE             0x5009
+#define PTRACE_DISABLE_TE            0x5010
+#define PTRACE_TE_ABORT_RAND         0x5011
+
+/*
+ * The numbers chosen here are somewhat arbitrary but absolutely MUST
+ * not overlap with any of the number assigned in <linux/ptrace.h>.
+ */
+#define PTRACE_SINGLEBLOCK     12      /* resume execution until next branch */
+
+/*
+ * PT_PROT definition is loosely based on hppa bsd definition in
+ * gdb/hppab-nat.c
+ */
+#define PTRACE_PROT                      21
+
+typedef enum {
+       ptprot_set_access_watchpoint,
+       ptprot_set_write_watchpoint,
+       ptprot_disable_watchpoint
+} ptprot_flags;
+
+typedef struct {
+       unsigned long lowaddr;
+       unsigned long hiaddr;
+       ptprot_flags prot;
+} ptprot_area;
+
+/* Sequence of bytes for breakpoint illegal instruction.  */
+#define S390_BREAKPOINT     {0x0,0x1}
+#define S390_BREAKPOINT_U16 ((__u16)0x0001)
+#define S390_SYSCALL_OPCODE ((__u16)0x0a00)
+#define S390_SYSCALL_SIZE   2
+
+/*
+ * The user_regs_struct defines the way the user registers are
+ * store on the stack for signal handling.
+ */
+struct user_regs_struct {
+       psw_t psw;
+       unsigned long gprs[NUM_GPRS];
+       unsigned int  acrs[NUM_ACRS];
+       unsigned long orig_gpr2;
+       s390_fp_regs fp_regs;
+       /*
+        * These per registers are in here so that gdb can modify them
+        * itself as there is no "official" ptrace interface for hardware
+        * watchpoints. This is the way intel does it.
+        */
+       per_struct per_info;
+       unsigned long ieee_instruction_pointer; /* obsolete, always 0 */
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI_S390_PTRACE_H */
index 793690fbda3625defd130262db1e94b57152c0bc..800104c8a3edfee7f4f52a33b8451a51ee0ed90a 100644 (file)
 /*
  * Defines x86 CPU feature bits
  */
-#define NCAPINTS       18      /* N 32-bit words worth of info */
-#define NBUGINTS       1       /* N 32-bit bug flags */
+#define NCAPINTS                       18         /* N 32-bit words worth of info */
+#define NBUGINTS                       1          /* N 32-bit bug flags */
 
 /*
  * Note: If the comment begins with a quoted string, that string is used
  * in /proc/cpuinfo instead of the macro name.  If the string is "",
  * this feature bit is not displayed in /proc/cpuinfo at all.
+ *
+ * When adding new features here that depend on other features,
+ * please update the table in kernel/cpu/cpuid-deps.c as well.
  */
 
-/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
-#define X86_FEATURE_FPU                ( 0*32+ 0) /* Onboard FPU */
-#define X86_FEATURE_VME                ( 0*32+ 1) /* Virtual Mode Extensions */
-#define X86_FEATURE_DE         ( 0*32+ 2) /* Debugging Extensions */
-#define X86_FEATURE_PSE                ( 0*32+ 3) /* Page Size Extensions */
-#define X86_FEATURE_TSC                ( 0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR                ( 0*32+ 5) /* Model-Specific Registers */
-#define X86_FEATURE_PAE                ( 0*32+ 6) /* Physical Address Extensions */
-#define X86_FEATURE_MCE                ( 0*32+ 7) /* Machine Check Exception */
-#define X86_FEATURE_CX8                ( 0*32+ 8) /* CMPXCHG8 instruction */
-#define X86_FEATURE_APIC       ( 0*32+ 9) /* Onboard APIC */
-#define X86_FEATURE_SEP                ( 0*32+11) /* SYSENTER/SYSEXIT */
-#define X86_FEATURE_MTRR       ( 0*32+12) /* Memory Type Range Registers */
-#define X86_FEATURE_PGE                ( 0*32+13) /* Page Global Enable */
-#define X86_FEATURE_MCA                ( 0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV       ( 0*32+15) /* CMOV instructions */
-                                         /* (plus FCMOVcc, FCOMI with FPU) */
-#define X86_FEATURE_PAT                ( 0*32+16) /* Page Attribute Table */
-#define X86_FEATURE_PSE36      ( 0*32+17) /* 36-bit PSEs */
-#define X86_FEATURE_PN         ( 0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLUSH    ( 0*32+19) /* CLFLUSH instruction */
-#define X86_FEATURE_DS         ( 0*32+21) /* "dts" Debug Store */
-#define X86_FEATURE_ACPI       ( 0*32+22) /* ACPI via MSR */
-#define X86_FEATURE_MMX                ( 0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR       ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
-#define X86_FEATURE_XMM                ( 0*32+25) /* "sse" */
-#define X86_FEATURE_XMM2       ( 0*32+26) /* "sse2" */
-#define X86_FEATURE_SELFSNOOP  ( 0*32+27) /* "ss" CPU self snoop */
-#define X86_FEATURE_HT         ( 0*32+28) /* Hyper-Threading */
-#define X86_FEATURE_ACC                ( 0*32+29) /* "tm" Automatic clock control */
-#define X86_FEATURE_IA64       ( 0*32+30) /* IA-64 processor */
-#define X86_FEATURE_PBE                ( 0*32+31) /* Pending Break Enable */
+/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
+#define X86_FEATURE_FPU                        ( 0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_VME                        ( 0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE                 ( 0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE                        ( 0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC                        ( 0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR                        ( 0*32+ 5) /* Model-Specific Registers */
+#define X86_FEATURE_PAE                        ( 0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE                        ( 0*32+ 7) /* Machine Check Exception */
+#define X86_FEATURE_CX8                        ( 0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC               ( 0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_SEP                        ( 0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR               ( 0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE                        ( 0*32+13) /* Page Global Enable */
+#define X86_FEATURE_MCA                        ( 0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV               ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
+#define X86_FEATURE_PAT                        ( 0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36              ( 0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PN                 ( 0*32+18) /* Processor serial number */
+#define X86_FEATURE_CLFLUSH            ( 0*32+19) /* CLFLUSH instruction */
+#define X86_FEATURE_DS                 ( 0*32+21) /* "dts" Debug Store */
+#define X86_FEATURE_ACPI               ( 0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_MMX                        ( 0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR               ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_XMM                        ( 0*32+25) /* "sse" */
+#define X86_FEATURE_XMM2               ( 0*32+26) /* "sse2" */
+#define X86_FEATURE_SELFSNOOP          ( 0*32+27) /* "ss" CPU self snoop */
+#define X86_FEATURE_HT                 ( 0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_ACC                        ( 0*32+29) /* "tm" Automatic clock control */
+#define X86_FEATURE_IA64               ( 0*32+30) /* IA-64 processor */
+#define X86_FEATURE_PBE                        ( 0*32+31) /* Pending Break Enable */
 
 /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
 /* Don't duplicate feature flags which are redundant with Intel! */
-#define X86_FEATURE_SYSCALL    ( 1*32+11) /* SYSCALL/SYSRET */
-#define X86_FEATURE_MP         ( 1*32+19) /* MP Capable. */
-#define X86_FEATURE_NX         ( 1*32+20) /* Execute Disable */
-#define X86_FEATURE_MMXEXT     ( 1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_FXSR_OPT   ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
-#define X86_FEATURE_GBPAGES    ( 1*32+26) /* "pdpe1gb" GB pages */
-#define X86_FEATURE_RDTSCP     ( 1*32+27) /* RDTSCP */
-#define X86_FEATURE_LM         ( 1*32+29) /* Long Mode (x86-64) */
-#define X86_FEATURE_3DNOWEXT   ( 1*32+30) /* AMD 3DNow! extensions */
-#define X86_FEATURE_3DNOW      ( 1*32+31) /* 3DNow! */
+#define X86_FEATURE_SYSCALL            ( 1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP                 ( 1*32+19) /* MP Capable */
+#define X86_FEATURE_NX                 ( 1*32+20) /* Execute Disable */
+#define X86_FEATURE_MMXEXT             ( 1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_FXSR_OPT           ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_GBPAGES            ( 1*32+26) /* "pdpe1gb" GB pages */
+#define X86_FEATURE_RDTSCP             ( 1*32+27) /* RDTSCP */
+#define X86_FEATURE_LM                 ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */
+#define X86_FEATURE_3DNOWEXT           ( 1*32+30) /* AMD 3DNow extensions */
+#define X86_FEATURE_3DNOW              ( 1*32+31) /* 3DNow */
 
 /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
-#define X86_FEATURE_RECOVERY   ( 2*32+ 0) /* CPU in recovery mode */
-#define X86_FEATURE_LONGRUN    ( 2*32+ 1) /* Longrun power control */
-#define X86_FEATURE_LRTI       ( 2*32+ 3) /* LongRun table interface */
+#define X86_FEATURE_RECOVERY           ( 2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN            ( 2*32+ 1) /* Longrun power control */
+#define X86_FEATURE_LRTI               ( 2*32+ 3) /* LongRun table interface */
 
 /* Other features, Linux-defined mapping, word 3 */
 /* This range is used for feature bits which conflict or are synthesized */
-#define X86_FEATURE_CXMMX      ( 3*32+ 0) /* Cyrix MMX extensions */
-#define X86_FEATURE_K6_MTRR    ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
-#define X86_FEATURE_CYRIX_ARR  ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
-#define X86_FEATURE_CENTAUR_MCR        ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
-/* cpu types for specific tunings: */
-#define X86_FEATURE_K8         ( 3*32+ 4) /* "" Opteron, Athlon64 */
-#define X86_FEATURE_K7         ( 3*32+ 5) /* "" Athlon */
-#define X86_FEATURE_P3         ( 3*32+ 6) /* "" P3 */
-#define X86_FEATURE_P4         ( 3*32+ 7) /* "" P4 */
-#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
-#define X86_FEATURE_UP         ( 3*32+ 9) /* smp kernel running on up */
-#define X86_FEATURE_ART                ( 3*32+10) /* Platform has always running timer (ART) */
-#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS       ( 3*32+12) /* Precise-Event Based Sampling */
-#define X86_FEATURE_BTS                ( 3*32+13) /* Branch Trace Store */
-#define X86_FEATURE_SYSCALL32  ( 3*32+14) /* "" syscall in ia32 userspace */
-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
-#define X86_FEATURE_REP_GOOD   ( 3*32+16) /* rep microcode works well */
-#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
-#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
-#define X86_FEATURE_ACC_POWER  ( 3*32+19) /* AMD Accumulated Power Mechanism */
-#define X86_FEATURE_NOPL       ( 3*32+20) /* The NOPL (0F 1F) instructions */
-#define X86_FEATURE_ALWAYS     ( 3*32+21) /* "" Always-present feature */
-#define X86_FEATURE_XTOPOLOGY  ( 3*32+22) /* cpu topology enum extensions */
-#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
-#define X86_FEATURE_NONSTOP_TSC        ( 3*32+24) /* TSC does not stop in C states */
-#define X86_FEATURE_CPUID      ( 3*32+25) /* CPU has CPUID instruction itself */
-#define X86_FEATURE_EXTD_APICID        ( 3*32+26) /* has extended APICID (8 bits) */
-#define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
-#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
-#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
+#define X86_FEATURE_CXMMX              ( 3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR            ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR          ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR                ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
+
+/* CPU types for specific tunings: */
+#define X86_FEATURE_K8                 ( 3*32+ 4) /* "" Opteron, Athlon64 */
+#define X86_FEATURE_K7                 ( 3*32+ 5) /* "" Athlon */
+#define X86_FEATURE_P3                 ( 3*32+ 6) /* "" P3 */
+#define X86_FEATURE_P4                 ( 3*32+ 7) /* "" P4 */
+#define X86_FEATURE_CONSTANT_TSC       ( 3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP                 ( 3*32+ 9) /* SMP kernel running on UP */
+#define X86_FEATURE_ART                        ( 3*32+10) /* Always running timer (ART) */
+#define X86_FEATURE_ARCH_PERFMON       ( 3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS               ( 3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS                        ( 3*32+13) /* Branch Trace Store */
+#define X86_FEATURE_SYSCALL32          ( 3*32+14) /* "" syscall in IA32 userspace */
+#define X86_FEATURE_SYSENTER32         ( 3*32+15) /* "" sysenter in IA32 userspace */
+#define X86_FEATURE_REP_GOOD           ( 3*32+16) /* REP microcode works well */
+#define X86_FEATURE_MFENCE_RDTSC       ( 3*32+17) /* "" MFENCE synchronizes RDTSC */
+#define X86_FEATURE_LFENCE_RDTSC       ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
+#define X86_FEATURE_ACC_POWER          ( 3*32+19) /* AMD Accumulated Power Mechanism */
+#define X86_FEATURE_NOPL               ( 3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_ALWAYS             ( 3*32+21) /* "" Always-present feature */
+#define X86_FEATURE_XTOPOLOGY          ( 3*32+22) /* CPU topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE       ( 3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC                ( 3*32+24) /* TSC does not stop in C states */
+#define X86_FEATURE_CPUID              ( 3*32+25) /* CPU has CPUID instruction itself */
+#define X86_FEATURE_EXTD_APICID                ( 3*32+26) /* Extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM            ( 3*32+27) /* AMD multi-node processor */
+#define X86_FEATURE_APERFMPERF         ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
+#define X86_FEATURE_NONSTOP_TSC_S3     ( 3*32+30) /* TSC doesn't stop in S3 state */
+#define X86_FEATURE_TSC_KNOWN_FREQ     ( 3*32+31) /* TSC has known frequency */
 
-/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
-#define X86_FEATURE_XMM3       ( 4*32+ 0) /* "pni" SSE-3 */
-#define X86_FEATURE_PCLMULQDQ  ( 4*32+ 1) /* PCLMULQDQ instruction */
-#define X86_FEATURE_DTES64     ( 4*32+ 2) /* 64-bit Debug Store */
-#define X86_FEATURE_MWAIT      ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
-#define X86_FEATURE_DSCPL      ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
-#define X86_FEATURE_VMX                ( 4*32+ 5) /* Hardware virtualization */
-#define X86_FEATURE_SMX                ( 4*32+ 6) /* Safer mode */
-#define X86_FEATURE_EST                ( 4*32+ 7) /* Enhanced SpeedStep */
-#define X86_FEATURE_TM2                ( 4*32+ 8) /* Thermal Monitor 2 */
-#define X86_FEATURE_SSSE3      ( 4*32+ 9) /* Supplemental SSE-3 */
-#define X86_FEATURE_CID                ( 4*32+10) /* Context ID */
-#define X86_FEATURE_SDBG       ( 4*32+11) /* Silicon Debug */
-#define X86_FEATURE_FMA                ( 4*32+12) /* Fused multiply-add */
-#define X86_FEATURE_CX16       ( 4*32+13) /* CMPXCHG16B */
-#define X86_FEATURE_XTPR       ( 4*32+14) /* Send Task Priority Messages */
-#define X86_FEATURE_PDCM       ( 4*32+15) /* Performance Capabilities */
-#define X86_FEATURE_PCID       ( 4*32+17) /* Process Context Identifiers */
-#define X86_FEATURE_DCA                ( 4*32+18) /* Direct Cache Access */
-#define X86_FEATURE_XMM4_1     ( 4*32+19) /* "sse4_1" SSE-4.1 */
-#define X86_FEATURE_XMM4_2     ( 4*32+20) /* "sse4_2" SSE-4.2 */
-#define X86_FEATURE_X2APIC     ( 4*32+21) /* x2APIC */
-#define X86_FEATURE_MOVBE      ( 4*32+22) /* MOVBE instruction */
-#define X86_FEATURE_POPCNT      ( 4*32+23) /* POPCNT instruction */
-#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
-#define X86_FEATURE_AES                ( 4*32+25) /* AES instructions */
-#define X86_FEATURE_XSAVE      ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
-#define X86_FEATURE_OSXSAVE    ( 4*32+27) /* "" XSAVE enabled in the OS */
-#define X86_FEATURE_AVX                ( 4*32+28) /* Advanced Vector Extensions */
-#define X86_FEATURE_F16C       ( 4*32+29) /* 16-bit fp conversions */
-#define X86_FEATURE_RDRAND     ( 4*32+30) /* The RDRAND instruction */
-#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
+/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
+#define X86_FEATURE_XMM3               ( 4*32+ 0) /* "pni" SSE-3 */
+#define X86_FEATURE_PCLMULQDQ          ( 4*32+ 1) /* PCLMULQDQ instruction */
+#define X86_FEATURE_DTES64             ( 4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_MWAIT              ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */
+#define X86_FEATURE_DSCPL              ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
+#define X86_FEATURE_VMX                        ( 4*32+ 5) /* Hardware virtualization */
+#define X86_FEATURE_SMX                        ( 4*32+ 6) /* Safer Mode eXtensions */
+#define X86_FEATURE_EST                        ( 4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2                        ( 4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3              ( 4*32+ 9) /* Supplemental SSE-3 */
+#define X86_FEATURE_CID                        ( 4*32+10) /* Context ID */
+#define X86_FEATURE_SDBG               ( 4*32+11) /* Silicon Debug */
+#define X86_FEATURE_FMA                        ( 4*32+12) /* Fused multiply-add */
+#define X86_FEATURE_CX16               ( 4*32+13) /* CMPXCHG16B instruction */
+#define X86_FEATURE_XTPR               ( 4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM               ( 4*32+15) /* Perf/Debug Capabilities MSR */
+#define X86_FEATURE_PCID               ( 4*32+17) /* Process Context Identifiers */
+#define X86_FEATURE_DCA                        ( 4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_XMM4_1             ( 4*32+19) /* "sse4_1" SSE-4.1 */
+#define X86_FEATURE_XMM4_2             ( 4*32+20) /* "sse4_2" SSE-4.2 */
+#define X86_FEATURE_X2APIC             ( 4*32+21) /* X2APIC */
+#define X86_FEATURE_MOVBE              ( 4*32+22) /* MOVBE instruction */
+#define X86_FEATURE_POPCNT             ( 4*32+23) /* POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */
+#define X86_FEATURE_AES                        ( 4*32+25) /* AES instructions */
+#define X86_FEATURE_XSAVE              ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
+#define X86_FEATURE_OSXSAVE            ( 4*32+27) /* "" XSAVE instruction enabled in the OS */
+#define X86_FEATURE_AVX                        ( 4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_F16C               ( 4*32+29) /* 16-bit FP conversions */
+#define X86_FEATURE_RDRAND             ( 4*32+30) /* RDRAND instruction */
+#define X86_FEATURE_HYPERVISOR         ( 4*32+31) /* Running on a hypervisor */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
-#define X86_FEATURE_XSTORE     ( 5*32+ 2) /* "rng" RNG present (xstore) */
-#define X86_FEATURE_XSTORE_EN  ( 5*32+ 3) /* "rng_en" RNG enabled */
-#define X86_FEATURE_XCRYPT     ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
-#define X86_FEATURE_XCRYPT_EN  ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
-#define X86_FEATURE_ACE2       ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
-#define X86_FEATURE_ACE2_EN    ( 5*32+ 9) /* ACE v2 enabled */
-#define X86_FEATURE_PHE                ( 5*32+10) /* PadLock Hash Engine */
-#define X86_FEATURE_PHE_EN     ( 5*32+11) /* PHE enabled */
-#define X86_FEATURE_PMM                ( 5*32+12) /* PadLock Montgomery Multiplier */
-#define X86_FEATURE_PMM_EN     ( 5*32+13) /* PMM enabled */
+#define X86_FEATURE_XSTORE             ( 5*32+ 2) /* "rng" RNG present (xstore) */
+#define X86_FEATURE_XSTORE_EN          ( 5*32+ 3) /* "rng_en" RNG enabled */
+#define X86_FEATURE_XCRYPT             ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
+#define X86_FEATURE_XCRYPT_EN          ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
+#define X86_FEATURE_ACE2               ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN            ( 5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE                        ( 5*32+10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN             ( 5*32+11) /* PHE enabled */
+#define X86_FEATURE_PMM                        ( 5*32+12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN             ( 5*32+13) /* PMM enabled */
 
-/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
-#define X86_FEATURE_LAHF_LM    ( 6*32+ 0) /* LAHF/SAHF in long mode */
-#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
-#define X86_FEATURE_SVM                ( 6*32+ 2) /* Secure virtual machine */
-#define X86_FEATURE_EXTAPIC    ( 6*32+ 3) /* Extended APIC space */
-#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
-#define X86_FEATURE_ABM                ( 6*32+ 5) /* Advanced bit manipulation */
-#define X86_FEATURE_SSE4A      ( 6*32+ 6) /* SSE-4A */
-#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
-#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
-#define X86_FEATURE_OSVW       ( 6*32+ 9) /* OS Visible Workaround */
-#define X86_FEATURE_IBS                ( 6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_XOP                ( 6*32+11) /* extended AVX instructions */
-#define X86_FEATURE_SKINIT     ( 6*32+12) /* SKINIT/STGI instructions */
-#define X86_FEATURE_WDT                ( 6*32+13) /* Watchdog timer */
-#define X86_FEATURE_LWP                ( 6*32+15) /* Light Weight Profiling */
-#define X86_FEATURE_FMA4       ( 6*32+16) /* 4 operands MAC instructions */
-#define X86_FEATURE_TCE                ( 6*32+17) /* translation cache extension */
-#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
-#define X86_FEATURE_TBM                ( 6*32+21) /* trailing bit manipulations */
-#define X86_FEATURE_TOPOEXT    ( 6*32+22) /* topology extensions CPUID leafs */
-#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
-#define X86_FEATURE_PERFCTR_NB  ( 6*32+24) /* NB performance counter extensions */
-#define X86_FEATURE_BPEXT      (6*32+26) /* data breakpoint extension */
-#define X86_FEATURE_PTSC       ( 6*32+27) /* performance time-stamp counter */
-#define X86_FEATURE_PERFCTR_LLC        ( 6*32+28) /* Last Level Cache performance counter extensions */
-#define X86_FEATURE_MWAITX     ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
+/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
+#define X86_FEATURE_LAHF_LM            ( 6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY         ( 6*32+ 1) /* If yes HyperThreading not valid */
+#define X86_FEATURE_SVM                        ( 6*32+ 2) /* Secure Virtual Machine */
+#define X86_FEATURE_EXTAPIC            ( 6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_CR8_LEGACY         ( 6*32+ 4) /* CR8 in 32-bit mode */
+#define X86_FEATURE_ABM                        ( 6*32+ 5) /* Advanced bit manipulation */
+#define X86_FEATURE_SSE4A              ( 6*32+ 6) /* SSE-4A */
+#define X86_FEATURE_MISALIGNSSE                ( 6*32+ 7) /* Misaligned SSE mode */
+#define X86_FEATURE_3DNOWPREFETCH      ( 6*32+ 8) /* 3DNow prefetch instructions */
+#define X86_FEATURE_OSVW               ( 6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_IBS                        ( 6*32+10) /* Instruction Based Sampling */
+#define X86_FEATURE_XOP                        ( 6*32+11) /* extended AVX instructions */
+#define X86_FEATURE_SKINIT             ( 6*32+12) /* SKINIT/STGI instructions */
+#define X86_FEATURE_WDT                        ( 6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP                        ( 6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4               ( 6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_TCE                        ( 6*32+17) /* Translation Cache Extension */
+#define X86_FEATURE_NODEID_MSR         ( 6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM                        ( 6*32+21) /* Trailing Bit Manipulations */
+#define X86_FEATURE_TOPOEXT            ( 6*32+22) /* Topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE       ( 6*32+23) /* Core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB         ( 6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_BPEXT              ( 6*32+26) /* Data breakpoint extension */
+#define X86_FEATURE_PTSC               ( 6*32+27) /* Performance time-stamp counter */
+#define X86_FEATURE_PERFCTR_LLC                ( 6*32+28) /* Last Level Cache performance counter extensions */
+#define X86_FEATURE_MWAITX             ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
  * CPUID levels like 0x6, 0xA etc, word 7.
  *
  * Reuse free bits when adding new feature flags!
  */
-#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */
-#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
-#define X86_FEATURE_CPB                ( 7*32+ 2) /* AMD Core Performance Boost */
-#define X86_FEATURE_EPB                ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
-#define X86_FEATURE_CAT_L3     ( 7*32+ 4) /* Cache Allocation Technology L3 */
-#define X86_FEATURE_CAT_L2     ( 7*32+ 5) /* Cache Allocation Technology L2 */
-#define X86_FEATURE_CDP_L3     ( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_RING3MWAIT         ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */
+#define X86_FEATURE_CPUID_FAULT                ( 7*32+ 1) /* Intel CPUID faulting */
+#define X86_FEATURE_CPB                        ( 7*32+ 2) /* AMD Core Performance Boost */
+#define X86_FEATURE_EPB                        ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_CAT_L3             ( 7*32+ 4) /* Cache Allocation Technology L3 */
+#define X86_FEATURE_CAT_L2             ( 7*32+ 5) /* Cache Allocation Technology L2 */
+#define X86_FEATURE_CDP_L3             ( 7*32+ 6) /* Code and Data Prioritization L3 */
 
-#define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
-#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-#define X86_FEATURE_SME                ( 7*32+10) /* AMD Secure Memory Encryption */
+#define X86_FEATURE_HW_PSTATE          ( 7*32+ 8) /* AMD HW-PState */
+#define X86_FEATURE_PROC_FEEDBACK      ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+#define X86_FEATURE_SME                        ( 7*32+10) /* AMD Secure Memory Encryption */
 
-#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
-#define X86_FEATURE_INTEL_PT   ( 7*32+15) /* Intel Processor Trace */
-#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
-#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_INTEL_PPIN         ( 7*32+14) /* Intel Processor Inventory Number */
+#define X86_FEATURE_INTEL_PT           ( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_AVX512_4VNNIW      ( 7*32+16) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS      ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
 
-#define X86_FEATURE_MBA         ( 7*32+18) /* Memory Bandwidth Allocation */
+#define X86_FEATURE_MBA                        ( 7*32+18) /* Memory Bandwidth Allocation */
 
 /* Virtualization flags: Linux defined, word 8 */
-#define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
-#define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
-#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
-#define X86_FEATURE_EPT         ( 8*32+ 3) /* Intel Extended Page Table */
-#define X86_FEATURE_VPID        ( 8*32+ 4) /* Intel Virtual Processor ID */
+#define X86_FEATURE_TPR_SHADOW         ( 8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_VNMI               ( 8*32+ 1) /* Intel Virtual NMI */
+#define X86_FEATURE_FLEXPRIORITY       ( 8*32+ 2) /* Intel FlexPriority */
+#define X86_FEATURE_EPT                        ( 8*32+ 3) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID               ( 8*32+ 4) /* Intel Virtual Processor ID */
 
-#define X86_FEATURE_VMMCALL     ( 8*32+15) /* Prefer vmmcall to vmcall */
-#define X86_FEATURE_XENPV       ( 8*32+16) /* "" Xen paravirtual guest */
+#define X86_FEATURE_VMMCALL            ( 8*32+15) /* Prefer VMMCALL to VMCALL */
+#define X86_FEATURE_XENPV              ( 8*32+16) /* "" Xen paravirtual guest */
 
 
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
-#define X86_FEATURE_FSGSBASE   ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
-#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
-#define X86_FEATURE_BMI1       ( 9*32+ 3) /* 1st group bit manipulation extensions */
-#define X86_FEATURE_HLE                ( 9*32+ 4) /* Hardware Lock Elision */
-#define X86_FEATURE_AVX2       ( 9*32+ 5) /* AVX2 instructions */
-#define X86_FEATURE_SMEP       ( 9*32+ 7) /* Supervisor Mode Execution Protection */
-#define X86_FEATURE_BMI2       ( 9*32+ 8) /* 2nd group bit manipulation extensions */
-#define X86_FEATURE_ERMS       ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
-#define X86_FEATURE_INVPCID    ( 9*32+10) /* Invalidate Processor Context ID */
-#define X86_FEATURE_RTM                ( 9*32+11) /* Restricted Transactional Memory */
-#define X86_FEATURE_CQM                ( 9*32+12) /* Cache QoS Monitoring */
-#define X86_FEATURE_MPX                ( 9*32+14) /* Memory Protection Extension */
-#define X86_FEATURE_RDT_A      ( 9*32+15) /* Resource Director Technology Allocation */
-#define X86_FEATURE_AVX512F    ( 9*32+16) /* AVX-512 Foundation */
-#define X86_FEATURE_AVX512DQ   ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
-#define X86_FEATURE_RDSEED     ( 9*32+18) /* The RDSEED instruction */
-#define X86_FEATURE_ADX                ( 9*32+19) /* The ADCX and ADOX instructions */
-#define X86_FEATURE_SMAP       ( 9*32+20) /* Supervisor Mode Access Prevention */
-#define X86_FEATURE_AVX512IFMA  ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
-#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
-#define X86_FEATURE_CLWB       ( 9*32+24) /* CLWB instruction */
-#define X86_FEATURE_AVX512PF   ( 9*32+26) /* AVX-512 Prefetch */
-#define X86_FEATURE_AVX512ER   ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
-#define X86_FEATURE_AVX512CD   ( 9*32+28) /* AVX-512 Conflict Detection */
-#define X86_FEATURE_SHA_NI     ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
-#define X86_FEATURE_AVX512BW   ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
-#define X86_FEATURE_AVX512VL   ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
+#define X86_FEATURE_FSGSBASE           ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
+#define X86_FEATURE_TSC_ADJUST         ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
+#define X86_FEATURE_BMI1               ( 9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_HLE                        ( 9*32+ 4) /* Hardware Lock Elision */
+#define X86_FEATURE_AVX2               ( 9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_SMEP               ( 9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2               ( 9*32+ 8) /* 2nd group bit manipulation extensions */
+#define X86_FEATURE_ERMS               ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
+#define X86_FEATURE_INVPCID            ( 9*32+10) /* Invalidate Processor Context ID */
+#define X86_FEATURE_RTM                        ( 9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_CQM                        ( 9*32+12) /* Cache QoS Monitoring */
+#define X86_FEATURE_MPX                        ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_RDT_A              ( 9*32+15) /* Resource Director Technology Allocation */
+#define X86_FEATURE_AVX512F            ( 9*32+16) /* AVX-512 Foundation */
+#define X86_FEATURE_AVX512DQ           ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
+#define X86_FEATURE_RDSEED             ( 9*32+18) /* RDSEED instruction */
+#define X86_FEATURE_ADX                        ( 9*32+19) /* ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP               ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_AVX512IFMA         ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
+#define X86_FEATURE_CLFLUSHOPT         ( 9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_CLWB               ( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_AVX512PF           ( 9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER           ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD           ( 9*32+28) /* AVX-512 Conflict Detection */
+#define X86_FEATURE_SHA_NI             ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
+#define X86_FEATURE_AVX512BW           ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
+#define X86_FEATURE_AVX512VL           ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
 
-/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
-#define X86_FEATURE_XSAVEOPT   (10*32+ 0) /* XSAVEOPT */
-#define X86_FEATURE_XSAVEC     (10*32+ 1) /* XSAVEC */
-#define X86_FEATURE_XGETBV1    (10*32+ 2) /* XGETBV with ECX = 1 */
-#define X86_FEATURE_XSAVES     (10*32+ 3) /* XSAVES/XRSTORS */
+/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
+#define X86_FEATURE_XSAVEOPT           (10*32+ 0) /* XSAVEOPT instruction */
+#define X86_FEATURE_XSAVEC             (10*32+ 1) /* XSAVEC instruction */
+#define X86_FEATURE_XGETBV1            (10*32+ 2) /* XGETBV with ECX = 1 instruction */
+#define X86_FEATURE_XSAVES             (10*32+ 3) /* XSAVES/XRSTORS instructions */
 
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
-#define X86_FEATURE_CQM_LLC    (11*32+ 1) /* LLC QoS if 1 */
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
+#define X86_FEATURE_CQM_LLC            (11*32+ 1) /* LLC QoS if 1 */
 
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
-#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
-#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
-#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
+#define X86_FEATURE_CQM_OCCUP_LLC      (12*32+ 0) /* LLC occupancy monitoring */
+#define X86_FEATURE_CQM_MBM_TOTAL      (12*32+ 1) /* LLC Total MBM monitoring */
+#define X86_FEATURE_CQM_MBM_LOCAL      (12*32+ 2) /* LLC Local MBM monitoring */
 
-/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
-#define X86_FEATURE_CLZERO     (13*32+0) /* CLZERO instruction */
-#define X86_FEATURE_IRPERF     (13*32+1) /* Instructions Retired Count */
+/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
+#define X86_FEATURE_CLZERO             (13*32+ 0) /* CLZERO instruction */
+#define X86_FEATURE_IRPERF             (13*32+ 1) /* Instructions Retired Count */
+#define X86_FEATURE_XSAVEERPTR         (13*32+ 2) /* Always save/restore FP error pointers */
 
-/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
-#define X86_FEATURE_DTHERM     (14*32+ 0) /* Digital Thermal Sensor */
-#define X86_FEATURE_IDA                (14*32+ 1) /* Intel Dynamic Acceleration */
-#define X86_FEATURE_ARAT       (14*32+ 2) /* Always Running APIC Timer */
-#define X86_FEATURE_PLN                (14*32+ 4) /* Intel Power Limit Notification */
-#define X86_FEATURE_PTS                (14*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_HWP                (14*32+ 7) /* Intel Hardware P-states */
-#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
-#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
-#define X86_FEATURE_HWP_EPP    (14*32+10) /* HWP Energy Perf. Preference */
-#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
+/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
+#define X86_FEATURE_DTHERM             (14*32+ 0) /* Digital Thermal Sensor */
+#define X86_FEATURE_IDA                        (14*32+ 1) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT               (14*32+ 2) /* Always Running APIC Timer */
+#define X86_FEATURE_PLN                        (14*32+ 4) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS                        (14*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_HWP                        (14*32+ 7) /* Intel Hardware P-states */
+#define X86_FEATURE_HWP_NOTIFY         (14*32+ 8) /* HWP Notification */
+#define X86_FEATURE_HWP_ACT_WINDOW     (14*32+ 9) /* HWP Activity Window */
+#define X86_FEATURE_HWP_EPP            (14*32+10) /* HWP Energy Perf. Preference */
+#define X86_FEATURE_HWP_PKG_REQ                (14*32+11) /* HWP Package Level Request */
 
-/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
-#define X86_FEATURE_NPT                (15*32+ 0) /* Nested Page Table support */
-#define X86_FEATURE_LBRV       (15*32+ 1) /* LBR Virtualization support */
-#define X86_FEATURE_SVML       (15*32+ 2) /* "svm_lock" SVM locking MSR */
-#define X86_FEATURE_NRIPS      (15*32+ 3) /* "nrip_save" SVM next_rip save */
-#define X86_FEATURE_TSCRATEMSR  (15*32+ 4) /* "tsc_scale" TSC scaling support */
-#define X86_FEATURE_VMCBCLEAN   (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
-#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
-#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
-#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
-#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
-#define X86_FEATURE_AVIC       (15*32+13) /* Virtual Interrupt Controller */
-#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
-#define X86_FEATURE_VGIF       (15*32+16) /* Virtual GIF */
+/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
+#define X86_FEATURE_NPT                        (15*32+ 0) /* Nested Page Table support */
+#define X86_FEATURE_LBRV               (15*32+ 1) /* LBR Virtualization support */
+#define X86_FEATURE_SVML               (15*32+ 2) /* "svm_lock" SVM locking MSR */
+#define X86_FEATURE_NRIPS              (15*32+ 3) /* "nrip_save" SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR         (15*32+ 4) /* "tsc_scale" TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN          (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID                (15*32+ 6) /* flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS      (15*32+ 7) /* Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER                (15*32+10) /* filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD                (15*32+12) /* pause filter threshold */
+#define X86_FEATURE_AVIC               (15*32+13) /* Virtual Interrupt Controller */
+#define X86_FEATURE_V_VMSAVE_VMLOAD    (15*32+15) /* Virtual VMSAVE VMLOAD */
+#define X86_FEATURE_VGIF               (15*32+16) /* Virtual GIF */
 
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
-#define X86_FEATURE_AVX512VBMI  (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
-#define X86_FEATURE_PKU                (16*32+ 3) /* Protection Keys for Userspace */
-#define X86_FEATURE_OSPKE      (16*32+ 4) /* OS Protection Keys Enable */
-#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
-#define X86_FEATURE_LA57       (16*32+16) /* 5-level page tables */
-#define X86_FEATURE_RDPID      (16*32+22) /* RDPID instruction */
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
+#define X86_FEATURE_AVX512VBMI         (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
+#define X86_FEATURE_UMIP               (16*32+ 2) /* User Mode Instruction Protection */
+#define X86_FEATURE_PKU                        (16*32+ 3) /* Protection Keys for Userspace */
+#define X86_FEATURE_OSPKE              (16*32+ 4) /* OS Protection Keys Enable */
+#define X86_FEATURE_AVX512_VBMI2       (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
+#define X86_FEATURE_GFNI               (16*32+ 8) /* Galois Field New Instructions */
+#define X86_FEATURE_VAES               (16*32+ 9) /* Vector AES */
+#define X86_FEATURE_VPCLMULQDQ         (16*32+10) /* Carry-Less Multiplication Double Quadword */
+#define X86_FEATURE_AVX512_VNNI                (16*32+11) /* Vector Neural Network Instructions */
+#define X86_FEATURE_AVX512_BITALG      (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
+#define X86_FEATURE_AVX512_VPOPCNTDQ   (16*32+14) /* POPCNT for vectors of DW/QW */
+#define X86_FEATURE_LA57               (16*32+16) /* 5-level page tables */
+#define X86_FEATURE_RDPID              (16*32+22) /* RDPID instruction */
 
-/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
-#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
-#define X86_FEATURE_SUCCOR     (17*32+1) /* Uncorrectable error containment and recovery */
-#define X86_FEATURE_SMCA       (17*32+3) /* Scalable MCA */
+/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
+#define X86_FEATURE_OVERFLOW_RECOV     (17*32+ 0) /* MCA overflow recovery support */
+#define X86_FEATURE_SUCCOR             (17*32+ 1) /* Uncorrectable error containment and recovery */
+#define X86_FEATURE_SMCA               (17*32+ 3) /* Scalable MCA */
 
 /*
  * BUG word(s)
  */
-#define X86_BUG(x)             (NCAPINTS*32 + (x))
+#define X86_BUG(x)                     (NCAPINTS*32 + (x))
 
-#define X86_BUG_F00F           X86_BUG(0) /* Intel F00F */
-#define X86_BUG_FDIV           X86_BUG(1) /* FPU FDIV */
-#define X86_BUG_COMA           X86_BUG(2) /* Cyrix 6x86 coma */
-#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
-#define X86_BUG_AMD_APIC_C1E   X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
-#define X86_BUG_11AP           X86_BUG(5) /* Bad local APIC aka 11AP */
-#define X86_BUG_FXSAVE_LEAK    X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
-#define X86_BUG_CLFLUSH_MONITOR        X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
-#define X86_BUG_SYSRET_SS_ATTRS        X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+#define X86_BUG_F00F                   X86_BUG(0) /* Intel F00F */
+#define X86_BUG_FDIV                   X86_BUG(1) /* FPU FDIV */
+#define X86_BUG_COMA                   X86_BUG(2) /* Cyrix 6x86 coma */
+#define X86_BUG_AMD_TLB_MMATCH         X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
+#define X86_BUG_AMD_APIC_C1E           X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
+#define X86_BUG_11AP                   X86_BUG(5) /* Bad local APIC aka 11AP */
+#define X86_BUG_FXSAVE_LEAK            X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_BUG_CLFLUSH_MONITOR                X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
+#define X86_BUG_SYSRET_SS_ATTRS                X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
 #ifdef CONFIG_X86_32
 /*
  * 64-bit kernels don't use X86_BUG_ESPFIX.  Make the define conditional
  * to avoid confusion.
  */
-#define X86_BUG_ESPFIX         X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
+#define X86_BUG_ESPFIX                 X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
 #endif
-#define X86_BUG_NULL_SEG       X86_BUG(10) /* Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE   X86_BUG(11) /* SWAPGS without input dep on GS */
-#define X86_BUG_MONITOR                X86_BUG(12) /* IPI required to wake up remote CPU */
-#define X86_BUG_AMD_E400       X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+#define X86_BUG_NULL_SEG               X86_BUG(10) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE           X86_BUG(11) /* SWAPGS without input dep on GS */
+#define X86_BUG_MONITOR                        X86_BUG(12) /* IPI required to wake up remote CPU */
+#define X86_BUG_AMD_E400               X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+
 #endif /* _ASM_X86_CPUFEATURES_H */
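Every X86_FEATURE_* constant above packs a capability word index and a bit position into one number, (word*32 + bit), and X86_BUG(x) simply continues that numbering after the NCAPINTS capability words. A minimal sketch of the mapping, assuming nothing beyond the encodings shown above; test_feature() and x86_capability[] here are illustrative stand-ins, not the kernel's real cpu_has()/boot_cpu_has() helpers:

#include <stdbool.h>
#include <stdint.h>

#define NCAPINTS            18                  /* number of 32-bit capability words */
#define X86_FEATURE_AVX2    ( 9*32 + 5)         /* word 9, bit 5 */
#define X86_BUG(x)          (NCAPINTS*32 + (x)) /* bug bits live past the capability words */

static uint32_t x86_capability[NCAPINTS + 1];   /* +1 word for the first bug word in this sketch */

static bool test_feature(unsigned int feature)
{
        /* word index = feature / 32, bit index = feature % 32 */
        return x86_capability[feature / 32] & (1u << (feature % 32));
}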
index c10c9128f54e6b7296014a74e7a253a1eedaacd9..14d6d50073142b0f49b06850ccd0d394546479ee 100644 (file)
 # define DISABLE_MPX   (1<<(X86_FEATURE_MPX & 31))
 #endif
 
+#ifdef CONFIG_X86_INTEL_UMIP
+# define DISABLE_UMIP  0
+#else
+# define DISABLE_UMIP  (1<<(X86_FEATURE_UMIP & 31))
+#endif
+
 #ifdef CONFIG_X86_64
 # define DISABLE_VME           (1<<(X86_FEATURE_VME & 31))
 # define DISABLE_K6_MTRR       (1<<(X86_FEATURE_K6_MTRR & 31))
@@ -63,7 +69,7 @@
 #define DISABLED_MASK13        0
 #define DISABLED_MASK14        0
 #define DISABLED_MASK15        0
-#define DISABLED_MASK16        (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
+#define DISABLED_MASK16        (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
 #define DISABLED_MASK17        0
 #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
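The DISABLE_UMIP / DISABLED_MASK16 pattern above masks a feature bit out at build time when its Kconfig option is off. A rough sketch of how such a per-word mask gets consumed; umip_disabled() is a simplified illustration, not the kernel's cpu_feature_enabled() machinery:

#define X86_FEATURE_UMIP       (16*32 + 2)

#ifdef CONFIG_X86_INTEL_UMIP
# define DISABLE_UMIP  0
#else
# define DISABLE_UMIP  (1 << (X86_FEATURE_UMIP & 31))
#endif

#define DISABLED_MASK16        (DISABLE_UMIP /* | DISABLE_PKU | ... */)

/* Folds to constant true when the feature is compiled out, so the
 * run-time capability bitmap never needs to be consulted. */
#define umip_disabled()        (!!(DISABLED_MASK16 & (1u << (X86_FEATURE_UMIP & 31))))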
 
index bde77d7c4390ab0a6d14332cceca7dc7c8571894..37292bb5ce6065c31c21b2af61fa077de432a081 100644 (file)
@@ -6,7 +6,7 @@ RM ?= rm -f
 
 # Make the path relative to DESTDIR, not prefix
 ifndef DESTDIR
-prefix?=$(HOME)
+prefix ?= /usr/local
 endif
 mandir ?= $(prefix)/share/man
 man8dir = $(mandir)/man8
index 813826c50936b174ce11821b0e606ce0e49668cd..ec3052c0b004011573861231edcdd0d0e982c088 100644 (file)
@@ -45,8 +45,8 @@ $(LIBBPF)-clean:
        $(call QUIET_CLEAN, libbpf)
        $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null
 
-prefix = /usr
-bash_compdir ?= $(prefix)/share/bash-completion/completions
+prefix = /usr/local
+bash_compdir ?= /usr/share/bash-completion/completions
 
 CC = gcc
 
@@ -76,6 +76,7 @@ clean: $(LIBBPF)-clean
        $(Q)rm -rf $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
 
 install:
+       install -m 0755 -d $(prefix)/sbin
        install $(OUTPUT)bpftool $(prefix)/sbin/bpftool
        install -m 0755 -d $(bash_compdir)
        install -m 0644 bash-completion/bpftool $(bash_compdir)
@@ -88,5 +89,5 @@ doc-install:
 
 FORCE:
 
-.PHONY: all clean FORCE
+.PHONY: all clean FORCE install doc doc-install
 .DEFAULT_GOAL := all
index d6e4762170a4464d029415edc189030f55841844..d294bc8168bed8cc72f8926c6947b98aba0cbcc6 100644 (file)
@@ -58,11 +58,19 @@ bool show_pinned;
 struct pinned_obj_table prog_table;
 struct pinned_obj_table map_table;
 
+static void __noreturn clean_and_exit(int i)
+{
+       if (json_output)
+               jsonw_destroy(&json_wtr);
+
+       exit(i);
+}
+
 void usage(void)
 {
        last_do_help(last_argc - 1, last_argv + 1);
 
-       exit(-1);
+       clean_and_exit(-1);
 }
 
 static int do_help(int argc, char **argv)
@@ -280,6 +288,7 @@ int main(int argc, char **argv)
        hash_init(prog_table.table);
        hash_init(map_table.table);
 
+       opterr = 0;
        while ((opt = getopt_long(argc, argv, "Vhpjf",
                                  options, NULL)) >= 0) {
                switch (opt) {
@@ -291,13 +300,25 @@ int main(int argc, char **argv)
                        pretty_output = true;
                        /* fall through */
                case 'j':
-                       json_output = true;
+                       if (!json_output) {
+                               json_wtr = jsonw_new(stdout);
+                               if (!json_wtr) {
+                                       p_err("failed to create JSON writer");
+                                       return -1;
+                               }
+                               json_output = true;
+                       }
+                       jsonw_pretty(json_wtr, pretty_output);
                        break;
                case 'f':
                        show_pinned = true;
                        break;
                default:
-                       usage();
+                       p_err("unrecognized option '%s'", argv[optind - 1]);
+                       if (json_output)
+                               clean_and_exit(-1);
+                       else
+                               usage();
                }
        }
 
@@ -306,15 +327,6 @@ int main(int argc, char **argv)
        if (argc < 0)
                usage();
 
-       if (json_output) {
-               json_wtr = jsonw_new(stdout);
-               if (!json_wtr) {
-                       p_err("failed to create JSON writer");
-                       return -1;
-               }
-               jsonw_pretty(json_wtr, pretty_output);
-       }
-
        bfd_init();
 
        ret = cmd_select(cmds, argc, argv, do_help);
index 9c191e222d6f824d5d5be3d84611a551a6dbdeca..bff330b49791e5bc6021d8188bd0e637db20ea3b 100644 (file)
@@ -41,6 +41,7 @@
 #include <stdbool.h>
 #include <stdio.h>
 #include <linux/bpf.h>
+#include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/hashtable.h>
 
@@ -50,7 +51,7 @@
 
 #define NEXT_ARG()     ({ argc--; argv++; if (argc < 0) usage(); })
 #define NEXT_ARGP()    ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
-#define BAD_ARG()      ({ p_err("what is '%s'?\n", *argv); -1; })
+#define BAD_ARG()      ({ p_err("what is '%s'?", *argv); -1; })
 
 #define ERR_MAX_LEN    1024
 
@@ -80,7 +81,7 @@ void p_info(const char *fmt, ...);
 
 bool is_prefix(const char *pfx, const char *str);
 void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep);
-void usage(void) __attribute__((noreturn));
+void usage(void) __noreturn;
 
 struct pinned_obj_table {
        DECLARE_HASHTABLE(table, 16);
index eaa3bec273c8e21fcb15fbf5ee0407017b12c036..4c99c57736cefd155326293be053a7c4ffef9c6a 100644 (file)
@@ -193,11 +193,14 @@ static void kvp_update_mem_state(int pool)
        for (;;) {
                readp = &record[records_read];
                records_read += fread(readp, sizeof(struct kvp_record),
-                                       ENTRIES_PER_BLOCK * num_blocks,
-                                       filep);
+                               ENTRIES_PER_BLOCK * num_blocks - records_read,
+                               filep);
 
                if (ferror(filep)) {
-                       syslog(LOG_ERR, "Failed to read file, pool: %d", pool);
+                       syslog(LOG_ERR,
+                               "Failed to read file, pool: %d; error: %d %s",
+                                pool, errno, strerror(errno));
+                       kvp_release_lock(pool);
                        exit(EXIT_FAILURE);
                }
 
@@ -210,6 +213,7 @@ static void kvp_update_mem_state(int pool)
 
                        if (record == NULL) {
                                syslog(LOG_ERR, "malloc failed");
+                               kvp_release_lock(pool);
                                exit(EXIT_FAILURE);
                        }
                        continue;
@@ -224,15 +228,11 @@ static void kvp_update_mem_state(int pool)
        fclose(filep);
        kvp_release_lock(pool);
 }
+
 static int kvp_file_init(void)
 {
        int  fd;
-       FILE *filep;
-       size_t records_read;
        char *fname;
-       struct kvp_record *record;
-       struct kvp_record *readp;
-       int num_blocks;
        int i;
        int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
 
@@ -246,61 +246,19 @@ static int kvp_file_init(void)
 
        for (i = 0; i < KVP_POOL_COUNT; i++) {
                fname = kvp_file_info[i].fname;
-               records_read = 0;
-               num_blocks = 1;
                sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
                fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
 
                if (fd == -1)
                        return 1;
 
-
-               filep = fopen(fname, "re");
-               if (!filep) {
-                       close(fd);
-                       return 1;
-               }
-
-               record = malloc(alloc_unit * num_blocks);
-               if (record == NULL) {
-                       fclose(filep);
-                       close(fd);
-                       return 1;
-               }
-               for (;;) {
-                       readp = &record[records_read];
-                       records_read += fread(readp, sizeof(struct kvp_record),
-                                       ENTRIES_PER_BLOCK,
-                                       filep);
-
-                       if (ferror(filep)) {
-                               syslog(LOG_ERR, "Failed to read file, pool: %d",
-                                      i);
-                               exit(EXIT_FAILURE);
-                       }
-
-                       if (!feof(filep)) {
-                               /*
-                                * We have more data to read.
-                                */
-                               num_blocks++;
-                               record = realloc(record, alloc_unit *
-                                               num_blocks);
-                               if (record == NULL) {
-                                       fclose(filep);
-                                       close(fd);
-                                       return 1;
-                               }
-                               continue;
-                       }
-                       break;
-               }
                kvp_file_info[i].fd = fd;
-               kvp_file_info[i].num_blocks = num_blocks;
-               kvp_file_info[i].records = record;
-               kvp_file_info[i].num_records = records_read;
-               fclose(filep);
-
+               kvp_file_info[i].num_blocks = 1;
+               kvp_file_info[i].records = malloc(alloc_unit);
+               if (kvp_file_info[i].records == NULL)
+                       return 1;
+               kvp_file_info[i].num_records = 0;
+               kvp_update_mem_state(i);
        }
 
        return 0;
index 07fd03c74a775a48a5183f96cb85e27b4f85fedd..04e32f965ad7f038beb2d8db9dc2119e07628744 100644 (file)
@@ -84,8 +84,6 @@
 
 #define uninitialized_var(x) x = *(&(x))
 
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
-
 #include <linux/types.h>
 
 /*
@@ -135,20 +133,19 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
- * compiler is aware of some particular ordering.  One way to make the
- * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
  *
- * In contrast to ACCESS_ONCE these two macros will also work on aggregate
- * data types like structs or unions. If the size of the accessed data
- * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE()  will fall back to memcpy and print a
- * compile-time warning.
+ * These two macros will also work on aggregate data types like structs or
+ * unions. If the size of the accessed data type exceeds the word size of
+ * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
+ * fall back to memcpy and print a compile-time warning.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
  * mutilate accesses that either do not require ordering or that interact
  * with an explicit memory barrier or atomic instruction that provides the
  * required ordering.
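As a concrete illustration of the "different C statements" rule in the comment above, a minimal userspace sketch; the READ_ONCE/WRITE_ONCE stand-ins below are plain volatile casts rather than the full __read_once_size() versions from this header, and they constrain only the compiler, not CPU-level ordering:

#define READ_ONCE(x)        (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)  (*(volatile typeof(x) *)&(x) = (val))

static int data;
static int ready;

void publish(int v)
{
        WRITE_ONCE(data, v);    /* two separate statements: the compiler may   */
        WRITE_ONCE(ready, 1);   /* not merge, refetch or reorder these stores  */
}

int consume(void)
{
        if (!READ_ONCE(ready))          /* one access per C statement ...      */
                return -1;
        return READ_ONCE(data);         /* ... keeps the loads distinct too    */
}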
diff --git a/tools/include/linux/kmemcheck.h b/tools/include/linux/kmemcheck.h
deleted file mode 100644 (file)
index ea32a7d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
index 940c1b0756591e8e65792ce6dafbeb568a5aa15d..6b0c36a58fcbc38b67157b53efe2044bd848f137 100644 (file)
@@ -48,6 +48,7 @@ static inline int debug_locks_off(void)
 #define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
 #define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
 #define pr_warn pr_err
+#define pr_cont pr_err
 
 #define list_del_rcu list_del
 
diff --git a/tools/include/uapi/asm-generic/bpf_perf_event.h b/tools/include/uapi/asm-generic/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..53815d2
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
+
+#include <linux/ptrace.h>
+
+/* Export kernel pt_regs structure */
+typedef struct pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__ */
index 2dffcbf705b37857d9289dedd2e5807322382984..653687d9771b9d0e824fe4b25eee079f7fcec1cc 100644 (file)
@@ -13,6 +13,7 @@
 #define MAP_NONBLOCK   0x10000         /* do not block on IO */
 #define MAP_STACK      0x20000         /* give out an address that is best suited for process/thread stacks */
 #define MAP_HUGETLB    0x40000         /* create a huge page mapping */
+#define MAP_SYNC       0x80000         /* perform synchronous page faults for the mapping */
 
 /* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
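MAP_SYNC asks for synchronous page faults: the kernel persists the file metadata needed to reach a page before the write fault completes, so on a DAX mapping flushing CPU caches is enough to make stores durable without a later fsync(). A hedged usage sketch; map_sync_region() and its arguments are illustrative, and the flag is only honoured together with MAP_SHARED_VALIDATE on a filesystem and device that support it:

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MAP_SHARED_VALIDATE
# define MAP_SHARED_VALIDATE   0x03
#endif
#ifndef MAP_SYNC
# define MAP_SYNC              0x80000
#endif

void *map_sync_region(const char *path, size_t len)
{
        int fd = open(path, O_RDWR);
        if (fd < 0)
                return NULL;

        /* Expect EOPNOTSUPP here if the fs/device cannot honour MAP_SYNC. */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
        close(fd);                      /* the mapping holds its own reference */
        return p == MAP_FAILED ? NULL : p;
}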
 
diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..13a5853
--- /dev/null
@@ -0,0 +1,7 @@
+#if defined(__aarch64__)
+#include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h"
+#elif defined(__s390__)
+#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h"
+#else
+#include <uapi/asm-generic/bpf_perf_event.h>
+#endif
index 97677cd6964db099689f96f11b9731c748bebcfa..6fdff5945c8a08f27af713f6b59cb27b315da447 100644 (file)
@@ -737,6 +737,28 @@ struct drm_syncobj_array {
        __u32 pad;
 };
 
+/* Query current scanout sequence number */
+struct drm_crtc_get_sequence {
+       __u32 crtc_id;          /* requested crtc_id */
+       __u32 active;           /* return: crtc output is active */
+       __u64 sequence;         /* return: most recent vblank sequence */
+       __s64 sequence_ns;      /* return: most recent time of first pixel out */
+};
+
+/* Queue event to be delivered at specified sequence. Time stamp marks
+ * when the first pixel of the refresh cycle leaves the display engine
+ * for the display
+ */
+#define DRM_CRTC_SEQUENCE_RELATIVE             0x00000001      /* sequence is relative to current */
+#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS         0x00000002      /* Use next sequence if we've missed */
+
+struct drm_crtc_queue_sequence {
+       __u32 crtc_id;
+       __u32 flags;
+       __u64 sequence;         /* on input, target sequence. on output, actual sequence */
+       __u64 user_data;        /* user data passed to event */
+};
+
 #if defined(__cplusplus)
 }
 #endif
@@ -819,6 +841,9 @@ extern "C" {
 
 #define DRM_IOCTL_WAIT_VBLANK          DRM_IOWR(0x3a, union drm_wait_vblank)
 
+#define DRM_IOCTL_CRTC_GET_SEQUENCE    DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
+#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE  DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
+
 #define DRM_IOCTL_UPDATE_DRAW          DRM_IOW(0x3f, struct drm_update_draw)
 
 #define DRM_IOCTL_MODE_GETRESOURCES    DRM_IOWR(0xA0, struct drm_mode_card_res)
@@ -863,6 +888,11 @@ extern "C" {
 #define DRM_IOCTL_SYNCOBJ_RESET                DRM_IOWR(0xC4, struct drm_syncobj_array)
 #define DRM_IOCTL_SYNCOBJ_SIGNAL       DRM_IOWR(0xC5, struct drm_syncobj_array)
 
+#define DRM_IOCTL_MODE_CREATE_LEASE    DRM_IOWR(0xC6, struct drm_mode_create_lease)
+#define DRM_IOCTL_MODE_LIST_LESSEES    DRM_IOWR(0xC7, struct drm_mode_list_lessees)
+#define DRM_IOCTL_MODE_GET_LEASE       DRM_IOWR(0xC8, struct drm_mode_get_lease)
+#define DRM_IOCTL_MODE_REVOKE_LEASE    DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
+
 /**
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
@@ -893,6 +923,7 @@ struct drm_event {
 
 #define DRM_EVENT_VBLANK 0x01
 #define DRM_EVENT_FLIP_COMPLETE 0x02
+#define DRM_EVENT_CRTC_SEQUENCE        0x03
 
 struct drm_event_vblank {
        struct drm_event base;
@@ -903,6 +934,16 @@ struct drm_event_vblank {
        __u32 crtc_id; /* 0 on older kernels that do not support this */
 };
 
+/* Event delivered at sequence. Time stamp marks when the first pixel
+ * of the refresh cycle leaves the display engine for the display
+ */
+struct drm_event_crtc_sequence {
+       struct drm_event        base;
+       __u64                   user_data;
+       __s64                   time_ns;
+       __u64                   sequence;
+};
+
 /* typedef area */
 #ifndef __KERNEL__
 typedef struct drm_clip_rect drm_clip_rect_t;
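A rough usage sketch for the new CRTC sequence ioctls; queue_next_vblank_event() is an illustrative name, the code assumes the uapi <drm/drm.h> header and an open DRM fd with a valid crtc_id, and the completion is later read back as a struct drm_event_crtc_sequence (type DRM_EVENT_CRTC_SEQUENCE) from the usual DRM event stream:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int queue_next_vblank_event(int drm_fd, unsigned int crtc_id, unsigned long long cookie)
{
        struct drm_crtc_queue_sequence qs;

        memset(&qs, 0, sizeof(qs));
        qs.crtc_id   = crtc_id;
        qs.flags     = DRM_CRTC_SEQUENCE_RELATIVE;      /* count from the current sequence */
        qs.sequence  = 1;                               /* i.e. the next refresh cycle */
        qs.user_data = cookie;                          /* echoed back in the event */

        /* On success the kernel rewrites qs.sequence with the absolute target. */
        return ioctl(drm_fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, &qs);
}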
index 9816590d3ad24b0d7037eb65f9b84c3c571c9b53..ac3c6503ca27f156ddbc3dd304bbf91671a9ac2a 100644 (file)
@@ -397,10 +397,20 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_MIN_EU_IN_POOL       39
 #define I915_PARAM_MMAP_GTT_VERSION     40
 
-/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
  * priorities and the driver will attempt to execute batches in priority order.
+ * The param returns a capability bitmask, nonzero implies that the scheduler
+ * is enabled, with different features present according to the mask.
+ *
+ * The initial priority for each batch is supplied by the context and is
+ * controlled via I915_CONTEXT_PARAM_PRIORITY.
  */
 #define I915_PARAM_HAS_SCHEDULER        41
+#define   I915_SCHEDULER_CAP_ENABLED   (1ul << 0)
+#define   I915_SCHEDULER_CAP_PRIORITY  (1ul << 1)
+#define   I915_SCHEDULER_CAP_PREEMPTION        (1ul << 2)
+
 #define I915_PARAM_HUC_STATUS           42
 
 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -1309,14 +1319,16 @@ struct drm_i915_reg_read {
         * be specified
         */
        __u64 offset;
+#define I915_REG_READ_8B_WA (1ul << 0)
+
        __u64 val; /* Return value */
 };
 /* Known registers:
  *
  * Render engine timestamp - 0x2358 + 64bit - gen7+
  * - Note this register returns an invalid value if using the default
- *   single instruction 8byte read, in order to workaround that use
- *   offset (0x2538 | 1) instead.
+ *   single instruction 8byte read, in order to workaround that pass
+ *   flag I915_REG_READ_8B_WA in offset field.
  *
  */
 
@@ -1359,6 +1371,10 @@ struct drm_i915_gem_context_param {
 #define I915_CONTEXT_PARAM_GTT_SIZE    0x3
 #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE    0x4
 #define I915_CONTEXT_PARAM_BANNABLE    0x5
+#define I915_CONTEXT_PARAM_PRIORITY    0x6
+#define   I915_CONTEXT_MAX_USER_PRIORITY       1023 /* inclusive */
+#define   I915_CONTEXT_DEFAULT_PRIORITY                0
+#define   I915_CONTEXT_MIN_USER_PRIORITY       -1023 /* inclusive */
        __u64 value;
 };
 
@@ -1510,9 +1526,14 @@ struct drm_i915_perf_oa_config {
        __u32 n_boolean_regs;
        __u32 n_flex_regs;
 
-       __u64 __user mux_regs_ptr;
-       __u64 __user boolean_regs_ptr;
-       __u64 __user flex_regs_ptr;
+       /*
+        * These fields are pointers to tuples of u32 values (register
+        * address, value). For example the expected length of the buffer
+        * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+        */
+       __u64 mux_regs_ptr;
+       __u64 boolean_regs_ptr;
+       __u64 flex_regs_ptr;
 };
 
 #if defined(__cplusplus)
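A small sketch of how userspace might probe the new scheduler capability mask; i915_scheduler_caps() is an illustrative name, and the code assumes the uapi <drm/i915_drm.h> header plus an already-open i915 fd, with error handling trimmed:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int i915_scheduler_caps(int drm_fd)
{
        int caps = 0;
        struct drm_i915_getparam gp = {
                .param = I915_PARAM_HAS_SCHEDULER,
                .value = &caps,
        };

        if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
                return 0;       /* treat as: no scheduler information */

        /* Non-zero means the scheduler is enabled; bits such as
         * I915_SCHEDULER_CAP_PRIORITY and I915_SCHEDULER_CAP_PREEMPTION
         * report which features it offers. */
        return caps;
}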
index 0674272598205c300e7f573e38dae6087de94429..8f95303f9d807d10d4fd6850d91a2486b0a490ec 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /* Copyright (c) 2016 Facebook
  *
  * This program is free software; you can redistribute it and/or
@@ -7,11 +8,10 @@
 #ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
 #define _UAPI__LINUX_BPF_PERF_EVENT_H__
 
-#include <linux/types.h>
-#include <linux/ptrace.h>
+#include <asm/bpf_perf_event.h>
 
 struct bpf_perf_event_data {
-       struct pt_regs regs;
+       bpf_user_pt_regs_t regs;
        __u64 sample_period;
 };
 
index 481e103da78ed42a5a76447e836ee8374541d7c7..ef130501092531b23148898efc08b0e521f3354c 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _UAPI_LINUX_KCMP_H
 #define _UAPI_LINUX_KCMP_H
 
index 7e99999d6236fa2940fa2b565442e8b1b1331407..496e59a2738ba99308f438e1f0509e66e17086cb 100644 (file)
@@ -630,9 +630,9 @@ struct kvm_s390_irq {
 
 struct kvm_s390_irq_state {
        __u64 buf;
-       __u32 flags;
+       __u32 flags;        /* will stay unused for compatibility reasons */
        __u32 len;
-       __u32 reserved[4];
+       __u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
 /* for KVM_SET_GUEST_DEBUG */
@@ -931,6 +931,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_PPC_SMT_POSSIBLE 147
 #define KVM_CAP_HYPERV_SYNIC2 148
 #define KVM_CAP_HYPERV_VP_INDEX 149
+#define KVM_CAP_S390_AIS_MIGRATION 150
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 362493a2f950b3024efae42ac321454ab66d845d..b9a4953018edeafa64e951e4ed8d53fa94c7a1cb 100644 (file)
@@ -942,6 +942,7 @@ enum perf_callchain_context {
 #define PERF_AUX_FLAG_TRUNCATED                0x01    /* record was truncated to fit */
 #define PERF_AUX_FLAG_OVERWRITE                0x02    /* snapshot from overwrite mode */
 #define PERF_AUX_FLAG_PARTIAL          0x04    /* record contains gaps */
+#define PERF_AUX_FLAG_COLLISION                0x08    /* sample collided with another */
 
 #define PERF_FLAG_FD_NO_GROUP          (1UL << 0)
 #define PERF_FLAG_FD_OUTPUT            (1UL << 1)
index a8d0759a9e400c5d472fe37d13a924bc9e9777a6..af5f8c2df87ac51b401acab46c817b649e99893d 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _LINUX_PRCTL_H
 #define _LINUX_PRCTL_H
 
@@ -197,4 +198,13 @@ struct prctl_mm_map {
 # define PR_CAP_AMBIENT_LOWER          3
 # define PR_CAP_AMBIENT_CLEAR_ALL      4
 
+/* arm64 Scalable Vector Extension controls */
+/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */
+#define PR_SVE_SET_VL                  50      /* set task vector length */
+# define PR_SVE_SET_VL_ONEXEC          (1 << 18) /* defer effect until exec */
+#define PR_SVE_GET_VL                  51      /* get task vector length */
+/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */
+# define PR_SVE_VL_LEN_MASK            0xffff
+# define PR_SVE_VL_INHERIT             (1 << 17) /* inherit across exec */
+
 #endif /* _LINUX_PRCTL_H */
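A hedged userspace sketch of the new SVE controls (arm64 only); sve_vl_demo() and the 32-byte vector length are illustrative, and the fallback defines only cover toolchains whose headers predate these constants:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SVE_SET_VL
# define PR_SVE_SET_VL         50
# define PR_SVE_GET_VL         51
# define PR_SVE_VL_LEN_MASK    0xffff
# define PR_SVE_VL_INHERIT     (1 << 17)
#endif

int sve_vl_demo(void)
{
        int ret = prctl(PR_SVE_GET_VL);
        if (ret < 0)
                return -1;      /* kernel or CPU without SVE support */

        printf("current vector length: %d bytes\n", ret & PR_SVE_VL_LEN_MASK);

        /* Request a 32-byte vector length and keep it across exec(). */
        return prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
}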
index 217cf6f95c366037ccd2ff3cedb1d61de22c616a..a5684d0968b4fd087905e659c0ce80bd170434c2 100755 (executable)
@@ -478,7 +478,7 @@ class Provider(object):
     @staticmethod
     def is_field_wanted(fields_filter, field):
         """Indicate whether field is valid according to fields_filter."""
-        if not fields_filter or fields_filter == "help":
+        if not fields_filter:
             return True
         return re.match(fields_filter, field) is not None
 
@@ -549,8 +549,8 @@ class TracepointProvider(Provider):
 
     def update_fields(self, fields_filter):
         """Refresh fields, applying fields_filter"""
-        self._fields = [field for field in self.get_available_fields()
-                        if self.is_field_wanted(fields_filter, field)]
+        self.fields = [field for field in self.get_available_fields()
+                       if self.is_field_wanted(fields_filter, field)]
 
     @staticmethod
     def get_online_cpus():
@@ -950,7 +950,8 @@ class Tui(object):
             curses.nocbreak()
             curses.endwin()
 
-    def get_all_gnames(self):
+    @staticmethod
+    def get_all_gnames():
         """Returns a list of (pid, gname) tuples of all running guests"""
         res = []
         try:
@@ -963,7 +964,7 @@ class Tui(object):
             # perform a sanity check before calling the more expensive
             # function to possibly extract the guest name
             if ' -name ' in line[1]:
-                res.append((line[0], self.get_gname_from_pid(line[0])))
+                res.append((line[0], Tui.get_gname_from_pid(line[0])))
         child.stdout.close()
 
         return res
@@ -984,7 +985,8 @@ class Tui(object):
         except Exception:
             self.screen.addstr(row + 1, 2, 'Not available')
 
-    def get_pid_from_gname(self, gname):
+    @staticmethod
+    def get_pid_from_gname(gname):
         """Fuzzy function to convert guest name to QEMU process pid.
 
         Returns a list of potential pids, can be empty if no match found.
@@ -992,7 +994,7 @@ class Tui(object):
 
         """
         pids = []
-        for line in self.get_all_gnames():
+        for line in Tui.get_all_gnames():
             if gname == line[1]:
                 pids.append(int(line[0]))
 
@@ -1090,15 +1092,16 @@ class Tui(object):
             # sort by totals
             return (0, -stats[x][0])
         total = 0.
-        for val in stats.values():
-            total += val[0]
+        for key in stats.keys():
+            if key.find('(') is -1:
+                total += stats[key][0]
         if self._sorting == SORT_DEFAULT:
             sortkey = sortCurAvg
         else:
             sortkey = sortTotal
+        tavg = 0
         for key in sorted(stats.keys(), key=sortkey):
-
-            if row >= self.screen.getmaxyx()[0]:
+            if row >= self.screen.getmaxyx()[0] - 1:
                 break
             values = stats[key]
             if not values[0] and not values[1]:
@@ -1110,9 +1113,15 @@ class Tui(object):
                 self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' %
                                    (key, values[0], values[0] * 100 / total,
                                     cur))
+                if cur is not '' and key.find('(') is -1:
+                    tavg += cur
             row += 1
         if row == 3:
             self.screen.addstr(4, 1, 'No matching events reported yet')
+        else:
+            self.screen.addstr(row, 1, '%-40s %10d        %8s' %
+                               ('Total', total, tavg if tavg else ''),
+                               curses.A_BOLD)
         self.screen.refresh()
 
     def show_msg(self, text):
@@ -1358,7 +1367,7 @@ class Tui(object):
                 if char == 'x':
                     self.update_drilldown()
                     # prevents display of current values on next refresh
-                    self.stats.get()
+                    self.stats.get(self._display_guests)
             except KeyboardInterrupt:
                 break
             except curses.error:
@@ -1451,16 +1460,13 @@ Press any other key to refresh statistics immediately.
         try:
             pids = Tui.get_pid_from_gname(val)
         except:
-            raise optparse.OptionValueError('Error while searching for guest '
-                                            '"{}", use "-p" to specify a pid '
-                                            'instead'.format(val))
+            sys.exit('Error while searching for guest "{}". Use "-p" to '
+                     'specify a pid instead?'.format(val))
         if len(pids) == 0:
-            raise optparse.OptionValueError('No guest by the name "{}" '
-                                            'found'.format(val))
+            sys.exit('Error: No guest by the name "{}" found'.format(val))
         if len(pids) > 1:
-            raise optparse.OptionValueError('Multiple processes found (pids: '
-                                            '{}) - use "-p" to specify a pid '
-                                            'instead'.format(" ".join(pids)))
+            sys.exit('Error: Multiple processes found (pids: {}). Use "-p" '
+                     'to specify the desired pid'.format(" ".join(pids)))
         parser.values.pid = pids[0]
 
     optparser = optparse.OptionParser(description=description_text,
@@ -1518,7 +1524,16 @@ Press any other key to refresh statistics immediately.
                          help='restrict statistics to guest by name',
                          callback=cb_guest_to_pid,
                          )
-    (options, _) = optparser.parse_args(sys.argv)
+    options, unkn = optparser.parse_args(sys.argv)
+    if len(unkn) != 1:
+        sys.exit('Error: Extra argument(s): ' + ' '.join(unkn[1:]))
+    try:
+        # verify that we were passed a valid regex up front
+        re.compile(options.fields)
+    except re.error:
+        sys.exit('Error: "' + options.fields + '" is not a valid regular '
+                 'expression')
+
     return options
 
 
@@ -1564,16 +1579,13 @@ def main():
 
     stats = Stats(options)
 
-    if options.fields == "help":
-        event_list = "\n"
-        s = stats.get()
-        for key in s.keys():
-            if key.find('(') != -1:
-                key = key[0:key.find('(')]
-            if event_list.find('\n' + key + '\n') == -1:
-                event_list += key + '\n'
-        sys.stdout.write(event_list)
-        return ""
+    if options.fields == 'help':
+        stats.fields_filter = None
+        event_list = []
+        for key in stats.get().keys():
+            event_list.append(key.split('(', 1)[0])
+        sys.stdout.write('  ' + '\n  '.join(sorted(set(event_list))) + '\n')
+        sys.exit(0)
 
     if options.log:
         log(stats)
index e5cf836be8a1848bb82f39cfa3c7c75dcc67b4fa..b5b3810c9e945d7f3a39568840fbc5b73f84983b 100644 (file)
@@ -50,6 +50,8 @@ INTERACTIVE COMMANDS
 *s*::   set update interval
 
 *x*::  toggle reporting of stats for child trace events
+ ::     *Note*: The stats for the parents summarize the respective child trace
+                events
 
 Press any other key to refresh statistics immediately.
 
@@ -86,7 +88,7 @@ OPTIONS
 
 -f<fields>::
 --fields=<fields>::
-       fields to display (regex)
+       fields to display (regex), "-f help" for a list of available events
 
 -h::
 --help::
index d3102c865a95e0ea82c6979c9acb72c50a61fce1..914cff12899b655f760b0c5d79d3e029a3d09168 100644 (file)
@@ -1,3 +1,3 @@
-arch/x86/insn/inat-tables.c
+arch/x86/lib/inat-tables.c
 objtool
 fixdep
index 424b1965d06f2f95d701d19284cc0f3f7e6e82ea..ae0272f9a09184db54933fe079b9cf529bcf47d5 100644 (file)
@@ -7,9 +7,11 @@ ARCH := x86
 endif
 
 # always use the host compiler
-CC = gcc
-LD = ld
-AR = ar
+HOSTCC ?= gcc
+HOSTLD ?= ld
+CC      = $(HOSTCC)
+LD      = $(HOSTLD)
+AR      = ar
 
 ifeq ($(srctree),)
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
@@ -25,7 +27,9 @@ OBJTOOL_IN := $(OBJTOOL)-in.o
 
 all: $(OBJTOOL)
 
-INCLUDES := -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi
+INCLUDES := -I$(srctree)/tools/include \
+           -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
+           -I$(srctree)/tools/objtool/arch/$(ARCH)/include
 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
 CFLAGS   += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
 LDFLAGS  += -lelf $(LIBSUBCMD)
@@ -41,22 +45,8 @@ include $(srctree)/tools/build/Makefile.include
 $(OBJTOOL_IN): fixdep FORCE
        @$(MAKE) $(build)=objtool
 
-# Busybox's diff doesn't have -I, avoid warning in that case
-#
 $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
-       @(diff -I 2>&1 | grep -q 'option requires an argument' && \
-       test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
-       diff -I'^#include' arch/x86/insn/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
-       diff -I'^#include' arch/x86/insn/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
-       diff arch/x86/insn/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
-       diff arch/x86/insn/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
-       diff -I'^#include' arch/x86/insn/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
-       diff -I'^#include' arch/x86/insn/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
-       diff -I'^#include' arch/x86/insn/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
-       || echo "warning: objtool: x86 instruction decoder differs from kernel" >&2 )) || true
-       @(test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
-       diff ../../arch/x86/include/asm/orc_types.h orc_types.h >/dev/null) \
-       || echo "warning: objtool: orc_types.h differs from kernel" >&2 )) || true
+       @./sync-check.sh
        $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
 
 
@@ -66,7 +56,7 @@ $(LIBSUBCMD): fixdep FORCE
 clean:
        $(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
        $(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
-       $(Q)$(RM) $(OUTPUT)arch/x86/insn/inat-tables.c $(OUTPUT)fixdep
+       $(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep
 
 FORCE:
 
index debbdb0b5c430b3a74143bd216db464956522733..b998412c017d9173d27b10a760899fd0c9efa32f 100644 (file)
@@ -1,12 +1,12 @@
 objtool-y += decode.o
 
-inat_tables_script = arch/x86/insn/gen-insn-attr-x86.awk
-inat_tables_maps = arch/x86/insn/x86-opcode-map.txt
+inat_tables_script = arch/x86/tools/gen-insn-attr-x86.awk
+inat_tables_maps = arch/x86/lib/x86-opcode-map.txt
 
-$(OUTPUT)arch/x86/insn/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
+$(OUTPUT)arch/x86/lib/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
        $(call rule_mkdir)
        $(Q)$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@
 
-$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/insn/inat-tables.c
+$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/lib/inat-tables.c
 
-CFLAGS_decode.o += -I$(OUTPUT)arch/x86/insn
+CFLAGS_decode.o += -I$(OUTPUT)arch/x86/lib
index 34a579f806e390337bdee738ae507364c02e7ad7..8acfc47af70efde4c1a3bb3ad6aff809f0ad0308 100644 (file)
@@ -19,9 +19,9 @@
 #include <stdlib.h>
 
 #define unlikely(cond) (cond)
-#include "insn/insn.h"
-#include "insn/inat.c"
-#include "insn/insn.c"
+#include <asm/insn.h>
+#include "lib/inat.c"
+#include "lib/insn.c"
 
 #include "../../elf.h"
 #include "../../arch.h"
similarity index 95%
rename from tools/objtool/arch/x86/insn/inat.h
rename to tools/objtool/arch/x86/include/asm/inat.h
index 125ecd2a300d78758d3968c9fcd5049dff88c457..1c78580e58bea3e83b79076409aaaab2343aa47f 100644 (file)
@@ -20,7 +20,7 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  */
-#include "inat_types.h"
+#include <asm/inat_types.h>
 
 /*
  * Internal bits. Don't use bitmasks directly, because these bits are
 #define INAT_MAKE_GROUP(grp)   ((grp << INAT_GRP_OFFS) | INAT_MODRM)
 #define INAT_MAKE_IMM(imm)     (imm << INAT_IMM_OFFS)
 
+/* Identifiers for segment registers */
+#define INAT_SEG_REG_IGNORE    0
+#define INAT_SEG_REG_DEFAULT   1
+#define INAT_SEG_REG_CS                2
+#define INAT_SEG_REG_SS                3
+#define INAT_SEG_REG_DS                4
+#define INAT_SEG_REG_ES                5
+#define INAT_SEG_REG_FS                6
+#define INAT_SEG_REG_GS                7
+
 /* Attribute search APIs */
 extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
 extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
similarity index 99%
rename from tools/objtool/arch/x86/insn/insn.h
rename to tools/objtool/arch/x86/include/asm/insn.h
index e23578c7b1be9d56398ccd33a2b3ef28633b4939..b3e32b010ab194ed613034234c403c4067502776 100644 (file)
@@ -21,7 +21,7 @@
  */
 
 /* insn_attr_t is defined in inat.h */
-#include "inat.h"
+#include <asm/inat.h>
 
 struct insn_field {
        union {
similarity index 99%
rename from tools/objtool/arch/x86/insn/inat.c
rename to tools/objtool/arch/x86/lib/inat.c
index e4bf28e6f4c7a5c851dd8a07052ea49255a24e22..c1f01a8e9f65ecd266b81a2806cf6ff56d3ed5ba 100644 (file)
@@ -18,7 +18,7 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  */
-#include "insn.h"
+#include <asm/insn.h>
 
 /* Attribute tables are generated from opcode map */
 #include "inat-tables.c"
similarity index 99%
rename from tools/objtool/arch/x86/insn/insn.c
rename to tools/objtool/arch/x86/lib/insn.c
index ca983e2bea8b2d6c577b284b6f354017d5bd18ca..1088eb8f3a5fea12bcd06cc11f5184bbf8e46eb9 100644 (file)
@@ -23,8 +23,8 @@
 #else
 #include <string.h>
 #endif
-#include "inat.h"
-#include "insn.h"
+#include <asm/inat.h>
+#include <asm/insn.h>
 
 /* Verify next sizeof(t) bytes can be on the same instruction */
 #define validate_next(t, insn, n)      \
similarity index 99%
rename from tools/objtool/arch/x86/insn/x86-opcode-map.txt
rename to tools/objtool/arch/x86/lib/x86-opcode-map.txt
index 12e377184ee4ad0c55d00c3784f08b393764a2bc..e0b85930dd773e87417e2b4957b8af61221b04c0 100644 (file)
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
 fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
 fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
 fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
-ff:
+ff: UD0
 EndTable
 
 Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
 7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
 80: INVEPT Gy,Mdq (66)
-81: INVPID Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
 82: INVPCID Gy,Mdq (66)
 83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
 88: vexpandps/d Vpd,Wpd (66),(ev)
@@ -896,7 +896,7 @@ EndTable
 
 GrpTable: Grp3_1
 0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
 2: NOT Eb
 3: NEG Eb
 4: MUL AL,Eb
@@ -970,6 +970,15 @@ GrpTable: Grp9
 EndTable
 
 GrpTable: Grp10
+# all are UD1
+0: UD1
+1: UD1
+2: UD1
+3: UD1
+4: UD1
+5: UD1
+6: UD1
+7: UD1
 EndTable
 
 # Grp11A and Grp11B are expressed as Grp11 in Intel SDM
index a4139e386ef37471e7c0f71e66b6df05739e2360..b0e92a6d0903b0b18b8e451194d3df73269d8162 100644 (file)
@@ -18,7 +18,7 @@
 #ifndef _ORC_H
 #define _ORC_H
 
-#include "orc_types.h"
+#include <asm/orc_types.h>
 
 struct objtool_file;
 
index 36c5bf6a2675143b788e89663ee4652efbbbb582..c3343820916a6dccf0e49bf50cbfbc811b4525fd 100644 (file)
@@ -76,7 +76,8 @@ int orc_dump(const char *_objname)
        int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;
        struct orc_entry *orc = NULL;
        char *name;
-       unsigned long nr_sections, orc_ip_addr = 0;
+       size_t nr_sections;
+       Elf64_Addr orc_ip_addr = 0;
        size_t shstrtab_idx;
        Elf *elf;
        Elf_Scn *scn;
@@ -187,10 +188,10 @@ int orc_dump(const char *_objname)
                                return -1;
                        }
 
-                       printf("%s+%lx:", name, rela.r_addend);
+                       printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
 
                } else {
-                       printf("%lx:", orc_ip_addr + (i * sizeof(int)) + orc_ip[i]);
+                       printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
                }
 
 
diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh
new file mode 100755 (executable)
index 0000000..1470e74
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+FILES='
+arch/x86/lib/insn.c
+arch/x86/lib/inat.c
+arch/x86/lib/x86-opcode-map.txt
+arch/x86/tools/gen-insn-attr-x86.awk
+arch/x86/include/asm/insn.h
+arch/x86/include/asm/inat.h
+arch/x86/include/asm/inat_types.h
+arch/x86/include/asm/orc_types.h
+'
+
+check()
+{
+       local file=$1
+
+       diff $file ../../$file > /dev/null ||
+               echo "Warning: synced file at 'tools/objtool/$file' differs from latest kernel version at '$file'"
+}
+
+if [ ! -d ../../kernel ] || [ ! -d ../../tools ] || [ ! -d ../objtool ]; then
+       exit 0
+fi
+
+for i in $FILES; do
+  check $i
+done
index 21322e0385b886667d7bbd9a17edddc1ad1b3c8f..09ba923debe86810f8380f7df54504dee4232ec8 100644 (file)
@@ -2,3 +2,4 @@ ifndef NO_DWARF
 PERF_HAVE_DWARF_REGS := 1
 endif
 HAVE_KVM_STAT_SUPPORT := 1
+PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
index f47576ce13ea9da3d6220894c8c1d191a5fe889b..a8ace5cc6301f184e5d37210e00402234a014a4b 100644 (file)
@@ -2,17 +2,43 @@
 /*
  * Mapping of DWARF debug register numbers into register names.
  *
- *    Copyright IBM Corp. 2010
- *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ * Copyright IBM Corp. 2010, 2017
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *           Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  *
  */
 
+#include <errno.h>
 #include <stddef.h>
-#include <dwarf-regs.h>
+#include <stdlib.h>
 #include <linux/kernel.h>
+#include <asm/ptrace.h>
+#include <string.h>
+#include <dwarf-regs.h>
 #include "dwarf-regs-table.h"
 
 const char *get_arch_regstr(unsigned int n)
 {
        return (n >= ARRAY_SIZE(s390_dwarf_regs)) ? NULL : s390_dwarf_regs[n];
 }
+
+/*
+ * Convert the register name into an offset into struct pt_regs (kernel).
+ * This is required by the BPF prologue generator.  The BPF
+ * program is called in the BPF overflow handler in the perf
+ * core.
+ */
+int regs_query_register_offset(const char *name)
+{
+       unsigned long gpr;
+
+       if (!name || strncmp(name, "%r", 2))
+               return -EINVAL;
+
+       errno = 0;
+       gpr = strtoul(name + 2, NULL, 10);
+       if (errno || gpr >= 16)
+               return -EINVAL;
+
+       return offsetof(user_pt_regs, gprs) + 8 * gpr;
+}
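
The new regs_query_register_offset() above accepts only names of the form "%rN" and leans on the errno convention of strtoul() to reject anything else before computing the pt_regs offset (8 bytes per GPR, 16 GPRs on s390). A minimal user-space sketch of just that parsing step, outside of perf (parse_gpr() is an illustrative name, not a perf function; the struct-offset part is left out):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Return the GPR number encoded in a "%rN" name, or -EINVAL. */
    static int parse_gpr(const char *name)
    {
        unsigned long gpr;

        if (!name || strncmp(name, "%r", 2))
            return -EINVAL;

        errno = 0;
        gpr = strtoul(name + 2, NULL, 10);
        if (errno || gpr >= 16)
            return -EINVAL;

        return (int)gpr;
    }

    int main(void)
    {
        printf("%%r5  -> %d\n", parse_gpr("%r5"));   /* 5 */
        printf("%%r42 -> %d\n", parse_gpr("%r42"));  /* -EINVAL: out of range */
        return 0;
    }
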
index d95fdcc26f4b6f38cffb4f7a727d3c7456cbb357..944070e98a2cd9dc4903e3be02278b1a7f8f3500 100644 (file)
@@ -216,6 +216,47 @@ static const char * const numa_usage[] = {
        NULL
 };
 
+/*
+ * To get the number of numa nodes present.
+ */
+static int nr_numa_nodes(void)
+{
+       int i, nr_nodes = 0;
+
+       for (i = 0; i < g->p.nr_nodes; i++) {
+               if (numa_bitmask_isbitset(numa_nodes_ptr, i))
+                       nr_nodes++;
+       }
+
+       return nr_nodes;
+}
+
+/*
+ * To check if given numa node is present.
+ */
+static int is_node_present(int node)
+{
+       return numa_bitmask_isbitset(numa_nodes_ptr, node);
+}
+
+/*
+ * To check given numa node has cpus.
+ */
+static bool node_has_cpus(int node)
+{
+       struct bitmask *cpu = numa_allocate_cpumask();
+       unsigned int i;
+
+       if (cpu && !numa_node_to_cpus(node, cpu)) {
+               for (i = 0; i < cpu->size; i++) {
+                       if (numa_bitmask_isbitset(cpu, i))
+                               return true;
+               }
+       }
+
+       return false; /* let's fall back to nocpus safely */
+}
+
 static cpu_set_t bind_to_cpu(int target_cpu)
 {
        cpu_set_t orig_mask, mask;
@@ -244,12 +285,12 @@ static cpu_set_t bind_to_cpu(int target_cpu)
 
 static cpu_set_t bind_to_node(int target_node)
 {
-       int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes;
+       int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
        cpu_set_t orig_mask, mask;
        int cpu;
        int ret;
 
-       BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus);
+       BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
        BUG_ON(!cpus_per_node);
 
        ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
@@ -649,7 +690,7 @@ static int parse_setup_node_list(void)
                        int i;
 
                        for (i = 0; i < mul; i++) {
-                               if (t >= g->p.nr_tasks) {
+                               if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
                                        printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
                                        goto out;
                                }
@@ -964,6 +1005,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
        sum = 0;
 
        for (node = 0; node < g->p.nr_nodes; node++) {
+               if (!is_node_present(node))
+                       continue;
                nr = nodes[node];
                nr_min = min(nr, nr_min);
                nr_max = max(nr, nr_max);
@@ -984,8 +1027,11 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
        process_groups = 0;
 
        for (node = 0; node < g->p.nr_nodes; node++) {
-               int processes = count_node_processes(node);
+               int processes;
 
+               if (!is_node_present(node))
+                       continue;
+               processes = count_node_processes(node);
                nr = nodes[node];
                tprintf(" %2d/%-2d", nr, processes);
 
@@ -1291,7 +1337,7 @@ static void print_summary(void)
 
        printf("\n ###\n");
        printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
-               g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus);
+               g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
        printf(" #      %5dx %5ldMB global  shared mem operations\n",
                        g->p.nr_loops, g->p.bytes_global/1024/1024);
        printf(" #      %5dx %5ldMB process shared mem operations\n",
index bd1fedef3d1c5d67a8ab6954467c24da622f2a27..a0f7ed2b869b0304408483e5660efa9167794a0e 100644 (file)
@@ -284,7 +284,7 @@ static int perf_help_config(const char *var, const char *value, void *cb)
                add_man_viewer(value);
                return 0;
        }
-       if (!strstarts(var, "man."))
+       if (strstarts(var, "man."))
                return add_man_viewer_info(var, value);
 
        return 0;
@@ -314,7 +314,7 @@ static const char *cmd_to_page(const char *perf_cmd)
 
        if (!perf_cmd)
                return "perf";
-       else if (!strstarts(perf_cmd, "perf"))
+       else if (strstarts(perf_cmd, "perf"))
                return perf_cmd;
 
        return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s;
index 3d7f33e19df28d90c7e33dcc37ea823197de6731..003255910c05df9104eafd2c012267c96523118b 100644 (file)
@@ -339,6 +339,22 @@ static int record__open(struct record *rec)
        struct perf_evsel_config_term *err_term;
        int rc = 0;
 
+       /*
+        * For initial_delay we need to add a dummy event so that we can track
+        * PERF_RECORD_MMAP while we wait for the initial delay to enable the
+        * real events, the ones asked by the user.
+        */
+       if (opts->initial_delay) {
+               if (perf_evlist__add_dummy(evlist))
+                       return -ENOMEM;
+
+               pos = perf_evlist__first(evlist);
+               pos->tracking = 0;
+               pos = perf_evlist__last(evlist);
+               pos->tracking = 1;
+               pos->attr.enable_on_exec = 1;
+       }
+
        perf_evlist__config(evlist, opts, &callchain_param);
 
        evlist__for_each_entry(evlist, pos) {
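
The comment added above is the heart of the --delay fix: the real events stay disabled until the initial delay expires, so without a tracking event perf would lose the PERF_RECORD_MMAP/COMM side-band data emitted in the meantime, and a software "dummy" event is enabled at exec time purely to collect it. A rough standalone sketch of what such a tracking-only event looks like at the perf_event_open() level (an illustration, not perf's internal code; perf itself sets the side-band bits via the evsel->tracking flag):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    static int open_dummy_tracking_event(pid_t pid)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size           = sizeof(attr);
        attr.type           = PERF_TYPE_SOFTWARE;
        attr.config         = PERF_COUNT_SW_DUMMY; /* counts nothing by itself */
        attr.mmap           = 1;    /* ...but still delivers PERF_RECORD_MMAP */
        attr.comm           = 1;    /* ...and PERF_RECORD_COMM */
        attr.task           = 1;
        attr.disabled       = 1;    /* start disabled... */
        attr.enable_on_exec = 1;    /* ...and kick in when the workload execs */

        /* glibc provides no wrapper for perf_event_open(). */
        return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
    }
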
@@ -749,17 +765,19 @@ static int record__synthesize(struct record *rec, bool tail)
                        goto out;
        }
 
-       err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-                                                machine);
-       WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
-                          "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
-                          "Check /proc/kallsyms permission or run as root.\n");
-
-       err = perf_event__synthesize_modules(tool, process_synthesized_event,
-                                            machine);
-       WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
-                          "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
-                          "Check /proc/modules permission or run as root.\n");
+       if (!perf_evlist__exclude_kernel(rec->evlist)) {
+               err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+                                                        machine);
+               WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
+                                  "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+                                  "Check /proc/kallsyms permission or run as root.\n");
+
+               err = perf_event__synthesize_modules(tool, process_synthesized_event,
+                                                    machine);
+               WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
+                                  "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+                                  "Check /proc/modules permission or run as root.\n");
+       }
 
        if (perf_guest) {
                machines__process_guests(&session->machines,
@@ -1693,7 +1711,7 @@ int cmd_record(int argc, const char **argv)
 
        err = -ENOMEM;
 
-       if (symbol_conf.kptr_restrict)
+       if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
                pr_warning(
 "WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
 "check /proc/sys/kernel/kptr_restrict.\n\n"
index 1394cd8d96f7bb8d132e2a5cf892090690a19920..af5dd038195e3f6e3a6877d5884efda263e46843 100644 (file)
@@ -441,6 +441,9 @@ static void report__warn_kptr_restrict(const struct report *rep)
        struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
        struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
 
+       if (perf_evlist__exclude_kernel(rep->session->evlist))
+               return;
+
        if (kernel_map == NULL ||
            (kernel_map->dso->hit &&
             (kernel_kmap->ref_reloc_sym == NULL ||
index 68f36dc0344f2bb7d07a1bcdb05f8af4419d1d88..9b43bda45a415649a06c7b9743796bff7b760dbe 100644 (file)
@@ -1955,6 +1955,16 @@ static int perf_script__fopen_per_event_dump(struct perf_script *script)
        struct perf_evsel *evsel;
 
        evlist__for_each_entry(script->session->evlist, evsel) {
+               /*
+                * Already set up? I.e. we may be called twice in cases like
+                * Intel PT, one for the intel_pt// and dummy events, then
+                * for the evsels synthesized from the auxtrace info.
+                *
+                * See perf_script__process_auxtrace_info.
+                */
+               if (evsel->priv != NULL)
+                       continue;
+
                evsel->priv = perf_evsel_script__new(evsel, script->session->data);
                if (evsel->priv == NULL)
                        goto out_err_fclose;
@@ -2838,6 +2848,25 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
        return set_maps(script);
 }
 
+#ifdef HAVE_AUXTRACE_SUPPORT
+static int perf_script__process_auxtrace_info(struct perf_tool *tool,
+                                             union perf_event *event,
+                                             struct perf_session *session)
+{
+       int ret = perf_event__process_auxtrace_info(tool, event, session);
+
+       if (ret == 0) {
+               struct perf_script *script = container_of(tool, struct perf_script, tool);
+
+               ret = perf_script__setup_per_event_dump(script);
+       }
+
+       return ret;
+}
+#else
+#define perf_script__process_auxtrace_info 0
+#endif
+
 int cmd_script(int argc, const char **argv)
 {
        bool show_full_info = false;
@@ -2866,7 +2895,7 @@ int cmd_script(int argc, const char **argv)
                        .feature         = perf_event__process_feature,
                        .build_id        = perf_event__process_build_id,
                        .id_index        = perf_event__process_id_index,
-                       .auxtrace_info   = perf_event__process_auxtrace_info,
+                       .auxtrace_info   = perf_script__process_auxtrace_info,
                        .auxtrace        = perf_event__process_auxtrace,
                        .auxtrace_error  = perf_event__process_auxtrace_error,
                        .stat            = perf_event__process_stat_event,
index 477a8699f0b501e3c71a711fd6a5a336645a1c46..9e0d2645ae13ad40624069077cadb7650b3245b0 100644 (file)
@@ -77,6 +77,7 @@
 #include "sane_ctype.h"
 
 static volatile int done;
+static volatile int resize;
 
 #define HEADER_LINE_NR  5
 
@@ -85,11 +86,13 @@ static void perf_top__update_print_entries(struct perf_top *top)
        top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
 }
 
-static void perf_top__sig_winch(int sig __maybe_unused,
-                               siginfo_t *info __maybe_unused, void *arg)
+static void winch_sig(int sig __maybe_unused)
 {
-       struct perf_top *top = arg;
+       resize = 1;
+}
 
+static void perf_top__resize(struct perf_top *top)
+{
        get_term_dimensions(&top->winsize);
        perf_top__update_print_entries(top);
 }
@@ -473,12 +476,8 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
                case 'e':
                        prompt_integer(&top->print_entries, "Enter display entries (lines)");
                        if (top->print_entries == 0) {
-                               struct sigaction act = {
-                                       .sa_sigaction = perf_top__sig_winch,
-                                       .sa_flags     = SA_SIGINFO,
-                               };
-                               perf_top__sig_winch(SIGWINCH, NULL, top);
-                               sigaction(SIGWINCH, &act, NULL);
+                               perf_top__resize(top);
+                               signal(SIGWINCH, winch_sig);
                        } else {
                                signal(SIGWINCH, SIG_DFL);
                        }
@@ -732,14 +731,16 @@ static void perf_event__process_sample(struct perf_tool *tool,
        if (!machine->kptr_restrict_warned &&
            symbol_conf.kptr_restrict &&
            al.cpumode == PERF_RECORD_MISC_KERNEL) {
-               ui__warning(
+               if (!perf_evlist__exclude_kernel(top->session->evlist)) {
+                       ui__warning(
 "Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
 "Check /proc/sys/kernel/kptr_restrict.\n\n"
 "Kernel%s samples will not be resolved.\n",
                          al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
                          " modules" : "");
-               if (use_browser <= 0)
-                       sleep(5);
+                       if (use_browser <= 0)
+                               sleep(5);
+               }
                machine->kptr_restrict_warned = true;
        }
 
@@ -1030,6 +1031,11 @@ static int __cmd_top(struct perf_top *top)
 
                if (hits == top->samples)
                        ret = perf_evlist__poll(top->evlist, 100);
+
+               if (resize) {
+                       perf_top__resize(top);
+                       resize = 0;
+               }
        }
 
        ret = 0;
@@ -1352,12 +1358,8 @@ int cmd_top(int argc, const char **argv)
 
        get_term_dimensions(&top.winsize);
        if (top.print_entries == 0) {
-               struct sigaction act = {
-                       .sa_sigaction = perf_top__sig_winch,
-                       .sa_flags     = SA_SIGINFO,
-               };
                perf_top__update_print_entries(&top);
-               sigaction(SIGWINCH, &act, NULL);
+               signal(SIGWINCH, winch_sig);
        }
 
        status = __cmd_top(&top);
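
The top changes above replace the SA_SIGINFO handler that resized the display directly with a handler that only sets a flag; the actual get_term_dimensions()/perf_top__resize() work now happens in the main loop, where calling non-reentrant code is safe. A minimal sketch of that deferred-resize pattern in isolation (names are illustrative):

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile sig_atomic_t resize_pending;

    static void winch_sig(int sig)
    {
        (void)sig;
        resize_pending = 1;     /* only touch a flag inside the handler */
    }

    int main(void)
    {
        signal(SIGWINCH, winch_sig);

        for (;;) {
            /* ... the normal event loop work goes here ... */
            sleep(1);

            if (resize_pending) {
                resize_pending = 0;
                /* safe context: query the new terminal size here */
                printf("terminal resized\n");
            }
        }
    }
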
index f2757d38c7d7054d4dcef7d7e90f25efc115a8f7..84debdbad32717ce3f5a00cf646d778527cc6302 100644 (file)
@@ -1152,12 +1152,14 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
        if (trace->host == NULL)
                return -ENOMEM;
 
-       if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0)
-               return -errno;
+       err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
+       if (err < 0)
+               goto out;
 
        err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
                                            evlist->threads, trace__tool_process, false,
                                            trace->opts.proc_map_timeout, 1);
+out:
        if (err)
                symbol__exit();
 
index 77406d25e5218023bf15277f293338e4279fe144..6db9d809fe9722a9e4eb0afb5140443490aa43e3 100755 (executable)
@@ -30,6 +30,7 @@ arch/x86/include/uapi/asm/vmx.h
 arch/powerpc/include/uapi/asm/kvm.h
 arch/s390/include/uapi/asm/kvm.h
 arch/s390/include/uapi/asm/kvm_perf.h
+arch/s390/include/uapi/asm/ptrace.h
 arch/s390/include/uapi/asm/sie.h
 arch/arm/include/uapi/asm/kvm.h
 arch/arm64/include/uapi/asm/kvm.h
index 7a84d73324e3c1209781296dcc54260745150b53..8b3da21a08f19a110a3e9c2519495460da2eb310 100755 (executable)
@@ -10,8 +10,8 @@
 
 . $(dirname $0)/lib/probe.sh
 
-ld=$(realpath /lib64/ld*.so.* | uniq)
-libc=$(echo $ld | sed 's/ld/libc/g')
+libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
+nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254
 
 trace_libc_inet_pton_backtrace() {
        idx=0
@@ -37,6 +37,9 @@ trace_libc_inet_pton_backtrace() {
        done
 }
 
+# Check for IPv6 interface existence
+ip a sh lo | fgrep -q inet6 || exit 2
+
 skip_if_no_perf_probe && \
 perf probe -q $libc inet_pton && \
 trace_libc_inet_pton_backtrace
index 2e68c5f120da87250a87b5067793e2ca75ced60c..2a9ef080efd028a1038ab214c05012f7d9186aa7 100755 (executable)
@@ -17,8 +17,10 @@ skip_if_no_perf_probe || exit 2
 file=$(mktemp /tmp/temporary_file.XXXXX)
 
 trace_open_vfs_getname() {
-       perf trace -e open touch $file 2>&1 | \
-       egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open\(filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
+       test "$(uname -m)" = s390x && { svc="openat"; txt="dfd: +CWD, +"; }
+
+       perf trace -e ${svc:-open} touch $file 2>&1 | \
+       egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ ${svc:-open}\(${txt}filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
 }
 
 
index bc4a7344e274255141e54e8ec28f47a0d12e6819..89c8e1604ca73ab277b4cf988d08400c527f315f 100644 (file)
@@ -84,7 +84,11 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
 
        evsel = perf_evlist__first(evlist);
        evsel->attr.task = 1;
+#ifdef __s390x__
+       evsel->attr.sample_freq = 1000000;
+#else
        evsel->attr.sample_freq = 1;
+#endif
        evsel->attr.inherit = 0;
        evsel->attr.watermark = 0;
        evsel->attr.wakeup_events = 1;
index 9e1668b2c5d7cf86f25a5fa6453026a6768d2950..417e3ecfe9d730fc1c02e2c2e24fe36e23fbf83a 100644 (file)
@@ -62,6 +62,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
        P_MMAP_FLAG(POPULATE);
        P_MMAP_FLAG(STACK);
        P_MMAP_FLAG(UNINITIALIZED);
+#ifdef MAP_SYNC
+       P_MMAP_FLAG(SYNC);
+#endif
 #undef P_MMAP_FLAG
 
        if (flags)
index da1c4c4a0dd842a279283f8b2b52bef10c5fda3d..3369c7830260d9bcce078adeeb02d72ac9344add 100644 (file)
@@ -165,7 +165,7 @@ static void ins__delete(struct ins_operands *ops)
 static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
                              struct ins_operands *ops)
 {
-       return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw);
+       return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
 }
 
 int ins__scnprintf(struct ins *ins, char *bf, size_t size,
@@ -230,12 +230,12 @@ static int call__scnprintf(struct ins *ins, char *bf, size_t size,
                           struct ins_operands *ops)
 {
        if (ops->target.name)
-               return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);
+               return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
 
        if (ops->target.addr == 0)
                return ins__raw_scnprintf(ins, bf, size, ops);
 
-       return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
+       return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
 }
 
 static struct ins_ops call_ops = {
@@ -299,7 +299,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
                        c++;
        }
 
-       return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64,
+       return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
                         ins->name, c ? c - ops->raw : 0, ops->raw,
                         ops->target.offset);
 }
@@ -372,7 +372,7 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
        if (ops->locked.ins.ops == NULL)
                return ins__raw_scnprintf(ins, bf, size, ops);
 
-       printed = scnprintf(bf, size, "%-6.6s ", ins->name);
+       printed = scnprintf(bf, size, "%-6s ", ins->name);
        return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
                                        size - printed, ops->locked.ops);
 }
@@ -448,7 +448,7 @@ out_free_source:
 static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
                           struct ins_operands *ops)
 {
-       return scnprintf(bf, size, "%-6.6s %s,%s", ins->name,
+       return scnprintf(bf, size, "%-6s %s,%s", ins->name,
                         ops->source.name ?: ops->source.raw,
                         ops->target.name ?: ops->target.raw);
 }
@@ -488,7 +488,7 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
 static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
                           struct ins_operands *ops)
 {
-       return scnprintf(bf, size, "%-6.6s %s", ins->name,
+       return scnprintf(bf, size, "%-6s %s", ins->name,
                         ops->target.name ?: ops->target.raw);
 }
 
@@ -500,7 +500,7 @@ static struct ins_ops dec_ops = {
 static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
                          struct ins_operands *ops __maybe_unused)
 {
-       return scnprintf(bf, size, "%-6.6s", "nop");
+       return scnprintf(bf, size, "%-6s", "nop");
 }
 
 static struct ins_ops nop_ops = {
@@ -924,7 +924,7 @@ void disasm_line__free(struct disasm_line *dl)
 int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
 {
        if (raw || !dl->ins.ops)
-               return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw);
+               return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
 
        return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
 }
index c6c891e154a63c67971c04512c0b576d5bf43087..b62e523a70352f40cdb4c90b7284858065d6fafa 100644 (file)
@@ -257,7 +257,7 @@ int perf_evlist__add_dummy(struct perf_evlist *evlist)
                .config = PERF_COUNT_SW_DUMMY,
                .size   = sizeof(attr), /* to capture ABI version */
        };
-       struct perf_evsel *evsel = perf_evsel__new(&attr);
+       struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);
 
        if (evsel == NULL)
                return -ENOMEM;
@@ -1786,3 +1786,15 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
 state_err:
        return;
 }
+
+bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel) {
+               if (!evsel->attr.exclude_kernel)
+                       return false;
+       }
+
+       return true;
+}
index e72ae64c11acb5214d82996d9c21498b1d34547e..491f69542920978f4be31dea49d8ae47aa98b155 100644 (file)
@@ -312,4 +312,6 @@ perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
 
 struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
                                            union perf_event *event);
+
+bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
 #endif /* __PERF_EVLIST_H */
index f894893c203d13e00259d356414a8b781066fa5a..d5fbcf8c7aa70138716f1d4e153f515a882c4ccb 100644 (file)
@@ -733,12 +733,16 @@ static void apply_config_terms(struct perf_evsel *evsel,
        list_for_each_entry(term, config_terms, list) {
                switch (term->type) {
                case PERF_EVSEL__CONFIG_TERM_PERIOD:
-                       attr->sample_period = term->val.period;
-                       attr->freq = 0;
+                       if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
+                               attr->sample_period = term->val.period;
+                               attr->freq = 0;
+                       }
                        break;
                case PERF_EVSEL__CONFIG_TERM_FREQ:
-                       attr->sample_freq = term->val.freq;
-                       attr->freq = 1;
+                       if (!(term->weak && opts->user_freq != UINT_MAX)) {
+                               attr->sample_freq = term->val.freq;
+                               attr->freq = 1;
+                       }
                        break;
                case PERF_EVSEL__CONFIG_TERM_TIME:
                        if (term->val.time)
@@ -1371,7 +1375,7 @@ perf_evsel__process_group_data(struct perf_evsel *leader,
 static int
 perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
 {
-       struct perf_stat_evsel *ps = leader->priv;
+       struct perf_stat_evsel *ps = leader->stats;
        u64 read_format = leader->attr.read_format;
        int size = perf_evsel__read_size(leader);
        u64 *data = ps->group_data;
index 9277df96ffdad90a4899e922b574156bb708ef86..157f49e8a772d7d7a1bcc53a22d41f02e3397e78 100644 (file)
@@ -67,6 +67,7 @@ struct perf_evsel_config_term {
                bool    overwrite;
                char    *branch;
        } val;
+       bool weak;
 };
 
 struct perf_stat_evsel;
index 125ecd2a300d78758d3968c9fcd5049dff88c457..52dc8d911173e917124301d80052b6c4fbfb40cc 100644 (file)
 #define INAT_MAKE_GROUP(grp)   ((grp << INAT_GRP_OFFS) | INAT_MODRM)
 #define INAT_MAKE_IMM(imm)     (imm << INAT_IMM_OFFS)
 
+/* Identifiers for segment registers */
+#define INAT_SEG_REG_IGNORE    0
+#define INAT_SEG_REG_DEFAULT   1
+#define INAT_SEG_REG_CS                2
+#define INAT_SEG_REG_SS                3
+#define INAT_SEG_REG_DS                4
+#define INAT_SEG_REG_ES                5
+#define INAT_SEG_REG_FS                6
+#define INAT_SEG_REG_GS                7
+
 /* Attribute search APIs */
 extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
 extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
index 12e377184ee4ad0c55d00c3784f08b393764a2bc..e0b85930dd773e87417e2b4957b8af61221b04c0 100644 (file)
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
 fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
 fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
 fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
-ff:
+ff: UD0
 EndTable
 
 Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
 7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
 80: INVEPT Gy,Mdq (66)
-81: INVPID Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
 82: INVPCID Gy,Mdq (66)
 83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
 88: vexpandps/d Vpd,Wpd (66),(ev)
@@ -896,7 +896,7 @@ EndTable
 
 GrpTable: Grp3_1
 0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
 2: NOT Eb
 3: NEG Eb
 4: MUL AL,Eb
@@ -970,6 +970,15 @@ GrpTable: Grp9
 EndTable
 
 GrpTable: Grp10
+# all are UD1
+0: UD1
+1: UD1
+2: UD1
+3: UD1
+4: UD1
+5: UD1
+6: UD1
+7: UD1
 EndTable
 
 # Grp11A and Grp11B are expressed as Grp11 in Intel SDM
index 6a8d03c3d9b7095961b07a4d648eadc6f857e868..270f3223c6df15cef58fd4d1c4efdc27dea3a520 100644 (file)
@@ -172,6 +172,9 @@ void machine__exit(struct machine *machine)
 {
        int i;
 
+       if (machine == NULL)
+               return;
+
        machine__destroy_kernel_maps(machine);
        map_groups__exit(&machine->kmaps);
        dsos__exit(&machine->dsos);
index efd78b827b0514275f9388b2c64329c45c025248..3a5cb5a6e94ad8fc039dc3410a6724263057a970 100644 (file)
@@ -70,7 +70,7 @@ void perf_mmap__read_catchup(struct perf_mmap *md);
 static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
 {
        struct perf_event_mmap_page *pc = mm->base;
-       u64 head = ACCESS_ONCE(pc->data_head);
+       u64 head = READ_ONCE(pc->data_head);
        rmb();
        return head;
 }
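
The one-line change above only swaps ACCESS_ONCE() for READ_ONCE(); the protocol stays the same: load data_head once, order that load before reading the ring-buffer contents (the rmb()), consume records, then publish the new data_tail so the kernel may reuse the space. A compact sketch of the same consumer protocol using the GCC/Clang __atomic builtins as a stand-in for the tools/ macros (an assumption for illustration, not what perf itself compiles):

    #include <stdint.h>
    #include <linux/perf_event.h>

    static uint64_t ring_read_head(struct perf_event_mmap_page *pc)
    {
        /* acquire: data the kernel wrote before advancing data_head
         * is guaranteed to be visible after this load */
        return __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
    }

    static void ring_write_tail(struct perf_event_mmap_page *pc, uint64_t tail)
    {
        /* release: our reads of the consumed records complete before
         * the kernel can observe the new data_tail */
        __atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
    }
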
index a7fcd95961ef0776f07c6968088a7b6cf7836924..170316795a1845c7e013d84ee17ba663da200d6d 100644 (file)
@@ -1116,6 +1116,7 @@ do {                                                              \
        INIT_LIST_HEAD(&__t->list);                             \
        __t->type       = PERF_EVSEL__CONFIG_TERM_ ## __type;   \
        __t->val.__name = __val;                                \
+       __t->weak       = term->weak;                           \
        list_add_tail(&__t->list, head_terms);                  \
 } while (0)
 
@@ -2410,6 +2411,7 @@ static int new_term(struct parse_events_term **_term,
 
        *term = *temp;
        INIT_LIST_HEAD(&term->list);
+       term->weak = false;
 
        switch (term->type_val) {
        case PARSE_EVENTS__TERM_TYPE_NUM:
index be337c266697a718d3ad595a166f8f3e3602cb93..88108cd11b4c80132d7b9b80f1b53d727d2bac1b 100644 (file)
@@ -101,6 +101,9 @@ struct parse_events_term {
        /* error string indexes for within parsed string */
        int err_term;
        int err_val;
+
+       /* Coming from implicit alias */
+       bool weak;
 };
 
 struct parse_events_error {
index 07cb2ac041d7a63b53298b4205fd0b4cd629c349..80fb1593913a9227a742a1a2ed56330c877facad 100644 (file)
@@ -405,6 +405,11 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
                        parse_events_terms__purge(&list);
                        return ret;
                }
+               /*
+                * Terms coming implicitly from an alias are marked weak so
+                * that they do not override explicit command line options.
+                */
+               cloned->weak = true;
                list_add_tail(&cloned->list, &list);
        }
        list_splice(&list, terms);
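
The effect of marking alias terms weak is visible in the evsel.c hunk earlier in this series: a weak period/freq term is applied only when the user did not pass an explicit interval or frequency. A condensed sketch of that precedence rule (simplified types, not the real perf structures; "some_pmu_alias" below is a made-up event name):

    #include <limits.h>
    #include <stdbool.h>

    struct term    { unsigned long long period; bool weak; };
    struct options { unsigned long long user_interval; }; /* ULLONG_MAX = unset */

    /* Apply an alias-supplied period term only if it is not weak or the
     * user gave no explicit period (-c/--count) on the command line. */
    static bool period_term_applies(const struct term *t, const struct options *o)
    {
        return !(t->weak && o->user_interval != ULLONG_MAX);
    }

So "perf record -c 100000 -e some_pmu_alias" keeps the user's period even if the alias expands to an implicit period=... term.
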
index da205d1fa03c546c4972d1e696a5eeeaab1d9711..1dd5f4fcffd53f375ba00479bfed37d867399c4a 100644 (file)
@@ -26,7 +26,7 @@ endif
 
 ifneq ($(OUTPUT),)
 # check that the output directory actually exists
-OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+OUTDIR := $(shell cd $(OUTPUT) && pwd)
 $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
 endif
 
index c25a74ae51baef13bfa5609d2957af76941597f6..2bb3eef7d5c1fbf36d420be2801e74b4f7eeb049 100644 (file)
@@ -61,7 +61,7 @@ int set_cpufreq_governor(char *governor, unsigned int cpu)
 
        dprintf("set %s as cpufreq governor\n", governor);
 
-       if (cpupower_is_cpu_online(cpu) != 0) {
+       if (cpupower_is_cpu_online(cpu) != 1) {
                perror("cpufreq_cpu_exists");
                fprintf(stderr, "error: cpu %u does not exist\n", cpu);
                return -1;
index 1b5da0066ebf90bfe4c441fab62fd26b4cc99267..5b3205f1621749bb6ebc340413ae16d957064cb9 100644 (file)
@@ -130,15 +130,18 @@ static struct cpuidle_monitor *cpuidle_register(void)
 {
        int num;
        char *tmp;
+       int this_cpu;
+
+       this_cpu = sched_getcpu();
 
        /* Assume idle state count is the same for all CPUs */
-       cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0);
+       cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);
 
        if (cpuidle_sysfs_monitor.hw_states_num <= 0)
                return NULL;
 
        for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
-               tmp = cpuidle_state_name(0, num);
+               tmp = cpuidle_state_name(this_cpu, num);
                if (tmp == NULL)
                        continue;
 
@@ -146,7 +149,7 @@ static struct cpuidle_monitor *cpuidle_register(void)
                strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);
                free(tmp);
 
-               tmp = cpuidle_state_desc(0, num);
+               tmp = cpuidle_state_desc(this_cpu, num);
                if (tmp == NULL)
                        continue;
                strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1);
index 654efd9768fd3687924d6c2caf75a9b5425b8225..3fab179b1abac797a55952dd69b78848f74fdc64 100644 (file)
@@ -13,7 +13,7 @@ endif
 
 # check that the output directory actually exists
 ifneq ($(OUTPUT),)
-OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+OUTDIR := $(shell cd $(OUTPUT) && pwd)
 $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
 endif
 
index 333a48655ee0a4cfadd89a478b09f1be7dba42b5..05fc4e2e7b3a06523cce6adde01fbdf03b8bdac0 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
+
 LIBDIR := ../../../lib
 BPFDIR := $(LIBDIR)/bpf
 APIDIR := ../../../include/uapi
@@ -10,7 +11,7 @@ ifneq ($(wildcard $(GENHDR)),)
 endif
 
 CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
-LDLIBS += -lcap -lelf
+LDLIBS += -lcap -lelf -lrt
 
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
        test_align test_verifier_log test_dev_cgroup
index 69427531408dd22ef1887d473ab4bf6548e173b9..6761be18a91fccc2d4f8ad52b0f83fec293189ae 100644 (file)
@@ -351,7 +351,7 @@ static void test_bpf_obj_id(void)
                          info_len != sizeof(struct bpf_map_info) ||
                          strcmp((char *)map_infos[i].name, expected_map_name),
                          "get-map-info(fd)",
-                         "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
+                         "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
                          err, errno,
                          map_infos[i].type, BPF_MAP_TYPE_ARRAY,
                          info_len, sizeof(struct bpf_map_info),
@@ -395,7 +395,7 @@ static void test_bpf_obj_id(void)
                          *(int *)prog_infos[i].map_ids != map_infos[i].id ||
                          strcmp((char *)prog_infos[i].name, expected_prog_name),
                          "get-prog-info(fd)",
-                         "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
+                         "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
                          err, errno, i,
                          prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
                          info_len, sizeof(struct bpf_prog_info),
@@ -463,7 +463,7 @@ static void test_bpf_obj_id(void)
                      memcmp(&prog_info, &prog_infos[i], info_len) ||
                      *(int *)prog_info.map_ids != saved_map_id,
                      "get-prog-info(next_id->fd)",
-                     "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n",
+                     "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
                      err, errno, info_len, sizeof(struct bpf_prog_info),
                      memcmp(&prog_info, &prog_infos[i], info_len),
                      *(int *)prog_info.map_ids, saved_map_id);
@@ -509,7 +509,7 @@ static void test_bpf_obj_id(void)
                      memcmp(&map_info, &map_infos[i], info_len) ||
                      array_value != array_magic_value,
                      "check get-map-info(next_id->fd)",
-                     "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n",
+                     "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
                      err, errno, info_len, sizeof(struct bpf_map_info),
                      memcmp(&map_info, &map_infos[i], info_len),
                      array_value, array_magic_value);
index 3c64f30cf63cc2b6adb532a3b1f3201533193f7f..b51017404c62d0dc8198afdf035016f6e5e2fd0b 100644 (file)
@@ -422,9 +422,7 @@ static struct bpf_test tests[] = {
                        BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr_unpriv = "R1 subtraction from stack pointer",
-               .result_unpriv = REJECT,
-               .errstr = "R1 invalid mem access",
+               .errstr = "R1 subtraction from stack pointer",
                .result = REJECT,
        },
        {
@@ -606,7 +604,6 @@ static struct bpf_test tests[] = {
                },
                .errstr = "misaligned stack access",
                .result = REJECT,
-               .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
        },
        {
                "invalid map_fd for function call",
@@ -1797,7 +1794,6 @@ static struct bpf_test tests[] = {
                },
                .result = REJECT,
                .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
-               .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
        },
        {
                "PTR_TO_STACK store/load - bad alignment on reg",
@@ -1810,7 +1806,6 @@ static struct bpf_test tests[] = {
                },
                .result = REJECT,
                .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
-               .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
        },
        {
                "PTR_TO_STACK store/load - out of bounds low",
@@ -1862,9 +1857,8 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
-               .result_unpriv = REJECT,
-               .errstr_unpriv = "R1 pointer += pointer",
+               .result = REJECT,
+               .errstr = "R1 pointer += pointer",
        },
        {
                "unpriv: neg pointer",
@@ -2592,7 +2586,8 @@ static struct bpf_test tests[] = {
                        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                                    offsetof(struct __sk_buff, data)),
                        BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, len)),
                        BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
                        BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
                        BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
@@ -2899,7 +2894,7 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "invalid access to packet",
+               .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
@@ -3885,9 +3880,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3, 11 },
-               .errstr_unpriv = "R0 pointer += pointer",
-               .errstr = "R0 invalid mem access 'inv'",
-               .result_unpriv = REJECT,
+               .errstr = "R0 pointer += pointer",
                .result = REJECT,
                .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
@@ -3928,7 +3921,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R4 invalid mem access",
+               .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS
        },
@@ -3949,7 +3942,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R4 invalid mem access",
+               .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS
        },
@@ -3970,7 +3963,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R4 invalid mem access",
+               .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS
        },
@@ -5195,10 +5188,8 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 bitwise operator &= on pointer",
-               .errstr = "invalid mem access 'inv'",
+               .errstr = "R0 bitwise operator &= on pointer",
                .result = REJECT,
-               .result_unpriv = REJECT,
        },
        {
                "map element value illegal alu op, 2",
@@ -5214,10 +5205,8 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
-               .errstr = "invalid mem access 'inv'",
+               .errstr = "R0 32-bit pointer arithmetic prohibited",
                .result = REJECT,
-               .result_unpriv = REJECT,
        },
        {
                "map element value illegal alu op, 3",
@@ -5233,10 +5222,8 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 pointer arithmetic with /= operator",
-               .errstr = "invalid mem access 'inv'",
+               .errstr = "R0 pointer arithmetic with /= operator",
                .result = REJECT,
-               .result_unpriv = REJECT,
        },
        {
                "map element value illegal alu op, 4",
@@ -6019,8 +6006,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map_in_map = { 3 },
-               .errstr = "R1 type=inv expected=map_ptr",
-               .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
+               .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
                .result = REJECT,
        },
        {
@@ -6116,6 +6102,30 @@ static struct bpf_test tests[] = {
                },
                .result = ACCEPT,
        },
+       {
+               "ld_abs: tests on r6 and skb data reload helper",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_LD_ABS(BPF_B, 0),
+                       BPF_LD_ABS(BPF_H, 0),
+                       BPF_LD_ABS(BPF_W, 0),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_6, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+                       BPF_MOV64_IMM(BPF_REG_2, 1),
+                       BPF_MOV64_IMM(BPF_REG_3, 2),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_vlan_push),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+                       BPF_LD_ABS(BPF_B, 0),
+                       BPF_LD_ABS(BPF_H, 0),
+                       BPF_LD_ABS(BPF_W, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 42),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+       },
        {
                "ld_ind: check calling conv, r1",
                .insns = {
@@ -6300,7 +6310,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6324,7 +6334,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6350,7 +6360,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R8 invalid mem access 'inv'",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6375,7 +6385,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R8 invalid mem access 'inv'",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6423,7 +6433,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6494,7 +6504,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6545,7 +6555,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6572,7 +6582,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6598,7 +6608,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6627,7 +6637,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6657,7 +6667,7 @@ static struct bpf_test tests[] = {
                        BPF_JMP_IMM(BPF_JA, 0, 0, -7),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6685,8 +6695,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr_unpriv = "R0 pointer comparison prohibited",
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
                .result_unpriv = REJECT,
        },
@@ -6741,6 +6750,462 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
                .result = REJECT,
        },
+       {
+               "bounds check based on zero-extended MOV",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       /* r2 = 0x0000'0000'ffff'ffff */
+                       BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
+                       /* r2 = 0 */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+                       /* no-op */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       /* access at offset 0 */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT
+       },
+       {
+               "bounds check based on sign-extended MOV. test1",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       /* r2 = 0xffff'ffff'ffff'ffff */
+                       BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+                       /* r2 = 0xffff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+                       /* r0 = <oob pointer> */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       /* access to OOB pointer */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "map_value pointer and 4294967295",
+               .result = REJECT
+       },
+       {
+               "bounds check based on sign-extended MOV. test2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       /* r2 = 0xffff'ffff'ffff'ffff */
+                       BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+                       /* r2 = 0xfff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
+                       /* r0 = <oob pointer> */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       /* access to OOB pointer */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "R0 min value is outside of the array range",
+               .result = REJECT
+       },
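
The two sign-extension tests above turn on the difference between BPF_MOV32_IMM, which zero-extends its 32-bit immediate, and BPF_MOV64_IMM, which sign-extends it, before the logical right shift. As an illustrative aside (not part of the diff), the same arithmetic in a stand-alone user-space C sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int32_t imm = (int32_t)0xffffffff;      /* stored as -1 */
            uint64_t zext = (uint32_t)imm;          /* MOV32: 0x00000000ffffffff */
            uint64_t sext = (int64_t)imm;           /* MOV64: 0xffffffffffffffff */

            /* after a logical right shift by 32 bits */
            printf("zext >> 32 = %#llx\n", (unsigned long long)(zext >> 32)); /* 0 */
            printf("sext >> 32 = %#llx\n", (unsigned long long)(sext >> 32)); /* 0xffffffff */
            return 0;
    }

Adding the first result to a map_value pointer is a no-op; adding the second produces the out-of-bounds offset 4294967295 that the errstr above refers to.
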
+       {
+               "bounds check based on reg_off + var_off + insn_off. test1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr = "value_size=8 off=1073741825",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "bounds check based on reg_off + var_off + insn_off. test2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr = "value 1073741823",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "bounds check after truncation of non-boundary-crossing range",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_2, 1),
+                       /* r2 = 0x10'0000'0000 */
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
+                       /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+                       /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
+                       /* r1 = 0 */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* no-op */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* access at offset 0 */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT
+       },
+       {
+               "bounds check after truncation of boundary-crossing range (1)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0xffff'ffff] or
+                        *      [0x0000'0000, 0x0000'007f]
+                        */
+                       BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0x00, 0xff] or
+                        *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = 0 or
+                        *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* no-op or OOB pointer computation */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               /* not actually fully unbounded, but the bound is very high */
+               .errstr = "R0 unbounded memory access",
+               .result = REJECT
+       },
+       {
+               "bounds check after truncation of boundary-crossing range (2)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0xffff'ffff] or
+                        *      [0x0000'0000, 0x0000'007f]
+                        * difference to previous test: truncation via MOV32
+                        * instead of ALU32.
+                        */
+                       BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0x00, 0xff] or
+                        *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = 0 or
+                        *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* no-op or OOB pointer computation */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               /* not actually fully unbounded, but the bound is very high */
+               .errstr = "R0 unbounded memory access",
+               .result = REJECT
+       },
+       {
+               "bounds check after wrapping 32-bit addition",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       /* r1 = 0x7fff'ffff */
+                       BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
+                       /* r1 = 0xffff'fffe */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       /* r1 = 0 */
+                       BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
+                       /* no-op */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* access at offset 0 */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT
+       },
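
The ACCEPT cases in the truncation and wrapping tests above rely on 32-bit ALU operations keeping only the low 32 bits, so a sum that overflows 32 bits wraps back to a small, provably in-bounds value. A minimal sketch of that wrap-around, illustrative only and outside the patch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t r1 = 0x7fffffff;

            r1 += 0x7fffffff;               /* 64-bit add: r1 = 0xfffffffe */
            r1 = (uint32_t)(r1 + 2);        /* 32-bit add truncates: r1 = 0 */

            printf("r1 = %#llx\n", (unsigned long long)r1); /* 0 */
            return 0;
    }
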
+       {
+               "bounds check after shift with oversized count operand",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_IMM(BPF_REG_2, 32),
+                       BPF_MOV64_IMM(BPF_REG_1, 1),
+                       /* r1 = (u32)1 << (u32)32 = ? */
+                       BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+                       /* r1 = [0x0000, 0xffff] */
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
+                       /* computes unknown pointer, potentially OOB */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "R0 max value is outside of the array range",
+               .result = REJECT
+       },
+       {
+               "bounds check after right shift of maybe-negative number",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       /* r1 = [-0x01, 0xfe] */
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+                       /* r1 = 0 or 0xff'ffff'ffff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* r1 = 0 or 0xffff'ffff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* computes unknown pointer, potentially OOB */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "R0 unbounded memory access",
+               .result = REJECT
+       },
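
In the maybe-negative test, r1 starts in [0x00, 0xff]; subtracting 1 may produce -1, which as an unsigned 64-bit value remains enormous even after two logical right shifts, hence the unbounded access. A small illustrative C sketch of the two extremes (not part of the diff):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t lo = 0x00, hi = 0xff;

            lo -= 1;        /* wraps to 0xffffffffffffffff */
            hi -= 1;        /* 0xfe */

            printf("lo >> 16 = %#llx\n", (unsigned long long)(lo >> 16)); /* 0xffffffffffff */
            printf("hi >> 16 = %#llx\n", (unsigned long long)(hi >> 16)); /* 0 */
            return 0;
    }
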
+       {
+               "bounds check map access with off+size signed 32bit overflow. test1",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "map_value pointer and 2147483646",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "pointer offset 1073741822",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test3",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "pointer offset -1073741822",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test4",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_1, 1000000),
+                       BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "map_value pointer and 1000000000000",
+               .result = REJECT
+       },
+       {
+               "pointer/scalar confusion in state equality check (way 1)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_JMP_A(1),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 leaks addr as return value"
+       },
+       {
+               "pointer/scalar confusion in state equality check (way 2)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+                       BPF_JMP_A(1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 leaks addr as return value"
+       },
        {
                "variable-offset ctx access",
                .insns = {
@@ -6782,6 +7247,71 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_LWT_IN,
        },
+       {
+               "indirect variable-offset stack access",
+               .insns = {
+                       /* Fill the top 8 bytes of the stack */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       /* Get an unknown value */
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+                       /* Make it small and 4-byte aligned */
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+                       /* add it to fp.  We now have either fp-4 or fp-8, but
+                        * we don't know which
+                        */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+                       /* dereference it indirectly */
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 5 },
+               .errstr = "variable stack read R2",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_LWT_IN,
+       },
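
The variable-offset test builds a stack pointer that is either fp-8 or fp-4 depending on an unknown value, simply by masking with 4 and subtracting 8. A tiny sketch of that offset computation, for illustration only:

    #include <stdio.h>

    int main(void)
    {
            for (unsigned int v = 0; v < 8; v++) {
                    long off = (long)(v & 4) - 8;   /* either -8 or -4 */

                    printf("v=%u -> fp%ld\n", v, off);
            }
            return 0;
    }
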
+       {
+               "direct stack access with 32-bit wraparound. test1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_EXIT_INSN()
+               },
+               .errstr = "fp pointer and 2147483647",
+               .result = REJECT
+       },
+       {
+               "direct stack access with 32-bit wraparound. test2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_EXIT_INSN()
+               },
+               .errstr = "fp pointer and 1073741823",
+               .result = REJECT
+       },
+       {
+               "direct stack access with 32-bit wraparound. test3",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_EXIT_INSN()
+               },
+               .errstr = "fp pointer offset 1073741822",
+               .result = REJECT
+       },
        {
                "liveness pruning and write screening",
                .insns = {
@@ -7103,6 +7633,19 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
+       {
+               "pkt_end - pkt_start is allowed",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
        {
                "XDP pkt read, pkt_end mangling, bad access 1",
                .insns = {
@@ -7118,7 +7661,7 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "R1 offset is outside of the packet",
+               .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
        },
@@ -7137,7 +7680,7 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "R1 offset is outside of the packet",
+               .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
        },
index 3cc0b561489ea2c1b54701f1aaaf580e79c2535b..e9626cf5607ad060b070680d25986a270c5cd59c 100644 (file)
@@ -3,6 +3,8 @@
 #include <stdio.h>
 #include <string.h>
 #include <unistd.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 
 #include <linux/bpf.h>
 #include <linux/filter.h>
@@ -131,11 +133,16 @@ static void test_log_bad(char *log, size_t log_len, int log_level)
 
 int main(int argc, char **argv)
 {
+       struct rlimit limit  = { RLIM_INFINITY, RLIM_INFINITY };
        char full_log[LOG_SIZE];
        char log[LOG_SIZE];
        size_t want_len;
        int i;
 
+       /* allow unlimited locked memory to have more consistent error code */
+       if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
+               perror("Unable to lift memlock rlimit");
+
        memset(log, 1, LOG_SIZE);
 
        /* Test incorrect attr */
index e57b4ac40e72e0502dff75ea1d80c543280428eb..7177bea1fdfa62a1aa4e424d4dab665d8a9b7aaf 100644 (file)
@@ -1,3 +1,4 @@
 CONFIG_USER_NS=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_TEST_BPF=m
+CONFIG_NUMA=y
diff --git a/tools/testing/selftests/x86/5lvl.c b/tools/testing/selftests/x86/5lvl.c
new file mode 100644 (file)
index 0000000..2eafdcd
--- /dev/null
@@ -0,0 +1,177 @@
+#include <stdio.h>
+#include <sys/mman.h>
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+#define PAGE_SIZE      4096
+#define LOW_ADDR       ((void *) (1UL << 30))
+#define HIGH_ADDR      ((void *) (1UL << 50))
+
+struct testcase {
+       void *addr;
+       unsigned long size;
+       unsigned long flags;
+       const char *msg;
+       unsigned int low_addr_required:1;
+       unsigned int keep_mapped:1;
+};
+
+static struct testcase testcases[] = {
+       {
+               .addr = NULL,
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(NULL)",
+               .low_addr_required = 1,
+       },
+       {
+               .addr = LOW_ADDR,
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(LOW_ADDR)",
+               .low_addr_required = 1,
+       },
+       {
+               .addr = HIGH_ADDR,
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(HIGH_ADDR)",
+               .keep_mapped = 1,
+       },
+       {
+               .addr = HIGH_ADDR,
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(HIGH_ADDR) again",
+               .keep_mapped = 1,
+       },
+       {
+               .addr = HIGH_ADDR,
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+               .msg = "mmap(HIGH_ADDR, MAP_FIXED)",
+       },
+       {
+               .addr = (void*) -1,
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(-1)",
+               .keep_mapped = 1,
+       },
+       {
+               .addr = (void*) -1,
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(-1) again",
+       },
+       {
+               .addr = (void *)((1UL << 47) - PAGE_SIZE),
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap((1UL << 47), 2 * PAGE_SIZE)",
+               .low_addr_required = 1,
+               .keep_mapped = 1,
+       },
+       {
+               .addr = (void *)((1UL << 47) - PAGE_SIZE / 2),
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap((1UL << 47), 2 * PAGE_SIZE / 2)",
+               .low_addr_required = 1,
+               .keep_mapped = 1,
+       },
+       {
+               .addr = (void *)((1UL << 47) - PAGE_SIZE),
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+               .msg = "mmap((1UL << 47) - PAGE_SIZE, 2 * PAGE_SIZE, MAP_FIXED)",
+       },
+       {
+               .addr = NULL,
+               .size = 2UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(NULL, MAP_HUGETLB)",
+               .low_addr_required = 1,
+       },
+       {
+               .addr = LOW_ADDR,
+               .size = 2UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(LOW_ADDR, MAP_HUGETLB)",
+               .low_addr_required = 1,
+       },
+       {
+               .addr = HIGH_ADDR,
+               .size = 2UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(HIGH_ADDR, MAP_HUGETLB)",
+               .keep_mapped = 1,
+       },
+       {
+               .addr = HIGH_ADDR,
+               .size = 2UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(HIGH_ADDR, MAP_HUGETLB) again",
+               .keep_mapped = 1,
+       },
+       {
+               .addr = HIGH_ADDR,
+               .size = 2UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+               .msg = "mmap(HIGH_ADDR, MAP_FIXED | MAP_HUGETLB)",
+       },
+       {
+               .addr = (void*) -1,
+               .size = 2UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(-1, MAP_HUGETLB)",
+               .keep_mapped = 1,
+       },
+       {
+               .addr = (void*) -1,
+               .size = 2UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(-1, MAP_HUGETLB) again",
+       },
+       {
+               .addr = (void *)((1UL << 47) - PAGE_SIZE),
+               .size = 4UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap((1UL << 47), 4UL << 20, MAP_HUGETLB)",
+               .low_addr_required = 1,
+               .keep_mapped = 1,
+       },
+       {
+               .addr = (void *)((1UL << 47) - (2UL << 20)),
+               .size = 4UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+               .msg = "mmap((1UL << 47) - (2UL << 20), 4UL << 20, MAP_FIXED | MAP_HUGETLB)",
+       },
+};
+
+int main(int argc, char **argv)
+{
+       int i;
+       void *p;
+
+       for (i = 0; i < ARRAY_SIZE(testcases); i++) {
+               struct testcase *t = testcases + i;
+
+               p = mmap(t->addr, t->size, PROT_NONE, t->flags, -1, 0);
+
+               printf("%s: %p - ", t->msg, p);
+
+               if (p == MAP_FAILED) {
+                       printf("FAILED\n");
+                       continue;
+               }
+
+               if (t->low_addr_required && p >= (void *)(1UL << 47))
+                       printf("FAILED\n");
+               else
+                       printf("OK\n");
+               if (!t->keep_mapped)
+                       munmap(p, t->size);
+       }
+       return 0;
+}
index 7b1adeee4b0f1956cc78d2e722bf17e5f716454c..939a337128dbf3fb08277995de98437b10b037b8 100644 (file)
@@ -11,7 +11,7 @@ TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_sysc
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
                        test_FCMOV test_FCOMI test_FISTTP \
                        vdso_restorer
-TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip
+TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip 5lvl
 
 TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
 TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
index 3f0093911f03d5e474d245c4b4e37064643e917b..d1b61ab870f8d9f4d78412b48322f5f58bd5cd76 100644 (file)
 struct mpx_bd_entry {
        union {
                char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES];
-               void *contents[1];
+               void *contents[0];
        };
 } __attribute__((packed));
 
 struct mpx_bt_entry {
        union {
                char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES];
-               unsigned long contents[1];
+               unsigned long contents[0];
        };
 } __attribute__((packed));
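
Changing contents[1] to contents[0] makes the union member a zero-length array: it contributes no storage of its own and merely gives a typed view of the fixed-size buffer. An illustrative stand-alone sketch, not part of the diff; ENTRY_SIZE_BYTES is a made-up placeholder, not the real MPX constant:

    #include <stdio.h>

    #define ENTRY_SIZE_BYTES 32     /* placeholder size */

    struct entry {
            union {
                    char x[ENTRY_SIZE_BYTES];
                    unsigned long contents[0];      /* zero-length: no storage, aliases x */
            };
    };

    int main(void)
    {
            struct entry e;

            printf("sizeof(e)          = %zu\n", sizeof(e));          /* 32 */
            printf("sizeof(e.contents) = %zu\n", sizeof(e.contents)); /* 0 */
            return 0;
    }
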
 
index 3818f25391c24c34beeb20c2e9f937bdfc9b111a..b3cb7670e02661cd2ab66fd3da98b3940dd44c70 100644 (file)
@@ -30,6 +30,7 @@ static inline void sigsafe_printf(const char *format, ...)
        if (!dprint_in_signal) {
                vprintf(format, ap);
        } else {
+               int ret;
                int len = vsnprintf(dprint_in_signal_buffer,
                                    DPRINT_IN_SIGNAL_BUF_SIZE,
                                    format, ap);
@@ -39,7 +40,9 @@ static inline void sigsafe_printf(const char *format, ...)
                 */
                if (len > DPRINT_IN_SIGNAL_BUF_SIZE)
                        len = DPRINT_IN_SIGNAL_BUF_SIZE;
-               write(1, dprint_in_signal_buffer, len);
+               ret = write(1, dprint_in_signal_buffer, len);
+               if (ret < 0)
+                       abort();
        }
        va_end(ap);
 }
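
The hunk above only checks write()'s return value in the signal path; write(2) is one of the few async-signal-safe ways to emit output, and the unused-result warning otherwise trips on it. A minimal sketch of the same pattern, illustrative only:

    #include <string.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void sig_print(const char *msg)
    {
            /* write(2) is async-signal-safe; printf() is not */
            ssize_t ret = write(STDOUT_FILENO, msg, strlen(msg));

            if (ret < 0)
                    abort();
    }

    int main(void)
    {
            sig_print("hello from a signal-safe printer\n");
            return 0;
    }
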
index 7a1cc0e56d2d6a5006548f9f4488ae003db24911..bc1b0735bb50ed02963e834c7dc38395f7c6d834 100644 (file)
@@ -250,7 +250,7 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
        unsigned long ip;
        char *fpregs;
        u32 *pkru_ptr;
-       u64 si_pkey;
+       u64 siginfo_pkey;
        u32 *si_pkey_ptr;
        int pkru_offset;
        fpregset_t fpregset;
@@ -292,9 +292,9 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
        si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset);
        dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
        dump_mem(si_pkey_ptr - 8, 24);
-       si_pkey = *si_pkey_ptr;
-       pkey_assert(si_pkey < NR_PKEYS);
-       last_si_pkey = si_pkey;
+       siginfo_pkey = *si_pkey_ptr;
+       pkey_assert(siginfo_pkey < NR_PKEYS);
+       last_si_pkey = siginfo_pkey;
 
        if ((si->si_code == SEGV_MAPERR) ||
            (si->si_code == SEGV_ACCERR) ||
@@ -306,7 +306,7 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
        dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr);
        /* need __rdpkru() version so we do not do shadow_pkru checking */
        dprintf1("signal pkru from  pkru: %08x\n", __rdpkru());
-       dprintf1("si_pkey from siginfo: %jx\n", si_pkey);
+       dprintf1("pkey from siginfo: %jx\n", siginfo_pkey);
        *(u64 *)pkru_ptr = 0x00000000;
        dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n");
        pkru_faults++;
index 5727dfb15a83efecb4ef7dc951149183a38e5108..c9c81614a66ad6245d831066b204b72b1af8817f 100644 (file)
@@ -50,14 +50,14 @@ static int parse_status(const char *value)
 
        while (*c != '\0') {
                int port, status, speed, devid;
-               unsigned long socket;
+               int sockfd;
                char lbusid[SYSFS_BUS_ID_SIZE];
                struct usbip_imported_device *idev;
                char hub[3];
 
-               ret = sscanf(c, "%2s  %d %d %d %x %lx %31s\n",
+               ret = sscanf(c, "%2s  %d %d %d %x %u %31s\n",
                                hub, &port, &status, &speed,
-                               &devid, &socket, lbusid);
+                               &devid, &sockfd, lbusid);
 
                if (ret < 5) {
                        dbg("sscanf failed: %d", ret);
@@ -66,7 +66,7 @@ static int parse_status(const char *value)
 
                dbg("hub %s port %d status %d speed %d devid %x",
                                hub, port, status, speed, devid);
-               dbg("socket %lx lbusid %s", socket, lbusid);
+               dbg("sockfd %u lbusid %s", sockfd, lbusid);
 
                /* if a device is connected, look at it */
                idev = &vhci_driver->idev[port];
@@ -106,7 +106,7 @@ static int parse_status(const char *value)
        return 0;
 }
 
-#define MAX_STATUS_NAME 16
+#define MAX_STATUS_NAME 18
 
 static int refresh_imported_device_list(void)
 {
@@ -329,9 +329,17 @@ err:
 int usbip_vhci_get_free_port(uint32_t speed)
 {
        for (int i = 0; i < vhci_driver->nports; i++) {
-               if (speed == USB_SPEED_SUPER &&
-                   vhci_driver->idev[i].hub != HUB_SPEED_SUPER)
-                       continue;
+
+               switch (speed) {
+               case    USB_SPEED_SUPER:
+                       if (vhci_driver->idev[i].hub != HUB_SPEED_SUPER)
+                               continue;
+               break;
+               default:
+                       if (vhci_driver->idev[i].hub != HUB_SPEED_HIGH)
+                               continue;
+               break;
+               }
 
                if (vhci_driver->idev[i].status == VDEV_ST_NULL)
                        return vhci_driver->idev[i].port;
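
The reworked sscanf reads the socket column as a plain unsigned decimal sockfd (%u) rather than an unsigned long in hex. An illustrative stand-alone parse of a made-up status line using the same format string (not part of the diff; the line contents are invented for the example):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical status line: hub port status speed devid sockfd busid */
            const char *line = "hs  0000 004 000 00010002 3 1-1";
            char hub[3], lbusid[32];
            int port, status, speed;
            unsigned int devid, sockfd;
            int ret;

            ret = sscanf(line, "%2s  %d %d %d %x %u %31s\n",
                         hub, &port, &status, &speed, &devid, &sockfd, lbusid);

            printf("fields=%d hub=%s port=%d devid=%x sockfd=%u busid=%s\n",
                   ret, hub, port, devid, sockfd, lbusid);
            return 0;
    }
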
index 38bb171acebade83314d9556baa7893c7d203911..e6e81305ef469559ffde4013535be10019c30da4 100644 (file)
 #define unlikely(x)    (__builtin_expect(!!(x), 0))
 #define likely(x)    (__builtin_expect(!!(x), 1))
 #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
+#define SIZE_MAX        (~(size_t)0)
+
 typedef pthread_spinlock_t  spinlock_t;
 
 typedef int gfp_t;
-static void *kmalloc(unsigned size, gfp_t gfp)
-{
-       return memalign(64, size);
-}
+#define __GFP_ZERO 0x1
 
-static void *kzalloc(unsigned size, gfp_t gfp)
+static void *kmalloc(unsigned size, gfp_t gfp)
 {
        void *p = memalign(64, size);
        if (!p)
                return p;
-       memset(p, 0, size);
 
+       if (gfp & __GFP_ZERO)
+               memset(p, 0, size);
        return p;
 }
 
+static inline void *kzalloc(unsigned size, gfp_t flags)
+{
+       return kmalloc(size, flags | __GFP_ZERO);
+}
+
+static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+       if (size != 0 && n > SIZE_MAX / size)
+               return NULL;
+       return kmalloc(n * size, flags);
+}
+
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+{
+       return kmalloc_array(n, size, flags | __GFP_ZERO);
+}
+
 static void kfree(void *p)
 {
        if (p)
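
The new kmalloc_array()/kcalloc() wrappers add the usual overflow guard: if n * size would wrap past SIZE_MAX the allocation fails instead of silently returning a short buffer. An illustrative user-space sketch of the same guard over plain malloc(), outside the patch:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *malloc_array(size_t n, size_t size)
    {
            if (size != 0 && n > SIZE_MAX / size)
                    return NULL;            /* n * size would overflow */
            return malloc(n * size);
    }

    int main(void)
    {
            void *ok  = malloc_array(16, 64);
            void *bad = malloc_array(SIZE_MAX / 2, 4);      /* overflows, returns NULL */

            printf("ok=%p bad=%p\n", ok, bad);
            free(ok);
            return 0;
    }
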
index 35b039864b778c1f03df563dce35ca59da1d79b4..0cf28aa6f21c3b392b8a90b4aa9e9087f6807b3e 100644 (file)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 # Sergey Senozhatsky, 2015
 # sergey.senozhatsky.work@gmail.com
index 4db54ff08d9e92b907117ee34d1d302e18fd5dbb..cc29a814832837f5fb237dfdf74845a284e04367 100644 (file)
@@ -92,16 +92,23 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
        struct arch_timer_context *vtimer;
+       u32 cnt_ctl;
 
-       if (!vcpu) {
-               pr_warn_once("Spurious arch timer IRQ on non-VCPU thread\n");
-               return IRQ_NONE;
-       }
-       vtimer = vcpu_vtimer(vcpu);
+       /*
+        * We may see a timer interrupt after vcpu_put() has been called which
+        * sets the CPU's vcpu pointer to NULL, because even though the timer
+        * has been disabled in vtimer_save_state(), the hardware interrupt
+        * signal may not have been retired from the interrupt controller yet.
+        */
+       if (!vcpu)
+               return IRQ_HANDLED;
 
+       vtimer = vcpu_vtimer(vcpu);
        if (!vtimer->irq.level) {
-               vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-               if (kvm_timer_irq_can_fire(vtimer))
+               cnt_ctl = read_sysreg_el0(cntv_ctl);
+               cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
+                          ARCH_TIMER_CTRL_IT_MASK;
+               if (cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
                        kvm_timer_update_irq(vcpu, true, vtimer);
        }
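
The reworked handler forwards the interrupt only when the saved CNTV_CTL has ENABLE and ISTATUS set with IMASK clear; masking with all three bits and comparing against ENABLE|ISTATUS expresses the three conditions in one test. An illustrative stand-alone sketch of that predicate (not part of the diff; bit positions follow the ARM generic timer control register, ENABLE=bit0, IMASK=bit1, ISTATUS=bit2):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CTRL_ENABLE     (1u << 0)       /* timer enabled */
    #define CTRL_IT_MASK    (1u << 1)       /* interrupt masked */
    #define CTRL_IT_STAT    (1u << 2)       /* interrupt condition met */

    static bool timer_should_fire(uint32_t cnt_ctl)
    {
            cnt_ctl &= CTRL_ENABLE | CTRL_IT_STAT | CTRL_IT_MASK;
            return cnt_ctl == (CTRL_ENABLE | CTRL_IT_STAT);
    }

    int main(void)
    {
            printf("%d\n", timer_should_fire(CTRL_ENABLE | CTRL_IT_STAT));                   /* 1 */
            printf("%d\n", timer_should_fire(CTRL_ENABLE | CTRL_IT_STAT | CTRL_IT_MASK));    /* 0 */
            printf("%d\n", timer_should_fire(CTRL_IT_STAT));                                 /* 0 */
            return 0;
    }
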
 
@@ -355,6 +362,7 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
 
        /* Disable the virtual timer */
        write_sysreg_el0(0, cntv_ctl);
+       isb();
 
        vtimer->loaded = false;
 out:
@@ -479,9 +487,6 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 
        vtimer_restore_state(vcpu);
 
-       if (has_vhe())
-               disable_el1_phys_timer_access();
-
        /* Set the background timer for the physical timer emulation. */
        phys_timer_emulate(vcpu);
 }
@@ -510,9 +515,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
        if (unlikely(!timer->enabled))
                return;
 
-       if (has_vhe())
-               enable_el1_phys_timer_access();
-
        vtimer_save_state(vcpu);
 
        /*
@@ -726,7 +728,7 @@ static int kvm_timer_dying_cpu(unsigned int cpu)
        return 0;
 }
 
-int kvm_timer_hyp_init(void)
+int kvm_timer_hyp_init(bool has_gic)
 {
        struct arch_timer_kvm_info *info;
        int err;
@@ -762,10 +764,13 @@ int kvm_timer_hyp_init(void)
                return err;
        }
 
-       err = irq_set_vcpu_affinity(host_vtimer_irq, kvm_get_running_vcpus());
-       if (err) {
-               kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
-               goto out_free_irq;
+       if (has_gic) {
+               err = irq_set_vcpu_affinity(host_vtimer_irq,
+                                           kvm_get_running_vcpus());
+               if (err) {
+                       kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
+                       goto out_free_irq;
+               }
        }
 
        kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
@@ -817,9 +822,6 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-       struct irq_desc *desc;
-       struct irq_data *data;
-       int phys_irq;
        int ret;
 
        if (timer->enabled)
@@ -837,33 +839,14 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
                return -EINVAL;
        }
 
-       /*
-        * Find the physical IRQ number corresponding to the host_vtimer_irq
-        */
-       desc = irq_to_desc(host_vtimer_irq);
-       if (!desc) {
-               kvm_err("%s: no interrupt descriptor\n", __func__);
-               return -EINVAL;
-       }
-
-       data = irq_desc_get_irq_data(desc);
-       while (data->parent_data)
-               data = data->parent_data;
-
-       phys_irq = data->hwirq;
-
-       /*
-        * Tell the VGIC that the virtual interrupt is tied to a
-        * physical interrupt. We do that once per VCPU.
-        */
-       ret = kvm_vgic_map_phys_irq(vcpu, vtimer->irq.irq, phys_irq);
+       ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq);
        if (ret)
                return ret;
 
 no_vgic:
        preempt_disable();
        timer->enabled = 1;
-       kvm_timer_vcpu_load_vgic(vcpu);
+       kvm_timer_vcpu_load(vcpu);
        preempt_enable();
 
        return 0;
index 772bf74ac2e9ae8380e0ba2b87b385883eca6d4c..2e43f9d42bd5db2a07438bb98f5e029c6246adb4 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/mman.h>
 #include <linux/sched.h>
 #include <linux/kvm.h>
+#include <linux/kvm_irqfd.h>
+#include <linux/irqbypass.h>
 #include <trace/events/kvm.h>
 #include <kvm/arm_pmu.h>
 
@@ -175,6 +177,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
        int i;
 
+       kvm_vgic_destroy(kvm);
+
        free_percpu(kvm->arch.last_vcpu_ran);
        kvm->arch.last_vcpu_ran = NULL;
 
@@ -184,8 +188,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                        kvm->vcpus[i] = NULL;
                }
        }
-
-       kvm_vgic_destroy(kvm);
+       atomic_set(&kvm->online_vcpus, 0);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -294,7 +297,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
        kvm_mmu_free_memory_caches(vcpu);
        kvm_timer_vcpu_terminate(vcpu);
-       kvm_vgic_vcpu_destroy(vcpu);
        kvm_pmu_vcpu_destroy(vcpu);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
@@ -313,11 +315,13 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
        kvm_timer_schedule(vcpu);
+       kvm_vgic_v4_enable_doorbell(vcpu);
 }
 
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
        kvm_timer_unschedule(vcpu);
+       kvm_vgic_v4_disable_doorbell(vcpu);
 }
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -611,7 +615,6 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        int ret;
-       sigset_t sigsaved;
 
        if (unlikely(!kvm_vcpu_initialized(vcpu)))
                return -ENOEXEC;
@@ -624,13 +627,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                ret = kvm_handle_mmio_return(vcpu, vcpu->run);
                if (ret)
                        return ret;
+               if (kvm_arm_handle_step_debug(vcpu, vcpu->run))
+                       return 0;
+
        }
 
        if (run->immediate_exit)
                return -EINTR;
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        ret = 1;
        run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -765,8 +770,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                kvm_pmu_update_run(vcpu);
        }
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
+
        return ret;
 }
 
@@ -1321,7 +1326,7 @@ static int init_subsystems(void)
        /*
         * Init HYP architected timer support
         */
-       err = kvm_timer_hyp_init();
+       err = kvm_timer_hyp_init(vgic_present);
        if (err)
                goto out;
 
@@ -1450,6 +1455,46 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
        return NULL;
 }
 
+bool kvm_arch_has_irq_bypass(void)
+{
+       return true;
+}
+
+int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
+                                     struct irq_bypass_producer *prod)
+{
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+       return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
+                                         &irqfd->irq_entry);
+}
+void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+                                     struct irq_bypass_producer *prod)
+{
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+       kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
+                                    &irqfd->irq_entry);
+}
+
+void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
+{
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+       kvm_arm_halt_guest(irqfd->kvm);
+}
+
+void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
+{
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+       kvm_arm_resume_guest(irqfd->kvm);
+}
+
 /**
  * Initialize Hyp-mode and memory mappings on all CPUs.
  */
@@ -1460,7 +1505,7 @@ int kvm_arch_init(void *opaque)
        bool in_hyp_mode;
 
        if (!is_hyp_mode_available()) {
-               kvm_err("HYP mode not available\n");
+               kvm_info("HYP mode not available\n");
                return -ENODEV;
        }
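
Each new irqbypass callback recovers the enclosing kvm_kernel_irqfd from the embedded consumer with container_of(). A user-space sketch of that pattern with stand-in types, for illustration only, with container_of spelled out the way the kernel defines it:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct consumer { int token; };

    struct irqfd {
            int gsi;
            struct consumer consumer;       /* embedded member handed to callbacks */
    };

    static void callback(struct consumer *cons)
    {
            struct irqfd *irqfd = container_of(cons, struct irqfd, consumer);

            printf("callback for gsi %d\n", irqfd->gsi);
    }

    int main(void)
    {
            struct irqfd fd = { .gsi = 42, .consumer = { .token = 1 } };

            callback(&fd.consumer);
            return 0;
    }
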
 
index f39861639f08f81a305fff841f0f08dcf0878686..f24404b3c8dff8016c358c9339655465a0468ee7 100644 (file)
@@ -27,42 +27,34 @@ void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high)
        write_sysreg(cntvoff, cntvoff_el2);
 }
 
-void __hyp_text enable_el1_phys_timer_access(void)
-{
-       u64 val;
-
-       /* Allow physical timer/counter access for the host */
-       val = read_sysreg(cnthctl_el2);
-       val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-       write_sysreg(val, cnthctl_el2);
-}
-
-void __hyp_text disable_el1_phys_timer_access(void)
-{
-       u64 val;
-
-       /*
-        * Disallow physical timer access for the guest
-        * Physical counter access is allowed
-        */
-       val = read_sysreg(cnthctl_el2);
-       val &= ~CNTHCTL_EL1PCEN;
-       val |= CNTHCTL_EL1PCTEN;
-       write_sysreg(val, cnthctl_el2);
-}
-
 void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
 {
        /*
         * We don't need to do this for VHE since the host kernel runs in EL2
         * with HCR_EL2.TGE ==1, which makes those bits have no impact.
         */
-       if (!has_vhe())
-               enable_el1_phys_timer_access();
+       if (!has_vhe()) {
+               u64 val;
+
+               /* Allow physical timer/counter access for the host */
+               val = read_sysreg(cnthctl_el2);
+               val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+               write_sysreg(val, cnthctl_el2);
+       }
 }
 
 void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu)
 {
-       if (!has_vhe())
-               disable_el1_phys_timer_access();
+       if (!has_vhe()) {
+               u64 val;
+
+               /*
+                * Disallow physical timer access for the guest
+                * Physical counter access is allowed
+                */
+               val = read_sysreg(cnthctl_el2);
+               val &= ~CNTHCTL_EL1PCEN;
+               val |= CNTHCTL_EL1PCTEN;
+               write_sysreg(val, cnthctl_el2);
+       }
 }
index a3f18d3623661ae6204750fa9aa8639a5fde7617..d7fd46fe9efb35ca28a0685b333f8c68a61b2d64 100644 (file)
@@ -34,11 +34,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
        else
                elrsr1 = 0;
 
-#ifdef CONFIG_CPU_BIG_ENDIAN
-       cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
-#else
        cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-#endif
 }
 
 static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
index 91728faa13fdc8650b3bbdd54b00ac42be3dc01f..f5c3d6d7019ea63a7d2376c68f6c392cd2036aaf 100644 (file)
@@ -258,7 +258,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
                        cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
                }
        } else {
-               if (static_branch_unlikely(&vgic_v3_cpuif_trap))
+               if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+                   cpu_if->its_vpe.its_vm)
                        write_gicreg(0, ICH_HCR_EL2);
 
                cpu_if->vgic_elrsr = 0xffff;
@@ -337,9 +338,11 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
                /*
                 * If we need to trap system registers, we must write
                 * ICH_HCR_EL2 anyway, even if no interrupts are being
-                * injected,
+                * injected. Same thing if GICv4 is used, as VLPI
+                * delivery is gated by ICH_HCR_EL2.En.
                 */
-               if (static_branch_unlikely(&vgic_v3_cpuif_trap))
+               if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+                   cpu_if->its_vpe.its_vm)
                        write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
        }
 
index b6e715fd3c90af8c74408b72652f9974a3fb894d..dac7ceb1a677746cadb086a2cf8a07d8e560373c 100644 (file)
@@ -112,7 +112,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
                }
 
                trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
-                              data);
+                              &data);
                data = vcpu_data_host_to_guest(vcpu, data, len);
                vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
        }
@@ -182,14 +182,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
                                               len);
 
-               trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+               trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
                kvm_mmio_write_buf(data_buf, len, data);
 
                ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                       data_buf);
        } else {
                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
-                              fault_ipa, 0);
+                              fault_ipa, NULL);
 
                ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                      data_buf);
index b36945d49986dd5c0f097f16837d72d81f655308..b4b69c2d10120237e12bc6524243071bf645f1d9 100644 (file)
@@ -509,8 +509,6 @@ static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
  */
 void free_hyp_pgds(void)
 {
-       unsigned long addr;
-
        mutex_lock(&kvm_hyp_pgd_mutex);
 
        if (boot_hyp_pgd) {
@@ -521,10 +519,10 @@ void free_hyp_pgds(void)
 
        if (hyp_pgd) {
                unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-               for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-                       unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
-               for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-                       unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
+               unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
+                               (uintptr_t)high_memory - PAGE_OFFSET);
+               unmap_hyp_range(hyp_pgd, kern_hyp_va(VMALLOC_START),
+                               VMALLOC_END - VMALLOC_START);
 
                free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
                hyp_pgd = NULL;
index 5801261f3adddeaab819f4ffd23ecd5839d03c8a..62310122ee78eff828eb7f4ae6f8cfdb4ed18f0c 100644 (file)
@@ -285,6 +285,10 @@ int vgic_init(struct kvm *kvm)
        if (ret)
                goto out;
 
+       ret = vgic_v4_init(kvm);
+       if (ret)
+               goto out;
+
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_vgic_vcpu_enable(vcpu);
 
@@ -320,6 +324,9 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 
        kfree(dist->spis);
        dist->nr_spis = 0;
+
+       if (vgic_supports_direct_msis(kvm))
+               vgic_v4_teardown(kvm);
 }
 
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
index b7baf581611ae8730bb480d6e465607eb3cb3d2a..99e026d2dade9bf5dc7e1506bca391b2ac631d41 100644 (file)
@@ -112,8 +112,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
        u32 nr = dist->nr_spis;
        int i, ret;
 
-       entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
-                         GFP_KERNEL);
+       entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
        if (!entries)
                return -ENOMEM;
 
index d2a99ab0ade7a2a83a36466d3e76bff88b1e48a5..8e633bd9cc1e74706e0490419f27a20e6fa8e0b7 100644 (file)
@@ -38,7 +38,7 @@ static int vgic_its_save_tables_v0(struct vgic_its *its);
 static int vgic_its_restore_tables_v0(struct vgic_its *its);
 static int vgic_its_commit_v0(struct vgic_its *its);
 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
-                            struct kvm_vcpu *filter_vcpu);
+                            struct kvm_vcpu *filter_vcpu, bool needs_inv);
 
 /*
  * Creates a new (reference to a) struct vgic_irq for a given LPI.
@@ -106,7 +106,7 @@ out_unlock:
         * However, we only have those structs for mapped IRQs, so we read in
         * the respective config data from memory here upon mapping the LPI.
         */
-       ret = update_lpi_config(kvm, irq, NULL);
+       ret = update_lpi_config(kvm, irq, NULL, false);
        if (ret)
                return ERR_PTR(ret);
 
@@ -273,7 +273,7 @@ static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
  * VCPU. Unconditionally applies if filter_vcpu is NULL.
  */
 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
-                            struct kvm_vcpu *filter_vcpu)
+                            struct kvm_vcpu *filter_vcpu, bool needs_inv)
 {
        u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
        u8 prop;
@@ -292,11 +292,17 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
                irq->priority = LPI_PROP_PRIORITY(prop);
                irq->enabled = LPI_PROP_ENABLE_BIT(prop);
 
-               vgic_queue_irq_unlock(kvm, irq, flags);
-       } else {
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               if (!irq->hw) {
+                       vgic_queue_irq_unlock(kvm, irq, flags);
+                       return 0;
+               }
        }
 
+       spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+       if (irq->hw)
+               return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
+
        return 0;
 }
 
@@ -336,6 +342,29 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
        return i;
 }
 
+static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
+{
+       int ret = 0;
+
+       spin_lock(&irq->irq_lock);
+       irq->target_vcpu = vcpu;
+       spin_unlock(&irq->irq_lock);
+
+       if (irq->hw) {
+               struct its_vlpi_map map;
+
+               ret = its_get_vlpi(irq->host_irq, &map);
+               if (ret)
+                       return ret;
+
+               map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+
+               ret = its_map_vlpi(irq->host_irq, &map);
+       }
+
+       return ret;
+}
+
 /*
  * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
  * is targeting) to the VGIC's view, which deals with target VCPUs.
@@ -350,10 +379,7 @@ static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
                return;
 
        vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
-
-       spin_lock(&ite->irq->irq_lock);
-       ite->irq->target_vcpu = vcpu;
-       spin_unlock(&ite->irq->irq_lock);
+       update_affinity(ite->irq, vcpu);
 }
 
 /*
@@ -395,6 +421,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
        u32 *intids;
        int nr_irqs, i;
        unsigned long flags;
+       u8 pendmask;
 
        nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
        if (nr_irqs < 0)
@@ -402,7 +429,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 
        for (i = 0; i < nr_irqs; i++) {
                int byte_offset, bit_nr;
-               u8 pendmask;
 
                byte_offset = intids[i] / BITS_PER_BYTE;
                bit_nr = intids[i] % BITS_PER_BYTE;
@@ -505,19 +531,11 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
        return 0;
 }
 
-/*
- * Find the target VCPU and the LPI number for a given devid/eventid pair
- * and make this IRQ pending, possibly injecting it.
- * Must be called with the its_lock mutex held.
- * Returns 0 on success, a positive error value for any ITS mapping
- * related errors and negative error values for generic errors.
- */
-static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
-                               u32 devid, u32 eventid)
+int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
+                        u32 devid, u32 eventid, struct vgic_irq **irq)
 {
        struct kvm_vcpu *vcpu;
        struct its_ite *ite;
-       unsigned long flags;
 
        if (!its->enabled)
                return -EBUSY;
@@ -533,26 +551,65 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
        if (!vcpu->arch.vgic_cpu.lpis_enabled)
                return -EBUSY;
 
-       spin_lock_irqsave(&ite->irq->irq_lock, flags);
-       ite->irq->pending_latch = true;
-       vgic_queue_irq_unlock(kvm, ite->irq, flags);
-
+       *irq = ite->irq;
        return 0;
 }
 
-static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
+struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
 {
+       u64 address;
+       struct kvm_io_device *kvm_io_dev;
        struct vgic_io_device *iodev;
 
-       if (dev->ops != &kvm_io_gic_ops)
-               return NULL;
+       if (!vgic_has_its(kvm))
+               return ERR_PTR(-ENODEV);
+
+       if (!(msi->flags & KVM_MSI_VALID_DEVID))
+               return ERR_PTR(-EINVAL);
 
-       iodev = container_of(dev, struct vgic_io_device, dev);
+       address = (u64)msi->address_hi << 32 | msi->address_lo;
+
+       kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
+       if (!kvm_io_dev)
+               return ERR_PTR(-EINVAL);
 
+       if (kvm_io_dev->ops != &kvm_io_gic_ops)
+               return ERR_PTR(-EINVAL);
+
+       iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
        if (iodev->iodev_type != IODEV_ITS)
-               return NULL;
+               return ERR_PTR(-EINVAL);
+
+       return iodev->its;
+}
+
+/*
+ * Find the target VCPU and the LPI number for a given devid/eventid pair
+ * and make this IRQ pending, possibly injecting it.
+ * Must be called with the its_lock mutex held.
+ * Returns 0 on success, a positive error value for any ITS mapping
+ * related errors and negative error values for generic errors.
+ */
+static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
+                               u32 devid, u32 eventid)
+{
+       struct vgic_irq *irq = NULL;
+       unsigned long flags;
+       int err;
+
+       err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
+       if (err)
+               return err;
+
+       if (irq->hw)
+               return irq_set_irqchip_state(irq->host_irq,
+                                            IRQCHIP_STATE_PENDING, true);
+
+       spin_lock_irqsave(&irq->irq_lock, flags);
+       irq->pending_latch = true;
+       vgic_queue_irq_unlock(kvm, irq, flags);
 
-       return iodev;
+       return 0;
 }
 
 /*
@@ -563,30 +620,16 @@ static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
  */
 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 {
-       u64 address;
-       struct kvm_io_device *kvm_io_dev;
-       struct vgic_io_device *iodev;
+       struct vgic_its *its;
        int ret;
 
-       if (!vgic_has_its(kvm))
-               return -ENODEV;
-
-       if (!(msi->flags & KVM_MSI_VALID_DEVID))
-               return -EINVAL;
-
-       address = (u64)msi->address_hi << 32 | msi->address_lo;
-
-       kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
-       if (!kvm_io_dev)
-               return -EINVAL;
-
-       iodev = vgic_get_its_iodev(kvm_io_dev);
-       if (!iodev)
-               return -EINVAL;
+       its = vgic_msi_to_its(kvm, msi);
+       if (IS_ERR(its))
+               return PTR_ERR(its);
 
-       mutex_lock(&iodev->its->its_lock);
-       ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
-       mutex_unlock(&iodev->its->its_lock);
+       mutex_lock(&its->its_lock);
+       ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
+       mutex_unlock(&its->its_lock);
 
        if (ret < 0)
                return ret;
@@ -608,8 +651,12 @@ static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
        list_del(&ite->ite_list);
 
        /* This put matches the get in vgic_add_lpi. */
-       if (ite->irq)
+       if (ite->irq) {
+               if (ite->irq->hw)
+                       WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+
                vgic_put_irq(kvm, ite->irq);
+       }
 
        kfree(ite);
 }
@@ -683,11 +730,7 @@ static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
        ite->collection = collection;
        vcpu = kvm_get_vcpu(kvm, collection->target_addr);
 
-       spin_lock(&ite->irq->irq_lock);
-       ite->irq->target_vcpu = vcpu;
-       spin_unlock(&ite->irq->irq_lock);
-
-       return 0;
+       return update_affinity(ite->irq, vcpu);
 }
 
 /*
@@ -778,6 +821,8 @@ static int vgic_its_alloc_collection(struct vgic_its *its,
                return E_ITS_MAPC_COLLECTION_OOR;
 
        collection = kzalloc(sizeof(*collection), GFP_KERNEL);
+       if (!collection)
+               return -ENOMEM;
 
        collection->collection_id = coll_id;
        collection->target_addr = COLLECTION_NOT_MAPPED;
@@ -1054,6 +1099,10 @@ static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
 
        ite->irq->pending_latch = false;
 
+       if (ite->irq->hw)
+               return irq_set_irqchip_state(ite->irq->host_irq,
+                                            IRQCHIP_STATE_PENDING, false);
+
        return 0;
 }
 
@@ -1073,7 +1122,7 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
        if (!ite)
                return E_ITS_INV_UNMAPPED_INTERRUPT;
 
-       return update_lpi_config(kvm, ite->irq, NULL);
+       return update_lpi_config(kvm, ite->irq, NULL, true);
 }
 
 /*
@@ -1108,12 +1157,15 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
                irq = vgic_get_irq(kvm, NULL, intids[i]);
                if (!irq)
                        continue;
-               update_lpi_config(kvm, irq, vcpu);
+               update_lpi_config(kvm, irq, vcpu, false);
                vgic_put_irq(kvm, irq);
        }
 
        kfree(intids);
 
+       if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
+               its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
+
        return 0;
 }
 
@@ -1128,11 +1180,12 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
 static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
                                      u64 *its_cmd)
 {
-       struct vgic_dist *dist = &kvm->arch.vgic;
        u32 target1_addr = its_cmd_get_target_addr(its_cmd);
        u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
        struct kvm_vcpu *vcpu1, *vcpu2;
        struct vgic_irq *irq;
+       u32 *intids;
+       int irq_count, i;
 
        if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
            target2_addr >= atomic_read(&kvm->online_vcpus))
@@ -1144,19 +1197,19 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
        vcpu1 = kvm_get_vcpu(kvm, target1_addr);
        vcpu2 = kvm_get_vcpu(kvm, target2_addr);
 
-       spin_lock(&dist->lpi_list_lock);
+       irq_count = vgic_copy_lpi_list(vcpu1, &intids);
+       if (irq_count < 0)
+               return irq_count;
 
-       list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
-               spin_lock(&irq->irq_lock);
+       for (i = 0; i < irq_count; i++) {
+               irq = vgic_get_irq(kvm, NULL, intids[i]);
 
-               if (irq->target_vcpu == vcpu1)
-                       irq->target_vcpu = vcpu2;
+               update_affinity(irq, vcpu2);
 
-               spin_unlock(&irq->irq_lock);
+               vgic_put_irq(kvm, irq);
        }
 
-       spin_unlock(&dist->lpi_list_lock);
-
+       kfree(intids);
        return 0;
 }
 
@@ -1634,6 +1687,14 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
        if (!its)
                return -ENOMEM;
 
+       if (vgic_initialized(dev->kvm)) {
+               int ret = vgic_v4_init(dev->kvm);
+               if (ret < 0) {
+                       kfree(its);
+                       return ret;
+               }
+       }
+
        mutex_init(&its->its_lock);
        mutex_init(&its->cmd_lock);
 
@@ -1946,6 +2007,15 @@ static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
        list_for_each_entry(ite, &device->itt_head, ite_list) {
                gpa_t gpa = base + ite->event_id * ite_esz;
 
+               /*
+                * If an LPI carries the HW bit, this means that this
+                * interrupt is controlled by GICv4, and we do not
+                * have direct access to that state. Let's simply fail
+                * the save operation...
+                */
+               if (ite->irq->hw)
+                       return -EACCES;
+
                ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
                if (ret)
                        return ret;
index 83786108829e38feb57267648c88acb04173f8ca..671fe81f8e1de991e1636a9901012b0a203618a4 100644 (file)
@@ -54,6 +54,11 @@ bool vgic_has_its(struct kvm *kvm)
        return dist->has_its;
 }
 
+bool vgic_supports_direct_msis(struct kvm *kvm)
+{
+       return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
+}
+
 static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
 {
index 863351c090d8f2129ec8224507fd75bff15d38d2..f47e8481fa452d2b67aaca6fc6985a4fa05b4e77 100644 (file)
@@ -24,6 +24,7 @@
 static bool group0_trap;
 static bool group1_trap;
 static bool common_trap;
+static bool gicv4_enable;
 
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
@@ -326,13 +327,13 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
        int last_byte_offset = -1;
        struct vgic_irq *irq;
        int ret;
+       u8 val;
 
        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                int byte_offset, bit_nr;
                struct kvm_vcpu *vcpu;
                gpa_t pendbase, ptr;
                bool stored;
-               u8 val;
 
                vcpu = irq->target_vcpu;
                if (!vcpu)
@@ -461,6 +462,12 @@ static int __init early_common_trap_cfg(char *buf)
 }
 early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);
 
+static int __init early_gicv4_enable(char *buf)
+{
+       return strtobool(buf, &gicv4_enable);
+}
+early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
+
 /**
  * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
  * @node:      pointer to the DT node
@@ -480,6 +487,13 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
        kvm_vgic_global_state.can_emulate_gicv2 = false;
        kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;
 
+       /* GICv4 support? */
+       if (info->has_v4) {
+               kvm_vgic_global_state.has_gicv4 = gicv4_enable;
+               kvm_info("GICv4 support %sabled\n",
+                        gicv4_enable ? "en" : "dis");
+       }
+
        if (!info->vcpu.start) {
                kvm_info("GICv3: no GICV resource entry\n");
                kvm_vgic_global_state.vcpu_base = 0;
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
new file mode 100644 (file)
index 0000000..4a37292
--- /dev/null
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2017 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kvm_host.h>
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include "vgic.h"
+
+/*
+ * How KVM uses GICv4 (insert rude comments here):
+ *
+ * The vgic-v4 layer acts as a bridge between several entities:
+ * - The GICv4 ITS representation offered by the ITS driver
+ * - VFIO, which is in charge of the PCI endpoint
+ * - The virtual ITS, which is the only thing the guest sees
+ *
+ * The configuration of VLPIs is triggered by a callback from VFIO,
+ * instructing KVM that a PCI device has been configured to deliver
+ * MSIs to a vITS.
+ *
+ * kvm_vgic_v4_set_forwarding() is thus called with the routing entry,
+ * and this is used to find the corresponding vITS data structures
+ * (ITS instance, device, event and irq) using a process that is
+ * extremely similar to the injection of an MSI.
+ *
+ * At this stage, we can link the guest's view of an LPI (uniquely
+ * identified by the routing entry) and the host irq, using the GICv4
+ * driver mapping operation. Should the mapping succeed, we've then
+ * successfully upgraded the guest's LPI to a VLPI. We can then start
+ * with updating GICv4's view of the property table and generating an
+ * INValidation in order to kickstart the delivery of this VLPI to the
+ * guest directly, without software intervention. Well, almost.
+ *
+ * When the PCI endpoint is deconfigured, this operation is reversed
+ * with VFIO calling kvm_vgic_v4_unset_forwarding().
+ *
+ * Once the VLPI has been mapped, it needs to follow any change the
+ * guest performs on its LPI through the vITS. For that, a number of
+ * command handlers have hooks to communicate these changes to the HW:
+ * - Any invalidation triggers a call to its_prop_update_vlpi()
+ * - The INT command results in an irq_set_irqchip_state(), which
+ *   generates an INT on the corresponding VLPI.
+ * - The CLEAR command results in an irq_set_irqchip_state(), which
+ *   generates a CLEAR on the corresponding VLPI.
+ * - DISCARD translates into an unmap, similar to a call to
+ *   kvm_vgic_v4_unset_forwarding().
+ * - MOVI is translated by an update of the existing mapping, changing
+ *   the target vcpu, resulting in a VMOVI being generated.
+ * - MOVALL is translated by a string of mapping updates (similar to
+ *   the handling of MOVI). MOVALL is horrible.
+ *
+ * Note that a DISCARD/MAPTI sequence emitted from the guest without
+ * reprogramming the PCI endpoint after MAPTI does not result in a
+ * VLPI being mapped, as there is no callback from VFIO (the guest
+ * will get the interrupt via the normal SW injection). Fixing this is
+ * not trivial, and requires some horrible messing with the VFIO
+ * internals. Not fun. Don't do that.
+ *
+ * Then there is the scheduling. Each time a vcpu is about to run on a
+ * physical CPU, KVM must tell the corresponding redistributor about
+ * it. And if we've migrated our vcpu from one CPU to another, we must
+ * tell the ITS (so that the messages reach the right redistributor).
+ * This is done in two steps: first issue an irq_set_affinity() on the
+ * irq corresponding to the vcpu, then call its_schedule_vpe(). You
+ * must be in a non-preemptible context. On exit, another call to
+ * its_schedule_vpe() tells the redistributor that we're done with the
+ * vcpu.
+ *
+ * Finally, the doorbell handling: Each vcpu is allocated an interrupt
+ * which will fire each time a VLPI is made pending whilst the vcpu is
+ * not running. Each time the vcpu gets blocked, the doorbell
+ * interrupt gets enabled. When the vcpu is unblocked (for whatever
+ * reason), the doorbell interrupt is disabled.
+ */
+
+#define DB_IRQ_FLAGS   (IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)
+
+static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
+{
+       struct kvm_vcpu *vcpu = info;
+
+       vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+       kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
+       kvm_vcpu_kick(vcpu);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * vgic_v4_init - Initialize the GICv4 data structures
+ * @kvm:       Pointer to the VM being initialized
+ *
+ * We may be called each time a vITS is created, or when the
+ * vgic is initialized. This relies on kvm->lock to be
+ * held. In both cases, the number of vcpus should now be
+ * fixed.
+ */
+int vgic_v4_init(struct kvm *kvm)
+{
+       struct vgic_dist *dist = &kvm->arch.vgic;
+       struct kvm_vcpu *vcpu;
+       int i, nr_vcpus, ret;
+
+       if (!vgic_supports_direct_msis(kvm))
+               return 0; /* Nothing to see here... move along. */
+
+       if (dist->its_vm.vpes)
+               return 0;
+
+       nr_vcpus = atomic_read(&kvm->online_vcpus);
+
+       dist->its_vm.vpes = kzalloc(sizeof(*dist->its_vm.vpes) * nr_vcpus,
+                                   GFP_KERNEL);
+       if (!dist->its_vm.vpes)
+               return -ENOMEM;
+
+       dist->its_vm.nr_vpes = nr_vcpus;
+
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+
+       ret = its_alloc_vcpu_irqs(&dist->its_vm);
+       if (ret < 0) {
+               kvm_err("VPE IRQ allocation failure\n");
+               kfree(dist->its_vm.vpes);
+               dist->its_vm.nr_vpes = 0;
+               dist->its_vm.vpes = NULL;
+               return ret;
+       }
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               int irq = dist->its_vm.vpes[i]->irq;
+
+               /*
+                * Don't automatically enable the doorbell, as we're
+                * flipping it back and forth when the vcpu gets
+                * blocked. Also disable the lazy disabling, as the
+                * doorbell could kick us out of the guest too
+                * early...
+                */
+               irq_set_status_flags(irq, DB_IRQ_FLAGS);
+               ret = request_irq(irq, vgic_v4_doorbell_handler,
+                                 0, "vcpu", vcpu);
+               if (ret) {
+                       kvm_err("failed to allocate vcpu IRQ%d\n", irq);
+                       /*
+                        * Trick: adjust the number of vpes so we know
+                        * how many to nuke on teardown...
+                        */
+                       dist->its_vm.nr_vpes = i;
+                       break;
+               }
+       }
+
+       if (ret)
+               vgic_v4_teardown(kvm);
+
+       return ret;
+}
+
+/**
+ * vgic_v4_teardown - Free the GICv4 data structures
+ * @kvm:       Pointer to the VM being destroyed
+ *
+ * Relies on kvm->lock to be held.
+ */
+void vgic_v4_teardown(struct kvm *kvm)
+{
+       struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
+       int i;
+
+       if (!its_vm->vpes)
+               return;
+
+       for (i = 0; i < its_vm->nr_vpes; i++) {
+               struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
+               int irq = its_vm->vpes[i]->irq;
+
+               irq_clear_status_flags(irq, DB_IRQ_FLAGS);
+               free_irq(irq, vcpu);
+       }
+
+       its_free_vcpu_irqs(its_vm);
+       kfree(its_vm->vpes);
+       its_vm->nr_vpes = 0;
+       its_vm->vpes = NULL;
+}
+
+int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+       if (!vgic_supports_direct_msis(vcpu->kvm))
+               return 0;
+
+       return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
+}
+
+int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+       int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+       int err;
+
+       if (!vgic_supports_direct_msis(vcpu->kvm))
+               return 0;
+
+       /*
+        * Before making the VPE resident, make sure the redistributor
+        * corresponding to our current CPU expects us here. See the
+        * doc in drivers/irqchip/irq-gic-v4.c to understand how this
+        * turns into a VMOVP command at the ITS level.
+        */
+       err = irq_set_affinity(irq, cpumask_of(smp_processor_id()));
+       if (err)
+               return err;
+
+       err = its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, true);
+       if (err)
+               return err;
+
+       /*
+        * Now that the VPE is resident, let's get rid of a potential
+        * doorbell interrupt that would still be pending.
+        */
+       err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
+
+       return err;
+}
+
+static struct vgic_its *vgic_get_its(struct kvm *kvm,
+                                    struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+       struct kvm_msi msi  = (struct kvm_msi) {
+               .address_lo     = irq_entry->msi.address_lo,
+               .address_hi     = irq_entry->msi.address_hi,
+               .data           = irq_entry->msi.data,
+               .flags          = irq_entry->msi.flags,
+               .devid          = irq_entry->msi.devid,
+       };
+
+       return vgic_msi_to_its(kvm, &msi);
+}
+
+int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
+                              struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+       struct vgic_its *its;
+       struct vgic_irq *irq;
+       struct its_vlpi_map map;
+       int ret;
+
+       if (!vgic_supports_direct_msis(kvm))
+               return 0;
+
+       /*
+        * Get the ITS, and escape early on error (not a valid
+        * doorbell for any of our vITSs).
+        */
+       its = vgic_get_its(kvm, irq_entry);
+       if (IS_ERR(its))
+               return 0;
+
+       mutex_lock(&its->its_lock);
+
+       /* Perform the actual DevID/EventID -> LPI translation. */
+       ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
+                                  irq_entry->msi.data, &irq);
+       if (ret)
+               goto out;
+
+       /*
+        * Emit the mapping request. If it fails, the ITS probably
+        * isn't v4 compatible, so let's silently bail out. Holding
+        * the ITS lock should ensure that nothing can modify the
+        * target vcpu.
+        */
+       map = (struct its_vlpi_map) {
+               .vm             = &kvm->arch.vgic.its_vm,
+               .vpe            = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
+               .vintid         = irq->intid,
+               .properties     = ((irq->priority & 0xfc) |
+                                  (irq->enabled ? LPI_PROP_ENABLED : 0) |
+                                  LPI_PROP_GROUP1),
+               .db_enabled     = true,
+       };
+
+       ret = its_map_vlpi(virq, &map);
+       if (ret)
+               goto out;
+
+       irq->hw         = true;
+       irq->host_irq   = virq;
+
+out:
+       mutex_unlock(&its->its_lock);
+       return ret;
+}
+
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
+                                struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+       struct vgic_its *its;
+       struct vgic_irq *irq;
+       int ret;
+
+       if (!vgic_supports_direct_msis(kvm))
+               return 0;
+
+       /*
+        * Get the ITS, and escape early on error (not a valid
+        * doorbell for any of our vITSs).
+        */
+       its = vgic_get_its(kvm, irq_entry);
+       if (IS_ERR(its))
+               return 0;
+
+       mutex_lock(&its->its_lock);
+
+       ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
+                                  irq_entry->msi.data, &irq);
+       if (ret)
+               goto out;
+
+       WARN_ON(!(irq->hw && irq->host_irq == virq));
+       if (irq->hw) {
+               irq->hw = false;
+               ret = its_unmap_vlpi(virq);
+       }
+
+out:
+       mutex_unlock(&its->its_lock);
+       return ret;
+}
+
+void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu)
+{
+       if (vgic_supports_direct_msis(vcpu->kvm)) {
+               int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+               if (irq)
+                       enable_irq(irq);
+       }
+}
+
+void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu)
+{
+       if (vgic_supports_direct_msis(vcpu->kvm)) {
+               int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+               if (irq)
+                       disable_irq(irq);
+       }
+}
index e54ef2fdf73dd391246c16474a3b3e652dc57300..ecb8e25f5fe56d69065757a80c44c2d4a532bbcb 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/list_sort.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 
 #include "vgic.h"
 
@@ -409,25 +411,56 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
        return 0;
 }
 
-int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
+/* @irq->irq_lock must be held */
+static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+                           unsigned int host_irq)
 {
-       struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+       struct irq_desc *desc;
+       struct irq_data *data;
+
+       /*
+        * Find the physical IRQ number corresponding to @host_irq
+        */
+       desc = irq_to_desc(host_irq);
+       if (!desc) {
+               kvm_err("%s: no interrupt descriptor\n", __func__);
+               return -EINVAL;
+       }
+       data = irq_desc_get_irq_data(desc);
+       while (data->parent_data)
+               data = data->parent_data;
+
+       irq->hw = true;
+       irq->host_irq = host_irq;
+       irq->hwintid = data->hwirq;
+       return 0;
+}
+
+/* @irq->irq_lock must be held */
+static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
+{
+       irq->hw = false;
+       irq->hwintid = 0;
+}
+
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
+                         u32 vintid)
+{
+       struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        unsigned long flags;
+       int ret;
 
        BUG_ON(!irq);
 
        spin_lock_irqsave(&irq->irq_lock, flags);
-
-       irq->hw = true;
-       irq->hwintid = phys_irq;
-
+       ret = kvm_vgic_map_irq(vcpu, irq, host_irq);
        spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
 
-       return 0;
+       return ret;
 }
 
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
        struct vgic_irq *irq;
        unsigned long flags;
@@ -435,14 +468,11 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;
 
-       irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+       irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        BUG_ON(!irq);
 
        spin_lock_irqsave(&irq->irq_lock, flags);
-
-       irq->hw = false;
-       irq->hwintid = 0;
-
+       kvm_vgic_unmap_irq(irq);
        spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
 
@@ -462,6 +492,7 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
 int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 {
        struct vgic_irq *irq;
+       unsigned long flags;
        int ret = 0;
 
        if (!vgic_initialized(vcpu->kvm))
@@ -472,12 +503,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
                return -EINVAL;
 
        irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
-       spin_lock(&irq->irq_lock);
+       spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->owner && irq->owner != owner)
                ret = -EEXIST;
        else
                irq->owner = owner;
-       spin_unlock(&irq->irq_lock);
+       spin_unlock_irqrestore(&irq->irq_lock, flags);
 
        return ret;
 }
@@ -688,6 +719,8 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
+       WARN_ON(vgic_v4_sync_hwstate(vcpu));
+
        /* An empty ap_list_head implies used_lrs == 0 */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
                return;
@@ -700,6 +733,8 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 /* Flush our emulation state into the GIC hardware before entering the guest. */
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 {
+       WARN_ON(vgic_v4_flush_hwstate(vcpu));
+
        /*
         * If there are no virtual interrupts active or pending for this
         * VCPU, then there is no work to do and we can bail out without
@@ -751,6 +786,9 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
        if (!vcpu->kvm->arch.vgic.enabled)
                return false;
 
+       if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
+               return true;
+
        spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 
        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
@@ -784,15 +822,16 @@ void vgic_kick_vcpus(struct kvm *kvm)
        }
 }
 
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
-       struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+       struct vgic_irq *irq;
        bool map_is_active;
        unsigned long flags;
 
        if (!vgic_initialized(vcpu->kvm))
                return false;
 
+       irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        spin_lock_irqsave(&irq->irq_lock, flags);
        map_is_active = irq->hw && irq->active;
        spin_unlock_irqrestore(&irq->irq_lock, flags);
index 4f8aecb07ae6fbf109a7a049939426c980104a4f..efbcf8f96f9c1a1bec87ce874103027c10de47ac 100644 (file)
@@ -237,4 +237,14 @@ static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
        }
 }
 
+int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
+                        u32 devid, u32 eventid, struct vgic_irq **irq);
+struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
+
+bool vgic_supports_direct_msis(struct kvm *kvm);
+int vgic_v4_init(struct kvm *kvm);
+void vgic_v4_teardown(struct kvm *kvm);
+int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu);
+int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu);
+
 #endif
index f169ecc4f2e87f44ece32540b8428529aa01ae84..210bf820385a70967c729259bf762b0d9be98b73 100644 (file)
@@ -135,6 +135,11 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+               unsigned long start, unsigned long end)
+{
+}
+
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 {
        if (pfn_valid(pfn))
@@ -360,6 +365,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                kvm_flush_remote_tlbs(kvm);
 
        spin_unlock(&kvm->mmu_lock);
+
+       kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+
        srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -2065,6 +2073,29 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
 
+void kvm_sigset_activate(struct kvm_vcpu *vcpu)
+{
+       if (!vcpu->sigset_active)
+               return;
+
+       /*
+        * This does a lockless modification of ->real_blocked, which is fine
+        * because only current can change ->real_blocked and all readers of
+        * ->real_blocked don't care as long as ->real_blocked is always a subset
+        * of ->blocked.
+        */
+       sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
+}
+
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
+{
+       if (!vcpu->sigset_active)
+               return;
+
+       sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
+       sigemptyset(&current->real_blocked);
+}
+
 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
        unsigned int old, val, grow;