Merge remote-tracking branches 'asoc/fix/topology', 'asoc/fix/adau17x1', 'asoc/fix...
authorMark Brown <broonie@kernel.org>
Wed, 1 Nov 2017 13:36:16 +0000 (13:36 +0000)
committerMark Brown <broonie@kernel.org>
Wed, 1 Nov 2017 13:36:16 +0000 (13:36 +0000)
1535 files changed:
.mailmap
Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
Documentation/ABI/testing/sysfs-devices-power
Documentation/ABI/testing/sysfs-kernel-mm-swap
Documentation/ABI/testing/sysfs-power
Documentation/core-api/kernel-api.rst
Documentation/core-api/workqueue.rst
Documentation/cpu-freq/index.txt
Documentation/device-mapper/dm-raid.txt
Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt
Documentation/devicetree/bindings/iio/proximity/as3935.txt
Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
Documentation/devicetree/bindings/leds/ams,as3645a.txt
Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
Documentation/devicetree/bindings/net/marvell-pp2.txt
Documentation/devicetree/bindings/net/rockchip-dwmac.txt
Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt [new file with mode: 0644]
Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt
Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/driver-api/pm/devices.rst
Documentation/driver-model/driver.txt
Documentation/filesystems/cifs/AUTHORS
Documentation/filesystems/cifs/README
Documentation/filesystems/cifs/TODO
Documentation/filesystems/cifs/cifs.txt
Documentation/filesystems/overlayfs.txt
Documentation/filesystems/sysfs.txt
Documentation/i2c/busses/i2c-i801
Documentation/kbuild/makefiles.txt
Documentation/networking/bonding.txt
Documentation/networking/filter.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/switchdev.txt
Documentation/process/index.rst
Documentation/process/kernel-enforcement-statement.rst [new file with mode: 0644]
Documentation/sysctl/kernel.txt
Documentation/userspace-api/seccomp_filter.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/mmu_context.h
arch/alpha/kernel/sys_alcor.c
arch/alpha/kernel/sys_cabriolet.c
arch/alpha/kernel/sys_dp264.c
arch/alpha/kernel/sys_eb64p.c
arch/alpha/kernel/sys_eiger.c
arch/alpha/kernel/sys_miata.c
arch/alpha/kernel/sys_mikasa.c
arch/alpha/kernel/sys_nautilus.c
arch/alpha/kernel/sys_noritake.c
arch/alpha/kernel/sys_rawhide.c
arch/alpha/kernel/sys_ruffian.c
arch/alpha/kernel/sys_rx164.c
arch/alpha/kernel/sys_sable.c
arch/alpha/kernel/sys_sio.c
arch/alpha/kernel/sys_sx164.c
arch/alpha/kernel/sys_takara.c
arch/alpha/kernel/sys_wildfire.c
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/boot/dts/hsdk.dts
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/configs/haps_hs_smp_defconfig
arch/arc/configs/hsdk_defconfig
arch/arc/configs/vdk_hs38_defconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/processor.h
arch/arc/kernel/setup.c
arch/arc/kernel/smp.c
arch/arc/plat-axs10x/axs10x.c
arch/arc/plat-hsdk/Kconfig
arch/arc/plat-hsdk/platform.c
arch/arm/Makefile
arch/arm/boot/compressed/debug.S
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/at91-sama5d27_som1.dtsi
arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
arch/arm/boot/dts/at91-sama5d2_xplained.dts
arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
arch/arm/boot/dts/bcm2837-rpi-3-b.dts
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/da850-evm.dts
arch/arm/boot/dts/dra7xx-clocks.dtsi
arch/arm/boot/dts/gemini.dtsi
arch/arm/boot/dts/imx7d.dtsi
arch/arm/boot/dts/moxart.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap3-n950-n9.dtsi
arch/arm/boot/dts/sama5d2.dtsi
arch/arm/boot/dts/stm32429i-eval.dts
arch/arm/boot/dts/stm32f4-pinctrl.dtsi [new file with mode: 0644]
arch/arm/boot/dts/stm32f429-disco.dts
arch/arm/boot/dts/stm32f429-pinctrl.dtsi [new file with mode: 0644]
arch/arm/boot/dts/stm32f429.dtsi
arch/arm/boot/dts/stm32f469-disco.dts
arch/arm/boot/dts/stm32f469-pinctrl.dtsi [new file with mode: 0644]
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/configs/gemini_defconfig
arch/arm/configs/pxa_defconfig
arch/arm/configs/viper_defconfig
arch/arm/configs/zeus_defconfig
arch/arm/include/asm/thread_info.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/debug.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/signal.c
arch/arm/mach-at91/pm.c
arch/arm/mach-omap2/hsmmc.c
arch/arm/mach-omap2/omap_hwmod_7xx_data.c
arch/arm/mach-ux500/cpu-db8500.c
arch/arm/mach-ux500/pm.c
arch/arm/mm/nommu.c
arch/arm/xen/p2m.c
arch/arm64/Makefile
arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
arch/arm64/boot/dts/marvell/armada-ap806.dtsi
arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
arch/arm64/boot/dts/renesas/salvator-common.dtsi
arch/arm64/boot/dts/rockchip/rk3328.dtsi
arch/arm64/boot/dts/rockchip/rk3368.dtsi
arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/include/asm/linkage.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/pgtable.h
arch/arm64/kernel/armv8_deprecated.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/head.S
arch/arm64/kernel/signal.c
arch/arm64/mm/fault.c
arch/c6x/include/asm/processor.h
arch/frv/include/asm/processor.h
arch/m32r/Kconfig
arch/m32r/include/asm/processor.h
arch/m32r/kernel/traps.c
arch/metag/include/asm/processor.h
arch/microblaze/Kconfig
arch/microblaze/include/uapi/asm/Kbuild
arch/microblaze/kernel/dma.c
arch/mips/ath79/pci.c
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/mipsregs.h
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/loongson32/common/platform.c
arch/mips/math-emu/cp1emu.c
arch/mips/net/ebpf_jit.c
arch/mips/pci/fixup-capcella.c
arch/mips/pci/fixup-cobalt.c
arch/mips/pci/fixup-emma2rh.c
arch/mips/pci/fixup-fuloong2e.c
arch/mips/pci/fixup-ip32.c
arch/mips/pci/fixup-jmr3927.c
arch/mips/pci/fixup-lantiq.c
arch/mips/pci/fixup-lemote2f.c
arch/mips/pci/fixup-loongson3.c
arch/mips/pci/fixup-malta.c
arch/mips/pci/fixup-mpc30x.c
arch/mips/pci/fixup-pmcmsp.c
arch/mips/pci/fixup-rbtx4927.c
arch/mips/pci/fixup-rbtx4938.c
arch/mips/pci/fixup-sni.c
arch/mips/pci/fixup-tb0219.c
arch/mips/pci/fixup-tb0226.c
arch/mips/pci/fixup-tb0287.c
arch/mips/pci/pci-alchemy.c
arch/mips/pci/pci-bcm47xx.c
arch/mips/pci/pci-lasat.c
arch/mips/pci/pci-mt7620.c
arch/mips/pci/pci-octeon.c
arch/mips/pci/pci-rt2880.c
arch/mips/pci/pci-rt3883.c
arch/mips/pci/pci-tx4938.c
arch/mips/pci/pci-tx4939.c
arch/mips/pci/pci-xlp.c
arch/mips/pci/pci-xlr.c
arch/mips/pci/pcie-octeon.c
arch/mips/pmcs-msp71xx/msp_smp.c
arch/mips/tools/generic-board-config.sh
arch/mips/txx9/generic/pci.c
arch/mn10300/kernel/process.c
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/boot/compressed/Makefile
arch/parisc/boot/compressed/misc.c
arch/parisc/include/asm/pdc.h
arch/parisc/include/asm/smp.h
arch/parisc/kernel/firmware.c
arch/parisc/kernel/parisc_ksyms.c
arch/parisc/kernel/pdt.c
arch/parisc/kernel/process.c
arch/parisc/kernel/processor.c
arch/parisc/kernel/setup.c
arch/parisc/kernel/smp.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/time.c
arch/parisc/kernel/traps.c
arch/parisc/kernel/unwind.c
arch/parisc/mm/fault.c
arch/powerpc/configs/g5_defconfig
arch/powerpc/configs/gamecube_defconfig
arch/powerpc/configs/pasemi_defconfig
arch/powerpc/configs/pmac32_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/ppc64e_defconfig
arch/powerpc/configs/ppc6xx_defconfig
arch/powerpc/configs/wii_defconfig
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/eeh_dev.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/mce_power.c
arch/powerpc/kernel/optprobes.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/trace/ftrace_64_mprofile.S
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/watchdog.c
arch/powerpc/kvm/book3s_64_vio.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_xive.c
arch/powerpc/kvm/book3s_xive.h
arch/powerpc/kvm/powerpc.c
arch/powerpc/lib/sstep.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/pgtable_32.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/imc-pmu.c
arch/powerpc/platforms/powernv/idle.c
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/hotplug-cpu.c
arch/powerpc/platforms/pseries/mobility.c
arch/powerpc/sysdev/fsl_rio.c
arch/powerpc/sysdev/fsl_rmu.c
arch/powerpc/sysdev/xive/common.c
arch/powerpc/sysdev/xive/spapr.c
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/pgtable.h
arch/s390/kernel/early.c
arch/s390/kernel/entry.S
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/smp.c
arch/s390/kernel/topology.c
arch/s390/mm/gup.c
arch/sh/include/asm/processor_32.h
arch/sh/include/asm/processor_64.h
arch/sh/include/cpu-sh2a/cpu/sh7264.h
arch/sh/include/cpu-sh2a/cpu/sh7269.h
arch/sh/include/cpu-sh4/cpu/sh7722.h
arch/sh/include/cpu-sh4/cpu/sh7757.h
arch/sparc/Kconfig
arch/tile/configs/tilegx_defconfig
arch/tile/configs/tilepro_defconfig
arch/tile/kernel/setup.c
arch/um/include/asm/processor-generic.h
arch/um/kernel/time.c
arch/x86/crypto/blowfish-x86_64-asm_64.S
arch/x86/crypto/camellia-x86_64-asm_64.S
arch/x86/crypto/cast5-avx-x86_64-asm_64.S
arch/x86/crypto/cast6-avx-x86_64-asm_64.S
arch/x86/crypto/des3_ede-asm_64.S
arch/x86/crypto/sha1_avx2_x86_64_asm.S
arch/x86/crypto/sha1_ssse3_asm.S
arch/x86/crypto/sha256-avx-asm.S
arch/x86/crypto/sha256-avx2-asm.S
arch/x86/crypto/sha256-ssse3-asm.S
arch/x86/crypto/sha512-avx2-asm.S
arch/x86/crypto/twofish-avx-x86_64-asm_64.S
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/events/intel/bts.c
arch/x86/events/intel/core.c
arch/x86/events/intel/cstate.c
arch/x86/events/intel/rapl.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/events/msr.c
arch/x86/hyperv/hv_init.c
arch/x86/hyperv/mmu.c
arch/x86/ia32/ia32_signal.c
arch/x86/include/asm/alternative-asm.h
arch/x86/include/asm/alternative.h
arch/x86/include/asm/asm.h
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/fpu/types.h
arch/x86/include/asm/fpu/xstate.h
arch/x86/include/asm/kvm_para.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/preempt.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/rwsem.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/trace/fpu.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/xen/hypercall.h
arch/x86/kernel/amd_nb.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce-internal.h
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/init.c
arch/x86/kernel/fpu/regset.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/head32.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/kprobes/common.h
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/ksysfs.c
arch/x86/kernel/kvm.c
arch/x86/kernel/reboot.c
arch/x86/kernel/signal.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/traps.c
arch/x86/kernel/unwind_frame.c
arch/x86/kernel/unwind_orc.c
arch/x86/kvm/Kconfig
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/math-emu/fpu_entry.c
arch/x86/mm/Makefile
arch/x86/mm/extable.c
arch/x86/mm/fault.c
arch/x86/mm/mem_encrypt.c
arch/x86/mm/pkeys.c
arch/x86/mm/tlb.c
arch/x86/net/bpf_jit_comp.c
arch/x86/xen/enlighten.c
arch/x86/xen/mmu_pv.c
arch/xtensa/include/asm/processor.h
block/bio.c
block/blk-core.c
block/blk-mq-debugfs.c
block/blk-throttle.c
block/bsg-lib.c
block/partition-generic.c
crypto/af_alg.c
crypto/asymmetric_keys/asymmetric_type.c
crypto/asymmetric_keys/pkcs7_parser.c
crypto/drbg.c
crypto/shash.c
crypto/skcipher.c
crypto/xts.c
drivers/acpi/acpi_watchdog.c
drivers/acpi/apei/ghes.c
drivers/acpi/arm64/iort.c
drivers/acpi/property.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/android/binder_alloc.h
drivers/ata/ahci.c
drivers/ata/ata_piix.c
drivers/ata/libata-core.c
drivers/auxdisplay/charlcd.c
drivers/auxdisplay/panel.c
drivers/base/arch_topology.c
drivers/base/cpu.c
drivers/base/dma-coherent.c
drivers/base/node.c
drivers/base/platform.c
drivers/base/power/domain_governor.c
drivers/base/power/main.c
drivers/base/power/opp/core.c
drivers/base/power/qos.c
drivers/base/power/runtime.c
drivers/base/power/sysfs.c
drivers/base/property.c
drivers/block/Kconfig
drivers/block/brd.c
drivers/block/loop.h
drivers/block/nbd.c
drivers/block/skd_main.c
drivers/block/zram/zram_drv.c
drivers/bus/mvebu-mbus.c
drivers/char/tpm/tpm-interface.c
drivers/char/tpm/tpm.h
drivers/char/tpm/tpm2-cmd.c
drivers/char/tpm/tpm_crb.c
drivers/char/tpm/tpm_ibmvtpm.c
drivers/char/tpm/tpm_infineon.c
drivers/char/tpm/tpm_tis_core.c
drivers/clk/clk-bulk.c
drivers/clk/rockchip/clk-rk3128.c
drivers/clk/samsung/clk-exynos4.c
drivers/clocksource/cs5535-clockevt.c
drivers/clocksource/numachip.c
drivers/clocksource/timer-integrator-ap.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/ti-cpufreq.c
drivers/cpuidle/cpuidle-arm.c
drivers/cpuidle/governors/menu.c
drivers/crypto/axis/artpec6_crypto.c
drivers/crypto/caam/Kconfig
drivers/crypto/caam/ctrl.c
drivers/crypto/caam/regs.h
drivers/crypto/inside-secure/safexcel_cipher.c
drivers/crypto/inside-secure/safexcel_hash.c
drivers/crypto/stm32/stm32-hash.c
drivers/crypto/talitos.c
drivers/dma-buf/sync_file.c
drivers/dma/altera-msgdma.c
drivers/dma/edma.c
drivers/dma/ti-dma-crossbar.c
drivers/firmware/efi/libstub/arm-stub.c
drivers/firmware/efi/test/efi_test.c
drivers/fpga/altera-cvp.c
drivers/gpio/Kconfig
drivers/gpio/gpio-omap.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_events.c
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.h
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/reg.h
drivers/gpu/drm/i915/gvt/sched_policy.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dpio_phy.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_modes.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_rd.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/sun4i/Kconfig
drivers/gpu/drm/sun4i/sun4i_hdmi.h
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/gpu/drm/tegra/trace.h
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-pre.c
drivers/gpu/ipu-v3/ipu-prg.c
drivers/hid/Kconfig
drivers/hid/hid-core.c
drivers/hid/hid-elecom.c
drivers/hid/hid-ids.h
drivers/hid/hid-multitouch.c
drivers/hid/hid-rmi.c
drivers/hid/hidraw.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hv/channel.c
drivers/hv/channel_mgmt.c
drivers/hv/hv_fcopy.c
drivers/hv/vmbus_drv.c
drivers/hwmon/da9052-hwmon.c
drivers/hwmon/tmp102.c
drivers/hwmon/xgene-hwmon.c
drivers/hwtracing/intel_th/pci.c
drivers/hwtracing/stm/core.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-img-scb.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-piix4.c
drivers/i2c/busses/i2c-sprd.c
drivers/i2c/busses/i2c-stm32f7.c
drivers/ide/ide-probe.c
drivers/ide/ide-scan-pci.c
drivers/ide/setup-pci.c
drivers/iio/adc/Kconfig
drivers/iio/adc/ad7793.c
drivers/iio/adc/ad_sigma_delta.c
drivers/iio/adc/at91-sama5d2_adc.c
drivers/iio/adc/mcp320x.c
drivers/iio/adc/stm32-adc.c
drivers/iio/adc/ti-ads1015.c
drivers/iio/adc/twl4030-madc.c
drivers/iio/common/st_sensors/st_sensors_core.c
drivers/iio/dummy/iio_simple_dummy_events.c
drivers/iio/industrialio-core.c
drivers/iio/magnetometer/st_magn_core.c
drivers/iio/pressure/bmp280-core.c
drivers/iio/pressure/zpa2326.c
drivers/iio/proximity/as3935.c
drivers/iio/trigger/stm32-timer-trigger.c
drivers/infiniband/core/iwpm_msg.c
drivers/infiniband/core/iwpm_util.c
drivers/infiniband/core/netlink.c
drivers/infiniband/core/security.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/bnxt_re/bnxt_re.h
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/chip.h
drivers/infiniband/hw/hfi1/eprom.c
drivers/infiniband/hw/hfi1/file_ops.c
drivers/infiniband/hw/hfi1/pcie.c
drivers/infiniband/hw/hfi1/platform.c
drivers/infiniband/hw/i40iw/i40iw.h
drivers/infiniband/hw/i40iw/i40iw_cm.c
drivers/infiniband/hw/i40iw/i40iw_cm.h
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/i40iw/i40iw_p.h
drivers/infiniband/hw/i40iw/i40iw_puda.c
drivers/infiniband/hw/i40iw/i40iw_utils.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mem.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/qedr/qedr.h
drivers/infiniband/hw/qedr/qedr_cm.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
drivers/infiniband/ulp/iser/iser_memory.c
drivers/input/ff-core.c
drivers/input/input.c
drivers/input/joydev.c
drivers/input/keyboard/tca8418_keypad.c
drivers/input/misc/axp20x-pek.c
drivers/input/misc/ims-pcu.c
drivers/input/misc/uinput.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/elan_i2c_i2c.c
drivers/input/mouse/synaptics.c
drivers/input/rmi4/rmi_f30.c
drivers/input/tablet/gtco.c
drivers/input/touchscreen/goodix.c
drivers/input/touchscreen/stmfts.c
drivers/input/touchscreen/ti_am335x_tsc.c
drivers/iommu/Kconfig
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/dmar.c
drivers/iommu/exynos-iommu.c
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/mtk_iommu.c
drivers/iommu/of_iommu.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic-v4.c
drivers/irqchip/irq-mips-gic.c
drivers/irqchip/irq-tango.c
drivers/isdn/i4l/isdn_ppp.c
drivers/leds/leds-as3645a.c
drivers/md/bcache/closure.c
drivers/md/dm-core.h
drivers/md/dm-crypt.c
drivers/md/dm-ioctl.c
drivers/md/dm-raid.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/md.h
drivers/md/raid5.c
drivers/media/cec/cec-adap.c
drivers/media/dvb-core/dvb_frontend.c
drivers/media/dvb-frontends/dib3000mc.c
drivers/media/dvb-frontends/dvb-pll.c
drivers/media/platform/Kconfig
drivers/media/platform/qcom/camss-8x16/camss-vfe.c
drivers/media/platform/qcom/venus/helpers.c
drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
drivers/media/platform/s5p-cec/s5p_cec.c
drivers/media/platform/s5p-cec/s5p_cec.h
drivers/media/rc/ir-sharp-decoder.c
drivers/media/tuners/mt2060.c
drivers/misc/cxl/cxllib.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/pci-me.c
drivers/misc/mei/pci-txe.c
drivers/mmc/core/block.c
drivers/mmc/core/mmc.c
drivers/mmc/core/queue.c
drivers/mmc/core/queue.h
drivers/mmc/host/Kconfig
drivers/mmc/host/cavium-thunderx.c
drivers/mmc/host/cavium.c
drivers/mmc/host/meson-gx-mmc.c
drivers/mmc/host/pxamci.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mmc/host/sdhci-xenon.c
drivers/mmc/host/sdhci-xenon.h
drivers/mmc/host/tmio_mmc_core.c
drivers/mtd/mtdpart.c
drivers/mtd/nand/atmel/pmecc.c
drivers/mtd/nand/lpc32xx_mlc.c
drivers/mtd/nand/nand_base.c
drivers/mtd/spi-nor/spi-nor.c
drivers/net/can/flexcan.c
drivers/net/can/sun4i_can.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/dsa/mv88e6060.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/ethernet/amazon/ena/ena_ethtool.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.h
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns3/hnae3.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
drivers/net/ethernet/ibm/emac/mal.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
drivers/net/ethernet/qualcomm/emac/emac-mac.c
drivers/net/ethernet/qualcomm/emac/emac.c
drivers/net/ethernet/qualcomm/emac/emac.h
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
drivers/net/ethernet/realtek/8139too.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/rocker/rocker_tlv.h
drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/geneve.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ipvlan/ipvtap.c
drivers/net/macsec.c
drivers/net/macvtap.c
drivers/net/phy/Kconfig
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/xilinx_gmii2rgmii.c
drivers/net/ppp/ppp_generic.c
drivers/net/tap.c
drivers/net/tun.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/lan78xx.c
drivers/net/usb/r8152.c
drivers/net/usb/rndis_host.c
drivers/net/wimax/i2400m/fw.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
drivers/net/wireless/intel/iwlwifi/cfg/7000.c
drivers/net/wireless/intel/iwlwifi/cfg/8000.c
drivers/net/wireless/intel/iwlwifi/cfg/9000.c
drivers/net/wireless/intel/iwlwifi/cfg/a000.c
drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.h
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rx.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
drivers/net/wireless/quantenna/qtnfmac/event.c
drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netfront.c
drivers/nvdimm/namespace_devs.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fc.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/core.c
drivers/nvme/target/fabrics-cmd.c
drivers/nvme/target/fc.c
drivers/nvme/target/fcloop.c
drivers/nvme/target/nvmet.h
drivers/nvmem/core.c
drivers/of/base.c
drivers/of/of_mdio.c
drivers/of/of_reserved_mem.c
drivers/of/property.c
drivers/pci/endpoint/functions/pci-epf-test.c
drivers/pci/host/pci-aardvark.c
drivers/pci/host/pci-tegra.c
drivers/pci/pci-sysfs.c
drivers/perf/arm_pmu_acpi.c
drivers/phy/marvell/phy-mvebu-cp110-comphy.c
drivers/phy/mediatek/phy-mtk-tphy.c
drivers/phy/rockchip/phy-rockchip-typec.c
drivers/phy/tegra/xusb.c
drivers/pinctrl/Kconfig
drivers/pinctrl/bcm/pinctrl-bcm2835.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-mcp23s08.c
drivers/platform/x86/fujitsu-laptop.c
drivers/platform/x86/intel_pmc_ipc.c
drivers/rapidio/devices/tsi721.c
drivers/rapidio/rio-access.c
drivers/ras/cec.c
drivers/regulator/axp20x-regulator.c
drivers/regulator/rn5t618-regulator.c
drivers/remoteproc/Kconfig
drivers/remoteproc/imx_rproc.c
drivers/reset/Kconfig
drivers/reset/Makefile
drivers/reset/reset-hsdk.c [moved from drivers/reset/reset-hsdk-v1.c with 72% similarity]
drivers/reset/reset-socfpga.c
drivers/rpmsg/qcom_glink_native.c
drivers/s390/block/dasd.c
drivers/s390/block/scm_blk.c
drivers/s390/cio/device.c
drivers/s390/cio/device.h
drivers/s390/cio/device_fsm.c
drivers/s390/cio/io_sch.h
drivers/s390/scsi/zfcp_aux.c
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_scsi.c
drivers/scsi/aacraid/aachba.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/comminit.c
drivers/scsi/aacraid/linit.c
drivers/scsi/aacraid/src.c
drivers/scsi/arm/acornscsi.c
drivers/scsi/hpsa.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/libiscsi.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/qla2xxx/qla_nvme.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/spi/spi-armada-3700.c
drivers/spi/spi-bcm-qspi.c
drivers/spi/spi-stm32.c
drivers/spi/spi.c
drivers/staging/iio/adc/ad7192.c
drivers/staging/iio/meter/ade7759.c
drivers/staging/media/imx/imx-media-dev.c
drivers/staging/mt29f_spinand/mt29f_spinand.c
drivers/staging/pi433/rf69.c
drivers/staging/rtl8723bs/core/rtw_mlme.c
drivers/staging/rtl8723bs/os_dep/rtw_proc.c
drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
drivers/staging/speakup/main.c
drivers/staging/unisys/visorbus/visorchipset.c
drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
drivers/tty/mxser.c
drivers/tty/serial/bcm63xx_uart.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/sccnxp.c
drivers/tty/tty_ldisc.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/config.c
drivers/usb/core/devio.c
drivers/usb/core/hub.c
drivers/usb/core/message.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/dwc3-of-simple.c
drivers/usb/dwc3/ep0.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/configfs.h
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_mass_storage.c
drivers/usb/gadget/function/f_mass_storage.h
drivers/usb/gadget/function/f_printer.c
drivers/usb/gadget/function/f_rndis.c
drivers/usb/gadget/function/u_fs.h
drivers/usb/gadget/function/u_rndis.h
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/legacy/mass_storage.c
drivers/usb/gadget/udc/Kconfig
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/usbtest.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_cppi41.c
drivers/usb/musb/sunxi.c
drivers/usb/phy/phy-tegra-usb.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/serial/console.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/metro-usb.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/storage/transport.c
drivers/usb/storage/uas-detect.h
drivers/usb/storage/uas.c
drivers/usb/storage/unusual_devs.h
drivers/uwb/hwa-rc.c
drivers/uwb/uwbd.c
drivers/xen/gntdev.c
drivers/xen/xen-balloon.c
drivers/xen/xen-pciback/conf_space_header.c
drivers/xen/xenbus/xenbus_client.c
fs/9p/vfs_addr.c
fs/binfmt_misc.c
fs/binfmt_script.c
fs/block_dev.c
fs/btrfs/compression.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/relocation.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/caps.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/snap.c
fs/cifs/Kconfig
fs/cifs/cifs_debug.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/smb2maperror.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/cifs/smb2transport.c
fs/crypto/keyinfo.c
fs/direct-io.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/keystore.c
fs/exec.c
fs/ext4/super.c
fs/f2fs/f2fs.h
fs/f2fs/segment.c
fs/f2fs/super.c
fs/fcntl.c
fs/fscache/object-list.c
fs/fuse/dir.c
fs/fuse/inode.c
fs/gfs2/glock.c
fs/iomap.c
fs/isofs/inode.c
fs/mpage.c
fs/namespace.c
fs/nfs/client.c
fs/nfs/filelayout/filelayout.c
fs/nfs/nfs4idmap.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
fs/nfsd/nfs4proc.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/ovl_entry.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/proc/array.c
fs/quota/dquot.c
fs/quota/quota_v2.c
fs/read_write.c
fs/userfaultfd.c
fs/xattr.c
fs/xfs/libxfs/xfs_ag_resv.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_ialloc.c
fs/xfs/libxfs/xfs_log_format.h
fs/xfs/xfs_acl.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_attr_inactive.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_bmap_util.h
fs/xfs/xfs_buf.c
fs/xfs/xfs_error.c
fs/xfs/xfs_file.c
fs/xfs/xfs_fsmap.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_iomap.h
fs/xfs/xfs_log.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_ondisk.h
fs/xfs/xfs_pnfs.c
fs/xfs/xfs_reflink.c
fs/xfs/xfs_super.c
include/acpi/acpi_bus.h
include/asm-generic/percpu.h
include/dt-bindings/reset/snps,hsdk-reset.h [new file with mode: 0644]
include/dt-bindings/reset/snps,hsdk-v1-reset.h [deleted file]
include/linux/audit.h
include/linux/binfmts.h
include/linux/bitfield.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/buffer_head.h
include/linux/cpuhotplug.h
include/linux/device.h
include/linux/filter.h
include/linux/fs.h
include/linux/hyperv.h
include/linux/if_tap.h
include/linux/iio/adc/ad_sigma_delta.h
include/linux/input.h
include/linux/iommu.h
include/linux/irq.h
include/linux/irqchip/arm-gic-v3.h
include/linux/kernel.h
include/linux/key.h
include/linux/mbus.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/port.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmc/host.h
include/linux/mmu_notifier.h
include/linux/mmzone.h
include/linux/mod_devicetable.h
include/linux/netdevice.h
include/linux/netfilter_bridge/ebtables.h
include/linux/nmi.h
include/linux/nvme-fc-driver.h
include/linux/nvme.h
include/linux/of.h
include/linux/of_platform.h
include/linux/pci.h
include/linux/pm_qos.h
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/sched.h
include/linux/sched/mm.h
include/linux/sched/topology.h
include/linux/sctp.h
include/linux/seccomp.h
include/linux/smpboot.h
include/linux/srcu.h
include/linux/swait.h
include/linux/syscalls.h
include/linux/thread_info.h
include/linux/timer.h
include/linux/trace_events.h
include/net/dst.h
include/net/fq_impl.h
include/net/inet_sock.h
include/net/netlink.h
include/net/pkt_cls.h
include/net/protocol.h
include/net/route.h
include/net/sch_generic.h
include/net/sctp/sm.h
include/net/sctp/ulpevent.h
include/net/sock.h
include/net/strparser.h
include/net/tcp.h
include/net/udp.h
include/rdma/ib_verbs.h
include/scsi/scsi_device.h
include/scsi/scsi_devinfo.h
include/scsi/scsi_transport_iscsi.h
include/sound/control.h
include/sound/hda_verbs.h
include/sound/seq_virmidi.h
include/trace/events/sched.h
include/uapi/linux/bpf.h
include/uapi/linux/dm-ioctl.h
include/uapi/linux/ethtool.h
include/uapi/linux/kfd_ioctl.h
include/uapi/linux/membarrier.h
include/uapi/linux/netfilter/xt_bpf.h
include/uapi/linux/sctp.h
include/uapi/linux/seccomp.h
include/uapi/linux/spi/spidev.h
include/uapi/linux/usb/ch9.h
include/uapi/rdma/ib_user_verbs.h
include/xen/arm/page.h
init/Kconfig
ipc/shm.c
kernel/bpf/arraymap.c
kernel/bpf/core.c
kernel/bpf/devmap.c
kernel/bpf/hashtab.c
kernel/bpf/inode.c
kernel/bpf/sockmap.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/cpu.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/exit.c
kernel/extable.c
kernel/fork.c
kernel/futex.c
kernel/irq/chip.c
kernel/irq/cpuhotplug.c
kernel/irq/generic-chip.c
kernel/irq/irqdomain.c
kernel/irq/manage.c
kernel/kcmp.c
kernel/livepatch/core.c
kernel/locking/lockdep.c
kernel/locking/rwsem-xadd.c
kernel/memremap.c
kernel/params.c
kernel/power/suspend.c
kernel/rcu/srcutree.c
kernel/rcu/sync.c
kernel/rcu/tree.c
kernel/sched/core.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/features.h
kernel/sched/membarrier.c
kernel/seccomp.c
kernel/smpboot.c
kernel/sysctl.c
kernel/trace/blktrace.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_mmiotrace.c
kernel/trace/trace_output.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_stack.c
kernel/watchdog.c
kernel/watchdog_hld.c
kernel/workqueue.c
lib/Kconfig.debug
lib/assoc_array.c
lib/digsig.c
lib/idr.c
lib/iov_iter.c
lib/kobject_uevent.c
lib/locking-selftest.c
lib/lz4/lz4_decompress.c
lib/ratelimit.c
lib/rhashtable.c
lib/ts_fsm.c
lib/ts_kmp.c
mm/cma.c
mm/compaction.c
mm/filemap.c
mm/ksm.c
mm/list_lru.c
mm/madvise.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/oom_kill.c
mm/page_alloc.c
mm/page_vma_mapped.c
mm/percpu-stats.c
mm/percpu.c
mm/rodata_test.c
mm/slab_common.c
mm/swap.c
mm/swap_state.c
mm/vmalloc.c
mm/z3fold.c
net/8021q/vlan_core.c
net/bluetooth/Kconfig
net/bluetooth/hci_sock.c
net/bridge/br_netlink.c
net/bridge/netfilter/ebtable_broute.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/bridge/netfilter/ebtables.c
net/can/af_can.c
net/can/bcm.c
net/ceph/osdmap.c
net/compat.c
net/core/dev.c
net/core/dev_ioctl.c
net/core/ethtool.c
net/core/filter.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_reuseport.c
net/dccp/ipv4.c
net/dns_resolver/dns_key.c
net/dsa/dsa2.c
net/dsa/slave.c
net/ipv4/Kconfig
net/ipv4/cipso_ipv4.c
net/ipv4/gre_offload.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_hashtables.c
net/ipv4/inetpeer.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_vti.c
net/ipv4/ipip.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/addrconf.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/route.c
net/ipv6/udp.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_ppp.c
net/mac80211/cfg.c
net/mac80211/key.c
net/ncsi/internal.h
net/ncsi/ncsi-aen.c
net/ncsi/ncsi-manage.c
net/ncsi/ncsi-rsp.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipmark.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netnet.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_hash_netportnet.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_tables_api.c
net/netfilter/x_tables.c
net/netfilter/xt_bpf.c
net/netfilter/xt_socket.c
net/netlink/af_netlink.c
net/packet/af_packet.c
net/rds/ib_send.c
net/rxrpc/af_rxrpc.c
net/sched/act_sample.c
net/sched/cls_api.c
net/sched/cls_basic.c
net/sched/cls_bpf.c
net/sched/cls_cgroup.c
net/sched/cls_flow.c
net/sched/cls_flower.c
net/sched/cls_fw.c
net/sched/cls_matchall.c
net/sched/cls_route.c
net/sched/cls_rsvp.h
net/sched/cls_tcindex.c
net/sched/cls_u32.c
net/sched/sch_api.c
net/sched/sch_generic.c
net/sched/sch_hfsc.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/sctp_diag.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sctp/stream.c
net/sctp/ulpevent.c
net/smc/af_smc.c
net/smc/smc.h
net/smc/smc_clc.c
net/smc/smc_clc.h
net/smc/smc_close.c
net/smc/smc_core.c
net/smc/smc_ib.c
net/smc/smc_pnet.c
net/smc/smc_rx.c
net/smc/smc_tx.c
net/smc/smc_wr.c
net/strparser/strparser.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/frwr_ops.c
net/sunrpc/xprtsock.c
net/tipc/bcast.c
net/tipc/msg.c
net/unix/diag.c
net/vmw_vsock/hyperv_transport.c
net/wireless/nl80211.c
net/wireless/sme.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
samples/sockmap/sockmap_kern.c
samples/trace_events/trace-events-sample.c
scripts/Makefile.build
scripts/Makefile.modpost
scripts/checkpatch.pl
scripts/dtc/dtx_diff
scripts/faddr2line
scripts/kallsyms.c
scripts/mkversion [deleted file]
scripts/package/Makefile
scripts/package/builddeb
scripts/package/mkspec
scripts/spelling.txt
security/apparmor/Makefile
security/apparmor/apparmorfs.c
security/apparmor/domain.c
security/apparmor/include/apparmor.h
security/apparmor/include/audit.h
security/apparmor/include/domain.h
security/apparmor/include/ipc.h
security/apparmor/include/label.h
security/apparmor/include/mount.h [new file with mode: 0644]
security/apparmor/include/sig_names.h [new file with mode: 0644]
security/apparmor/ipc.c
security/apparmor/label.c
security/apparmor/lsm.c
security/apparmor/mount.c [new file with mode: 0644]
security/apparmor/policy.c
security/apparmor/policy_ns.c
security/apparmor/policy_unpack.c
security/commoncap.c
security/keys/Kconfig
security/keys/big_key.c
security/keys/encrypted-keys/encrypted.c
security/keys/gc.c
security/keys/internal.h
security/keys/key.c
security/keys/keyctl.c
security/keys/keyring.c
security/keys/permission.c
security/keys/proc.c
security/keys/process_keys.c
security/keys/request_key.c
security/keys/request_key_auth.c
security/keys/trusted.c
security/keys/user_defined.c
security/smack/smack_lsm.c
sound/core/compress_offload.c
sound/core/pcm_compat.c
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_lock.c
sound/core/seq/seq_lock.h
sound/core/seq/seq_ports.c
sound/core/seq/seq_virmidi.c
sound/core/vmaster.c
sound/hda/hdac_controller.c
sound/pci/asihpi/hpioctl.c
sound/pci/echoaudio/echoaudio.c
sound/pci/hda/hda_codec.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/adau17x1.c
sound/soc/codecs/adau17x1.h
sound/soc/codecs/rt5514-spi.c
sound/soc/codecs/rt5514-spi.h
sound/soc/codecs/rt5514.c
sound/soc/codecs/rt5514.h
sound/soc/codecs/rt5616.c
sound/soc/codecs/rt5659.c
sound/soc/soc-topology.c
sound/usb/caiaq/device.c
sound/usb/card.c
sound/usb/line6/driver.c
sound/usb/line6/podhd.c
sound/usb/mixer.c
sound/usb/mixer.h
sound/usb/quirks.c
sound/usb/usx2y/usb_stream.c
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/disabled-features.h
tools/include/asm-generic/hugetlb_encode.h [new file with mode: 0644]
tools/include/uapi/asm-generic/mman-common.h
tools/include/uapi/drm/drm.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/mman.h
tools/objtool/Documentation/stack-validation.txt
tools/objtool/arch/x86/decode.c
tools/objtool/check.c
tools/objtool/elf.c
tools/perf/Documentation/perf-record.txt
tools/perf/MANIFEST
tools/perf/arch/s390/util/Build
tools/perf/arch/s390/util/sym-handling.c [deleted file]
tools/perf/builtin-script.c
tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
tools/perf/ui/hist.c
tools/perf/util/callchain.c
tools/perf/util/evsel.c
tools/perf/util/parse-events.c
tools/perf/util/parse-events.l
tools/perf/util/pmu.c
tools/perf/util/pmu.h
tools/perf/util/session.c
tools/perf/util/symbol-elf.c
tools/perf/util/symbol.h
tools/perf/util/syscalltbl.c
tools/perf/util/xyarray.h
tools/power/cpupower/Makefile
tools/power/x86/turbostat/turbostat.c
tools/scripts/Makefile.include
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/bpf_helpers.h
tools/testing/selftests/bpf/bpf_util.h
tools/testing/selftests/bpf/sockmap_verdict_prog.c
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/breakpoints/Makefile
tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
tools/testing/selftests/futex/Makefile
tools/testing/selftests/intel_pstate/Makefile
tools/testing/selftests/intel_pstate/run.sh
tools/testing/selftests/lib.mk
tools/testing/selftests/memfd/run_tests.sh [changed mode: 0644->0755]
tools/testing/selftests/mqueue/Makefile
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/msg_zerocopy.c
tools/testing/selftests/net/netdevice.sh
tools/testing/selftests/net/reuseaddr_conflict.c [new file with mode: 0644]
tools/testing/selftests/networking/timestamping/rxtimestamp.c
tools/testing/selftests/seccomp/Makefile
tools/testing/selftests/seccomp/seccomp_benchmark.c [new file with mode: 0644]
tools/testing/selftests/seccomp/seccomp_bpf.c
tools/testing/selftests/sigaltstack/sas.c
tools/testing/selftests/sync/Makefile
tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
tools/testing/selftests/tc-testing/tdc.py
tools/testing/selftests/tc-testing/tdc_batch.py [new file with mode: 0755]
tools/testing/selftests/tc-testing/tdc_config.py
tools/testing/selftests/timers/set-timer-lat.c
tools/testing/selftests/vm/userfaultfd.c
tools/testing/selftests/watchdog/Makefile
tools/testing/selftests/x86/Makefile
virt/kvm/eventfd.c

index 5273cfd70ad62996ba8374698fb5e22e91d33f45..c7b10caecc4ee24a17f14be238c301391451f0b3 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -68,6 +68,8 @@ Jacob Shin <Jacob.Shin@amd.com>
 James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
 James E Wilson <wilson@specifix.com>
+James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
+James Hogan <jhogan@kernel.org> <james@albanarts.com>
 James Ketrenos <jketreno@io.(none)>
 Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
 <javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
index 33e96f74063925fb253b7ac16c73f43b76c334c8..147d4e8a140393ce06a1076183b5a4a62432c7da 100644 (file)
@@ -14,3 +14,11 @@ Description:
                Show or set the gain boost of the amp, from 0-31 range.
                18 = indoors (default)
                14 = outdoors
+
+What           /sys/bus/iio/devices/iio:deviceX/noise_level_tripped
+Date:          May 2017
+KernelVersion: 4.13
+Contact:       Matt Ranostay <matt.ranostay@konsulko.com>
+Description:
+               When 1 the noise level is over the trip level and not reporting
+               valid data
index 676fdf5f2a99af0ee623eeef4b2938c1c1c8924f..5cbb6f0386155d202bf8b1f64e0b26bb63eb84f5 100644 (file)
@@ -211,7 +211,9 @@ Description:
                device, after it has been suspended at run time, from a resume
                request to the moment the device will be ready to process I/O,
                in microseconds.  If it is equal to 0, however, this means that
-               the PM QoS resume latency may be arbitrary.
+               the PM QoS resume latency may be arbitrary and the special value
+               "n/a" means that user space cannot accept any resume latency at
+               all for the given device.
 
                Not all drivers support this attribute.  If it isn't supported,
                it is not present.
index 587db52084c7c21a6db250dffd9b794859d7466a..94672016c26810799328ee6a56bd7f66bf7ef73e 100644 (file)
@@ -14,13 +14,3 @@ Description: Enable/disable VMA based swap readahead.
                still used for tmpfs etc. other users.  If set to
                false, the global swap readahead algorithm will be
                used for all swappable pages.
-
-What:          /sys/kernel/mm/swap/vma_ra_max_order
-Date:          August 2017
-Contact:       Linux memory management mailing list <linux-mm@kvack.org>
-Description:   The max readahead size in order for VMA based swap readahead
-
-               VMA based swap readahead algorithm will readahead at
-               most 1 << max_order pages for each readahead.  The
-               real readahead size for each readahead will be scaled
-               according to the estimation algorithm.
index 713cab1d5f12dca9baaa68f9ca053a4ab90d7ab0..a1d1612f36519f832c2d307293527083cf025271 100644 (file)
@@ -127,7 +127,7 @@ Description:
 
 What;          /sys/power/pm_trace_dev_match
 Date:          October 2010
-Contact:       James Hogan <james@albanarts.com>
+Contact:       James Hogan <jhogan@kernel.org>
 Description:
                The /sys/power/pm_trace_dev_match file contains the name of the
                device associated with the last PM event point saved in the RTC
index 8282099e0cbf446bbebaaf63868ca04610999408..5da10184d9084a77c15e42b56f76a4571193344a 100644 (file)
@@ -352,44 +352,30 @@ Read-Copy Update (RCU)
 ----------------------
 
 .. kernel-doc:: include/linux/rcupdate.h
-   :external:
 
 .. kernel-doc:: include/linux/rcupdate_wait.h
-   :external:
 
 .. kernel-doc:: include/linux/rcutree.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/tree.c
-   :external:
 
 .. kernel-doc:: kernel/rcu/tree_plugin.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/tree_exp.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/update.c
-   :external:
 
 .. kernel-doc:: include/linux/srcu.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/srcutree.c
-   :external:
 
 .. kernel-doc:: include/linux/rculist_bl.h
-   :external:
 
 .. kernel-doc:: include/linux/rculist.h
-   :external:
 
 .. kernel-doc:: include/linux/rculist_nulls.h
-   :external:
 
 .. kernel-doc:: include/linux/rcu_sync.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/sync.c
-   :external:
 
index 3943b5bfa8cffacc8b760db0f94dfce636cb957f..00a5ba51e63fb79803e01539cc9f40dcd757c9e4 100644 (file)
@@ -39,8 +39,8 @@ up.
 Although MT wq wasted a lot of resource, the level of concurrency
 provided was unsatisfactory.  The limitation was common to both ST and
 MT wq albeit less severe on MT.  Each wq maintained its own separate
-worker pool.  A MT wq could provide only one execution context per CPU
-while a ST wq one for the whole system.  Work items had to compete for
+worker pool.  An MT wq could provide only one execution context per CPU
+while an ST wq one for the whole system.  Work items had to compete for
 those very limited execution contexts leading to various problems
 including proneness to deadlocks around the single execution context.
 
@@ -151,7 +151,7 @@ Application Programming Interface (API)
 
 ``alloc_workqueue()`` allocates a wq.  The original
 ``create_*workqueue()`` functions are deprecated and scheduled for
-removal.  ``alloc_workqueue()`` takes three arguments - @``name``,
+removal.  ``alloc_workqueue()`` takes three arguments - ``@name``,
 ``@flags`` and ``@max_active``.  ``@name`` is the name of the wq and
 also used as the name of the rescuer thread if there is one.
 
@@ -197,7 +197,7 @@ resources, scheduled and executed.
   served by worker threads with elevated nice level.
 
   Note that normal and highpri worker-pools don't interact with
-  each other.  Each maintain its separate pool of workers and
+  each other.  Each maintains its separate pool of workers and
   implements concurrency management among its workers.
 
 ``WQ_CPU_INTENSIVE``
@@ -249,8 +249,8 @@ unbound worker-pools and only one work item could be active at any given
 time thus achieving the same ordering property as ST wq.
 
 In the current implementation the above configuration only guarantees
-ST behavior within a given NUMA node. Instead alloc_ordered_queue should
-be used to achieve system wide ST behavior.
+ST behavior within a given NUMA node. Instead ``alloc_ordered_queue()`` should
+be used to achieve system-wide ST behavior.
 
 
 Example Execution Scenarios
index 03a7cee6ac73a4f2dd48248196994538d81f6a4e..c15e75386a0523d5398290aa9a63cc1d9f40f45e 100644 (file)
@@ -32,8 +32,6 @@ cpufreq-stats.txt -   General description of sysfs cpufreq stats.
 
 index.txt      -       File index, Mailing list and Links (this document)
 
-intel-pstate.txt -     Intel pstate cpufreq driver specific file.
-
 pcc-cpufreq.txt -      PCC cpufreq driver specific file.
 
 
index 4a0a7469fdd7bbcd93e26ea5ae9c11e4285f7c7f..32df07e29f6860d4c1de9e3c3c63d2b4d0fc454f 100644 (file)
@@ -344,3 +344,4 @@ Version History
        (wrong raid10_copies/raid10_format sequence)
 1.11.1  Add raid4/5/6 journal write-back support via journal_mode option
 1.12.1  fix for MD deadlock between mddev_suspend() and md_write_start() available
+1.13.0  Fix dev_health status at end of "recover" (was 'a', now 'A')
index a135504c7d57148b938285919130d8dfc8fa18d3..cac24ee10b72ebc3765d29708fa27c5ae6848950 100644 (file)
@@ -32,7 +32,7 @@ Example:
                compatible = "st,stm32h743-rcc", "st,stm32-rcc";
                reg = <0x58024400 0x400>;
                #reset-cells = <1>;
-               #clock-cells = <2>;
+               #clock-cells = <1>;
                clocks = <&clk_hse>, <&clk_lse>, <&clk_i2s_ckin>;
 
                st,syscfg = <&pwrcfg>;
index 38d74314b7abe5c52999d77f36489a0486f222f7..b6c1afa6f02d3663fd547cb3f34b1d70d2d63049 100644 (file)
@@ -16,6 +16,10 @@ Optional properties:
        - ams,tuning-capacitor-pf: Calibration tuning capacitor stepping
          value 0 - 120pF. This will require using the calibration data from
          the manufacturer.
+       - ams,nflwdth: Set the noise and watchdog threshold register on
+         startup. This will need to set according to the noise from the
+         MCU board, and possibly the local environment. Refer to the
+         datasheet for the threshold settings.
 
 Example:
 
@@ -27,4 +31,5 @@ as3935@0 {
        interrupt-parent = <&gpio1>;
        interrupts = <16 1>;
        ams,tuning-capacitor-pf = <80>;
+       ams,nflwdth = <0x44>;
 };
index 4c29cdab0ea5b72985168100891e2422e8dbb252..5eb108e180fa282711a4d448c8353196ae6cadf1 100644 (file)
@@ -99,7 +99,7 @@ Examples:
                        compatible = "arm,gic-v3-its";
                        msi-controller;
                        #msi-cells = <1>;
-                       reg = <0x0 0x2c200000 0 0x200000>;
+                       reg = <0x0 0x2c200000 0 0x20000>;
                };
        };
 
@@ -124,14 +124,14 @@ Examples:
                        compatible = "arm,gic-v3-its";
                        msi-controller;
                        #msi-cells = <1>;
-                       reg = <0x0 0x2c200000 0 0x200000>;
+                       reg = <0x0 0x2c200000 0 0x20000>;
                };
 
                gic-its@2c400000 {
                        compatible = "arm,gic-v3-its";
                        msi-controller;
                        #msi-cells = <1>;
-                       reg = <0x0 0x2c400000 0 0x200000>;
+                       reg = <0x0 0x2c400000 0 0x20000>;
                };
 
                ppi-partitions {
index 12c5ef26ec73924566361aa2ad96710d1e12b210..fdc40e354a64dd582d86841afac520a73d6fa5f0 100644 (file)
@@ -15,11 +15,14 @@ Required properties
 
 compatible     : Must be "ams,as3645a".
 reg            : The I2C address of the device. Typically 0x30.
+#address-cells : 1
+#size-cells    : 0
 
 
-Required properties of the "flash" child node
-=============================================
+Required properties of the flash child node (0)
+===============================================
 
+reg: 0
 flash-timeout-us: Flash timeout in microseconds. The value must be in
                  the range [100000, 850000] and divisible by 50000.
 flash-max-microamp: Maximum flash current in microamperes. Has to be
@@ -33,20 +36,21 @@ ams,input-max-microamp: Maximum flash controller input current. The
                        and divisible by 50000.
 
 
-Optional properties of the "flash" child node
-=============================================
+Optional properties of the flash child node
+===========================================
 
 label          : The label of the flash LED.
 
 
-Required properties of the "indicator" child node
-=================================================
+Required properties of the indicator child node (1)
+===================================================
 
+reg: 1
 led-max-microamp: Maximum indicator current. The allowed values are
                  2500, 5000, 7500 and 10000.
 
-Optional properties of the "indicator" child node
-=================================================
+Optional properties of the indicator child node
+===============================================
 
 label          : The label of the indicator LED.
 
@@ -55,16 +59,20 @@ Example
 =======
 
        as3645a@30 {
+               #address-cells: 1
+               #size-cells: 0
                reg = <0x30>;
                compatible = "ams,as3645a";
-               flash {
+               flash@0 {
+                       reg = <0x0>;
                        flash-timeout-us = <150000>;
                        flash-max-microamp = <320000>;
                        led-max-microamp = <60000>;
                        ams,input-max-microamp = <1750000>;
                        label = "as3645a:flash";
                };
-               indicator {
+               indicator@1 {
+                       reg = <0x1>;
                        led-max-microamp = <10000>;
                        label = "as3645a:indicator";
                };
index b878a1e305af6e143a646b3473be522c82a62e8a..ed1456f5c94dda21ecaf2eebd86bcd7934d11376 100644 (file)
@@ -16,11 +16,13 @@ Required Properties:
 
 - clocks:
   Array of clocks required for SDHC.
-  Require at least input clock for Xenon IP core.
+  Require at least input clock for Xenon IP core. For Armada AP806 and
+  CP110, the AXI clock is also mandatory.
 
 - clock-names:
   Array of names corresponding to clocks property.
   The input clock for Xenon IP core should be named as "core".
+  The input clock for the AXI bus must be named as "axi".
 
 - reg:
   * For "marvell,armada-3700-sdhci", two register areas.
@@ -106,8 +108,8 @@ Example:
                compatible = "marvell,armada-ap806-sdhci";
                reg = <0xaa0000 0x1000>;
                interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>
-               clocks = <&emmc_clk>;
-               clock-names = "core";
+               clocks = <&emmc_clk>,<&axi_clk>;
+               clock-names = "core", "axi";
                bus-width = <4>;
                marvell,xenon-phy-slow-mode;
                marvell,xenon-tun-count = <11>;
@@ -126,8 +128,8 @@ Example:
                interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>
                vqmmc-supply = <&sd_vqmmc_regulator>;
                vmmc-supply = <&sd_vmmc_regulator>;
-               clocks = <&sdclk>;
-               clock-names = "core";
+               clocks = <&sdclk>, <&axi_clk>;
+               clock-names = "core", "axi";
                bus-width = <4>;
                marvell,xenon-tun-count = <9>;
        };
index 7e2dad08a12e92c1baa0290116cbd032f5bb1e88..1814fa13f6ab8078a52a32fa2a55156124d696bb 100644 (file)
@@ -21,8 +21,9 @@ Required properties:
        - main controller clock (for both armada-375-pp2 and armada-7k-pp2)
        - GOP clock (for both armada-375-pp2 and armada-7k-pp2)
        - MG clock (only for armada-7k-pp2)
-- clock-names: names of used clocks, must be "pp_clk", "gop_clk" and
-  "mg_clk" (the latter only for armada-7k-pp2).
+       - AXI clock (only for armada-7k-pp2)
+- clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk"
+  and "axi_clk" (the 2 latter only for armada-7k-pp2).
 
 The ethernet ports are represented by subnodes. At least one port is
 required.
@@ -78,8 +79,9 @@ Example for marvell,armada-7k-pp2:
 cpm_ethernet: ethernet@0 {
        compatible = "marvell,armada-7k-pp22";
        reg = <0x0 0x100000>, <0x129000 0xb000>;
-       clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, <&cpm_syscon0 1 5>;
-       clock-names = "pp_clk", "gop_clk", "gp_clk";
+       clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>,
+                <&cpm_syscon0 1 5>, <&cpm_syscon0 1 18>;
+       clock-names = "pp_clk", "gop_clk", "gp_clk", "axi_clk";
 
        eth0: eth0 {
                interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>,
index 6af8eed1adeb1429cf9bdda435ac4e7ab27c5c21..9c16ee2965a2ce756acc23d6956fedc38157144b 100644 (file)
@@ -4,6 +4,7 @@ The device node has following properties.
 
 Required properties:
  - compatible: should be "rockchip,<name>-gamc"
+   "rockchip,rk3128-gmac": found on RK312x SoCs
    "rockchip,rk3228-gmac": found on RK322x SoCs
    "rockchip,rk3288-gmac": found on RK3288 SoCs
    "rockchip,rk3328-gmac": found on RK3328 SoCs
diff --git a/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt b/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt
new file mode 100644 (file)
index 0000000..830069b
--- /dev/null
@@ -0,0 +1,28 @@
+Binding for the Synopsys HSDK reset controller
+
+This binding uses the common reset binding[1].
+
+[1] Documentation/devicetree/bindings/reset/reset.txt
+
+Required properties:
+- compatible: should be "snps,hsdk-reset".
+- reg: should always contain 2 pairs address - length: first for reset
+  configuration register and second for corresponding SW reset and status bits
+  register.
+- #reset-cells: from common reset binding; Should always be set to 1.
+
+Example:
+       reset: reset@880 {
+               compatible = "snps,hsdk-reset";
+               #reset-cells = <1>;
+               reg = <0x8A0 0x4>, <0xFF0 0x4>;
+       };
+
+Specifying reset lines connected to IP modules:
+       ethernet@.... {
+               ....
+               resets = <&reset HSDK_V1_ETH_RESET>;
+               ....
+       };
+
+The index could be found in <dt-bindings/reset/snps,hsdk-reset.h>
index 3eca6de6369d773ef70bba048345aaed1d2737df..a65d7b71e81a2a22bfc23ab79ede461a25d2de37 100644 (file)
@@ -8,6 +8,12 @@ Required properties:
                    the firmware event log
 - linux,sml-size : size of the memory allocated for the firmware event log
 
+Optional properties:
+
+- powered-while-suspended: present when the TPM is left powered on between
+                           suspend and resume (makes the suspend/resume
+                           callbacks do nothing).
+
 Example (for OpenPower Systems with Nuvoton TPM 2.0 on I2C)
 ----------------------------------------------------------
 
index 4fc96946f81d6afc781f7007eaa9dd945f52ed52..cf504d0380aeb9d0749cc2ab1f90e2f35a781146 100644 (file)
@@ -41,6 +41,8 @@ Required properties:
     - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART.
     - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART.
     - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART.
+    - "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART.
+    - "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART.
     - "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART.
     - "renesas,hscif-r8a77995" for R8A77995 (R-Car D3) HSCIF compatible UART.
     - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART.
index 1ea1fd4232ab2ac9c0e736169ef98a02e05ccf8b..1afd298eddd73147ebf6a1dcbc56bf404559cff1 100644 (file)
@@ -3,8 +3,8 @@ Device tree binding vendor prefix registry.  Keep list in alphabetical order.
 This isn't an exhaustive list, but you should add new prefixes to it before
 using them to avoid name-space collisions.
 
-abcn   Abracon Corporation
 abilis Abilis Systems
+abracon        Abracon Corporation
 actions        Actions Semiconductor Co., Ltd.
 active-semi    Active-Semi International Inc
 ad     Avionic Design GmbH
index bedd32388dac51d8a603dd1389c3b4de24807d7a..a0dc2879a152c89dbb9d5f715ec07fbfe6a4cd8e 100644 (file)
@@ -675,7 +675,7 @@ sub-domain of the parent domain.
 
 Support for power domains is provided through the :c:member:`pm_domain` field of
 |struct device|.  This field is a pointer to an object of type
-|struct dev_pm_domain|, defined in :file:`include/linux/pm.h``, providing a set
+|struct dev_pm_domain|, defined in :file:`include/linux/pm.h`, providing a set
 of power management callbacks analogous to the subsystem-level and device driver
 callbacks that are executed for the given device during all power transitions,
 instead of the respective subsystem-level callbacks.  Specifically, if a
index 4421135826a2e5fb8526288afa06473cb2137027..d661e6f7e6a0cb6b161e30ab1ad800bc8bd581f6 100644 (file)
@@ -196,12 +196,13 @@ struct driver_attribute {
 };
 
 Device drivers can export attributes via their sysfs directories. 
-Drivers can declare attributes using a DRIVER_ATTR macro that works
-identically to the DEVICE_ATTR macro. 
+Drivers can declare attributes using a DRIVER_ATTR_RW and DRIVER_ATTR_RO
+macro that works identically to the DEVICE_ATTR_RW and DEVICE_ATTR_RO
+macros.
 
 Example:
 
-DRIVER_ATTR(debug,0644,show_debug,store_debug);
+DRIVER_ATTR_RW(debug);
 
 This is equivalent to declaring:
 
index c98800df677fef0e18103415af6a0333e8c6a10a..9f4f87e1624036349533adf9534bfd3c4b08535d 100644 (file)
@@ -41,6 +41,11 @@ Igor Mammedov (DFS support)
 Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code)
 Scott Lovenberg
 Pavel Shilovsky (for great work adding SMB2 support, and various SMB3 features)
+Aurelien Aptel (for DFS SMB3 work and some key bug fixes)
+Ronnie Sahlberg (for SMB3 xattr work and bug fixes)
+Shirish Pargaonkar (for many ACL patches over the years)
+Sachin Prabhu (many bug fixes, including for reconnect, copy offload and security)
+
 
 Test case and Bug Report contributors
 -------------------------------------
index a54788405429dce03af2dd950f6dd817df0d9a56..a9da51553ba3e15049c17aecbad840eb607ef056 100644 (file)
@@ -1,10 +1,14 @@
-The CIFS VFS support for Linux supports many advanced network filesystem 
-features such as hierarchical dfs like namespace, hardlinks, locking and more.  
+This module supports the SMB3 family of advanced network protocols (as well
+as older dialects, originally called "CIFS" or SMB1).
+
+The CIFS VFS module for Linux supports many advanced network filesystem
+features such as hierarchical DFS like namespace, hardlinks, locking and more.
 It was designed to comply with the SNIA CIFS Technical Reference (which 
 supersedes the 1992 X/Open SMB Standard) as well as to perform best practice 
 practical interoperability with Windows 2000, Windows XP, Samba and equivalent 
 servers.  This code was developed in participation with the Protocol Freedom
-Information Foundation.
+Information Foundation.  CIFS, and now SMB3, have become a de facto
+standard for interoperating between Macs and Windows and major NAS appliances.
 
 Please see
   http://protocolfreedom.org/ and
@@ -15,30 +19,11 @@ for more details.
 For questions or bug reports please contact:
     sfrench@samba.org (sfrench@us.ibm.com) 
 
+See the project page at: https://wiki.samba.org/index.php/LinuxCIFS_utils
+
 Build instructions:
 ==================
-For Linux 2.4:
-1) Get the kernel source (e.g.from http://www.kernel.org)
-and download the cifs vfs source (see the project page
-at http://us1.samba.org/samba/Linux_CIFS_client.html)
-and change directory into the top of the kernel directory
-then patch the kernel (e.g. "patch -p1 < cifs_24.patch") 
-to add the cifs vfs to your kernel configure options if
-it has not already been added (e.g. current SuSE and UL
-users do not need to apply the cifs_24.patch since the cifs vfs is
-already in the kernel configure menu) and then
-mkdir linux/fs/cifs and then copy the current cifs vfs files from
-the cifs download to your kernel build directory e.g.
-
-       cp <cifs_download_dir>/fs/cifs/* to <kernel_download_dir>/fs/cifs
-       
-2) make menuconfig (or make xconfig)
-3) select cifs from within the network filesystem choices
-4) save and exit
-5) make dep
-6) make modules (or "make" if CIFS VFS not to be built as a module)
-
-For Linux 2.6:
+For Linux:
 1) Download the kernel (e.g. from http://www.kernel.org)
 and change directory into the top of the kernel directory tree
 (e.g. /usr/src/linux-2.5.73)
@@ -61,16 +46,13 @@ would simply type "make install").
 If you do not have the utility mount.cifs (in the Samba 3.0 source tree and on 
 the CIFS VFS web site) copy it to the same directory in which mount.smbfs and 
 similar files reside (usually /sbin).  Although the helper software is not  
-required, mount.cifs is recommended.  Eventually the Samba 3.0 utility program 
-"net" may also be helpful since it may someday provide easier mount syntax for
-users who are used to Windows e.g.
-       net use <mount point> <UNC name or cifs URL>
+required, mount.cifs is recommended.  Most distros include a "cifs-utils"
+package that includes this utility so it is recommended to install this.
+
 Note that running the Winbind pam/nss module (logon service) on all of your
 Linux clients is useful in mapping Uids and Gids consistently across the
 domain to the proper network user.  The mount.cifs mount helper can be
-trivially built from Samba 3.0 or later source e.g. by executing:
-
-       gcc samba/source/client/mount.cifs.c -o mount.cifs
+found at cifs-utils.git on git.samba.org
 
 If cifs is built as a module, then the size and number of network buffers
 and maximum number of simultaneous requests to one server can be configured.
@@ -79,6 +61,18 @@ Changing these from their defaults is not recommended. By executing modinfo
 on kernel/fs/cifs/cifs.ko the list of configuration changes that can be made
 at module initialization time (by running insmod cifs.ko) can be seen.
 
+Recommendations
+===============
+To improve security the SMB2.1 dialect or later (usually will get SMB3) is now
+the new default. To use old dialects (e.g. to mount Windows XP) use "vers=1.0"
+on mount (or vers=2.0 for Windows Vista).  Note that the CIFS (vers=1.0) is
+much older and less secure than the default dialect SMB3 which includes
+many advanced security features such as downgrade attack detection
+and encrypted shares and stronger signing and authentication algorithms.
+There are additional mount options that may be helpful for SMB3 to get
+improved POSIX behavior (NB: can use vers=3.0 to force only SMB3, never 2.1):
+     "mfsymlinks" and "cifsacl" and "idsfromsid"
+
 Allowing User Mounts
 ====================
 To permit users to mount and unmount over directories they own is possible
@@ -98,9 +92,7 @@ and execution of suid programs on the remote target would be enabled
 by default. This can be changed, as with nfs and other filesystems, 
 by simply specifying "nosuid" among the mount options. For user mounts 
 though to be able to pass the suid flag to mount requires rebuilding 
-mount.cifs with the following flag: 
-        gcc samba/source/client/mount.cifs.c -DCIFS_ALLOW_USR_SUID -o mount.cifs
+mount.cifs with the following flag: CIFS_ALLOW_USR_SUID
 
 There is a corresponding manual page for cifs mounting in the Samba 3.0 and
 later source tree in docs/manpages/mount.cifs.8 
@@ -189,18 +181,18 @@ applications running on the same server as Samba.
 Use instructions:
 ================
 Once the CIFS VFS support is built into the kernel or installed as a module 
-(cifs.o), you can use mount syntax like the following to access Samba or Windows 
-servers: 
+(cifs.ko), you can use mount syntax like the following to access Samba or
+Mac or Windows servers:
 
-  mount -t cifs //9.53.216.11/e$ /mnt -o user=myname,pass=mypassword
+  mount -t cifs //9.53.216.11/e$ /mnt -o username=myname,password=mypassword
 
 Before -o the option -v may be specified to make the mount.cifs
 mount helper display the mount steps more verbosely.  
 After -o the following commonly used cifs vfs specific options
 are supported:
 
-  user=<username>
-  pass=<password>
+  username=<username>
+  password=<password>
   domain=<domain name>
   
 Other cifs mount options are described below.  Use of TCP names (in addition to
@@ -246,13 +238,16 @@ the Server's registry.  Samba starting with version 3.10 will allow such
 filenames (ie those which contain valid Linux characters, which normally
 would be forbidden for Windows/CIFS semantics) as long as the server is
 configured for Unix Extensions (and the client has not disabled
-/proc/fs/cifs/LinuxExtensionsEnabled).
-  
+/proc/fs/cifs/LinuxExtensionsEnabled). In addition the mount option
+"mapposix" can be used on CIFS (vers=1.0) to force the mapping of
+illegal Windows/NTFS/SMB characters to a remap range (this mount parm
+is the default for SMB3). This remap ("mapposix") range is also
+compatible with Mac (and "Services for Mac" on some older Windows).
 
 CIFS VFS Mount Options
 ======================
 A partial list of the supported mount options follows:
-  user         The user name to use when trying to establish
+  username     The user name to use when trying to establish
                the CIFS session.
   password     The user password.  If the mount helper is
                installed, the user will be prompted for password
index 066ffddc3964917d75287198ffd178daacc1cac6..396ecfd6ff4a0da40f099dfef57ef6111ae8da71 100644 (file)
@@ -1,4 +1,4 @@
-Version 2.03 August 1, 2014
+Version 2.04 September 13, 2017
 
 A Partial List of Missing Features
 ==================================
@@ -8,73 +8,69 @@ for visible, important contributions to this module.  Here
 is a partial list of the known problems and missing features:
 
 a) SMB3 (and SMB3.02) missing optional features:
-   - RDMA
+   - RDMA (started)
    - multichannel (started)
    - directory leases (improved metadata caching)
    - T10 copy offload (copy chunk is only mechanism supported)
-   - encrypted shares
 
 b) improved sparse file support
 
 c) Directory entry caching relies on a 1 second timer, rather than
-using FindNotify or equivalent.  - (started)
+using Directory Leases
 
 d) quota support (needs minor kernel change since quota calls
 to make it to network filesystems or deviceless filesystems)
 
-e) improve support for very old servers (OS/2 and Win9x for example)
-Including support for changing the time remotely (utimes command).
+e) Better optimize open to reduce redundant opens (using reference
+counts more) and to improve use of compounding in SMB3 to reduce
+number of roundtrips.
 
-f) hook lower into the sockets api (as NFS/SunRPC does) to avoid the
-extra copy in/out of the socket buffers in some cases.
-
-g) Better optimize open (and pathbased setfilesize) to reduce the
-oplock breaks coming from windows srv.  Piggyback identical file
-opens on top of each other by incrementing reference count rather
-than resending (helps reduce server resource utilization and avoid
-spurious oplock breaks).
-
-h) Add support for storing symlink info to Windows servers
-in the Extended Attribute format their SFU clients would recognize.
-
-i) Finish inotify support so kde and gnome file list windows
+f) Finish inotify support so kde and gnome file list windows
 will autorefresh (partially complete by Asser). Needs minor kernel
 vfs change to support removing D_NOTIFY on a file.   
 
-j) Add GUI tool to configure /proc/fs/cifs settings and for display of
+g) Add GUI tool to configure /proc/fs/cifs settings and for display of
 the CIFS statistics (started)
 
-k) implement support for security and trusted categories of xattrs
+h) implement support for security and trusted categories of xattrs
 (requires minor protocol extension) to enable better support for SELINUX
 
-l) Implement O_DIRECT flag on open (already supported on mount)
+i) Implement O_DIRECT flag on open (already supported on mount)
 
-m) Create UID mapping facility so server UIDs can be mapped on a per
+j) Create UID mapping facility so server UIDs can be mapped on a per
 mount or a per server basis to client UIDs or nobody if no mapping
-exists.  This is helpful when Unix extensions are negotiated to
-allow better permission checking when UIDs differ on the server
-and client.  Add new protocol request to the CIFS protocol 
-standard for asking the server for the corresponding name of a
-particular uid.
+exists. Also better integration with winbind for resolving SID owners
+
+k) Add tools to take advantage of more smb3 specific ioctls and features
+
+l) encrypted file support
+
+m) improved stats gathering, tools (perhaps integration with nfsometer?)
 
-n) DOS attrs - returned as pseudo-xattr in Samba format (check VFAT and NTFS for this too)
+n) allow setting more NTFS/SMB3 file attributes remotely (currently limited to compressed
+file attribute via chflags) and improve user space tools for managing and
+viewing them.
 
-o) mount check for unmatched uids
+o) mount helper GUI (to simplify the various configuration options on mount)
 
-p) Add support for new vfs entry point for fallocate
+p) autonegotiation of dialects (offering more than one dialect ie SMB3.02,
+SMB3, SMB2.1 not just SMB3).
 
-q) Add tools to take advantage of cifs/smb3 specific ioctls and features
-such as "CopyChunk" (fast server side file copy)
+q) Allow mount.cifs to be more verbose in reporting errors with dialect
+or unsupported feature errors.
 
-r) encrypted file support
+r) updating cifs documentation, and user guide.
 
-s) improved stats gathering, tools (perhaps integration with nfsometer?)
+s) Addressing bugs found by running a broader set of xfstests in standard
+file system xfstest suite.
 
-t) allow setting more NTFS/SMB3 file attributes remotely (currently limited to compressed
-file attribute via chflags)
+t) split cifs and smb3 support into separate modules so legacy (and less
+secure) CIFS dialect can be disabled in environments that don't need it
+and simplify the code.
 
-u) mount helper GUI (to simplify the various configuration options on mount)
+u) Finish up SMB3.1.1 dialect support
 
+v) POSIX Extensions for SMB3.1.1
 
 KNOWN BUGS
 ====================================
index 2fac91ac96cf7484aae9e148e5841755feba5f70..67756607246e767a9105bc06c00a7b97730abbb1 100644 (file)
@@ -1,24 +1,28 @@
-  This is the client VFS module for the Common Internet File System
-  (CIFS) protocol which is the successor to the Server Message Block 
+  This is the client VFS module for the SMB3 NAS protocol as well as
+  older dialects such as the Common Internet File System (CIFS)
+  protocol which was the successor to the Server Message Block
   (SMB) protocol, the native file sharing mechanism for most early
   PC operating systems. New and improved versions of CIFS are now
   called SMB2 and SMB3. These dialects are also supported by the
   CIFS VFS module. CIFS is fully supported by network
-  file servers such as Windows 2000, 2003, 2008 and 2012
+  file servers such as Windows 2000, 2003, 2008, 2012 and 2016
   as well by Samba (which provides excellent CIFS
-  server support for Linux and many other operating systems), so
+  server support for Linux and many other operating systems), Apple
+  systems, as well as most Network Attached Storage vendors, so
   this network filesystem client can mount to a wide variety of
   servers.
 
   The intent of this module is to provide the most advanced network
-  file system function for CIFS compliant servers, including better
-  POSIX compliance, secure per-user session establishment, high
-  performance safe distributed caching (oplock), optional packet
+  file system function for SMB3 compliant servers, including advanced
+  security features, excellent parallelized high performance i/o, better
+  POSIX compliance, secure per-user session establishment, encryption,
+  high performance safe distributed caching (leases/oplocks), optional packet
   signing, large files, Unicode support and other internationalization
   improvements. Since both Samba server and this filesystem client support
-  the CIFS Unix extensions, the combination can provide a reasonable 
-  alternative to NFSv4 for fileserving in some Linux to Linux environments,
-  not just in Linux to Windows environments.
+  the CIFS Unix extensions (and in the future SMB3 POSIX extensions),
+  the combination can provide a reasonable alternative to other network and
+  cluster file systems for fileserving in some Linux to Linux environments,
+  not just in Linux to Windows (or Linux to Mac) environments.
 
  This filesystem has a mount utility (mount.cifs) that can be obtained from
 
index 36f528a7fdd64d18c7ee7b3a50849b45bc046ef3..8caa60734647f70a777b8a568ba9f2dd99182fb8 100644 (file)
@@ -210,8 +210,11 @@ path as another overlay mount and it may use a lower layer path that is
 beneath or above the path of another overlay lower layer path.
 
 Using an upper layer path and/or a workdir path that are already used by
-another overlay mount is not allowed and will fail with EBUSY.  Using
+another overlay mount is not allowed and may fail with EBUSY.  Using
 partially overlapping paths is not allowed but will not fail with EBUSY.
+If files are accessed from two overlayfs mounts which share or overlap the
+upper layer and/or workdir path the behavior of the overlay is undefined,
+though it will not result in a crash or deadlock.
 
 Mounting an overlay using an upper layer path, where the upper layer path
 was previously used by another mounted overlay in combination with a
index 24da7b32c489fd65408dbd8b33f7bc0402ddba0b..9a3658cc399ed459f1e21d613dbf8e5b1b2bc647 100644 (file)
@@ -366,7 +366,8 @@ struct driver_attribute {
 
 Declaring:
 
-DRIVER_ATTR(_name, _mode, _show, _store)
+DRIVER_ATTR_RO(_name)
+DRIVER_ATTR_RW(_name)
 
 Creation/Removal:
 
index 0500193434cb2957eec34124d621ccc176eac555..d477024569269ff741720537ef609cafb60166f2 100644 (file)
@@ -36,6 +36,7 @@ Supported adapters:
   * Intel Gemini Lake (SOC)
   * Intel Cannon Lake-H (PCH)
   * Intel Cannon Lake-LP (PCH)
+  * Intel Cedar Fork (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
index 329e740adea70db91ac2c9cd9f671f79ca58790e..f6f80380dff2fa0851c5ae02de27497e9bc9c0df 100644 (file)
@@ -1108,14 +1108,6 @@ When kbuild executes, the following steps are followed (roughly):
     ld
        Link target. Often, LDFLAGS_$@ is used to set specific options to ld.
 
-    objcopy
-       Copy binary. Uses OBJCOPYFLAGS usually specified in
-       arch/$(ARCH)/Makefile.
-       OBJCOPYFLAGS_$@ may be used to set additional options.
-
-    gzip
-       Compress target. Use maximum compression to compress target.
-
        Example:
                #arch/x86/boot/Makefile
                LDFLAGS_bootsect := -Ttext 0x0 -s --oformat binary
@@ -1139,6 +1131,19 @@ When kbuild executes, the following steps are followed (roughly):
              resulting in the target file being recompiled for no
              obvious reason.
 
+    objcopy
+       Copy binary. Uses OBJCOPYFLAGS usually specified in
+       arch/$(ARCH)/Makefile.
+       OBJCOPYFLAGS_$@ may be used to set additional options.
+
+    gzip
+       Compress target. Use maximum compression to compress target.
+
+       Example:
+               #arch/x86/boot/compressed/Makefile
+               $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
+                       $(call if_changed,gzip)
+
     dtc
        Create flattened device tree blob object suitable for linking
        into vmlinux. Device tree blobs linked into vmlinux are placed
@@ -1219,7 +1224,7 @@ When kbuild executes, the following steps are followed (roughly):
        that may be shared between individual architectures.
        The recommended approach how to use a generic header file is
        to list the file in the Kbuild file.
-       See "7.3 generic-y" for further info on syntax etc.
+       See "7.2 generic-y" for further info on syntax etc.
 
 --- 6.11 Post-link pass
 
@@ -1254,13 +1259,13 @@ A Kbuild file may be defined under arch/<arch>/include/uapi/asm/ and
 arch/<arch>/include/asm/ to list asm files coming from asm-generic.
 See subsequent chapter for the syntax of the Kbuild file.
 
-       --- 7.1 no-export-headers
+--- 7.1 no-export-headers
 
        no-export-headers is essentially used by include/uapi/linux/Kbuild to
        avoid exporting specific headers (e.g. kvm.h) on architectures that do
        not support it. It should be avoided as much as possible.
 
-       --- 7.2 generic-y
+--- 7.2 generic-y
 
        If an architecture uses a verbatim copy of a header from
        include/asm-generic then this is listed in the file
@@ -1287,7 +1292,7 @@ See subsequent chapter for the syntax of the Kbuild file.
                Example: termios.h
                        #include <asm-generic/termios.h>
 
-       --- 7.3 generated-y
+--- 7.3 generated-y
 
        If an architecture generates other header files alongside generic-y
        wrappers, generated-y specifies them.
@@ -1299,7 +1304,7 @@ See subsequent chapter for the syntax of the Kbuild file.
                        #arch/x86/include/asm/Kbuild
                        generated-y += syscalls_32.h
 
-       --- 7.5 mandatory-y
+--- 7.4 mandatory-y
 
        mandatory-y is essentially used by include/uapi/asm-generic/Kbuild.asm
        to define the minimum set of headers that must be exported in
index 57f52cdce32e42c9d170e8e475a02e8a08e1f454..9ba04c0bab8db6e1a74947770a028ebca43e1651 100644 (file)
@@ -2387,7 +2387,7 @@ broadcast: Like active-backup, there is not much advantage to this
        and packet type ID), so in a "gatewayed" configuration, all
        outgoing traffic will generally use the same device.  Incoming
        traffic may also end up on a single device, but that is
-       dependent upon the balancing policy of the peer's 8023.ad
+       dependent upon the balancing policy of the peer's 802.3ad
        implementation.  In a "local" configuration, traffic will be
        distributed across the devices in the bond.
 
index 789b74dbe1d96af46fae8a3504a242eaa0b11c3c..87814859cfc21c5c6a64c1dfb83a1d6cff596731 100644 (file)
@@ -337,7 +337,7 @@ Examples for low-level BPF:
   jeq #14, good           /* __NR_rt_sigprocmask */
   jeq #13, good           /* __NR_rt_sigaction */
   jeq #35, good           /* __NR_nanosleep */
-  bad: ret #0             /* SECCOMP_RET_KILL */
+  bad: ret #0             /* SECCOMP_RET_KILL_THREAD */
   good: ret #0x7fff0000   /* SECCOMP_RET_ALLOW */
 
 The above example code can be placed into a file (here called "foo"), and
index b3345d0fe0a67e477a6754848e7fc7be144322d5..77f4de59dc9ceb3cdb36692d1ea41e1d861468b0 100644 (file)
@@ -1680,6 +1680,9 @@ accept_dad - INTEGER
        2: Enable DAD, and disable IPv6 operation if MAC-based duplicate
           link-local address has been found.
 
+       DAD operation and mode on a given interface will be selected according
+       to the maximum value of conf/{all,interface}/accept_dad.
+
 force_tllao - BOOLEAN
        Enable sending the target link-layer address option even when
        responding to a unicast neighbor solicitation.
@@ -1727,16 +1730,23 @@ suppress_frag_ndisc - INTEGER
 
 optimistic_dad - BOOLEAN
        Whether to perform Optimistic Duplicate Address Detection (RFC 4429).
-               0: disabled (default)
-               1: enabled
+       0: disabled (default)
+       1: enabled
+
+       Optimistic Duplicate Address Detection for the interface will be enabled
+       if at least one of conf/{all,interface}/optimistic_dad is set to 1,
+       it will be disabled otherwise.
 
 use_optimistic - BOOLEAN
        If enabled, do not classify optimistic addresses as deprecated during
        source address selection.  Preferred addresses will still be chosen
        before optimistic addresses, subject to other ranking in the source
        address selection algorithm.
-               0: disabled (default)
-               1: enabled
+       0: disabled (default)
+       1: enabled
+
+       This will be enabled if at least one of
+       conf/{all,interface}/use_optimistic is set to 1, disabled otherwise.
 
 stable_secret - IPv6 address
        This IPv6 address will be used as a secret to generate IPv6
index 5e40e1f68873b0f2b594c22a6e69ddb218ce56ec..82236a17b5e65198be004d2cdd6a7c5bd8a9b7d4 100644 (file)
@@ -13,42 +13,42 @@ an example setup using a data-center-class switch ASIC chip.  Other setups
 with SR-IOV or soft switches, such as OVS, are possible.
 
 
-                             User-space tools
-
-       user space                   |
-      +-------------------------------------------------------------------+
-       kernel                       | Netlink
-                                    |
-                     +--------------+-------------------------------+
-                     |         Network stack                        |
-                     |           (Linux)                            |
-                     |                                              |
-                     +----------------------------------------------+
+                             User-space tools
+
+       user space                   |
+      +-------------------------------------------------------------------+
+       kernel                       | Netlink
+                                    |
+                     +--------------+-------------------------------+
+                     |         Network stack                        |
+                     |           (Linux)                            |
+                     |                                              |
+                     +----------------------------------------------+
 
                            sw1p2     sw1p4     sw1p6
-                      sw1p1  +  sw1p3  +  sw1p5  +          eth1
-                        +    |    +    |    +    |            +
-                        |    |    |    |    |    |            |
-                     +--+----+----+----+-+--+----+---+  +-----+-----+
-                     |         Switch driver         |  |    mgmt   |
-                     |        (this document)        |  |   driver  |
-                     |                               |  |           |
-                     +--------------+----------------+  +-----------+
-                                    |
-       kernel                       | HW bus (eg PCI)
-      +-------------------------------------------------------------------+
-       hardware                     |
-                     +--------------+---+------------+
-                     |         Switch device (sw1)   |
-                     |  +----+                       +--------+
-                     |  |    v offloaded data path   | mgmt port
-                     |  |    |                       |
-                     +--|----|----+----+----+----+---+
-                        |    |    |    |    |    |
-                        +    +    +    +    +    +
-                       p1   p2   p3   p4   p5   p6
-
-                             front-panel ports
+                      sw1p1  +  sw1p3  +  sw1p5  +          eth1
+                        +    |    +    |    +    |            +
+                        |    |    |    |    |    |            |
+                     +--+----+----+----+----+----+---+  +-----+-----+
+                     |         Switch driver         |  |    mgmt   |
+                     |        (this document)        |  |   driver  |
+                     |                               |  |           |
+                     +--------------+----------------+  +-----------+
+                                    |
+       kernel                       | HW bus (eg PCI)
+      +-------------------------------------------------------------------+
+       hardware                     |
+                     +--------------+----------------+
+                     |         Switch device (sw1)   |
+                     |  +----+                       +--------+
+                     |  |    v offloaded data path   | mgmt port
+                     |  |    |                       |
+                     +--|----|----+----+----+----+---+
+                        |    |    |    |    |    |
+                        +    +    +    +    +    +
+                       p1   p2   p3   p4   p5   p6
+
+                             front-panel ports
 
 
                                     Fig 1.
index 82fc399fcd33d1628289ec5ccfeb3193a368c2fc..61e43cc3ed171e2371b6372609533dc16fc9b057 100644 (file)
@@ -25,6 +25,7 @@ Below are the essential guides that every developer should read.
    submitting-patches
    coding-style
    email-clients
+   kernel-enforcement-statement
 
 Other guides to the community that are of interest to most developers are: 
 
diff --git a/Documentation/process/kernel-enforcement-statement.rst b/Documentation/process/kernel-enforcement-statement.rst
new file mode 100644 (file)
index 0000000..1e23d42
--- /dev/null
@@ -0,0 +1,147 @@
+Linux Kernel Enforcement Statement
+----------------------------------
+
+As developers of the Linux kernel, we have a keen interest in how our software
+is used and how the license for our software is enforced.  Compliance with the
+reciprocal sharing obligations of GPL-2.0 is critical to the long-term
+sustainability of our software and community.
+
+Although there is a right to enforce the separate copyright interests in the
+contributions made to our community, we share an interest in ensuring that
+individual enforcement actions are conducted in a manner that benefits our
+community and do not have an unintended negative impact on the health and
+growth of our software ecosystem.  In order to deter unhelpful enforcement
+actions, we agree that it is in the best interests of our development
+community to undertake the following commitment to users of the Linux kernel
+on behalf of ourselves and any successors to our copyright interests:
+
+    Notwithstanding the termination provisions of the GPL-2.0, we agree that
+    it is in the best interests of our development community to adopt the
+    following provisions of GPL-3.0 as additional permissions under our
+    license with respect to any non-defensive assertion of rights under the
+    license.
+
+       However, if you cease all violation of this License, then your license
+       from a particular copyright holder is reinstated (a) provisionally,
+       unless and until the copyright holder explicitly and finally
+       terminates your license, and (b) permanently, if the copyright holder
+       fails to notify you of the violation by some reasonable means prior to
+       60 days after the cessation.
+
+       Moreover, your license from a particular copyright holder is
+       reinstated permanently if the copyright holder notifies you of the
+       violation by some reasonable means, this is the first time you have
+       received notice of violation of this License (for any work) from that
+       copyright holder, and you cure the violation prior to 30 days after
+       your receipt of the notice.
+
+Our intent in providing these assurances is to encourage more use of the
+software.  We want companies and individuals to use, modify and distribute
+this software.  We want to work with users in an open and transparent way to
+eliminate any uncertainty about our expectations regarding compliance or
+enforcement that might limit adoption of our software.  We view legal action
+as a last resort, to be initiated only when other community efforts have
+failed to resolve the problem.
+
+Finally, once a non-compliance issue is resolved, we hope the user will feel
+welcome to join us in our efforts on this project.  Working together, we will
+be stronger.
+
+Except where noted below, we speak only for ourselves, and not for any company
+we might work for today, have in the past, or will in the future.
+
+  - Bjorn Andersson (Linaro)
+  - Andrea Arcangeli (Red Hat)
+  - Neil Armstrong
+  - Jens Axboe
+  - Pablo Neira Ayuso
+  - Khalid Aziz
+  - Ralf Baechle
+  - Felipe Balbi
+  - Arnd Bergmann
+  - Ard Biesheuvel
+  - Paolo Bonzini (Red Hat)
+  - Christian Borntraeger
+  - Mark Brown (Linaro)
+  - Paul Burton
+  - Javier Martinez Canillas
+  - Rob Clark
+  - Jonathan Corbet
+  - Vivien Didelot (Savoir-faire Linux)
+  - Hans de Goede (Red Hat)
+  - Mel Gorman (SUSE)
+  - Sven Eckelmann
+  - Alex Elder (Linaro)
+  - Fabio Estevam
+  - Larry Finger
+  - Bhumika Goyal
+  - Andy Gross
+  - Juergen Gross
+  - Shawn Guo
+  - Ulf Hansson
+  - Tejun Heo
+  - Rob Herring
+  - Masami Hiramatsu
+  - Michal Hocko
+  - Simon Horman
+  - Johan Hovold (Hovold Consulting AB)
+  - Christophe JAILLET
+  - Olof Johansson
+  - Lee Jones (Linaro)
+  - Heiner Kallweit
+  - Srinivas Kandagatla
+  - Jan Kara
+  - Shuah Khan (Samsung)
+  - David Kershner
+  - Jaegeuk Kim
+  - Namhyung Kim
+  - Colin Ian King
+  - Jeff Kirsher
+  - Greg Kroah-Hartman (Linux Foundation)
+  - Christian König
+  - Vinod Koul
+  - Krzysztof Kozlowski
+  - Viresh Kumar
+  - Aneesh Kumar K.V
+  - Julia Lawall
+  - Doug Ledford (Red Hat)
+  - Chuck Lever (Oracle)
+  - Daniel Lezcano
+  - Shaohua Li
+  - Xin Long (Red Hat)
+  - Tony Luck
+  - Mike Marshall
+  - Chris Mason
+  - Paul E. McKenney
+  - David S. Miller
+  - Ingo Molnar
+  - Kuninori Morimoto
+  - Borislav Petkov
+  - Jiri Pirko
+  - Josh Poimboeuf
+  - Sebastian Reichel (Collabora)
+  - Guenter Roeck
+  - Joerg Roedel
+  - Leon Romanovsky
+  - Steven Rostedt (VMware)
+  - Ivan Safonov
+  - Anna Schumaker
+  - Jes Sorensen
+  - K.Y. Srinivasan
+  - Heiko Stuebner
+  - Jiri Kosina (SUSE)
+  - Dmitry Torokhov
+  - Linus Torvalds
+  - Thierry Reding
+  - Rik van Riel
+  - Geert Uytterhoeven (Glider bvba)
+  - Daniel Vetter
+  - Linus Walleij
+  - Richard Weinberger
+  - Dan Williams
+  - Rafael J. Wysocki
+  - Arvind Yadav
+  - Masahiro Yamada
+  - Wei Yongjun
+  - Lv Zheng
index ce61d1fe08cacb5af99260241b058d139838029a..694968c7523cc28620c8ac51a28a33dc1b14336e 100644 (file)
@@ -75,6 +75,7 @@ show up in /proc/sys/kernel:
 - reboot-cmd                  [ SPARC only ]
 - rtsig-max
 - rtsig-nr
+- seccomp/                    ==> Documentation/userspace-api/seccomp_filter.rst
 - sem
 - sem_next_id                [ sysv ipc ]
 - sg-big-buff                 [ generic SCSI device (sg) ]
index f71eb5ef1f2df4154a0a31dd0307da00c5fde42f..099c412951d6b9dea515b08384e023e2c2d69025 100644 (file)
@@ -87,11 +87,16 @@ Return values
 A seccomp filter may return any of the following values. If multiple
 filters exist, the return value for the evaluation of a given system
 call will always use the highest precedent value. (For example,
-``SECCOMP_RET_KILL`` will always take precedence.)
+``SECCOMP_RET_KILL_PROCESS`` will always take precedence.)
 
 In precedence order, they are:
 
-``SECCOMP_RET_KILL``:
+``SECCOMP_RET_KILL_PROCESS``:
+       Results in the entire process exiting immediately without executing
+       the system call.  The exit status of the task (``status & 0x7f``)
+       will be ``SIGSYS``, not ``SIGKILL``.
+
+``SECCOMP_RET_KILL_THREAD``:
        Results in the task exiting immediately without executing the
        system call.  The exit status of the task (``status & 0x7f``) will
        be ``SIGSYS``, not ``SIGKILL``.
@@ -141,6 +146,15 @@ In precedence order, they are:
        allow use of ptrace, even of other sandboxed processes, without
        extreme care; ptracers can use this mechanism to escape.)
 
+``SECCOMP_RET_LOG``:
+       Results in the system call being executed after it is logged. This
+       should be used by application developers to learn which syscalls their
+       application needs without having to iterate through multiple test and
+       development cycles to build the list.
+
+       This action will only be logged if "log" is present in the
+       actions_logged sysctl string.
+
 ``SECCOMP_RET_ALLOW``:
        Results in the system call being executed.
 
@@ -169,7 +183,41 @@ The ``samples/seccomp/`` directory contains both an x86-specific example
 and a more generic example of a higher level macro interface for BPF
 program generation.
 
+Sysctls
+=======
 
+Seccomp's sysctl files can be found in the ``/proc/sys/kernel/seccomp/``
+directory. Here's a description of each file in that directory:
+
+``actions_avail``:
+       A read-only ordered list of seccomp return values (refer to the
+       ``SECCOMP_RET_*`` macros above) in string form. The ordering, from
+       left-to-right, is the least permissive return value to the most
+       permissive return value.
+
+       The list represents the set of seccomp return values supported
+       by the kernel. A userspace program may use this list to
+       determine if the actions found in the ``seccomp.h``, when the
+       program was built, differs from the set of actions actually
+       supported in the current running kernel.
+
+``actions_logged``:
+       A read-write ordered list of seccomp return values (refer to the
+       ``SECCOMP_RET_*`` macros above) that are allowed to be logged. Writes
+       to the file do not need to be in ordered form but reads from the file
+       will be ordered in the same way as the actions_avail sysctl.
+
+       It is important to note that the value of ``actions_logged`` does not
+       prevent certain actions from being logged when the audit subsystem is
+       configured to audit a task. If the action is not found in
+       ``actions_logged`` list, the final decision on whether to audit the
+       action for that task is ultimately left up to the audit subsystem to
+       decide for all seccomp return values other than ``SECCOMP_RET_ALLOW``.
+
+       The ``allow`` string is not accepted in the ``actions_logged`` sysctl
+       as it is not possible to log ``SECCOMP_RET_ALLOW`` actions. Attempting
+       to write ``allow`` to the sysctl will result in an EINVAL being
+       returned.
 
 Adding architecture support
 ===========================
index 2281af4b41b6ce2000be7f3305db98475fdd7df6..af0cb69f6a3edf8a483b9091c23dfbc6f54b6503 100644 (file)
@@ -352,6 +352,18 @@ L: linux-acpi@vger.kernel.org
 S:     Maintained
 F:     drivers/acpi/arm64
 
+ACPI PMIC DRIVERS
+M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M:     Len Brown <lenb@kernel.org>
+R:     Andy Shevchenko <andy@infradead.org>
+R:     Mika Westerberg <mika.westerberg@linux.intel.com>
+L:     linux-acpi@vger.kernel.org
+Q:     https://patchwork.kernel.org/project/linux-acpi/list/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+B:     https://bugzilla.kernel.org
+S:     Supported
+F:     drivers/acpi/pmic/
+
 ACPI THERMAL DRIVER
 M:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
@@ -2853,7 +2865,6 @@ S:        Supported
 F:     drivers/scsi/bnx2i/
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M:     Yuval Mintz <Yuval.Mintz@cavium.com>
 M:     Ariel Elior <ariel.elior@cavium.com>
 M:     everest-linux-l2@cavium.com
 L:     netdev@vger.kernel.org
@@ -5248,7 +5259,8 @@ S:        Maintained
 F:     drivers/iommu/exynos-iommu.c
 
 EZchip NPS platform support
-M:     Noam Camus <noamc@ezchip.com>
+M:     Elad Kanfi <eladkan@mellanox.com>
+M:     Vineet Gupta <vgupta@synopsys.com>
 S:     Supported
 F:     arch/arc/plat-eznps
 F:     arch/arc/boot/dts/eznps.dts
@@ -5334,9 +5346,7 @@ M:        "J. Bruce Fields" <bfields@fieldses.org>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
 F:     include/linux/fcntl.h
-F:     include/linux/fs.h
 F:     include/uapi/linux/fcntl.h
-F:     include/uapi/linux/fs.h
 F:     fs/fcntl.c
 F:     fs/locks.c
 
@@ -5345,6 +5355,8 @@ M:        Alexander Viro <viro@zeniv.linux.org.uk>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
 F:     fs/*
+F:     include/linux/fs.h
+F:     include/uapi/linux/fs.h
 
 FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M:     Riku Voipio <riku.voipio@iki.fi>
@@ -6643,8 +6655,8 @@ M:        Alexander Aring <alex.aring@gmail.com>
 M:     Stefan Schmidt <stefan@osg.samsung.com>
 L:     linux-wpan@vger.kernel.org
 W:     http://wpan.cakelab.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git
 S:     Maintained
 F:     net/ieee802154/
 F:     net/mac802154/
@@ -6727,7 +6739,7 @@ F:        Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
 F:     drivers/auxdisplay/img-ascii-lcd.c
 
 IMGTEC IR DECODER DRIVER
-M:     James Hogan <james.hogan@imgtec.com>
+M:     James Hogan <jhogan@kernel.org>
 S:     Maintained
 F:     drivers/media/rc/img-ir/
 
@@ -7551,7 +7563,7 @@ F:        arch/arm64/include/asm/kvm*
 F:     arch/arm64/kvm/
 
 KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
-M:     James Hogan <james.hogan@imgtec.com>
+M:     James Hogan <jhogan@kernel.org>
 L:     linux-mips@linux-mips.org
 S:     Supported
 F:     arch/mips/include/uapi/asm/kvm*
@@ -7559,7 +7571,7 @@ F:        arch/mips/include/asm/kvm*
 F:     arch/mips/kvm/
 
 KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc)
-M:     Alexander Graf <agraf@suse.com>
+M:     Paul Mackerras <paulus@ozlabs.org>
 L:     kvm-ppc@vger.kernel.org
 W:     http://www.linux-kvm.org/
 T:     git git://github.com/agraf/linux-2.6.git
@@ -8253,6 +8265,12 @@ L:       libertas-dev@lists.infradead.org
 S:     Orphan
 F:     drivers/net/wireless/marvell/libertas/
 
+MARVELL MACCHIATOBIN SUPPORT
+M:     Russell King <rmk@armlinux.org.uk>
+L:     linux-arm-kernel@lists.infradead.org
+S:     Maintained
+F:     arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
+
 MARVELL MV643XX ETHERNET DRIVER
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:     netdev@vger.kernel.org
@@ -8586,6 +8604,12 @@ M:       Sean Wang <sean.wang@mediatek.com>
 S:     Maintained
 F:     drivers/media/rc/mtk-cir.c
 
+MEDIATEK PMIC LED DRIVER
+M:     Sean Wang <sean.wang@mediatek.com>
+S:     Maintained
+F:     drivers/leds/leds-mt6323.c
+F:     Documentation/devicetree/bindings/leds/leds-mt6323.txt
+
 MEDIATEK ETHERNET DRIVER
 M:     Felix Fietkau <nbd@openwrt.org>
 M:     John Crispin <john@phrozen.org>
@@ -8868,7 +8892,7 @@ F:        Documentation/devicetree/bindings/media/meson-ao-cec.txt
 T:     git git://linuxtv.org/media_tree.git
 
 METAG ARCHITECTURE
-M:     James Hogan <james.hogan@imgtec.com>
+M:     James Hogan <jhogan@kernel.org>
 L:     linux-metag@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag.git
 S:     Odd Fixes
@@ -9189,7 +9213,6 @@ F:        include/linux/isicom.h
 MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
 M:     Bin Liu <b-liu@ti.com>
 L:     linux-usb@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
 S:     Maintained
 F:     drivers/usb/musb/
 
@@ -9337,7 +9360,7 @@ NETWORK BLOCK DEVICE (NBD)
 M:     Josef Bacik <jbacik@fb.com>
 S:     Maintained
 L:     linux-block@vger.kernel.org
-L:     nbd-general@lists.sourceforge.net
+L:     nbd@other.debian.org
 F:     Documentation/blockdev/nbd.txt
 F:     drivers/block/nbd.c
 F:     include/uapi/linux/nbd.h
@@ -10156,7 +10179,6 @@ F:      Documentation/parport*.txt
 
 PARAVIRT_OPS INTERFACE
 M:     Juergen Gross <jgross@suse.com>
-M:     Chris Wright <chrisw@sous-sol.org>
 M:     Alok Kataria <akataria@vmware.com>
 M:     Rusty Russell <rusty@rustcorp.com.au>
 L:     virtualization@lists.linux-foundation.org
@@ -10536,6 +10558,8 @@ M:      Peter Zijlstra <peterz@infradead.org>
 M:     Ingo Molnar <mingo@redhat.com>
 M:     Arnaldo Carvalho de Melo <acme@kernel.org>
 R:     Alexander Shishkin <alexander.shishkin@linux.intel.com>
+R:     Jiri Olsa <jolsa@redhat.com>
+R:     Namhyung Kim <namhyung@kernel.org>
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:     Supported
@@ -11047,7 +11071,6 @@ S:      Supported
 F:     drivers/scsi/qedi/
 
 QLOGIC QL4xxx ETHERNET DRIVER
-M:     Yuval Mintz <Yuval.Mintz@cavium.com>
 M:     Ariel Elior <Ariel.Elior@cavium.com>
 M:     everest-linux-l2@cavium.com
 L:     netdev@vger.kernel.org
@@ -12915,9 +12938,9 @@ F:      drivers/mmc/host/dw_mmc*
 SYNOPSYS HSDK RESET CONTROLLER DRIVER
 M:     Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 S:     Supported
-F:     drivers/reset/reset-hsdk-v1.c
-F:     include/dt-bindings/reset/snps,hsdk-v1-reset.h
-F:     Documentation/devicetree/bindings/reset/snps,hsdk-v1-reset.txt
+F:     drivers/reset/reset-hsdk.c
+F:     include/dt-bindings/reset/snps,hsdk-reset.h
+F:     Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt
 
 SYSTEM CONFIGURATION (SYSCON)
 M:     Lee Jones <lee.jones@linaro.org>
index 64cbc66cebcaa7bbf8c0d10a09412ce6693437b9..5f91a28a3cea65c4c5d79be124c86ae5998b0f7a 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -130,8 +130,8 @@ endif
 ifneq ($(KBUILD_OUTPUT),)
 # check that the output directory actually exists
 saved-output := $(KBUILD_OUTPUT)
-$(shell [ -d $(KBUILD_OUTPUT) ] || mkdir -p $(KBUILD_OUTPUT))
-KBUILD_OUTPUT := $(realpath $(KBUILD_OUTPUT))
+KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \
+                                                               && /bin/pwd)
 $(if $(KBUILD_OUTPUT),, \
      $(error failed to create output directory "$(saved-output)"))
 
@@ -697,11 +697,11 @@ KBUILD_CFLAGS += $(stackp-flag)
 
 ifeq ($(cc-name),clang)
 ifneq ($(CROSS_COMPILE),)
-CLANG_TARGET   := -target $(notdir $(CROSS_COMPILE:%-=%))
+CLANG_TARGET   := --target=$(notdir $(CROSS_COMPILE:%-=%))
 GCC_TOOLCHAIN  := $(realpath $(dir $(shell which $(LD)))/..)
 endif
 ifneq ($(GCC_TOOLCHAIN),)
-CLANG_GCC_TC   := -gcc-toolchain $(GCC_TOOLCHAIN)
+CLANG_GCC_TC   := --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
 KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
@@ -933,7 +933,11 @@ ifdef CONFIG_STACK_VALIDATION
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
-    $(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+    ifdef CONFIG_ORC_UNWINDER
+      $(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+    else
+      $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+    endif
     SKIP_STACK_VALIDATION := 1
     export SKIP_STACK_VALIDATION
   endif
@@ -1172,11 +1176,11 @@ headers_check: headers_install
 
 PHONY += kselftest
 kselftest:
-       $(Q)$(MAKE) -C tools/testing/selftests run_tests
+       $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests run_tests
 
 PHONY += kselftest-clean
 kselftest-clean:
-       $(Q)$(MAKE) -C tools/testing/selftests clean
+       $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests clean
 
 PHONY += kselftest-merge
 kselftest-merge:
@@ -1395,7 +1399,7 @@ help:
        @echo  '                    Build, install, and boot kernel before'
        @echo  '                    running kselftest on it'
        @echo  '  kselftest-clean - Remove all generated kselftest files'
-       @echo  '  kselftest-merge - Merge all the config dependencies of kselftest to existed'
+       @echo  '  kselftest-merge - Merge all the config dependencies of kselftest to existing'
        @echo  '                    .config.'
        @echo  ''
        @echo 'Userspace tools targets:'
index 1aafb4efbb51dfea8c5639f2bd68379c930825d4..d789a89cb32c450fa436bac9e4b99171e3c9cc5c 100644 (file)
@@ -937,9 +937,6 @@ config STRICT_MODULE_RWX
          and non-text memory will be made non-executable. This provides
          protection against certain security exploits (e.g. writing to text)
 
-config ARCH_WANT_RELAX_ORDER
-       bool
-
 config ARCH_HAS_REFCOUNT
        bool
        help
index 384bd47b5187179f4e40ee63399117c14f40eb0d..45c020a0fe76114688c4a77f1a1f574049821341 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/mm_types.h>
+#include <linux/sched.h>
 
 #include <asm/machvec.h>
 #include <asm/compiler.h>
index 118dc6af1805098e1f69439b07dc4885627a21d6..7ad074fd5ab5066bf7534713da4060852fd6bcba 100644 (file)
@@ -181,10 +181,10 @@ alcor_init_irq(void)
  * comes in on.  This makes interrupt processing much easier.
  */
 
-static int __init
+static int
 alcor_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[7][5] __initdata = {
+       static char irq_tab[7][5] = {
                /*INT    INTA   INTB   INTC   INTD */
                /* note: IDSEL 17 is XLT only */
                {16+13, 16+13, 16+13, 16+13, 16+13},    /* IdSel 17,  TULIP  */
index 4c50f8f40cbbfe73c39ffd0062d1937dd892b9a6..c0fa1fe5ce773fbda6d4b0a4253052a60dff98af 100644 (file)
@@ -173,10 +173,10 @@ pc164_init_irq(void)
  * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
  */
 
-static inline int __init
+static inline int
 eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[5][5] __initdata = {
+       static char irq_tab[5][5] = {
                /*INT  INTA  INTB  INTC   INTD */
                {16+0, 16+0, 16+5,  16+9, 16+13},  /* IdSel 6,  slot 0, J25 */
                {16+1, 16+1, 16+6, 16+10, 16+14},  /* IdSel 7,  slot 1, J26 */
@@ -203,10 +203,10 @@ eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
  * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
  */
 
-static inline int __init
+static inline int
 cabriolet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[5][5] __initdata = {
+       static char irq_tab[5][5] = {
                /*INT   INTA  INTB  INTC   INTD */
                { 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5,  slot 2, J21 */
                { 16+0, 16+0, 16+5,  16+9, 16+13}, /* IdSel 6,  slot 0, J19 */
@@ -287,10 +287,10 @@ cia_cab_init_pci(void)
  * 
  */
 
-static inline int __init
+static inline int
 alphapc164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[7][5] __initdata = {
+       static char irq_tab[7][5] = {
                /*INT   INTA  INTB   INTC   INTD */
                { 16+2, 16+2, 16+9,  16+13, 16+17}, /* IdSel  5, slot 2, J20 */
                { 16+0, 16+0, 16+7,  16+11, 16+15}, /* IdSel  6, slot 0, J29 */
index 6c35159bc00eb47860516bda8868801a74bbe868..9e1e40ea1d14be8e20e99155e5f589b33499e5ba 100644 (file)
@@ -356,7 +356,7 @@ clipper_init_irq(void)
  *  10  64 bit PCI option slot 3 (not bus 0)
  */
 
-static int __init
+static int
 isa_irq_fixup(const struct pci_dev *dev, int irq)
 {
        u8 irq8;
@@ -372,10 +372,10 @@ isa_irq_fixup(const struct pci_dev *dev, int irq)
        return irq8 & 0xf;
 }
 
-static int __init
+static int
 dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[6][5] __initdata = {
+       static char irq_tab[6][5] = {
                /*INT    INTA   INTB   INTC   INTD */
                {    -1,    -1,    -1,    -1,    -1}, /* IdSel 5 ISA Bridge */
                { 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/
@@ -394,10 +394,10 @@ dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        return isa_irq_fixup(dev, irq);
 }
 
-static int __init
+static int
 monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[13][5] __initdata = {
+       static char irq_tab[13][5] = {
                /*INT    INTA   INTB   INTC   INTD */
                {    45,    45,    45,    45,    45}, /* IdSel 3 21143 PCI1 */
                {    -1,    -1,    -1,    -1,    -1}, /* IdSel 4 unused */
@@ -423,7 +423,7 @@ monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
 }
 
-static u8 __init
+static u8
 monet_swizzle(struct pci_dev *dev, u8 *pinp)
 {
        struct pci_controller *hose = dev->sysdata;
@@ -456,10 +456,10 @@ monet_swizzle(struct pci_dev *dev, u8 *pinp)
        return slot;
 }
 
-static int __init
+static int
 webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[13][5] __initdata = {
+       static char irq_tab[13][5] = {
                /*INT    INTA   INTB   INTC   INTD */
                {    -1,    -1,    -1,    -1,    -1}, /* IdSel 7 ISA Bridge */
                {    -1,    -1,    -1,    -1,    -1}, /* IdSel 8 unused */
@@ -478,10 +478,10 @@ webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
 }
 
-static int __init
+static int
 clipper_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[7][5] __initdata = {
+       static char irq_tab[7][5] = {
                /*INT    INTA   INTB   INTC   INTD */
                { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */
                { 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */
index ad40a425e841c2f1c99904a43c45fa2c9a281d1d..372661c5653793f7695fb1e36598bb38269d0552 100644 (file)
@@ -167,10 +167,10 @@ eb64p_init_irq(void)
  * comes in on.  This makes interrupt processing much easier.
  */
 
-static int __init
+static int
 eb64p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[5][5] __initdata = {
+       static char irq_tab[5][5] = {
                /*INT  INTA  INTB  INTC   INTD */
                {16+7, 16+7, 16+7, 16+7,  16+7},  /* IdSel 5,  slot ?, ?? */
                {16+0, 16+0, 16+2, 16+4,  16+9},  /* IdSel 6,  slot ?, ?? */
index 15f42083bdb3501fc041c2e1634225674fe849b3..2731738b5872c415637d87aaa70e495fd85f7d75 100644 (file)
@@ -141,7 +141,7 @@ eiger_init_irq(void)
        }
 }
 
-static int __init
+static int
 eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        u8 irq_orig;
@@ -158,7 +158,7 @@ eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        return irq_orig - 0x80;
 }
 
-static u8 __init
+static u8
 eiger_swizzle(struct pci_dev *dev, u8 *pinp)
 {
        struct pci_controller *hose = dev->sysdata;
index d5b9776a608d91d957b8a362c3da3e8c93ccaad5..731d693fa1f99cf3d5d493676217eebf7c323b85 100644 (file)
@@ -149,10 +149,10 @@ miata_init_irq(void)
  * comes in on.  This makes interrupt processing much easier.
  */
 
-static int __init
+static int
 miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-        static char irq_tab[18][5] __initdata = {
+        static char irq_tab[18][5] = {
                /*INT    INTA   INTB   INTC   INTD */
                {16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8},  /* IdSel 14,  DC21142 */
                {   -1,    -1,    -1,    -1,    -1},  /* IdSel 15,  EIDE    */
@@ -196,7 +196,7 @@ miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        return COMMON_TABLE_LOOKUP;
 }
 
-static u8 __init
+static u8
 miata_swizzle(struct pci_dev *dev, u8 *pinp)
 {
        int slot, pin = *pinp;
index 5e82dc1ad6f2dc096cdc217f9c32f46180c6a7a6..350ec9c8335b4dff3ff36b3e7828ce5ee5b6cc61 100644 (file)
@@ -145,10 +145,10 @@ mikasa_init_irq(void)
  * comes in on.  This makes interrupt processing much easier.
  */
 
-static int __init
+static int
 mikasa_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[8][5] __initdata = {
+       static char irq_tab[8][5] = {
                /*INT    INTA   INTB   INTC   INTD */
                {16+12, 16+12, 16+12, 16+12, 16+12},    /* IdSel 17,  SCSI */
                {   -1,    -1,    -1,    -1,    -1},    /* IdSel 18,  PCEB */
index 8ae04a121186e2b090256956b3e7e425a571e1c4..d019e4ce07bd946be467ce996e105c8a345a8bf7 100644 (file)
@@ -62,7 +62,7 @@ nautilus_init_irq(void)
        common_init_isa_dma();
 }
 
-static int __init
+static int
 nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        /* Preserve the IRQ set up by the console.  */
index 063e594fd96934f90971172c25f84fc8e9a89255..2301678d9f9dc747b95867e5b3341b69f3b7543a 100644 (file)
@@ -193,10 +193,10 @@ noritake_init_irq(void)
  * comes in on.  This makes interrupt processing much easier.
  */
 
-static int __init
+static int
 noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[15][5] __initdata = {
+       static char irq_tab[15][5] = {
                /*INT    INTA   INTB   INTC   INTD */
                /* note: IDSELs 16, 17, and 25 are CORELLE only */
                { 16+1,  16+1,  16+1,  16+1,  16+1},  /* IdSel 16,  QLOGIC */
@@ -221,7 +221,7 @@ noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        return COMMON_TABLE_LOOKUP;
 }
 
-static u8 __init
+static u8
 noritake_swizzle(struct pci_dev *dev, u8 *pinp)
 {
        int slot, pin = *pinp;
index dfd510ae5d8c82701b5a5fbf278bc0af699d56a5..546822d07dc7a2973e590b61b3396a8d21e05320 100644 (file)
@@ -221,10 +221,10 @@ rawhide_init_irq(void)
  * 
  */
 
-static int __init
+static int
 rawhide_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[5][5] __initdata = {
+       static char irq_tab[5][5] = {
                /*INT    INTA   INTB   INTC   INTD */
                { 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */
                { 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */
index a3f4852571700183d033126a8b82ff46613c2943..3b35e19134922fc46c0c86e3d4eb81c5cb111fba 100644 (file)
@@ -117,10 +117,10 @@ ruffian_kill_arch (int mode)
  *
  */
 
-static int __init
+static int
 ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-        static char irq_tab[11][5] __initdata = {
+        static char irq_tab[11][5] = {
              /*INT  INTA INTB INTC INTD */
                {-1,  -1,  -1,  -1,  -1},  /* IdSel 13,  21052       */
                {-1,  -1,  -1,  -1,  -1},  /* IdSel 14,  SIO         */
@@ -139,7 +139,7 @@ ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        return COMMON_TABLE_LOOKUP;
 }
 
-static u8 __init
+static u8
 ruffian_swizzle(struct pci_dev *dev, u8 *pinp)
 {
        int slot, pin = *pinp;
index 08ee737d4fba1559c6fe09ce2cb7a98e2fc23ffe..e178007107ef36f44cbe146d9b4c26756457897e 100644 (file)
@@ -142,7 +142,7 @@ rx164_init_irq(void)
  * 
  */
 
-static int __init
+static int
 rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 #if 0
@@ -156,7 +156,7 @@ rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
          { 16+1, 16+1, 16+6, 16+11, 16+16},      /* IdSel 10, slot 4 */
        };
 #else
-       static char irq_tab[6][5] __initdata = {
+       static char irq_tab[6][5] = {
          /*INT   INTA  INTB  INTC   INTD */
          { 16+0, 16+0, 16+6, 16+11, 16+16},      /* IdSel 5,  slot 0 */
          { 16+1, 16+1, 16+7, 16+12, 16+17},      /* IdSel 6,  slot 1 */
index 8a0aa6d67b5319c18d9ba050f4d6364d4108dc5b..86d259c2612d8b8c4a0642fc6d83944d08a0aa67 100644 (file)
@@ -192,10 +192,10 @@ sable_init_irq(void)
  * with the values in the irq swizzling tables above.
  */
 
-static int __init
+static int
 sable_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[9][5] __initdata = {
+       static char irq_tab[9][5] = {
                /*INT    INTA   INTB   INTC   INTD */
                { 32+0,  32+0,  32+0,  32+0,  32+0},  /* IdSel 0,  TULIP  */
                { 32+1,  32+1,  32+1,  32+1,  32+1},  /* IdSel 1,  SCSI   */
@@ -374,10 +374,10 @@ lynx_init_irq(void)
  * with the values in the irq swizzling tables above.
  */
 
-static int __init
+static int
 lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[19][5] __initdata = {
+       static char irq_tab[19][5] = {
                /*INT    INTA   INTB   INTC   INTD */
                {   -1,    -1,    -1,    -1,    -1},  /* IdSel 13,  PCEB   */
                {   -1,    -1,    -1,    -1,    -1},  /* IdSel 14,  PPB    */
@@ -404,7 +404,7 @@ lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        return COMMON_TABLE_LOOKUP;
 }
 
-static u8 __init
+static u8
 lynx_swizzle(struct pci_dev *dev, u8 *pinp)
 {
        int slot, pin = *pinp;
index febd24eba7a6a26d7251dfdd85473d00b531669a..9fd2895639d594d7c456c77994ef5e1ecd1bb1a2 100644 (file)
@@ -144,7 +144,7 @@ sio_fixup_irq_levels(unsigned int level_bits)
        outb((level_bits >> 8) & 0xff, 0x4d1);
 }
 
-static inline int __init
+static inline int
 noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        /*
@@ -165,7 +165,7 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
         * that they use the default INTA line, if they are interrupt
         * driven at all).
         */
-       static char irq_tab[][5] __initdata = {
+       static char irq_tab[][5] = {
                /*INT A   B   C   D */
                { 3,  3,  3,  3,  3}, /* idsel  6 (53c810) */ 
                {-1, -1, -1, -1, -1}, /* idsel  7 (SIO: PCI/ISA bridge) */
@@ -183,10 +183,10 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        return irq >= 0 ? tmp : -1;
 }
 
-static inline int __init
+static inline int
 p2k_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[][5] __initdata = {
+       static char irq_tab[][5] = {
                /*INT A   B   C   D */
                { 0,  0, -1, -1, -1}, /* idsel  6 (53c810) */
                {-1, -1, -1, -1, -1}, /* idsel  7 (SIO: PCI/ISA bridge) */
index d063b360efedc49d7690af17d87d39a93205f56d..23eee54d714ae80b6beb0cd951068298f96bd463 100644 (file)
@@ -94,10 +94,10 @@ sx164_init_irq(void)
  *   9  32 bit PCI option slot 3
  */
 
-static int __init
+static int
 sx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[5][5] __initdata = {
+       static char irq_tab[5][5] = {
                /*INT    INTA   INTB   INTC   INTD */
                { 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */
                { 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */
index dd0f1eae3c68d27122718485760ce2f2db276bf2..9101f2bb61765ebb37364320d8bf07523110d59f 100644 (file)
@@ -155,10 +155,10 @@ takara_init_irq(void)
  * assign it whatever the hell IRQ we like and it doesn't matter.
  */
 
-static int __init
+static int
 takara_map_irq_srm(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[15][5] __initdata = {
+       static char irq_tab[15][5] = {
                { 16+3, 16+3, 16+3, 16+3, 16+3},   /* slot  6 == device 3 */
                { 16+2, 16+2, 16+2, 16+2, 16+2},   /* slot  7 == device 2 */
                { 16+1, 16+1, 16+1, 16+1, 16+1},   /* slot  8 == device 1 */
@@ -210,7 +210,7 @@ takara_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        return COMMON_TABLE_LOOKUP;
 }
 
-static u8 __init
+static u8
 takara_swizzle(struct pci_dev *dev, u8 *pinp)
 {
        int slot = PCI_SLOT(dev->devfn);
index ee1874887776bd5c732ce6c964cbe8cb7154d8ef..c3f8b79fe21422af34a2430969d323739d91efbb 100644 (file)
@@ -288,10 +288,10 @@ wildfire_device_interrupt(unsigned long vector)
  *   7  64 bit PCI 1 option slot 7
  */
 
-static int __init
+static int
 wildfire_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       static char irq_tab[8][5] __initdata = {
+       static char irq_tab[8][5] = {
                /*INT    INTA   INTB   INTC   INTD */
                { -1,    -1,    -1,    -1,    -1}, /* IdSel 0 ISA Bridge */
                { 36,    36,    36+1, 36+2, 36+3}, /* IdSel 1 SCSI builtin */
index a598641eed98ef4d60dc65a25a4279a70be7e134..c84e67fdea095cbed225edac052f4ce07007abb9 100644 (file)
@@ -24,7 +24,7 @@ config ARC
        select GENERIC_SMP_IDLE_THREAD
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_TRACEHOOK
-       select HAVE_FUTEX_CMPXCHG
+       select HAVE_FUTEX_CMPXCHG if FUTEX
        select HAVE_IOREMAP_PROT
        select HAVE_KPROBES
        select HAVE_KRETPROBES
index 3a4b52b7e09d61456daf12335e7e5b7c75ec7077..d37f49d6a27f40f65d3e34bd3e2df5343a97d1e4 100644 (file)
@@ -6,8 +6,6 @@
 # published by the Free Software Foundation.
 #
 
-UTS_MACHINE := arc
-
 ifeq ($(CROSS_COMPILE),)
 ifndef CONFIG_CPU_BIG_ENDIAN
 CROSS_COMPILE := arc-linux-
index 2367a67c5f10bd3e99ce757dafed25dfa95ecb35..e114000a84f56c9e07ddd3a2e623c4dfeb3df6a2 100644 (file)
 
                        mmcclk: mmcclk {
                                compatible = "fixed-clock";
-                               clock-frequency = <50000000>;
+                               /*
+                                * DW sdio controller has external ciu clock divider
+                                * controlled via register in SDIO IP. It divides
+                                * sdio_ref_clk (which comes from CGU) by 16 by
+                                * default. So the default mmcclk clock (which comes
+                                * to sdk_in) is 25000000 Hz.
+                                */
+                               clock-frequency = <25000000>;
                                #clock-cells = <0>;
                        };
 
index 229d13adbce426c1690684f7e8dcb344a001429d..8f627c200d609148c55731aac99b2a353e72f126 100644 (file)
@@ -12,6 +12,7 @@
 /dts-v1/;
 
 #include <dt-bindings/net/ti-dp83867.h>
+#include <dt-bindings/reset/snps,hsdk-reset.h>
 
 / {
        model = "snps,hsdk";
                };
        };
 
-       core_clk: core-clk {
+       input_clk: input-clk {
                #clock-cells = <0>;
                compatible = "fixed-clock";
-               clock-frequency = <500000000>;
+               clock-frequency = <33333333>;
        };
 
        cpu_intc: cpu-interrupt-controller {
 
                ranges = <0x00000000 0xf0000000 0x10000000>;
 
+               cgu_rst: reset-controller@8a0 {
+                       compatible = "snps,hsdk-reset";
+                       #reset-cells = <1>;
+                       reg = <0x8A0 0x4>, <0xFF0 0x4>;
+               };
+
+               core_clk: core-clk@0 {
+                       compatible = "snps,hsdk-core-pll-clock";
+                       reg = <0x00 0x10>, <0x14B8 0x4>;
+                       #clock-cells = <0>;
+                       clocks = <&input_clk>;
+               };
+
                serial: serial@5000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x5000 0x100>;
 
                mmcclk_ciu: mmcclk-ciu {
                        compatible = "fixed-clock";
-                       clock-frequency = <100000000>;
+                       /*
+                        * DW sdio controller has external ciu clock divider
+                        * controlled via register in SDIO IP. Due to its
+                        * unexpected default value (it should divide by 1
+                        * but it divides by 8) SDIO IP uses wrong clock and
+                        * works unstably (see STAR 9001204800)
+                        * We switched to the minimum possible value of the
+                        * divisor (div-by-2) in HSDK platform code.
+                        * So add temporary fix and change clock frequency
+                        * to 50000000 Hz until we fix dw sdio driver itself.
+                        */
+                       clock-frequency = <50000000>;
                        #clock-cells = <0>;
                };
 
                        clocks = <&gmacclk>;
                        clock-names = "stmmaceth";
                        phy-handle = <&phy0>;
+                       resets = <&cgu_rst HSDK_ETH_RESET>;
+                       reset-names = "stmmaceth";
 
                        mdio {
                                #address-cells = <1>;
index 6980b966a36403b6170130fa1389ea9fb66095b8..ec7c849a5c8e9887fbd60acf176f194cc0c4ba03 100644 (file)
@@ -105,7 +105,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
index 2233f5777a71f82cb5defb9ae99bbfc11772fdac..63d3cf69e0b02efe45fca035649951836fc1e948 100644 (file)
@@ -104,7 +104,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
index 30a3d4cf53d20af6f202a61e3f5a980e7f1e7c2e..f613ecac14a750e6008dc817d3a06727a4631989 100644 (file)
@@ -107,7 +107,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
index 821a2e562f3f12422844556630d7136ad63097a8..3507be2af6fe3684f4acca6de7009903da82b97e 100644 (file)
@@ -84,5 +84,5 @@ CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 # CONFIG_DEBUG_PREEMPT is not set
index 9a3fcf44638820d905895b2bf0ba5a6f18511f4c..7b8f8faf8a24315d3379d189cab69506539e04a8 100644 (file)
@@ -72,7 +72,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
index c0d6a010751a9f80e4139fa6a9b833de4b24b6a8..4fcf4f2503f61a9e802b82e2cbb4aad9617725d8 100644 (file)
@@ -94,7 +94,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_SHIRQ=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
index 5c0971787acf4c18066c4bc537f36c1dae37232f..7b71464f6c2f2904e18893d8ba80c65471fc4ef5 100644 (file)
@@ -98,7 +98,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_SHIRQ=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
index ba8e802dba80bd0776508ff70945e6e8b54ea43e..b1c56d35f2a938e59c9677454499aeecb1da4f7e 100644 (file)
@@ -98,6 +98,7 @@
 
 /* Auxiliary registers */
 #define AUX_IDENTITY           4
+#define AUX_EXEC_CTRL          8
 #define AUX_INTR_VEC_BASE      0x25
 #define AUX_VOL                        0x5e
 
@@ -135,12 +136,12 @@ struct bcr_identity {
 #endif
 };
 
-struct bcr_isa {
+struct bcr_isa_arcv2 {
 #ifdef CONFIG_CPU_BIG_ENDIAN
        unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1,
-                    pad1:11, atomic1:1, ver:8;
+                    pad1:12, ver:8;
 #else
-       unsigned int ver:8, atomic1:1, pad1:11, be:1, atomic:1, unalign:1,
+       unsigned int ver:8, pad1:12, be:1, atomic:1, unalign:1,
                     ldd:1, pad2:4, div_rem:4;
 #endif
 };
@@ -263,13 +264,13 @@ struct cpuinfo_arc {
        struct cpuinfo_arc_mmu mmu;
        struct cpuinfo_arc_bpu bpu;
        struct bcr_identity core;
-       struct bcr_isa isa;
+       struct bcr_isa_arcv2 isa;
        const char *details, *name;
        unsigned int vec_base;
        struct cpuinfo_arc_ccm iccm, dccm;
        struct {
                unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
-                            fpu_sp:1, fpu_dp:1, pad2:6,
+                            fpu_sp:1, fpu_dp:1, dual_iss_enb:1, dual_iss_exist:1, pad2:4,
                             debug:1, ap:1, smart:1, rtt:1, pad3:4,
                             timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
        } extn;
index d400a2161935de5442c0ae1ddc7159ae16c003a4..8ee41e9881690b648bf1d4e5c385ddeef0a860c2 100644 (file)
@@ -78,9 +78,6 @@ struct task_struct;
 
 #endif
 
-#define copy_segments(tsk, mm)      do { } while (0)
-#define release_segments(mm)        do { } while (0)
-
 #define KSTK_EIP(tsk)   (task_pt_regs(tsk)->ret)
 #define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
 
index 877cec8f5ea21256e233f4f353882ff02097537a..fb83844daeea3550aacd27de2525711a6aa5fddc 100644 (file)
@@ -51,6 +51,7 @@ static const struct id_to_str arc_cpu_rel[] = {
        { 0x51, "R2.0" },
        { 0x52, "R2.1" },
        { 0x53, "R3.0" },
+       { 0x54, "R4.0" },
 #endif
        { 0x00, NULL   }
 };
@@ -62,6 +63,7 @@ static const struct id_to_str arc_cpu_nm[] = {
 #else
        { 0x40, "ARC EM"  },
        { 0x50, "ARC HS38"  },
+       { 0x54, "ARC HS48"  },
 #endif
        { 0x00, "Unknown"   }
 };
@@ -119,11 +121,11 @@ static void read_arc_build_cfg_regs(void)
        struct bcr_generic bcr;
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
        const struct id_to_str *tbl;
+       struct bcr_isa_arcv2 isa;
 
        FIX_PTR(cpu);
 
        READ_BCR(AUX_IDENTITY, cpu->core);
-       READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
 
        for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
                if (cpu->core.family == tbl->id) {
@@ -133,7 +135,7 @@ static void read_arc_build_cfg_regs(void)
        }
 
        for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
-               if ((cpu->core.family & 0xF0) == tbl->id)
+               if ((cpu->core.family & 0xF4) == tbl->id)
                        break;
        }
        cpu->name = tbl->str;
@@ -192,6 +194,14 @@ static void read_arc_build_cfg_regs(void)
                cpu->bpu.full = bpu.ft;
                cpu->bpu.num_cache = 256 << bpu.bce;
                cpu->bpu.num_pred = 2048 << bpu.pte;
+
+               if (cpu->core.family >= 0x54) {
+                       unsigned int exec_ctrl;
+
+                       READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+                       cpu->extn.dual_iss_exist = 1;
+                       cpu->extn.dual_iss_enb = exec_ctrl & 1;
+               }
        }
 
        READ_BCR(ARC_REG_AP_BCR, bcr);
@@ -205,18 +215,25 @@ static void read_arc_build_cfg_regs(void)
 
        cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
 
+       READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
+
        /* some hacks for lack of feature BCR info in old ARC700 cores */
        if (is_isa_arcompact()) {
-               if (!cpu->isa.ver)      /* ISA BCR absent, use Kconfig info */
+               if (!isa.ver)   /* ISA BCR absent, use Kconfig info */
                        cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
-               else
-                       cpu->isa.atomic = cpu->isa.atomic1;
+               else {
+                       /* ARC700_BUILD only has 2 bits of isa info */
+                       struct bcr_generic bcr = *(struct bcr_generic *)&isa;
+                       cpu->isa.atomic = bcr.info & 1;
+               }
 
                cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
 
                 /* there's no direct way to distinguish 750 vs. 770 */
                if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
                        cpu->name = "ARC750";
+       } else {
+               cpu->isa = isa;
        }
 }
 
@@ -232,10 +249,11 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                       "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
                       core->family, core->cpu_id, core->chip_id);
 
-       n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
+       n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
                       cpu_id, cpu->name, cpu->details,
                       is_isa_arcompact() ? "ARCompact" : "ARCv2",
-                      IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
+                      IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
+                      IS_AVAIL3(cpu->extn.dual_iss_exist, cpu->extn.dual_iss_enb, " Dual-Issue"));
 
        n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
                       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
index f46267153ec2e971bb4b81e2513835d2ee2be635..6df9d94a953763eca43b20f02f1897308ab1ee7a 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/cpumask.h>
 #include <linux/reboot.h>
 #include <linux/irqdomain.h>
+#include <linux/export.h>
+
 #include <asm/processor.h>
 #include <asm/setup.h>
 #include <asm/mach_desc.h>
@@ -30,6 +32,9 @@
 #ifndef CONFIG_ARC_HAS_LLSC
 arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
+EXPORT_SYMBOL_GPL(smp_bitops_lock);
 #endif
 
 struct plat_smp_ops  __weak plat_smp_ops;
index f1ac6790da5fe64782b59b720bf3ea80d999bff1..cf14ebc36916a2a0eca39728c0cc0f315d58bbeb 100644 (file)
@@ -111,6 +111,13 @@ static void __init axs10x_early_init(void)
 
        axs10x_enable_gpio_intc_wire();
 
+       /*
+        * Reset ethernet IP core.
+        * TODO: get rid of this quirk once an axs10x reset driver (or simple
+        * reset driver) becomes available upstream.
+        */
+       iowrite32((1 << 5), (void __iomem *) CREG_MB_SW_RESET);
+
        scnprintf(mb, 32, "MainBoard v%d", mb_rev);
        axs10x_print_board_ver(CREG_MB_VER, mb);
 }
index 5a6ed5afb009a160e617a5e2fd81f1be6f56fcd3..19ab3cf98f0f34904b8431a6d4cf36642066c513 100644 (file)
@@ -6,4 +6,6 @@
 #
 
 menuconfig ARC_SOC_HSDK
-       bool "ARC HS Development Kit SOC"
+       bool "ARC HS Development Kit SOC"
+       select CLK_HSDK
+       select RESET_HSDK
index a2e7fd17e36d36a4e0f29c183834ae349e5a8786..fd0ae5e38639a8756c86d7882c6e74c88f0e07e5 100644 (file)
@@ -38,6 +38,46 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
 #define CREG_PAE               (CREG_BASE + 0x180)
 #define CREG_PAE_UPDATE                (CREG_BASE + 0x194)
 
+#define CREG_CORE_IF_CLK_DIV   (CREG_BASE + 0x4B8)
+#define CREG_CORE_IF_CLK_DIV_2 0x1
+#define CGU_BASE               ARC_PERIPHERAL_BASE
+#define CGU_PLL_STATUS         (ARC_PERIPHERAL_BASE + 0x4)
+#define CGU_PLL_CTRL           (ARC_PERIPHERAL_BASE + 0x0)
+#define CGU_PLL_STATUS_LOCK    BIT(0)
+#define CGU_PLL_STATUS_ERR     BIT(1)
+#define CGU_PLL_CTRL_1GHZ      0x3A10
+#define HSDK_PLL_LOCK_TIMEOUT  500
+
+#define HSDK_PLL_LOCKED() \
+       !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK)
+
+#define HSDK_PLL_ERR() \
+       !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR)
+
+static void __init hsdk_set_cpu_freq_1ghz(void)
+{
+       u32 timeout = HSDK_PLL_LOCK_TIMEOUT;
+
+       /*
+        * As we set a cpu clock which exceeds 500MHz, the divider for the interface
+        * clock must be programmed to div-by-2.
+        */
+       iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV);
+
+       /* Set cpu clock to 1GHz */
+       iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL);
+
+       while (!HSDK_PLL_LOCKED() && timeout--)
+               cpu_relax();
+
+       if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR())
+               pr_err("Failed to setup CPU frequency to 1GHz!");
+}
+
+#define SDIO_BASE              (ARC_PERIPHERAL_BASE + 0xA000)
+#define SDIO_UHS_REG_EXT       (SDIO_BASE + 0x108)
+#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
+
 static void __init hsdk_init_early(void)
 {
        /*
@@ -52,6 +92,18 @@ static void __init hsdk_init_early(void)
 
        /* Really apply settings made above */
        writel(1, (void __iomem *) CREG_PAE_UPDATE);
+
+       /*
+        * Switch SDIO external ciu clock divider from default div-by-8 to
+        * minimum possible div-by-2.
+        */
+       iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
+
+       /*
+        * Setup CPU frequency to 1GHz.
+        * TODO: remove this once a smart hsdk pll driver is introduced.
+        */
+       hsdk_set_cpu_freq_1ghz();
 }
 
 static const char *hsdk_compat[] __initconst = {
index 47d3a1ab08d2491aff178040b309180226d131e8..817e5cfef83a933e8d66bfd62a7c908a1af92f2c 100644 (file)
@@ -131,7 +131,7 @@ endif
 KBUILD_CFLAGS  +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
 KBUILD_AFLAGS  +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
 
-CHECKFLAGS     += -D__arm__
+CHECKFLAGS     += -D__arm__ -m32
 
 #Default value
 head-y         := arch/arm/kernel/head$(MMUEXT).o
index 5392ee63338fac3453f30b125366e03241158133..8f6e37177de132252c560765440698ac08b69408 100644 (file)
@@ -23,7 +23,11 @@ ENTRY(putc)
        strb    r0, [r1]
        mov     r0, #0x03               @ SYS_WRITEC
    ARM(        svc     #0x123456       )
+#ifdef CONFIG_CPU_V7M
+ THUMB(        bkpt    #0xab           )
+#else
  THUMB(        svc     #0xab           )
+#endif
        mov     pc, lr
        .align  2
 1:     .word   _GLOBAL_OFFSET_TABLE_ - .
index 7d7ca054c557b4b66e5da847c53e6c20fd36cd5b..e58fab8aec5df2a885ea8c04f9eefda09cf2d307 100644 (file)
@@ -36,6 +36,8 @@
                phy1 = &usb1_phy;
                ethernet0 = &cpsw_emac0;
                ethernet1 = &cpsw_emac1;
+               spi0 = &spi0;
+               spi1 = &spi1;
        };
 
        cpus {
index 9d276af7c539f3dd1cb382f12d4bca7d9bd2fb3d..081fa68b6f98049ad2edb1c9c6d579c0958a7a2d 100644 (file)
        pinctrl-0 = <&cpsw_default>;
        pinctrl-1 = <&cpsw_sleep>;
        status = "okay";
+       slaves = <1>;
 };
 
 &davinci_mdio {
        phy-mode = "rmii";
 };
 
-&cpsw_emac1 {
-       phy_id = <&davinci_mdio>, <1>;
-       phy-mode = "rmii";
-};
-
 &phy_sel {
        rmii-clock-ext;
 };
index 7ff0811e61db3ad73be8f0c9ea8f79ebde1681c7..4960722aab32a1cf644ddb1e69d792845c240d44 100644 (file)
                        };
 
                        i2c0: i2c@11000 {
-                               compatible = "marvell,mv64xxx-i2c";
+                               compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
                                reg = <0x11000 0x20>;
                                #address-cells = <1>;
                                #size-cells = <0>;
                        };
 
                        i2c1: i2c@11100 {
-                               compatible = "marvell,mv64xxx-i2c";
+                               compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
                                reg = <0x11100 0x20>;
                                #address-cells = <1>;
                                #size-cells = <0>;
index 63a5af8981659fb7f3162323be34e55dbcc345d6..cf0087b4c9e184259eb63667f8322067c80b584b 100644 (file)
@@ -67,8 +67,8 @@
                                pinctrl-0 = <&pinctrl_macb0_default>;
                                phy-mode = "rmii";
 
-                               ethernet-phy@1 {
-                                       reg = <0x1>;
+                               ethernet-phy@0 {
+                                       reg = <0x0>;
                                        interrupt-parent = <&pioA>;
                                        interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
                                        pinctrl-names = "default";
index 9c9088c99cc4cdb36d963c61e648216ee7fc4c4c..60cb084a8d927e40303a377a40fd283d2eb29f45 100644 (file)
 
                usb1: ohci@00400000 {
                        num-ports = <3>;
-                       atmel,vbus-gpio = <&pioA PIN_PA10 GPIO_ACTIVE_HIGH>;
+                       atmel,vbus-gpio = <0 /* &pioA PIN_PD20 GPIO_ACTIVE_HIGH */
+                                          &pioA PIN_PA27 GPIO_ACTIVE_HIGH
+                                          0
+                                         >;
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_usb_default>;
                        status = "okay";
                                pinctrl-names = "default";
                                pinctrl-0 = <&pinctrl_mikrobus2_uart>;
                                atmel,use-dma-rx;
-                               atmel-use-dma-tx;
+                               atmel,use-dma-tx;
                                status = "okay";
                        };
 
                        uart4: serial@fc00c000 {
                                atmel,use-dma-rx;
                                atmel,use-dma-tx;
-                               pinctrl-name = "default";
+                               pinctrl-names = "default";
                                pinctrl-0 = <&pinctrl_mikrobus1_uart>;
                                status = "okay";
                        };
                                };
 
                                pinctrl_led_gpio_default: led_gpio_default {
-                                       pinmux = <PIN_PA27__GPIO>,
+                                       pinmux = <PIN_PA10__GPIO>,
                                                 <PIN_PB1__GPIO>,
                                                 <PIN_PA31__GPIO>;
                                        bias-pull-up;
                                };
 
                                pinctrl_usb_default: usb_default {
-                                       pinmux = <PIN_PA10__GPIO>,
+                                       pinmux = <PIN_PA27__GPIO>,
                                                 <PIN_PD19__GPIO>;
                                        bias-disable;
                                };
 
                red {
                        label = "red";
-                       gpios = <&pioA PIN_PA27 GPIO_ACTIVE_LOW>;
+                       gpios = <&pioA PIN_PA10 GPIO_ACTIVE_HIGH>;
                };
 
                green {
                        label = "green";
-                       gpios = <&pioA PIN_PB1 GPIO_ACTIVE_LOW>;
+                       gpios = <&pioA PIN_PB1 GPIO_ACTIVE_HIGH>;
                };
 
                blue {
                        label = "blue";
-                       gpios = <&pioA PIN_PA31 GPIO_ACTIVE_LOW>;
+                       gpios = <&pioA PIN_PA31 GPIO_ACTIVE_HIGH>;
                        linux,default-trigger = "heartbeat";
                };
        };
index c7e9ccf2bc8724304f44c574bc901b3e9b3999af..cbc26001247bea2fabcf186ad784f187810dbc63 100644 (file)
                                vddana-supply = <&vdd_3v3_lp_reg>;
                                vref-supply = <&vdd_3v3_lp_reg>;
                                pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_adc_default>;
+                               pinctrl-0 = <&pinctrl_adc_default &pinctrl_adtrg_default>;
                                status = "okay";
                        };
 
                                        bias-disable;
                                };
 
+                               /*
+                                * The ADTRG pin can work on any edge type.
+                                * In here it's being pulled up, so need to
+                                * connect it to ground to get an edge e.g.
+                                * Trigger can be configured on falling, rise
+                                * or any edge, and the pull-up can be changed
+                                * to pull-down or left floating according to
+                                * needs.
+                                */
+                               pinctrl_adtrg_default: adtrg_default {
+                                       pinmux = <PIN_PD31__ADTRG>;
+                                       bias-pull-up;
+                               };
+
                                pinctrl_charger_chglev: charger_chglev {
                                        pinmux = <PIN_PA12__GPIO>;
                                        bias-disable;
index 82651c3eb682a749652a31eadbb4f910961151f7..b8565fc33eea6bc18b88ab72cf8774b15f3c678d 100644 (file)
        compatible = "raspberrypi,model-zero-w", "brcm,bcm2835";
        model = "Raspberry Pi Zero W";
 
-       /* Needed by firmware to properly init UARTs */
-       aliases {
-               uart0 = "/soc/serial@7e201000";
-               uart1 = "/soc/serial@7e215040";
-               serial0 = "/soc/serial@7e201000";
-               serial1 = "/soc/serial@7e215040";
+       chosen {
+               /* 8250 auxiliary UART instead of pl011 */
+               stdout-path = "serial1:115200n8";
        };
 
        leds {
index 20725ca487f30afd5e84059fbcb1513d3399cfa8..c71a0d73d2a2e9fd64c1906b0fe42a6cfefbd230 100644 (file)
@@ -8,6 +8,11 @@
        compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
        model = "Raspberry Pi 3 Model B";
 
+       chosen {
+               /* 8250 auxiliary UART instead of pl011 */
+               stdout-path = "serial1:115200n8";
+       };
+
        memory {
                reg = <0 0x40000000>;
        };
index 431dcfc900c024d85a88231d1df007916dafdfeb..013431e3d7c3140d3a0645bdf4f130e9a860f984 100644 (file)
        #address-cells = <1>;
        #size-cells = <1>;
 
+       aliases {
+               serial0 = &uart0;
+               serial1 = &uart1;
+       };
+
        chosen {
-               bootargs = "earlyprintk console=ttyAMA0";
+               stdout-path = "serial0:115200n8";
        };
 
        thermal-zones {
index 67e72bc72e805be995b0d44fa68f74e29161b140..c75507922f7d7998ee0e45bb875b8b6495b3d8d7 100644 (file)
        compatible = "ti,da850-evm", "ti,da850";
        model = "DA850/AM1808/OMAP-L138 EVM";
 
+       aliases {
+               serial0 = &serial0;
+               serial1 = &serial1;
+               serial2 = &serial2;
+               ethernet0 = &eth0;
+       };
+
        soc@1c00000 {
                pmx_core: pinmux@14120 {
                        status = "okay";
index cf229dfabf6173872d23ea380a436109b5424caf..e62b62875cbad7ed97dd7f23c0f290a38839d3be 100644 (file)
                clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
                ti,bit-shift = <24>;
                reg = <0x1868>;
+               assigned-clocks = <&mcasp3_ahclkx_mux>;
+               assigned-clock-parents = <&abe_24m_fclk>;
        };
 
        mcasp3_aux_gfclk_mux: mcasp3_aux_gfclk_mux@1868 {
index c68e8d430234c3824198d46b336f0d689cf38611..f0d178c77153fc6a3e4c84ebb798f3720995e066 100644 (file)
                };
 
                watchdog@41000000 {
-                       compatible = "cortina,gemini-watchdog";
+                       compatible = "cortina,gemini-watchdog", "faraday,ftwdt010";
                        reg = <0x41000000 0x1000>;
                        interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
                        resets = <&syscon GEMINI_RESET_WDOG>;
                        clocks = <&syscon GEMINI_CLK_APB>;
+                       clock-names = "PCLK";
                };
 
                uart0: serial@42000000 {
index f46814a7ea44100ff0dbd44f054bb17ed6c9d740..4d308d17f040c71157db72a8d81abf8de9dd4d8e 100644 (file)
                interrupt-names = "msi";
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0x7>;
-               interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
                         <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
                         <&clks IMX7D_PCIE_PHY_ROOT_CLK>;
index 1f4c795d3f7210223d7cdab4b5e430affa133a35..da7b3237bfe9811ff9e8908310b8f146f938f21e 100644 (file)
                };
 
                watchdog: watchdog@98500000 {
-                       compatible = "moxa,moxart-watchdog";
+                       compatible = "moxa,moxart-watchdog", "faraday,ftwdt010";
                        reg = <0x98500000 0x10>;
                        clocks = <&clk_apb>;
+                       clock-names = "PCLK";
                };
 
                sdhci: sdhci@98e00000 {
index 26c20e1167b9531ca1fde008787a714856b67c62..4acd32a1c4ef7c8801bbd516f2f317d50db21218 100644 (file)
                io-channel-names = "temp", "bsi", "vbat";
        };
 
-       rear_camera: camera@0 {
-               compatible = "linux,camera";
-
-               module {
-                       model = "TCM8341MD";
-                       sensor = <&cam1>;
-               };
-       };
-
        pwm9: dmtimer-pwm {
                compatible = "ti,omap-dmtimer-pwm";
                #pwm-cells = <3>;
                                clock-lanes = <1>;
                                data-lanes = <0>;
                                lane-polarity = <0 0>;
-                               clock-inv = <0>;
                                /* Select strobe = <1> for back camera, <0> for front camera */
                                strobe = <1>;
-                               crc = <0>;
                        };
                };
        };
index cb47ae79a5f9e87ed1e9ce84b148b6829c9e2935..1b0bd72945f21337d1e89c3e1636bbfb5cabeecd 100644 (file)
        clock-frequency = <400000>;
 
        as3645a@30 {
+               #address-cells = <1>;
+               #size-cells = <0>;
                reg = <0x30>;
                compatible = "ams,as3645a";
-               flash {
+               flash@0 {
+                       reg = <0x0>;
                        flash-timeout-us = <150000>;
                        flash-max-microamp = <320000>;
                        led-max-microamp = <60000>;
-                       peak-current-limit = <1750000>;
+                       ams,input-max-microamp = <1750000>;
                };
-               indicator {
+               indicator@1 {
+                       reg = <0x1>;
                        led-max-microamp = <10000>;
                };
        };
index 38d2216c7ead9ff422cce1e740a820e47977a700..b1a26b42d1904a82817e1986da79fd963b934d2f 100644 (file)
                                atmel,min-sample-rate-hz = <200000>;
                                atmel,max-sample-rate-hz = <20000000>;
                                atmel,startup-time-ms = <4>;
+                               atmel,trigger-edge-type = <IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
index 97b1c2321ba9dd4f7791213b47beaeb740d6542f..293ecb95722715d6ec4dc36da07496134f51a631 100644 (file)
@@ -47,6 +47,7 @@
 
 /dts-v1/;
 #include "stm32f429.dtsi"
+#include "stm32f429-pinctrl.dtsi"
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/gpio/gpio.h>
 
        stmpe1600: stmpe1600@42 {
                compatible = "st,stmpe1600";
                reg = <0x42>;
-               irq-gpio = <&gpioi 8 0>;
-               irq-trigger = <3>;
                interrupts = <8 3>;
-               interrupt-parent = <&exti>;
+               interrupt-parent = <&gpioi>;
                interrupt-controller;
                wakeup-source;
 
diff --git a/arch/arm/boot/dts/stm32f4-pinctrl.dtsi b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi
new file mode 100644 (file)
index 0000000..7f3560c
--- /dev/null
@@ -0,0 +1,343 @@
+/*
+ * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
+#include <dt-bindings/mfd/stm32f4-rcc.h>
+
+/ {
+       soc {
+               pinctrl: pin-controller {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       ranges = <0 0x40020000 0x3000>;
+                       interrupt-parent = <&exti>;
+                       st,syscfg = <&syscfg 0x8>;
+                       pins-are-numbered;
+
+                       gpioa: gpio@40020000 {
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               reg = <0x0 0x400>;
+                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOA)>;
+                               st,bank-name = "GPIOA";
+                       };
+
+                       gpiob: gpio@40020400 {
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               reg = <0x400 0x400>;
+                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOB)>;
+                               st,bank-name = "GPIOB";
+                       };
+
+                       gpioc: gpio@40020800 {
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               reg = <0x800 0x400>;
+                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOC)>;
+                               st,bank-name = "GPIOC";
+                       };
+
+                       gpiod: gpio@40020c00 {
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               reg = <0xc00 0x400>;
+                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOD)>;
+                               st,bank-name = "GPIOD";
+                       };
+
+                       gpioe: gpio@40021000 {
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               reg = <0x1000 0x400>;
+                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOE)>;
+                               st,bank-name = "GPIOE";
+                       };
+
+                       gpiof: gpio@40021400 {
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               reg = <0x1400 0x400>;
+                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOF)>;
+                               st,bank-name = "GPIOF";
+                       };
+
+                       gpiog: gpio@40021800 {
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               reg = <0x1800 0x400>;
+                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOG)>;
+                               st,bank-name = "GPIOG";
+                       };
+
+                       gpioh: gpio@40021c00 {
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               reg = <0x1c00 0x400>;
+                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOH)>;
+                               st,bank-name = "GPIOH";
+                       };
+
+                       gpioi: gpio@40022000 {
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               reg = <0x2000 0x400>;
+                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOI)>;
+                               st,bank-name = "GPIOI";
+                       };
+
+                       gpioj: gpio@40022400 {
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               reg = <0x2400 0x400>;
+                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOJ)>;
+                               st,bank-name = "GPIOJ";
+                       };
+
+                       gpiok: gpio@40022800 {
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               reg = <0x2800 0x400>;
+                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOK)>;
+                               st,bank-name = "GPIOK";
+                       };
+
+                       usart1_pins_a: usart1@0 {
+                               pins1 {
+                                       pinmux = <STM32F429_PA9_FUNC_USART1_TX>;
+                                       bias-disable;
+                                       drive-push-pull;
+                                       slew-rate = <0>;
+                               };
+                               pins2 {
+                                       pinmux = <STM32F429_PA10_FUNC_USART1_RX>;
+                                       bias-disable;
+                               };
+                       };
+
+                       usart3_pins_a: usart3@0 {
+                               pins1 {
+                                       pinmux = <STM32F429_PB10_FUNC_USART3_TX>;
+                                       bias-disable;
+                                       drive-push-pull;
+                                       slew-rate = <0>;
+                               };
+                               pins2 {
+                                       pinmux = <STM32F429_PB11_FUNC_USART3_RX>;
+                                       bias-disable;
+                               };
+                       };
+
+                       usbotg_fs_pins_a: usbotg_fs@0 {
+                               pins {
+                                       pinmux = <STM32F429_PA10_FUNC_OTG_FS_ID>,
+                                                <STM32F429_PA11_FUNC_OTG_FS_DM>,
+                                                <STM32F429_PA12_FUNC_OTG_FS_DP>;
+                                       bias-disable;
+                                       drive-push-pull;
+                                       slew-rate = <2>;
+                               };
+                       };
+
+                       usbotg_fs_pins_b: usbotg_fs@1 {
+                               pins {
+                                       pinmux = <STM32F429_PB12_FUNC_OTG_HS_ID>,
+                                                <STM32F429_PB14_FUNC_OTG_HS_DM>,
+                                                <STM32F429_PB15_FUNC_OTG_HS_DP>;
+                                       bias-disable;
+                                       drive-push-pull;
+                                       slew-rate = <2>;
+                               };
+                       };
+
+                       usbotg_hs_pins_a: usbotg_hs@0 {
+                               pins {
+                                       pinmux = <STM32F429_PH4_FUNC_OTG_HS_ULPI_NXT>,
+                                                <STM32F429_PI11_FUNC_OTG_HS_ULPI_DIR>,
+                                                <STM32F429_PC0_FUNC_OTG_HS_ULPI_STP>,
+                                                <STM32F429_PA5_FUNC_OTG_HS_ULPI_CK>,
+                                                <STM32F429_PA3_FUNC_OTG_HS_ULPI_D0>,
+                                                <STM32F429_PB0_FUNC_OTG_HS_ULPI_D1>,
+                                                <STM32F429_PB1_FUNC_OTG_HS_ULPI_D2>,
+                                                <STM32F429_PB10_FUNC_OTG_HS_ULPI_D3>,
+                                                <STM32F429_PB11_FUNC_OTG_HS_ULPI_D4>,
+                                                <STM32F429_PB12_FUNC_OTG_HS_ULPI_D5>,
+                                                <STM32F429_PB13_FUNC_OTG_HS_ULPI_D6>,
+                                                <STM32F429_PB5_FUNC_OTG_HS_ULPI_D7>;
+                                       bias-disable;
+                                       drive-push-pull;
+                                       slew-rate = <2>;
+                               };
+                       };
+
+                       ethernet_mii: mii@0 {
+                               pins {
+                                       pinmux = <STM32F429_PG13_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0>,
+                                                <STM32F429_PG14_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1>,
+                                                <STM32F429_PC2_FUNC_ETH_MII_TXD2>,
+                                                <STM32F429_PB8_FUNC_ETH_MII_TXD3>,
+                                                <STM32F429_PC3_FUNC_ETH_MII_TX_CLK>,
+                                                <STM32F429_PG11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN>,
+                                                <STM32F429_PA2_FUNC_ETH_MDIO>,
+                                                <STM32F429_PC1_FUNC_ETH_MDC>,
+                                                <STM32F429_PA1_FUNC_ETH_MII_RX_CLK_ETH_RMII_REF_CLK>,
+                                                <STM32F429_PA7_FUNC_ETH_MII_RX_DV_ETH_RMII_CRS_DV>,
+                                                <STM32F429_PC4_FUNC_ETH_MII_RXD0_ETH_RMII_RXD0>,
+                                                <STM32F429_PC5_FUNC_ETH_MII_RXD1_ETH_RMII_RXD1>,
+                                                <STM32F429_PH6_FUNC_ETH_MII_RXD2>,
+                                                <STM32F429_PH7_FUNC_ETH_MII_RXD3>;
+                                       slew-rate = <2>;
+                               };
+                       };
+
+                       adc3_in8_pin: adc@200 {
+                               pins {
+                                       pinmux = <STM32F429_PF10_FUNC_ANALOG>;
+                               };
+                       };
+
+                       pwm1_pins: pwm@1 {
+                               pins {
+                                       pinmux = <STM32F429_PA8_FUNC_TIM1_CH1>,
+                                                <STM32F429_PB13_FUNC_TIM1_CH1N>,
+                                                <STM32F429_PB12_FUNC_TIM1_BKIN>;
+                               };
+                       };
+
+                       pwm3_pins: pwm@3 {
+                               pins {
+                                       pinmux = <STM32F429_PB4_FUNC_TIM3_CH1>,
+                                                <STM32F429_PB5_FUNC_TIM3_CH2>;
+                               };
+                       };
+
+                       i2c1_pins: i2c1@0 {
+                               pins {
+                                       pinmux = <STM32F429_PB9_FUNC_I2C1_SDA>,
+                                                <STM32F429_PB6_FUNC_I2C1_SCL>;
+                                       bias-disable;
+                                       drive-open-drain;
+                                       slew-rate = <3>;
+                               };
+                       };
+
+                       ltdc_pins: ltdc@0 {
+                               pins {
+                                       pinmux = <STM32F429_PI12_FUNC_LCD_HSYNC>,
+                                                <STM32F429_PI13_FUNC_LCD_VSYNC>,
+                                                <STM32F429_PI14_FUNC_LCD_CLK>,
+                                                <STM32F429_PI15_FUNC_LCD_R0>,
+                                                <STM32F429_PJ0_FUNC_LCD_R1>,
+                                                <STM32F429_PJ1_FUNC_LCD_R2>,
+                                                <STM32F429_PJ2_FUNC_LCD_R3>,
+                                                <STM32F429_PJ3_FUNC_LCD_R4>,
+                                                <STM32F429_PJ4_FUNC_LCD_R5>,
+                                                <STM32F429_PJ5_FUNC_LCD_R6>,
+                                                <STM32F429_PJ6_FUNC_LCD_R7>,
+                                                <STM32F429_PJ7_FUNC_LCD_G0>,
+                                                <STM32F429_PJ8_FUNC_LCD_G1>,
+                                                <STM32F429_PJ9_FUNC_LCD_G2>,
+                                                <STM32F429_PJ10_FUNC_LCD_G3>,
+                                                <STM32F429_PJ11_FUNC_LCD_G4>,
+                                                <STM32F429_PJ12_FUNC_LCD_B0>,
+                                                <STM32F429_PJ13_FUNC_LCD_B1>,
+                                                <STM32F429_PJ14_FUNC_LCD_B2>,
+                                                <STM32F429_PJ15_FUNC_LCD_B3>,
+                                                <STM32F429_PK0_FUNC_LCD_G5>,
+                                                <STM32F429_PK1_FUNC_LCD_G6>,
+                                                <STM32F429_PK2_FUNC_LCD_G7>,
+                                                <STM32F429_PK3_FUNC_LCD_B4>,
+                                                <STM32F429_PK4_FUNC_LCD_B5>,
+                                                <STM32F429_PK5_FUNC_LCD_B6>,
+                                                <STM32F429_PK6_FUNC_LCD_B7>,
+                                                <STM32F429_PK7_FUNC_LCD_DE>;
+                                       slew-rate = <2>;
+                               };
+                       };
+
+                       dcmi_pins: dcmi@0 {
+                               pins {
+                                       pinmux = <STM32F429_PA4_FUNC_DCMI_HSYNC>,
+                                                <STM32F429_PB7_FUNC_DCMI_VSYNC>,
+                                                <STM32F429_PA6_FUNC_DCMI_PIXCLK>,
+                                                <STM32F429_PC6_FUNC_DCMI_D0>,
+                                                <STM32F429_PC7_FUNC_DCMI_D1>,
+                                                <STM32F429_PC8_FUNC_DCMI_D2>,
+                                                <STM32F429_PC9_FUNC_DCMI_D3>,
+                                                <STM32F429_PC11_FUNC_DCMI_D4>,
+                                                <STM32F429_PD3_FUNC_DCMI_D5>,
+                                                <STM32F429_PB8_FUNC_DCMI_D6>,
+                                                <STM32F429_PE6_FUNC_DCMI_D7>,
+                                                <STM32F429_PC10_FUNC_DCMI_D8>,
+                                                <STM32F429_PC12_FUNC_DCMI_D9>,
+                                                <STM32F429_PD6_FUNC_DCMI_D10>,
+                                                <STM32F429_PD2_FUNC_DCMI_D11>;
+                                       bias-disable;
+                                       drive-push-pull;
+                                       slew-rate = <3>;
+                               };
+                       };
+               };
+       };
+};
index c66d617e4245b4c7b83f45e2266480c1ac64759d..5ceb2cf3777ff425a41a59da6dc90fe08adc257e 100644 (file)
@@ -47,6 +47,7 @@
 
 /dts-v1/;
 #include "stm32f429.dtsi"
+#include "stm32f429-pinctrl.dtsi"
 #include <dt-bindings/input/input.h>
 
 / {
diff --git a/arch/arm/boot/dts/stm32f429-pinctrl.dtsi b/arch/arm/boot/dts/stm32f429-pinctrl.dtsi
new file mode 100644 (file)
index 0000000..3e7a17d
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "stm32f4-pinctrl.dtsi"
+
+/ {
+       soc {
+               pinctrl: pin-controller {
+                       compatible = "st,stm32f429-pinctrl";
+
+                       gpioa: gpio@40020000 {
+                               gpio-ranges = <&pinctrl 0 0 16>;
+                       };
+
+                       gpiob: gpio@40020400 {
+                               gpio-ranges = <&pinctrl 0 16 16>;
+                       };
+
+                       gpioc: gpio@40020800 {
+                               gpio-ranges = <&pinctrl 0 32 16>;
+                       };
+
+                       gpiod: gpio@40020c00 {
+                               gpio-ranges = <&pinctrl 0 48 16>;
+                       };
+
+                       gpioe: gpio@40021000 {
+                               gpio-ranges = <&pinctrl 0 64 16>;
+                       };
+
+                       gpiof: gpio@40021400 {
+                               gpio-ranges = <&pinctrl 0 80 16>;
+                       };
+
+                       gpiog: gpio@40021800 {
+                               gpio-ranges = <&pinctrl 0 96 16>;
+                       };
+
+                       gpioh: gpio@40021c00 {
+                               gpio-ranges = <&pinctrl 0 112 16>;
+                       };
+
+                       gpioi: gpio@40022000 {
+                               gpio-ranges = <&pinctrl 0 128 16>;
+                       };
+
+                       gpioj: gpio@40022400 {
+                               gpio-ranges = <&pinctrl 0 144 16>;
+                       };
+
+                       gpiok: gpio@40022800 {
+                               gpio-ranges = <&pinctrl 0 160 8>;
+                       };
+               };
+       };
+};
index dd7e99b1f43bc78111f0de584849367a8f07cf8a..5b36eb114ddc953067904afbd08497b2286c3785 100644 (file)
@@ -47,7 +47,6 @@
 
 #include "skeleton.dtsi"
 #include "armv7-m.dtsi"
-#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
 #include <dt-bindings/clock/stm32fx-clock.h>
 #include <dt-bindings/mfd/stm32f4-rcc.h>
 
                        status = "disabled";
                };
 
-               pinctrl: pin-controller {
-                       #address-cells = <1>;
-                       #size-cells = <1>;
-                       compatible = "st,stm32f429-pinctrl";
-                       ranges = <0 0x40020000 0x3000>;
-                       interrupt-parent = <&exti>;
-                       st,syscfg = <&syscfg 0x8>;
-                       pins-are-numbered;
-
-                       gpioa: gpio@40020000 {
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                               reg = <0x0 0x400>;
-                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOA)>;
-                               st,bank-name = "GPIOA";
-                       };
-
-                       gpiob: gpio@40020400 {
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                               reg = <0x400 0x400>;
-                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOB)>;
-                               st,bank-name = "GPIOB";
-                       };
-
-                       gpioc: gpio@40020800 {
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                               reg = <0x800 0x400>;
-                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOC)>;
-                               st,bank-name = "GPIOC";
-                       };
-
-                       gpiod: gpio@40020c00 {
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                               reg = <0xc00 0x400>;
-                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOD)>;
-                               st,bank-name = "GPIOD";
-                       };
-
-                       gpioe: gpio@40021000 {
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                               reg = <0x1000 0x400>;
-                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOE)>;
-                               st,bank-name = "GPIOE";
-                       };
-
-                       gpiof: gpio@40021400 {
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                               reg = <0x1400 0x400>;
-                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOF)>;
-                               st,bank-name = "GPIOF";
-                       };
-
-                       gpiog: gpio@40021800 {
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                               reg = <0x1800 0x400>;
-                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOG)>;
-                               st,bank-name = "GPIOG";
-                       };
-
-                       gpioh: gpio@40021c00 {
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                               reg = <0x1c00 0x400>;
-                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOH)>;
-                               st,bank-name = "GPIOH";
-                       };
-
-                       gpioi: gpio@40022000 {
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                               reg = <0x2000 0x400>;
-                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOI)>;
-                               st,bank-name = "GPIOI";
-                       };
-
-                       gpioj: gpio@40022400 {
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                               reg = <0x2400 0x400>;
-                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOJ)>;
-                               st,bank-name = "GPIOJ";
-                       };
-
-                       gpiok: gpio@40022800 {
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                               reg = <0x2800 0x400>;
-                               clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOK)>;
-                               st,bank-name = "GPIOK";
-                       };
-
-                       usart1_pins_a: usart1@0 {
-                               pins1 {
-                                       pinmux = <STM32F429_PA9_FUNC_USART1_TX>;
-                                       bias-disable;
-                                       drive-push-pull;
-                                       slew-rate = <0>;
-                               };
-                               pins2 {
-                                       pinmux = <STM32F429_PA10_FUNC_USART1_RX>;
-                                       bias-disable;
-                               };
-                       };
-
-                       usart3_pins_a: usart3@0 {
-                               pins1 {
-                                       pinmux = <STM32F429_PB10_FUNC_USART3_TX>;
-                                       bias-disable;
-                                       drive-push-pull;
-                                       slew-rate = <0>;
-                               };
-                               pins2 {
-                                       pinmux = <STM32F429_PB11_FUNC_USART3_RX>;
-                                       bias-disable;
-                               };
-                       };
-
-                       usbotg_fs_pins_a: usbotg_fs@0 {
-                               pins {
-                                       pinmux = <STM32F429_PA10_FUNC_OTG_FS_ID>,
-                                                <STM32F429_PA11_FUNC_OTG_FS_DM>,
-                                                <STM32F429_PA12_FUNC_OTG_FS_DP>;
-                                       bias-disable;
-                                       drive-push-pull;
-                                       slew-rate = <2>;
-                               };
-                       };
-
-                       usbotg_fs_pins_b: usbotg_fs@1 {
-                               pins {
-                                       pinmux = <STM32F429_PB12_FUNC_OTG_HS_ID>,
-                                                <STM32F429_PB14_FUNC_OTG_HS_DM>,
-                                                <STM32F429_PB15_FUNC_OTG_HS_DP>;
-                                       bias-disable;
-                                       drive-push-pull;
-                                       slew-rate = <2>;
-                               };
-                       };
-
-                       usbotg_hs_pins_a: usbotg_hs@0 {
-                               pins {
-                                       pinmux = <STM32F429_PH4_FUNC_OTG_HS_ULPI_NXT>,
-                                                <STM32F429_PI11_FUNC_OTG_HS_ULPI_DIR>,
-                                                <STM32F429_PC0_FUNC_OTG_HS_ULPI_STP>,
-                                                <STM32F429_PA5_FUNC_OTG_HS_ULPI_CK>,
-                                                <STM32F429_PA3_FUNC_OTG_HS_ULPI_D0>,
-                                                <STM32F429_PB0_FUNC_OTG_HS_ULPI_D1>,
-                                                <STM32F429_PB1_FUNC_OTG_HS_ULPI_D2>,
-                                                <STM32F429_PB10_FUNC_OTG_HS_ULPI_D3>,
-                                                <STM32F429_PB11_FUNC_OTG_HS_ULPI_D4>,
-                                                <STM32F429_PB12_FUNC_OTG_HS_ULPI_D5>,
-                                                <STM32F429_PB13_FUNC_OTG_HS_ULPI_D6>,
-                                                <STM32F429_PB5_FUNC_OTG_HS_ULPI_D7>;
-                                       bias-disable;
-                                       drive-push-pull;
-                                       slew-rate = <2>;
-                               };
-                       };
-
-                       ethernet_mii: mii@0 {
-                               pins {
-                                       pinmux = <STM32F429_PG13_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0>,
-                                                <STM32F429_PG14_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1>,
-                                                <STM32F429_PC2_FUNC_ETH_MII_TXD2>,
-                                                <STM32F429_PB8_FUNC_ETH_MII_TXD3>,
-                                                <STM32F429_PC3_FUNC_ETH_MII_TX_CLK>,
-                                                <STM32F429_PG11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN>,
-                                                <STM32F429_PA2_FUNC_ETH_MDIO>,
-                                                <STM32F429_PC1_FUNC_ETH_MDC>,
-                                                <STM32F429_PA1_FUNC_ETH_MII_RX_CLK_ETH_RMII_REF_CLK>,
-                                                <STM32F429_PA7_FUNC_ETH_MII_RX_DV_ETH_RMII_CRS_DV>,
-                                                <STM32F429_PC4_FUNC_ETH_MII_RXD0_ETH_RMII_RXD0>,
-                                                <STM32F429_PC5_FUNC_ETH_MII_RXD1_ETH_RMII_RXD1>,
-                                                <STM32F429_PH6_FUNC_ETH_MII_RXD2>,
-                                                <STM32F429_PH7_FUNC_ETH_MII_RXD3>;
-                                       slew-rate = <2>;
-                               };
-                       };
-
-                       adc3_in8_pin: adc@200 {
-                               pins {
-                                       pinmux = <STM32F429_PF10_FUNC_ANALOG>;
-                               };
-                       };
-
-                       pwm1_pins: pwm@1 {
-                               pins {
-                                       pinmux = <STM32F429_PA8_FUNC_TIM1_CH1>,
-                                                <STM32F429_PB13_FUNC_TIM1_CH1N>,
-                                                <STM32F429_PB12_FUNC_TIM1_BKIN>;
-                               };
-                       };
-
-                       pwm3_pins: pwm@3 {
-                               pins {
-                                       pinmux = <STM32F429_PB4_FUNC_TIM3_CH1>,
-                                                <STM32F429_PB5_FUNC_TIM3_CH2>;
-                               };
-                       };
-
-                       i2c1_pins: i2c1@0 {
-                               pins {
-                                       pinmux = <STM32F429_PB9_FUNC_I2C1_SDA>,
-                                                <STM32F429_PB6_FUNC_I2C1_SCL>;
-                                       bias-disable;
-                                       drive-open-drain;
-                                       slew-rate = <3>;
-                               };
-                       };
-
-                       ltdc_pins: ltdc@0 {
-                               pins {
-                                       pinmux = <STM32F429_PI12_FUNC_LCD_HSYNC>,
-                                                <STM32F429_PI13_FUNC_LCD_VSYNC>,
-                                                <STM32F429_PI14_FUNC_LCD_CLK>,
-                                                <STM32F429_PI15_FUNC_LCD_R0>,
-                                                <STM32F429_PJ0_FUNC_LCD_R1>,
-                                                <STM32F429_PJ1_FUNC_LCD_R2>,
-                                                <STM32F429_PJ2_FUNC_LCD_R3>,
-                                                <STM32F429_PJ3_FUNC_LCD_R4>,
-                                                <STM32F429_PJ4_FUNC_LCD_R5>,
-                                                <STM32F429_PJ5_FUNC_LCD_R6>,
-                                                <STM32F429_PJ6_FUNC_LCD_R7>,
-                                                <STM32F429_PJ7_FUNC_LCD_G0>,
-                                                <STM32F429_PJ8_FUNC_LCD_G1>,
-                                                <STM32F429_PJ9_FUNC_LCD_G2>,
-                                                <STM32F429_PJ10_FUNC_LCD_G3>,
-                                                <STM32F429_PJ11_FUNC_LCD_G4>,
-                                                <STM32F429_PJ12_FUNC_LCD_B0>,
-                                                <STM32F429_PJ13_FUNC_LCD_B1>,
-                                                <STM32F429_PJ14_FUNC_LCD_B2>,
-                                                <STM32F429_PJ15_FUNC_LCD_B3>,
-                                                <STM32F429_PK0_FUNC_LCD_G5>,
-                                                <STM32F429_PK1_FUNC_LCD_G6>,
-                                                <STM32F429_PK2_FUNC_LCD_G7>,
-                                                <STM32F429_PK3_FUNC_LCD_B4>,
-                                                <STM32F429_PK4_FUNC_LCD_B5>,
-                                                <STM32F429_PK5_FUNC_LCD_B6>,
-                                                <STM32F429_PK6_FUNC_LCD_B7>,
-                                                <STM32F429_PK7_FUNC_LCD_DE>;
-                                       slew-rate = <2>;
-                               };
-                       };
-
-                       dcmi_pins: dcmi@0 {
-                               pins {
-                                       pinmux = <STM32F429_PA4_FUNC_DCMI_HSYNC>,
-                                                <STM32F429_PB7_FUNC_DCMI_VSYNC>,
-                                                <STM32F429_PA6_FUNC_DCMI_PIXCLK>,
-                                                <STM32F429_PC6_FUNC_DCMI_D0>,
-                                                <STM32F429_PC7_FUNC_DCMI_D1>,
-                                                <STM32F429_PC8_FUNC_DCMI_D2>,
-                                                <STM32F429_PC9_FUNC_DCMI_D3>,
-                                                <STM32F429_PC11_FUNC_DCMI_D4>,
-                                                <STM32F429_PD3_FUNC_DCMI_D5>,
-                                                <STM32F429_PB8_FUNC_DCMI_D6>,
-                                                <STM32F429_PE6_FUNC_DCMI_D7>,
-                                                <STM32F429_PC10_FUNC_DCMI_D8>,
-                                                <STM32F429_PC12_FUNC_DCMI_D9>,
-                                                <STM32F429_PD6_FUNC_DCMI_D10>,
-                                                <STM32F429_PD2_FUNC_DCMI_D11>;
-                                       bias-disable;
-                                       drive-push-pull;
-                                       slew-rate = <3>;
-                               };
-                       };
-               };
-
                crc: crc@40023000 {
                        compatible = "st,stm32f4-crc";
                        reg = <0x40023000 0x400>;
index 6ae1f037f3f0e5065eb80a6159a8bfb1d5eb3ba1..c18acbe4cf4e2448b085331fca86a72710eeb613 100644 (file)
@@ -47,6 +47,7 @@
 
 /dts-v1/;
 #include "stm32f429.dtsi"
+#include "stm32f469-pinctrl.dtsi"
 
 / {
        model = "STMicroelectronics STM32F469i-DISCO board";
diff --git a/arch/arm/boot/dts/stm32f469-pinctrl.dtsi b/arch/arm/boot/dts/stm32f469-pinctrl.dtsi
new file mode 100644 (file)
index 0000000..fff5426
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "stm32f4-pinctrl.dtsi"
+
+/ {
+       soc {
+               pinctrl: pin-controller {
+                       compatible = "st,stm32f469-pinctrl";
+
+                       gpioa: gpio@40020000 {
+                               gpio-ranges = <&pinctrl 0 0 16>;
+                       };
+
+                       gpiob: gpio@40020400 {
+                               gpio-ranges = <&pinctrl 0 16 16>;
+                       };
+
+                       gpioc: gpio@40020800 {
+                               gpio-ranges = <&pinctrl 0 32 16>;
+                       };
+
+                       gpiod: gpio@40020c00 {
+                               gpio-ranges = <&pinctrl 0 48 16>;
+                       };
+
+                       gpioe: gpio@40021000 {
+                               gpio-ranges = <&pinctrl 0 64 16>;
+                       };
+
+                       gpiof: gpio@40021400 {
+                               gpio-ranges = <&pinctrl 0 80 16>;
+                       };
+
+                       gpiog: gpio@40021800 {
+                               gpio-ranges = <&pinctrl 0 96 16>;
+                       };
+
+                       gpioh: gpio@40021c00 {
+                               gpio-ranges = <&pinctrl 0 112 16>;
+                       };
+
+                       gpioi: gpio@40022000 {
+                               gpio-ranges = <&pinctrl 0 128 16>;
+                       };
+
+                       gpioj: gpio@40022400 {
+                               gpio-ranges = <&pinctrl 0 144 6>,
+                                             <&pinctrl 12 156 4>;
+                       };
+
+                       gpiok: gpio@40022800 {
+                               gpio-ranges = <&pinctrl 3 163 5>;
+                       };
+               };
+       };
+};
index b147cb0dc14b26ce92db7ea70bba2a8f77bd0d38..eef072a21accaed0c29d6407d076331ad579a987 100644 (file)
                                        #size-cells = <0>;
                                        reg = <0>;
 
-                                       tcon1_in_drc1: endpoint@0 {
-                                               reg = <0>;
+                                       tcon1_in_drc1: endpoint@1 {
+                                               reg = <1>;
                                                remote-endpoint = <&drc1_out_tcon1>;
                                        };
                                };
                                        #size-cells = <0>;
                                        reg = <1>;
 
-                                       be1_out_drc1: endpoint@0 {
-                                               reg = <0>;
+                                       be1_out_drc1: endpoint@1 {
+                                               reg = <1>;
                                                remote-endpoint = <&drc1_in_be1>;
                                        };
                                };
                                        #size-cells = <0>;
                                        reg = <0>;
 
-                                       drc1_in_be1: endpoint@0 {
-                                               reg = <0>;
+                                       drc1_in_be1: endpoint@1 {
+                                               reg = <1>;
                                                remote-endpoint = <&be1_out_drc1>;
                                        };
                                };
                                        #size-cells = <0>;
                                        reg = <1>;
 
-                                       drc1_out_tcon1: endpoint@0 {
-                                               reg = <0>;
+                                       drc1_out_tcon1: endpoint@1 {
+                                               reg = <1>;
                                                remote-endpoint = <&tcon1_in_drc1>;
                                        };
                                };
index d2d75fa664a64338bc6bf0af3c3c65e3de0926f4..2a63fa10c813042bf2b98261964c7b6cd9f03790 100644 (file)
@@ -32,6 +32,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_ATA=y
+CONFIG_PATA_FTIDE010=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
@@ -55,8 +56,8 @@ CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_GEMINI=y
 CONFIG_DMADEVICES=y
+CONFIG_AMBA_PL08X=y
 # CONFIG_DNOTIFY is not set
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
index 64e3a2a8cedec353203694d6f9f74b4fa09f962f..d5e1370ec303d5440c4572262f6918d4a047a170 100644 (file)
@@ -471,7 +471,7 @@ CONFIG_LCD_PLATFORM=m
 CONFIG_LCD_TOSA=m
 CONFIG_BACKLIGHT_PWM=m
 CONFIG_BACKLIGHT_TOSA=m
-CONFIG_FRAMEBUFFER_CONSOLE=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_LOGO=y
 CONFIG_SOUND=m
index 44d4fa57ba0a346cde5f53d099a3be61b42d841a..070e5074f1ee5b118ed35612c9cd09d65f611255 100644 (file)
@@ -113,7 +113,7 @@ CONFIG_FB_PXA_PARAMETERS=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_PWM=m
 # CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_SOUND=m
 CONFIG_SND=m
index 8d4c0c926c344d9f654a87d179488eb5bfff7096..09e7050d56532dabd512cc99e1038a6dae525d69 100644 (file)
@@ -112,7 +112,7 @@ CONFIG_FB_PXA=m
 CONFIG_FB_PXA_PARAMETERS=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 # CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_SOUND=m
 CONFIG_SND=m
index 1d468b527b7b6928d2bb21c2152a3a282a7a366d..776757d1604ab3901996bb24bb02748e54c2aee7 100644 (file)
@@ -139,11 +139,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_NEED_RESCHED       1       /* rescheduling necessary */
 #define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
 #define TIF_UPROBE             3       /* breakpointed or singlestepping */
-#define TIF_FSCHECK            4       /* Check FS is USER_DS on return */
-#define TIF_SYSCALL_TRACE      5       /* syscall trace active */
-#define TIF_SYSCALL_AUDIT      6       /* syscall auditing active */
-#define TIF_SYSCALL_TRACEPOINT 7       /* syscall tracepoint instrumentation */
-#define TIF_SECCOMP            8       /* seccomp syscall filtering active */
+#define TIF_SYSCALL_TRACE      4       /* syscall trace active */
+#define TIF_SYSCALL_AUDIT      5       /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 6       /* syscall tracepoint instrumentation */
+#define TIF_SECCOMP            7       /* seccomp syscall filtering active */
 
 #define TIF_NOHZ               12      /* in adaptive nohz mode */
 #define TIF_USING_IWMMXT       17
@@ -154,7 +153,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_UPROBE            (1 << TIF_UPROBE)
-#define _TIF_FSCHECK           (1 << TIF_FSCHECK)
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT        (1 << TIF_SYSCALL_TRACEPOINT)
@@ -168,9 +166,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 /*
  * Change these and you break ASM code in entry-common.S
  */
-#define _TIF_WORK_MASK         (_TIF_NEED_RESCHED | _TIF_SIGPENDING |  \
-                                _TIF_NOTIFY_RESUME | _TIF_UPROBE |     \
-                                _TIF_FSCHECK)
+#define _TIF_WORK_MASK         (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+                                _TIF_NOTIFY_RESUME | _TIF_UPROBE)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_THREAD_INFO_H */
index 87936dd5d1510572993d4bcc9b4fa1222a4dd3b2..0bf2347495f13e4db7fdc1c560b0976ec1fbd619 100644 (file)
@@ -70,8 +70,6 @@ static inline void set_fs(mm_segment_t fs)
 {
        current_thread_info()->addr_limit = fs;
        modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
-       /* On user-mode return, check fs is correct */
-       set_thread_flag(TIF_FSCHECK);
 }
 
 #define segment_eq(a, b)       ((a) == (b))
index ea9646cc2a0ed7eba2fa4f7f7638c0802642edcc..0a498cb3fad88d046c23073a5d1b7785fdfaf1a7 100644 (file)
@@ -115,7 +115,11 @@ ENTRY(printascii)
                mov     r1, r0
                mov     r0, #0x04               @ SYS_WRITE0
        ARM(    svc     #0x123456       )
+#ifdef CONFIG_CPU_V7M
+       THUMB(  bkpt    #0xab           )
+#else
        THUMB(  svc     #0xab           )
+#endif
                ret     lr
 ENDPROC(printascii)
 
@@ -124,7 +128,11 @@ ENTRY(printch)
                strb    r0, [r1]
                mov     r0, #0x03               @ SYS_WRITEC
        ARM(    svc     #0x123456       )
+#ifdef CONFIG_CPU_V7M
+       THUMB(  bkpt    #0xab           )
+#else
        THUMB(  svc     #0xab           )
+#endif
                ret     lr
 ENDPROC(printch)
 
index ca3614dc6938e1595f71a095f53456eb26701bec..99c908226065cf6331324309edeadfa618597874 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
 #include <asm/unwind.h>
+#include <asm/memory.h>
 #ifdef CONFIG_AEABI
 #include <asm/unistd-oabi.h>
 #endif
@@ -48,12 +49,14 @@ ret_fast_syscall:
  UNWIND(.fnstart       )
  UNWIND(.cantunwind    )
        disable_irq_notrace                     @ disable interrupts
+       ldr     r2, [tsk, #TI_ADDR_LIMIT]
+       cmp     r2, #TASK_SIZE
+       blne    addr_limit_check_failed
        ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
-       tst     r1, #_TIF_SYSCALL_WORK
-       bne     fast_work_pending
-       tst     r1, #_TIF_WORK_MASK
+       tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
        bne     fast_work_pending
 
+
        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr
 
@@ -76,16 +79,16 @@ ret_fast_syscall:
  UNWIND(.cantunwind    )
        str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
        disable_irq_notrace                     @ disable interrupts
+       ldr     r2, [tsk, #TI_ADDR_LIMIT]
+       cmp     r2, #TASK_SIZE
+       blne    addr_limit_check_failed
        ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
-       tst     r1, #_TIF_SYSCALL_WORK
-       bne     fast_work_pending
-       tst     r1, #_TIF_WORK_MASK
+       tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
        beq     no_work_pending
  UNWIND(.fnend         )
 ENDPROC(ret_fast_syscall)
 
        /* Slower path - fall through to work_pending */
-fast_work_pending:
 #endif
 
        tst     r1, #_TIF_SYSCALL_WORK
@@ -111,6 +114,9 @@ ENTRY(ret_to_user)
 ret_slow_syscall:
        disable_irq_notrace                     @ disable interrupts
 ENTRY(ret_to_user_from_irq)
+       ldr     r2, [tsk, #TI_ADDR_LIMIT]
+       cmp     r2, #TASK_SIZE
+       blne    addr_limit_check_failed
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     slow_work_pending
index e2de50bf87425e4a55baa66ef68d1fe99e53e1f8..b67ae12503f30c3ba113f1d8c7a943d17e9db717 100644 (file)
@@ -614,10 +614,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
         * Update the trace code with the current status.
         */
        trace_hardirqs_off();
-
-       /* Check valid user FS if needed */
-       addr_limit_user_check();
-
        do {
                if (likely(thread_flags & _TIF_NEED_RESCHED)) {
                        schedule();
@@ -678,3 +674,9 @@ struct page *get_signal_page(void)
 
        return page;
 }
+
+/* Defer to generic check */
+asmlinkage void addr_limit_check_failed(void)
+{
+       addr_limit_user_check();
+}
index 5036f996e694a533d66f82743b901385ae048007..849014c01cf4d62a465ec6e54b829d4ff45f05a6 100644 (file)
@@ -533,8 +533,8 @@ static void __init at91_pm_backup_init(void)
        }
 
        pm_bu->suspended = 0;
-       pm_bu->canary = virt_to_phys(&canary);
-       pm_bu->resume = virt_to_phys(cpu_resume);
+       pm_bu->canary = __pa_symbol(&canary);
+       pm_bu->resume = __pa_symbol(cpu_resume);
 
        return;
 
index 5b614388d72f4b7ca8be1c851f1369f719ce2c29..6d28aa20a7d32c4142dba9a52cbaa06fc6c387f1 100644 (file)
@@ -58,10 +58,10 @@ void omap_hsmmc_late_init(struct omap2_hsmmc_info *c)
        struct platform_device *pdev;
        int res;
 
-       if (omap_hsmmc_done != 1)
+       if (omap_hsmmc_done)
                return;
 
-       omap_hsmmc_done++;
+       omap_hsmmc_done = 1;
 
        for (; c->mmc; c++) {
                pdev = c->pdev;
index f040244c57e73f381c0004e730ff1664f9300e00..2f4f7002f38d0138e9d8cb9cb2d3cb9d97237675 100644 (file)
@@ -839,6 +839,7 @@ static struct omap_hwmod dra7xx_gpio1_hwmod = {
        .name           = "gpio1",
        .class          = &dra7xx_gpio_hwmod_class,
        .clkdm_name     = "wkupaon_clkdm",
+       .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .main_clk       = "wkupaon_iclk_mux",
        .prcm = {
                .omap4 = {
index 71a34e8c345a5b19b98f42cc368f796118d3214f..57058ac46f49733887e439012afa3247f03e1737 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/mach/arch.h>
 
 #include "db8500-regs.h"
+#include "pm_domains.h"
 
 static int __init ux500_l2x0_unlock(void)
 {
@@ -157,6 +158,9 @@ static const struct of_device_id u8500_local_bus_nodes[] = {
 
 static void __init u8500_init_machine(void)
 {
+       /* Initialize ux500 power domains */
+       ux500_pm_domains_init();
+
        /* automatically probe child nodes of dbx5x0 devices */
        if (of_machine_is_compatible("st-ericsson,u8540"))
                of_platform_populate(NULL, u8500_local_bus_nodes,
index a970e7fcba9e02fe6e2651cd5cfca76321d26314..f6c33a0c1c610cf17881fc276725817acdb4b29d 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/of_address.h>
 
 #include "db8500-regs.h"
-#include "pm_domains.h"
 
 /* ARM WFI Standby signal register */
 #define PRCM_ARM_WFI_STANDBY    (prcmu_base + 0x130)
@@ -203,7 +202,4 @@ void __init ux500_pm_init(u32 phy_base, u32 size)
 
        /* Set up ux500 suspend callbacks. */
        suspend_set_ops(UX500_SUSPEND_OPS);
-
-       /* Initialize ux500 power domains */
-       ux500_pm_domains_init();
 }
index 3b8e728cc9443975c6cd66c63350a2074df310b7..91537d90f5f526e4e9135b2b0e5403141172a4cb 100644 (file)
@@ -344,6 +344,11 @@ void __init arm_mm_memblock_reserve(void)
         * reserved here.
         */
 #endif
+       /*
+        * In any case, always ensure address 0 is never used as many things
+        * get very confused if 0 is returned as a legitimate address.
+        */
+       memblock_reserve(0, 1);
 }
 
 void __init adjust_lowmem_bounds(void)
index e71eefa2e427bf2703ba7f6c7fb7a6577d8ab390..0641ba54ab62ae9786cb12ab88b2666b5c783b7a 100644 (file)
@@ -1,7 +1,7 @@
 #include <linux/bootmem.h>
 #include <linux/gfp.h>
 #include <linux/export.h>
-#include <linux/rwlock.h>
+#include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
index 9b41f1e3b1a039cd45fe842e10abff0181186fdf..939b310913cf38cd7ca3136128fb5440340d7d12 100644 (file)
@@ -50,17 +50,22 @@ KBUILD_CFLAGS       += -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS  += $(call cc-option, -mpc-relative-literal-loads)
 KBUILD_AFLAGS  += $(lseinstr) $(brokengasinst)
 
+KBUILD_CFLAGS  += $(call cc-option,-mabi=lp64)
+KBUILD_AFLAGS  += $(call cc-option,-mabi=lp64)
+
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS        += -mbig-endian
 CHECKFLAGS     += -D__AARCH64EB__
 AS             += -EB
 LD             += -EB
+LDFLAGS                += -maarch64linuxb
 UTS_MACHINE    := aarch64_be
 else
 KBUILD_CPPFLAGS        += -mlittle-endian
 CHECKFLAGS     += -D__AARCH64EL__
 AS             += -EL
 LD             += -EL
+LDFLAGS                += -maarch64linux
 UTS_MACHINE    := aarch64
 endif
 
index caf8b6fbe5e350de2095d16489f19ce734ca4bc3..d06e34b5d192323ffef69303330e1f37a9a27310 100644 (file)
        chosen {
                stdout-path = "serial0:115200n8";
        };
-
-       reg_vcc3v3: vcc3v3 {
-               compatible = "regulator-fixed";
-               regulator-name = "vcc3v3";
-               regulator-min-microvolt = <3300000>;
-               regulator-max-microvolt = <3300000>;
-       };
 };
 
 &ehci0 {
@@ -91,7 +84,7 @@
 &mmc0 {
        pinctrl-names = "default";
        pinctrl-0 = <&mmc0_pins>;
-       vmmc-supply = <&reg_vcc3v3>;
+       vmmc-supply = <&reg_dcdc1>;
        cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
        cd-inverted;
        disable-wp;
index c89010e564888ee76f0f2764a10592fb59ab7672..4157987f4a3d2515053c1b5a49031c32f7f3b652 100644 (file)
 &sd_emmc_a {
        status = "okay";
        pinctrl-0 = <&sdio_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdio_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
        #address-cells = <1>;
        #size-cells = <0>;
 
 &sd_emmc_b {
        status = "okay";
        pinctrl-0 = <&sdcard_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdcard_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <4>;
        cap-sd-highspeed;
 &sd_emmc_c {
        status = "okay";
        pinctrl-0 = <&emmc_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&emmc_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <8>;
-       cap-sd-highspeed;
        cap-mmc-highspeed;
        max-frequency = <200000000>;
        non-removable;
index 9697a7a794644bdfd5e7dbedfa6f71dcd8451e24..4b17a76959b2f6bff6275013d95b74c091032c51 100644 (file)
 
                states = <3300000 0>,
                         <1800000 1>;
+
+               regulator-settling-time-up-us = <100>;
+               regulator-settling-time-down-us = <5000>;
        };
 
        wifi_32k: wifi-32k {
 &sd_emmc_a {
        status = "okay";
        pinctrl-0 = <&sdio_pins>, <&sdio_irq_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdio_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
        #address-cells = <1>;
        #size-cells = <0>;
 
 &sd_emmc_b {
        status = "okay";
        pinctrl-0 = <&sdcard_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdcard_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <4>;
        cap-sd-highspeed;
-       max-frequency = <100000000>;
+       sd-uhs-sdr12;
+       sd-uhs-sdr25;
+       sd-uhs-sdr50;
+       sd-uhs-sdr104;
+       max-frequency = <200000000>;
        disable-wp;
 
        cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
 &sd_emmc_c {
        status = "disabled";
        pinctrl-0 = <&emmc_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&emmc_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <8>;
-       cap-sd-highspeed;
        max-frequency = <200000000>;
        non-removable;
        disable-wp;
index 9c59c3c6d1b6a5d2e3d8cd9d234caef971b352c3..38dfdde5c1473c71f5eb04ed779d1537d386fe31 100644 (file)
@@ -51,7 +51,7 @@
 / {
        compatible = "nexbox,a95x", "amlogic,meson-gxbb";
        model = "NEXBOX A95X";
-       
+
        aliases {
                serial0 = &uart_AO;
        };
 &sd_emmc_a {
        status = "okay";
        pinctrl-0 = <&sdio_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdio_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
        #address-cells = <1>;
        #size-cells = <0>;
 
 &sd_emmc_b {
        status = "okay";
        pinctrl-0 = <&sdcard_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdcard_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <4>;
        cap-sd-highspeed;
 &sd_emmc_c {
        status = "okay";
        pinctrl-0 = <&emmc_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&emmc_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <8>;
-       cap-sd-highspeed;
        cap-mmc-highspeed;
        max-frequency = <200000000>;
        non-removable;
index d147c853ab054d86affa734311ae2c9df713ea58..1ffa1c238a725348cdcc10be5afe35be7dac1d60 100644 (file)
@@ -50,7 +50,7 @@
 / {
        compatible = "hardkernel,odroid-c2", "amlogic,meson-gxbb";
        model = "Hardkernel ODROID-C2";
-       
+
        aliases {
                serial0 = &uart_AO;
        };
 &sd_emmc_b {
        status = "okay";
        pinctrl-0 = <&sdcard_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdcard_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <4>;
        cap-sd-highspeed;
 &sd_emmc_c {
        status = "okay";
        pinctrl-0 = <&emmc_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&emmc_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <8>;
-       cap-sd-highspeed;
        max-frequency = <200000000>;
        non-removable;
        disable-wp;
index 81ffc689a5bf42532f25706545b1f84cb5dcdbac..23c08c3afd0ab499255ff4502bc9a472aa9d4548 100644 (file)
 &sd_emmc_a {
        status = "okay";
        pinctrl-0 = <&sdio_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdio_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
        #address-cells = <1>;
        #size-cells = <0>;
 
 &sd_emmc_b {
        status = "okay";
        pinctrl-0 = <&sdcard_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdcard_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <4>;
        cap-sd-highspeed;
+       sd-uhs-sdr12;
+       sd-uhs-sdr25;
+       sd-uhs-sdr50;
        max-frequency = <100000000>;
        disable-wp;
 
 &sd_emmc_c {
        status = "okay";
        pinctrl-0 = <&emmc_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&emmc_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <8>;
-       cap-sd-highspeed;
        cap-mmc-highspeed;
        max-frequency = <200000000>;
        non-removable;
index 346753fb632431f1c2e2af4d9c72b740a3f8c3b8..f2bc6dea1fc62235987f28d6ff1871913ee5753d 100644 (file)
 &sd_emmc_a {
        status = "okay";
        pinctrl-0 = <&sdio_pins &sdio_irq_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdio_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
        #address-cells = <1>;
        #size-cells = <0>;
 
 &sd_emmc_b {
        status = "okay";
        pinctrl-0 = <&sdcard_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdcard_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <4>;
        cap-sd-highspeed;
 &sd_emmc_c {
        status = "okay";
        pinctrl-0 = <&emmc_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&emmc_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <8>;
-       cap-sd-highspeed;
        cap-mmc-highspeed;
        max-frequency = <200000000>;
        non-removable;
index 52f1687e7a099af5789ed97e3597b727a8ee4d82..af834cdbba791ab4432065eea6b855396e1937f9 100644 (file)
                        };
                };
 
+               emmc_clk_gate_pins: emmc_clk_gate {
+                       mux {
+                               groups = "BOOT_8";
+                               function = "gpio_periphs";
+                       };
+                       cfg-pull-down {
+                               pins = "BOOT_8";
+                               bias-pull-down;
+                       };
+               };
+
                nor_pins: nor {
                        mux {
                                groups = "nor_d",
                        };
                };
 
+               sdcard_clk_gate_pins: sdcard_clk_gate {
+                       mux {
+                               groups = "CARD_2";
+                               function = "gpio_periphs";
+                       };
+                       cfg-pull-down {
+                               pins = "CARD_2";
+                               bias-pull-down;
+                       };
+               };
+
                sdio_pins: sdio {
                        mux {
                                groups = "sdio_d0",
                        };
                };
 
+               sdio_clk_gate_pins: sdio_clk_gate {
+                       mux {
+                               groups = "GPIOX_4";
+                               function = "gpio_periphs";
+                       };
+                       cfg-pull-down {
+                               pins = "GPIOX_4";
+                               bias-pull-down;
+                       };
+               };
+
                sdio_irq_pins: sdio_irq {
                        mux {
                                groups = "sdio_irq";
 
 &sd_emmc_a {
        clocks = <&clkc CLKID_SD_EMMC_A>,
-                <&xtal>,
+                <&clkc CLKID_SD_EMMC_A_CLK0>,
                 <&clkc CLKID_FCLK_DIV2>;
        clock-names = "core", "clkin0", "clkin1";
 };
 
 &sd_emmc_b {
        clocks = <&clkc CLKID_SD_EMMC_B>,
-                <&xtal>,
+                <&clkc CLKID_SD_EMMC_B_CLK0>,
                 <&clkc CLKID_FCLK_DIV2>;
        clock-names = "core", "clkin0", "clkin1";
 };
 
 &sd_emmc_c {
        clocks = <&clkc CLKID_SD_EMMC_C>,
-                <&xtal>,
+                <&clkc CLKID_SD_EMMC_C_CLK0>,
                 <&clkc CLKID_FCLK_DIV2>;
        clock-names = "core", "clkin0", "clkin1";
 };
index 2a5804ce7f4bd6bbfe1285f3cc40786778576a09..977b4240f3c1b0de15e0b32c70aafce1123acd01 100644 (file)
 &sd_emmc_b {
        status = "okay";
        pinctrl-0 = <&sdcard_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdcard_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <4>;
        cap-sd-highspeed;
 &sd_emmc_c {
        status = "okay";
        pinctrl-0 = <&emmc_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&emmc_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <8>;
-       cap-sd-highspeed;
        cap-mmc-highspeed;
        max-frequency = <100000000>;
        non-removable;
index 69ca14ac10fa097633562cb96db4f088e543ebd9..64c54c92e214d686154de4b237f71a5212c5eb20 100644 (file)
@@ -91,6 +91,9 @@
 
                states = <3300000 0>,
                         <1800000 1>;
+
+               regulator-settling-time-up-us = <200>;
+               regulator-settling-time-down-us = <50000>;
        };
 
        vddio_boot: regulator-vddio_boot {
 &sd_emmc_b {
        status = "okay";
        pinctrl-0 = <&sdcard_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdcard_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <4>;
        cap-sd-highspeed;
+       sd-uhs-sdr12;
+       sd-uhs-sdr25;
+       sd-uhs-sdr50;
        max-frequency = <100000000>;
        disable-wp;
 
 &sd_emmc_c {
        status = "okay";
        pinctrl-0 = <&emmc_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&emmc_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <8>;
        cap-mmc-highspeed;
+       mmc-ddr-3_3v;
        max-frequency = <50000000>;
        non-removable;
        disable-wp;
index 4c2ac7650fcd3317a673efdd7308891a154272d8..1b8f32867aa10ab9ae75e90c995cdf16a088632e 100644 (file)
 &sd_emmc_a {
        status = "okay";
        pinctrl-0 = <&sdio_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdio_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
        #address-cells = <1>;
        #size-cells = <0>;
 
 &sd_emmc_b {
        status = "okay";
        pinctrl-0 = <&sdcard_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdcard_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <4>;
        cap-sd-highspeed;
 &sd_emmc_c {
        status = "okay";
        pinctrl-0 = <&emmc_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&emmc_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <8>;
-       cap-sd-highspeed;
        cap-mmc-highspeed;
        max-frequency = <200000000>;
        non-removable;
index f3eea8e89d12b7a4479851a8d1e0482374235568..129af9068814d3b0f8e33090adbcda435ea3e202 100644 (file)
@@ -95,7 +95,8 @@
 &sd_emmc_a {
        status = "okay";
        pinctrl-0 = <&sdio_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdio_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
        #address-cells = <1>;
        #size-cells = <0>;
 
 &sd_emmc_b {
        status = "okay";
        pinctrl-0 = <&sdcard_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdcard_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <4>;
        cap-sd-highspeed;
 &sd_emmc_c {
        status = "okay";
        pinctrl-0 = <&emmc_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&emmc_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <8>;
-       cap-sd-highspeed;
        cap-mmc-highspeed;
        max-frequency = <200000000>;
        non-removable;
index d6876e64979e7a5c5d539dc3eba7006fe4d79315..d8dd3298b15cfe5a37d653b8c6d5d39f5ca8c008 100644 (file)
                        };
                };
 
+               emmc_clk_gate_pins: emmc_clk_gate {
+                       mux {
+                               groups = "BOOT_8";
+                               function = "gpio_periphs";
+                       };
+                       cfg-pull-down {
+                               pins = "BOOT_8";
+                               bias-pull-down;
+                       };
+               };
+
                nor_pins: nor {
                        mux {
                                groups = "nor_d",
                        };
                };
 
+               sdcard_clk_gate_pins: sdcard_clk_gate {
+                       mux {
+                               groups = "CARD_2";
+                               function = "gpio_periphs";
+                       };
+                       cfg-pull-down {
+                               pins = "CARD_2";
+                               bias-pull-down;
+                       };
+               };
+
                sdio_pins: sdio {
                        mux {
                                groups = "sdio_d0",
                        };
                };
 
+               sdio_clk_gate_pins: sdio_clk_gate {
+                       mux {
+                               groups = "GPIOX_4";
+                               function = "gpio_periphs";
+                       };
+                       cfg-pull-down {
+                               pins = "GPIOX_4";
+                               bias-pull-down;
+                       };
+               };
+
                sdio_irq_pins: sdio_irq {
                        mux {
                                groups = "sdio_irq";
 
 &sd_emmc_a {
        clocks = <&clkc CLKID_SD_EMMC_A>,
-                <&xtal>,
+                <&clkc CLKID_SD_EMMC_A_CLK0>,
                 <&clkc CLKID_FCLK_DIV2>;
        clock-names = "core", "clkin0", "clkin1";
 };
 
 &sd_emmc_b {
        clocks = <&clkc CLKID_SD_EMMC_B>,
-                <&xtal>,
+                <&clkc CLKID_SD_EMMC_B_CLK0>,
                 <&clkc CLKID_FCLK_DIV2>;
        clock-names = "core", "clkin0", "clkin1";
 };
 
 &sd_emmc_c {
        clocks = <&clkc CLKID_SD_EMMC_C>,
-                <&xtal>,
+                <&clkc CLKID_SD_EMMC_C_CLK0>,
                 <&clkc CLKID_FCLK_DIV2>;
        clock-names = "core", "clkin0", "clkin1";
 };
index 9b10c5f4f8c0311af38380f7d5c4c2a0e709e747..22c697732f668c5fadb5c445a849a80e57956fe0 100644 (file)
 &sd_emmc_b {
        status = "okay";
        pinctrl-0 = <&sdcard_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&sdcard_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <4>;
        cap-sd-highspeed;
 &sd_emmc_c {
        status = "okay";
        pinctrl-0 = <&emmc_pins>;
-       pinctrl-names = "default";
+       pinctrl-1 = <&emmc_clk_gate_pins>;
+       pinctrl-names = "default", "clk-gate";
 
        bus-width = <8>;
-       cap-sd-highspeed;
        cap-mmc-highspeed;
        max-frequency = <200000000>;
        non-removable;
index 08f1dd69b6792e3ceb54bd1e56697ce14f35df12..470f72bb863c5ff8850508cefd828b1c2e4f5f24 100644 (file)
        pinctrl-names = "default";
 
        bus-width = <8>;
-       cap-sd-highspeed;
        cap-mmc-highspeed;
        max-frequency = <200000000>;
        non-removable;
index 4d360713ed12459199ca726f34c32266739865dc..30d48ecf46e087b24063c0ac546d45ae9afb0d88 100644 (file)
 
                        ap_syscon: system-controller@6f4000 {
                                compatible = "syscon", "simple-mfd";
-                               reg = <0x6f4000 0x1000>;
+                               reg = <0x6f4000 0x2000>;
 
                                ap_clk: clock {
                                        compatible = "marvell,ap806-clock";
                                        compatible = "marvell,ap806-pinctrl";
                                };
 
-                               ap_gpio: gpio {
+                               ap_gpio: gpio@1040 {
                                        compatible = "marvell,armada-8k-gpio";
                                        offset = <0x1040>;
                                        ngpios = <20>;
index 8263a8a504a8fd11896da6aebc99eabdd4be928a..f2aa2a81de4dd2e982ec1e5fd5ae67f01bb08a63 100644 (file)
                                /* non-prefetchable memory */
                                0x82000000 0 0xf6000000 0  0xf6000000 0 0xf00000>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
                        interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
                        num-lanes = <1>;
                        clocks = <&cpm_clk 1 13>;
                                /* non-prefetchable memory */
                                0x82000000 0 0xf7000000 0  0xf7000000 0 0xf00000>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
                        interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
 
                        num-lanes = <1>;
                                /* non-prefetchable memory */
                                0x82000000 0 0xf8000000 0  0xf8000000 0 0xf00000>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
                        interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
 
                        num-lanes = <1>;
index b71ee6c83668e8900a4a889614ed83e97e718f85..4fe70323abb3a58c374a762607bb63f08b2cb0e0 100644 (file)
                                /* non-prefetchable memory */
                                0x82000000 0 0xfa000000 0  0xfa000000 0 0xf00000>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
                        interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
                        num-lanes = <1>;
                        clocks = <&cps_clk 1 13>;
                                /* non-prefetchable memory */
                                0x82000000 0 0xfb000000 0  0xfb000000 0 0xf00000>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
                        interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
 
                        num-lanes = <1>;
                                /* non-prefetchable memory */
                                0x82000000 0 0xfc000000 0  0xfc000000 0 0xf00000>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
                        interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
 
                        num-lanes = <1>;
index 4786c67b5e6527fd99d27aa969ef0b2d4274b4ad..d9d885006a8e8c9a6630f635132b6d0c24370ccc 100644 (file)
@@ -62,6 +62,7 @@
                brightness-levels = <256 128 64 16 8 4 0>;
                default-brightness-level = <6>;
 
+               power-supply = <&reg_12v>;
                enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
        };
 
                regulator-always-on;
        };
 
+       reg_12v: regulator2 {
+               compatible = "regulator-fixed";
+               regulator-name = "fixed-12V";
+               regulator-min-microvolt = <12000000>;
+               regulator-max-microvolt = <12000000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
        rsnd_ak4613: sound {
                compatible = "simple-audio-card";
 
index 6d615cb6e64d07cebcfa0a7ecebf04b8afb152b2..41d61840fb99ce52ec553c94e119ab63bb79cdbe 100644 (file)
        vop_mmu: iommu@ff373f00 {
                compatible = "rockchip,iommu";
                reg = <0x0 0xff373f00 0x0 0x100>;
-               interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH 0>;
+               interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "vop_mmu";
                #iommu-cells = <0>;
                status = "disabled";
index e0518b4bc6c2a018963787d0096077ab540b78e8..1070c8264c13376a578338e95421f71321825243 100644 (file)
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x0 0x0>;
                        enable-method = "psci";
-                       clocks = <&cru ARMCLKL>;
-                       operating-points-v2 = <&cluster0_opp>;
+
                        #cooling-cells = <2>; /* min followed by max */
                };
 
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x0 0x1>;
                        enable-method = "psci";
-                       clocks = <&cru ARMCLKL>;
-                       operating-points-v2 = <&cluster0_opp>;
                };
 
                cpu_l2: cpu@2 {
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x0 0x2>;
                        enable-method = "psci";
-                       clocks = <&cru ARMCLKL>;
-                       operating-points-v2 = <&cluster0_opp>;
                };
 
                cpu_l3: cpu@3 {
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x0 0x3>;
                        enable-method = "psci";
-                       clocks = <&cru ARMCLKL>;
-                       operating-points-v2 = <&cluster0_opp>;
                };
 
                cpu_b0: cpu@100 {
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x0 0x100>;
                        enable-method = "psci";
-                       clocks = <&cru ARMCLKB>;
-                       operating-points-v2 = <&cluster1_opp>;
+
                        #cooling-cells = <2>; /* min followed by max */
                };
 
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x0 0x101>;
                        enable-method = "psci";
-                       clocks = <&cru ARMCLKB>;
-                       operating-points-v2 = <&cluster1_opp>;
                };
 
                cpu_b2: cpu@102 {
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x0 0x102>;
                        enable-method = "psci";
-                       clocks = <&cru ARMCLKB>;
-                       operating-points-v2 = <&cluster1_opp>;
                };
 
                cpu_b3: cpu@103 {
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x0 0x103>;
                        enable-method = "psci";
-                       clocks = <&cru ARMCLKB>;
-                       operating-points-v2 = <&cluster1_opp>;
-               };
-       };
-
-       cluster0_opp: opp-table0 {
-               compatible = "operating-points-v2";
-               opp-shared;
-
-               opp00 {
-                       opp-hz = /bits/ 64 <312000000>;
-                       opp-microvolt = <950000>;
-                       clock-latency-ns = <40000>;
-               };
-               opp01 {
-                       opp-hz = /bits/ 64 <408000000>;
-                       opp-microvolt = <950000>;
-               };
-               opp02 {
-                       opp-hz = /bits/ 64 <600000000>;
-                       opp-microvolt = <950000>;
-               };
-               opp03 {
-                       opp-hz = /bits/ 64 <816000000>;
-                       opp-microvolt = <1025000>;
-               };
-               opp04 {
-                       opp-hz = /bits/ 64 <1008000000>;
-                       opp-microvolt = <1125000>;
-               };
-       };
-
-       cluster1_opp: opp-table1 {
-               compatible = "operating-points-v2";
-               opp-shared;
-
-               opp00 {
-                       opp-hz = /bits/ 64 <312000000>;
-                       opp-microvolt = <950000>;
-                       clock-latency-ns = <40000>;
-               };
-               opp01 {
-                       opp-hz = /bits/ 64 <408000000>;
-                       opp-microvolt = <950000>;
-               };
-               opp02 {
-                       opp-hz = /bits/ 64 <600000000>;
-                       opp-microvolt = <950000>;
-               };
-               opp03 {
-                       opp-hz = /bits/ 64 <816000000>;
-                       opp-microvolt = <975000>;
-               };
-               opp04 {
-                       opp-hz = /bits/ 64 <1008000000>;
-                       opp-microvolt = <1050000>;
                };
        };
 
        iep_mmu: iommu@ff900800 {
                compatible = "rockchip,iommu";
                reg = <0x0 0xff900800 0x0 0x100>;
-               interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>;
+               interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "iep_mmu";
                #iommu-cells = <0>;
                status = "disabled";
index 7fd4bfcaa38e33c8b58ef60dc7adb99772b04cc1..fef82274a39dac27fc6e293affc81b4ff5d5d64b 100644 (file)
                                regulator-always-on;
                                regulator-boot-on;
                                regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <3300000>;
+                               regulator-max-microvolt = <3000000>;
                                regulator-state-mem {
                                        regulator-on-in-suspend;
-                                       regulator-suspend-microvolt = <3300000>;
+                                       regulator-suspend-microvolt = <3000000>;
                                };
                        };
 
index 53ff3d191a1d176cc0d7c6d380894b48e42ec7da..910628d18add07d9a39974bc6ce2ac4a403adb81 100644 (file)
                        vcc_sd: LDO_REG4 {
                                regulator-name = "vcc_sd";
                                regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <3300000>;
+                               regulator-max-microvolt = <3000000>;
                                regulator-always-on;
                                regulator-boot-on;
                                regulator-state-mem {
                                        regulator-on-in-suspend;
-                                       regulator-suspend-microvolt = <3300000>;
+                                       regulator-suspend-microvolt = <3000000>;
                                };
                        };
 
index 6c30bb02210d80a2d78843cccaca7a3d83f971cd..0f873c897d0de5a75f9d4e4d90d7c658b7a173d3 100644 (file)
                                regulator-always-on;
                                regulator-boot-on;
                                regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <3300000>;
+                               regulator-max-microvolt = <3000000>;
                                regulator-state-mem {
                                        regulator-on-in-suspend;
-                                       regulator-suspend-microvolt = <3300000>;
+                                       regulator-suspend-microvolt = <3000000>;
                                };
                        };
 
index d79e9b3265b98cbe0955c8139950627d2af09492..ab7629c5b856d7a6ed2ac8e95600262e098a01d6 100644 (file)
                compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi";
                reg = <0x0 0xff960000 0x0 0x8000>;
                interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH 0>;
-               clocks = <&cru SCLK_MIPIDPHY_REF>, <&cru PCLK_MIPI_DSI0>,
-                        <&cru SCLK_DPHY_TX0_CFG>;
-               clock-names = "ref", "pclk", "phy_cfg";
+               clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI0>,
+                        <&cru SCLK_DPHY_TX0_CFG>, <&cru PCLK_VIO_GRF>;
+               clock-names = "ref", "pclk", "phy_cfg", "grf";
                power-domains = <&power RK3399_PD_VIO>;
                rockchip,grf = <&grf>;
                status = "disabled";
index 636c1bced7d4a9dd60d3a6489997ebd82b83d96e..1b266292f0bee00ce1952005479053e820b5ca0e 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __ASM_LINKAGE_H
 #define __ASM_LINKAGE_H
 
-#define __ALIGN                .align 4
-#define __ALIGN_STR    ".align 4"
+#define __ALIGN                .align 2
+#define __ALIGN_STR    ".align 2"
 
 #endif
index 3585a5e2615105162f82510f64917b094974a2a4..f7c4d2146aed09b715a59506f7e01ab0d167f3de 100644 (file)
 #define KERNEL_END        _end
 
 /*
- * The size of the KASAN shadow region. This should be 1/8th of the
- * size of the entire kernel virtual address space.
+ * KASAN requires 1/8th of the kernel virtual address space for the shadow
+ * region. KASAN can bloat the stack significantly, so double the (minimum)
+ * stack size when KASAN is in use.
  */
 #ifdef CONFIG_KASAN
 #define KASAN_SHADOW_SIZE      (UL(1) << (VA_BITS - 3))
+#define KASAN_THREAD_SHIFT     1
 #else
 #define KASAN_SHADOW_SIZE      (0)
+#define KASAN_THREAD_SHIFT     0
 #endif
 
-#define MIN_THREAD_SHIFT       14
+#define MIN_THREAD_SHIFT       (14 + KASAN_THREAD_SHIFT)
 
 /*
  * VMAP'd stacks are allocated at page granularity, so we must ensure that such
index bc4e92337d1690045b7b5c97efd11ad4fb8bb453..b46e54c2399b58b6451ea9dacc5c033115b05c64 100644 (file)
@@ -401,7 +401,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 /* Find an entry in the third-level page table. */
 #define pte_index(addr)                (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
-#define pte_offset_phys(dir,addr)      (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
+#define pte_offset_phys(dir,addr)      (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
 #define pte_offset_kernel(dir,addr)    ((pte_t *)__va(pte_offset_phys((dir), (addr))))
 
 #define pte_offset_map(dir,addr)       pte_offset_kernel((dir), (addr))
index f0e6d717885b1fcf3b22f64c10c38f19c25f809d..d06fbe4cd38d7423c900aff64b0e728f995478d3 100644 (file)
@@ -649,4 +649,4 @@ static int __init armv8_deprecated_init(void)
        return 0;
 }
 
-late_initcall(armv8_deprecated_init);
+core_initcall(armv8_deprecated_init);
index cd52d365d1f01aefae2f63e6a0207e65e0fdcb57..21e2c95d24e72a8e339406af601f02690675d3d8 100644 (file)
@@ -1307,4 +1307,4 @@ static int __init enable_mrs_emulation(void)
        return 0;
 }
 
-late_initcall(enable_mrs_emulation);
+core_initcall(enable_mrs_emulation);
index 3a68cf38a6b36712e1a5a6a8463a19d32bbdb4f5..5d547deb6996c0091c64f14de18b5b8a75c88ae4 100644 (file)
@@ -321,6 +321,8 @@ void kernel_neon_end(void)
 }
 EXPORT_SYMBOL(kernel_neon_end);
 
+#ifdef CONFIG_EFI
+
 static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state);
 static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
 
@@ -370,6 +372,8 @@ void __efi_fpsimd_end(void)
                kernel_neon_end();
 }
 
+#endif /* CONFIG_EFI */
+
 #endif /* CONFIG_KERNEL_MODE_NEON */
 
 #ifdef CONFIG_CPU_PM
@@ -440,4 +444,4 @@ static int __init fpsimd_init(void)
 
        return 0;
 }
-late_initcall(fpsimd_init);
+core_initcall(fpsimd_init);
index 7434ec0c7a271632adce1a497bc097306199e580..0b243ecaf7ac87faa81fed0a09db996757d71f4e 100644 (file)
@@ -384,6 +384,7 @@ ENTRY(kimage_vaddr)
  * booted in EL1 or EL2 respectively.
  */
 ENTRY(el2_setup)
+       msr     SPsel, #1                       // We want to use SP_EL{1,2}
        mrs     x0, CurrentEL
        cmp     x0, #CurrentEL_EL2
        b.eq    1f
index c45214f8fb5497fdaed8665b8b066bbfc0118ed2..0bdc96c61bc0f2b14f627142a2778409c9c6a7d0 100644 (file)
@@ -751,10 +751,10 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
         */
        trace_hardirqs_off();
 
-       /* Check valid user FS if needed */
-       addr_limit_user_check();
-
        do {
+               /* Check valid user FS if needed */
+               addr_limit_user_check();
+
                if (thread_flags & _TIF_NEED_RESCHED) {
                        schedule();
                } else {
index 89993c4be1befe3d7629bf45174f10081027d2a2..b64958b23a7fa193c23dd1a9039f69f4e9f0b863 100644 (file)
@@ -97,7 +97,7 @@ static void data_abort_decode(unsigned int esr)
                         (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
                         (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
        } else {
-               pr_alert("  ISV = 0, ISS = 0x%08lu\n", esr & ESR_ELx_ISS_MASK);
+               pr_alert("  ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
        }
 
        pr_alert("  CM = %lu, WnR = %lu\n",
@@ -651,7 +651,7 @@ static const struct fault_info fault_info[] = {
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 0 translation fault"     },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 1 translation fault"     },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 2 translation fault"     },
-       { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "level 3 translation fault"     },
+       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 3 translation fault"     },
        { do_bad,               SIGBUS,  0,             "unknown 8"                     },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 1 access flag fault"     },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 2 access flag fault"     },
index 7c87b5be53b5b74c76fbe03829034b1e96ac3c7c..8f7cce829f8e2630cb88e81cdb024a8375a59e4f 100644 (file)
@@ -92,9 +92,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-#define copy_segments(tsk, mm)         do { } while (0)
-#define release_segments(mm)           do { } while (0)
-
 /*
  * saved kernel SP and DP of a blocked thread.
  */
index e4d08d74ed9f8dc4f81140fcf60442378f02cf58..021cce78b4010d7b5c727d1e86812f85e81671bf 100644 (file)
@@ -92,10 +92,6 @@ static inline void release_thread(struct task_struct *dead_task)
 extern asmlinkage void save_user_regs(struct user_context *target);
 extern asmlinkage void *restore_user_regs(const struct user_context *target, ...);
 
-#define copy_segments(tsk, mm)         do { } while (0)
-#define release_segments(mm)           do { } while (0)
-#define forget_segments()              do { } while (0)
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define        KSTK_EIP(tsk)   ((tsk)->thread.frame0->pc)
index 87cde1e4b38ca026483729dec6ea61ad5c242a97..0777f3a8a1f37d6f93d707a3333be23c486492e2 100644 (file)
@@ -194,6 +194,10 @@ config TIMER_DIVIDE
        int "Timer divider (integer)"
        default "128"
 
+config CPU_BIG_ENDIAN
+        bool "Generate big endian code"
+       default n
+
 config CPU_LITTLE_ENDIAN
         bool "Generate little endian code"
        default n
index 657874eeeccc262c11268094e0f0ba530e05dd92..c70fa9ac71690d2529d97e1040a87ed01a8a35da 100644 (file)
@@ -118,14 +118,6 @@ struct mm_struct;
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/* Copy and release all segment info associated with a VM */
-extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
-extern void release_segments(struct mm_struct * mm);
-
-/* Copy and release all segment info associated with a VM */
-#define copy_segments(p, mm)  do { } while (0)
-#define release_segments(mm)  do { } while (0)
-
 unsigned long get_wchan(struct task_struct *p);
 #define KSTK_EIP(tsk)  ((tsk)->thread.lr)
 #define KSTK_ESP(tsk)  ((tsk)->thread.sp)
index 647dd94a0c399f13f8311e751d273b033908328e..72b96f282689aec63378d85cc50b079ddff2b799 100644 (file)
@@ -114,6 +114,15 @@ static void set_eit_vector_entries(void)
        _flush_cache_copyback_all();
 }
 
+void abort(void)
+{
+       BUG();
+
+       /* if that doesn't kill us, halt */
+       panic("Oops failed to kill thread");
+}
+EXPORT_SYMBOL(abort);
+
 void __init trap_init(void)
 {
        set_eit_vector_entries();
index ec6a49076980fff90dc04ece4b389ea1782449b9..8ae92d6abfd2e24d9e6d767fab2fc83db8720fcf 100644 (file)
@@ -131,9 +131,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-#define copy_segments(tsk, mm)         do { } while (0)
-#define release_segments(mm)           do { } while (0)
-
 /*
  * Return saved PC of a blocked thread.
  */
index 9d26abdf0dc1c2349e5822b01416f0f0cd154b58..4f798aa671ddd2f481c35689f03be1ea73318ae8 100644 (file)
@@ -39,7 +39,7 @@ config MICROBLAZE
 # Endianness selection
 choice
        prompt "Endianness selection"
-       default CPU_BIG_ENDIAN
+       default CPU_LITTLE_ENDIAN
        help
          microblaze architectures can be configured for either little or
          big endian formats. Be sure to select the appropriate mode.
index e77a596f3f1efcfe94c4bc89b9b3cc860d589210..06609ca361150ab77529bf0999d8e258ad25d62c 100644 (file)
@@ -7,6 +7,7 @@ generic-y += fcntl.h
 generic-y += ioctl.h
 generic-y += ioctls.h
 generic-y += ipcbuf.h
+generic-y += kvm_para.h
 generic-y += mman.h
 generic-y += msgbuf.h
 generic-y += param.h
index e45ada8fb00669a8889284db230908ed64f2dda1..94700c5270a91e69cb0acb7b32b20f272899a1a6 100644 (file)
@@ -165,7 +165,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                             unsigned long attrs)
 {
 #ifdef CONFIG_MMU
-       unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+       unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
        unsigned long pfn;
index 730c0b03060d0c941eaf29ee394f3c55be121954..b816cb4a25ff95eb7ca737f3296a71c5aea54830 100644 (file)
 #include "pci.h"
 
 static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev);
-static const struct ath79_pci_irq *ath79_pci_irq_map __initdata;
-static unsigned ath79_pci_nr_irqs __initdata;
+static const struct ath79_pci_irq *ath79_pci_irq_map;
+static unsigned ath79_pci_nr_irqs;
 
-static const struct ath79_pci_irq ar71xx_pci_irq_map[] __initconst = {
+static const struct ath79_pci_irq ar71xx_pci_irq_map[] = {
        {
                .slot   = 17,
                .pin    = 1,
@@ -41,7 +41,7 @@ static const struct ath79_pci_irq ar71xx_pci_irq_map[] __initconst = {
        }
 };
 
-static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = {
+static const struct ath79_pci_irq ar724x_pci_irq_map[] = {
        {
                .slot   = 0,
                .pin    = 1,
@@ -49,7 +49,7 @@ static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = {
        }
 };
 
-static const struct ath79_pci_irq qca955x_pci_irq_map[] __initconst = {
+static const struct ath79_pci_irq qca955x_pci_irq_map[] = {
        {
                .bus    = 0,
                .slot   = 0,
@@ -64,7 +64,7 @@ static const struct ath79_pci_irq qca955x_pci_irq_map[] __initconst = {
        },
 };
 
-int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
+int pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
 {
        int irq = -1;
        int i;
index 903f3bf48419cb917dfc15af9013674e45c23017..7e25c5cc353a8223c29ed546772ee453b994c529 100644 (file)
@@ -155,14 +155,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                return __cmpxchg_small(ptr, old, new, size);
 
        case 4:
-               return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new);
+               return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr,
+                                    (u32)old, new);
 
        case 8:
                /* lld/scd are only available for MIPS64 */
                if (!IS_ENABLED(CONFIG_64BIT))
                        return __cmpxchg_called_with_bad_pointer();
 
-               return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new);
+               return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr,
+                                    (u64)old, new);
 
        default:
                return __cmpxchg_called_with_bad_pointer();
index e4ed1bc9a7345626d316bd1192dfdbf5bf1db8b0..a6810923b3f0214dfa36eab4a28320a65bba7790 100644 (file)
@@ -1377,29 +1377,32 @@ do {                                                                    \
 
 #define __write_64bit_c0_split(source, sel, val)                       \
 do {                                                                   \
+       unsigned long long __tmp;                                       \
        unsigned long __flags;                                          \
                                                                        \
        local_irq_save(__flags);                                        \
        if (sel == 0)                                                   \
                __asm__ __volatile__(                                   \
                        ".set\tmips64\n\t"                              \
-                       "dsll\t%L0, %L0, 32\n\t"                        \
+                       "dsll\t%L0, %L1, 32\n\t"                        \
                        "dsrl\t%L0, %L0, 32\n\t"                        \
-                       "dsll\t%M0, %M0, 32\n\t"                        \
+                       "dsll\t%M0, %M1, 32\n\t"                        \
                        "or\t%L0, %L0, %M0\n\t"                         \
                        "dmtc0\t%L0, " #source "\n\t"                   \
                        ".set\tmips0"                                   \
-                       : : "r" (val));                                 \
+                       : "=&r,r" (__tmp)                               \
+                       : "r,0" (val));                                 \
        else                                                            \
                __asm__ __volatile__(                                   \
                        ".set\tmips64\n\t"                              \
-                       "dsll\t%L0, %L0, 32\n\t"                        \
+                       "dsll\t%L0, %L1, 32\n\t"                        \
                        "dsrl\t%L0, %L0, 32\n\t"                        \
-                       "dsll\t%M0, %M0, 32\n\t"                        \
+                       "dsll\t%M0, %M1, 32\n\t"                        \
                        "or\t%L0, %L0, %M0\n\t"                         \
                        "dmtc0\t%L0, " #source ", " #sel "\n\t"         \
                        ".set\tmips0"                                   \
-                       : : "r" (val));                                 \
+                       : "=&r,r" (__tmp)                               \
+                       : "r,0" (val));                                 \
        local_irq_restore(__flags);                                     \
 } while (0)
 
index 9e6c74bf66c485cc3c1a23ee1d298c1b6f81dad5..6668f67a61c3a2bb09fc23d9e2172d7a738020e1 100644 (file)
@@ -618,8 +618,7 @@ static int mipspmu_event_init(struct perf_event *event)
                return -ENOENT;
        }
 
-       if ((unsigned int)event->cpu >= nr_cpumask_bits ||
-           (event->cpu >= 0 && !cpu_online(event->cpu)))
+       if (event->cpu >= 0 && !cpu_online(event->cpu))
                return -ENODEV;
 
        if (!atomic_inc_not_zero(&active_events)) {
index 100f23dfa4384495dc4868ee0b921f6963188729..ac584c5823d08666c40c38144ad33289cc9302ae 100644 (file)
@@ -183,18 +183,20 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
 }
 
 static struct plat_stmmacenet_data ls1x_eth0_pdata = {
-       .bus_id         = 0,
-       .phy_addr       = -1,
+       .bus_id                 = 0,
+       .phy_addr               = -1,
 #if defined(CONFIG_LOONGSON1_LS1B)
-       .interface      = PHY_INTERFACE_MODE_MII,
+       .interface              = PHY_INTERFACE_MODE_MII,
 #elif defined(CONFIG_LOONGSON1_LS1C)
-       .interface      = PHY_INTERFACE_MODE_RMII,
+       .interface              = PHY_INTERFACE_MODE_RMII,
 #endif
-       .mdio_bus_data  = &ls1x_mdio_bus_data,
-       .dma_cfg        = &ls1x_eth_dma_cfg,
-       .has_gmac       = 1,
-       .tx_coe         = 1,
-       .init           = ls1x_eth_mux_init,
+       .mdio_bus_data          = &ls1x_mdio_bus_data,
+       .dma_cfg                = &ls1x_eth_dma_cfg,
+       .has_gmac               = 1,
+       .tx_coe                 = 1,
+       .rx_queues_to_use       = 1,
+       .tx_queues_to_use       = 1,
+       .init                   = ls1x_eth_mux_init,
 };
 
 static struct resource ls1x_eth0_resources[] = {
@@ -222,14 +224,16 @@ struct platform_device ls1x_eth0_pdev = {
 
 #ifdef CONFIG_LOONGSON1_LS1B
 static struct plat_stmmacenet_data ls1x_eth1_pdata = {
-       .bus_id         = 1,
-       .phy_addr       = -1,
-       .interface      = PHY_INTERFACE_MODE_MII,
-       .mdio_bus_data  = &ls1x_mdio_bus_data,
-       .dma_cfg        = &ls1x_eth_dma_cfg,
-       .has_gmac       = 1,
-       .tx_coe         = 1,
-       .init           = ls1x_eth_mux_init,
+       .bus_id                 = 1,
+       .phy_addr               = -1,
+       .interface              = PHY_INTERFACE_MODE_MII,
+       .mdio_bus_data          = &ls1x_mdio_bus_data,
+       .dma_cfg                = &ls1x_eth_dma_cfg,
+       .has_gmac               = 1,
+       .tx_coe                 = 1,
+       .rx_queues_to_use       = 1,
+       .tx_queues_to_use       = 1,
+       .init                   = ls1x_eth_mux_init,
 };
 
 static struct resource ls1x_eth1_resources[] = {
index 192542dbd9724788838a74cb5ed4b8404b0d83fc..16d9ef5a78c57086c5c5db3f518b5bb4c7b1f0b9 100644 (file)
@@ -2558,7 +2558,6 @@ dcopuop:
                                        break;
                                default:
                                        /* Reserved R6 ops */
-                                       pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
                                        return SIGILL;
                                }
                        }
@@ -2719,7 +2718,6 @@ dcopuop:
                                        break;
                                default:
                                        /* Reserved R6 ops */
-                                       pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
                                        return SIGILL;
                                }
                        }
index 7646891c4e9b18077d59ac28bfe2ffe5b82d0d37..01b7a87ea67866a19c86019ab95654185d74e84d 100644 (file)
@@ -667,7 +667,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 {
        int src, dst, r, td, ts, mem_off, b_off;
        bool need_swap, did_move, cmp_eq;
-       unsigned int target;
+       unsigned int target = 0;
        u64 t64;
        s64 t64s;
        int bpf_op = BPF_OP(insn->code);
index 1c02f5737367aa7e75569afeb98aebcd3ebb5327..b4c263f16b15eeb62c4fc93d70ff828cdfa7ac7f 100644 (file)
 #define INTC   PC104PLUS_INTC_IRQ
 #define INTD   PC104PLUS_INTD_IRQ
 
-static char irq_tab_capcella[][5] __initdata = {
+static char irq_tab_capcella[][5] = {
  [11] = { -1, INT1, INT1, INT1, INT1 },
  [12] = { -1, INT2, INT2, INT2, INT2 },
  [14] = { -1, INTA, INTB, INTC, INTD }
 };
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        return irq_tab_capcella[slot][pin];
 }
index b3ab59318d91a7a2817e6615efba1ef3fc0a2c37..44be65c3e6bb3d211d8692211b732d5f41f2caa4 100644 (file)
@@ -147,7 +147,7 @@ static void qube_raq_via_board_id_fixup(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0,
         qube_raq_via_board_id_fixup);
 
-static char irq_tab_qube1[] __initdata = {
+static char irq_tab_qube1[] = {
   [COBALT_PCICONF_CPU]    = 0,
   [COBALT_PCICONF_ETH0]           = QUBE1_ETH0_IRQ,
   [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
@@ -156,7 +156,7 @@ static char irq_tab_qube1[] __initdata = {
   [COBALT_PCICONF_ETH1]           = 0
 };
 
-static char irq_tab_cobalt[] __initdata = {
+static char irq_tab_cobalt[] = {
   [COBALT_PCICONF_CPU]    = 0,
   [COBALT_PCICONF_ETH0]           = ETH0_IRQ,
   [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
@@ -165,7 +165,7 @@ static char irq_tab_cobalt[] __initdata = {
   [COBALT_PCICONF_ETH1]           = ETH1_IRQ
 };
 
-static char irq_tab_raq2[] __initdata = {
+static char irq_tab_raq2[] = {
   [COBALT_PCICONF_CPU]    = 0,
   [COBALT_PCICONF_ETH0]           = ETH0_IRQ,
   [COBALT_PCICONF_RAQSCSI] = RAQ2_SCSI_IRQ,
@@ -174,7 +174,7 @@ static char irq_tab_raq2[] __initdata = {
   [COBALT_PCICONF_ETH1]           = ETH1_IRQ
 };
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        if (cobalt_board_id <= COBALT_BRD_ID_QUBE1)
                return irq_tab_qube1[slot];
index 19caf775c20645ada7f1bbc86777c2bd45b23246..c31cb6af1cd07f8e9dd5ad715d30f93abd53a927 100644 (file)
@@ -43,7 +43,7 @@
  */
 
 #define MAX_SLOT_NUM 10
-static unsigned char irq_map[][5] __initdata = {
+static unsigned char irq_map[][5] = {
        [3] = {0, MARKEINS_PCI_IRQ_INTB, MARKEINS_PCI_IRQ_INTC,
               MARKEINS_PCI_IRQ_INTD, 0,},
        [4] = {0, MARKEINS_PCI_IRQ_INTA, 0, 0, 0,},
@@ -85,7 +85,7 @@ static void emma2rh_pci_host_fixup(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_EMMA2RH,
                         emma2rh_pci_host_fixup);
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        return irq_map[slot][pin];
 }
index 50da773faedee8fc5850480857ea5e36a78f24c0..b47c2771dc997663a7046dcb6166d1987957c89e 100644 (file)
@@ -19,7 +19,7 @@
 /* South bridge slot number is set by the pci probe process */
 static u8 sb_slot = 5;
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        int irq = 0;
 
index 133685e215ee6a2f02a5a244e04e08464543273d..c6ec18a07e63ab2f88e53a5ff25cde90704f59a5 100644 (file)
@@ -21,7 +21,7 @@
 #define INTB   MACEPCI_SHARED0_IRQ
 #define INTC   MACEPCI_SHARED1_IRQ
 #define INTD   MACEPCI_SHARED2_IRQ
-static char irq_tab_mace[][5] __initdata = {
+static char irq_tab_mace[][5] = {
       /* Dummy INT#A  INT#B  INT#C  INT#D */
        {0,         0,     0,     0,     0}, /* This is placeholder row - never used */
        {0,     SCSI0, SCSI0, SCSI0, SCSI0},
@@ -39,7 +39,7 @@ static char irq_tab_mace[][5] __initdata = {
  * irqs.  I suppose a device without a pin A will thank us for doing it
  * right if there exists such a broken piece of crap.
  */
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        return irq_tab_mace[slot][pin];
 }
index 0f1069527cbae435775c39d7acbaeb9369561d87..d3102eeea898983c655d7b8e6d7f923cdc56395a 100644 (file)
@@ -31,7 +31,7 @@
 #include <asm/txx9/pci.h>
 #include <asm/txx9/jmr3927.h>
 
-int __init jmr3927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int jmr3927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        unsigned char irq = pin;
 
index 2b5427d3f35c255bc566978b02ff1ac8e057f551..81530a13b34990a38d159ae248aa0a2d453bf332 100644 (file)
@@ -23,7 +23,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
        return 0;
 }
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        return of_irq_parse_and_map_pci(dev, slot, pin);
 }
index 95ab9a1bd0107384a3faa8e45c9f2872f7c561df..20cdfdc0893875a7556cfbef91db0ab18b8099d2 100644 (file)
@@ -30,7 +30,7 @@
 #define PCID           7
 
 /* all the pci device has the PCIA pin, check the datasheet. */
-static char irq_tab[][5] __initdata = {
+static char irq_tab[][5] = {
        /*      INTA    INTB    INTC    INTD */
        {0, 0, 0, 0, 0},        /*  11: Unused */
        {0, 0, 0, 0, 0},        /*  12: Unused */
@@ -51,7 +51,7 @@ static char irq_tab[][5] __initdata = {
        {0, 0, 0, 0, 0},        /*  27: Unused */
 };
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        int virq;
 
index 2b6d5e196f9990255eb5d35de3e6580a69f6003f..8a741c2c6685ded75b18975613f3e0a8b40867a1 100644 (file)
@@ -32,7 +32,7 @@ static void print_fixup_info(const struct pci_dev *pdev)
                        pdev->vendor, pdev->device, pdev->irq);
 }
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        print_fixup_info(dev);
        return dev->irq;
index 40e920c653cc31762bcc10e4e6a235a4c53b78fa..3ec85331795ef561ce1bc624ae55cd009fc6a844 100644 (file)
@@ -12,7 +12,7 @@
 static char pci_irq[5] = {
 };
 
-static char irq_tab[][5] __initdata = {
+static char irq_tab[][5] = {
        /*      INTA    INTB    INTC    INTD */
        {0,     0,      0,      0,      0 },    /*  0: GT64120 PCI bridge */
        {0,     0,      0,      0,      0 },    /*  1: Unused */
@@ -38,7 +38,7 @@ static char irq_tab[][5] __initdata = {
        {0,     PCID,   PCIA,   PCIB,   PCIC }  /* 21: PCI Slot 4 */
 };
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        int virq;
        virq = irq_tab[slot][pin];
index 8e4f8288eca2e2fcfc17bd615a26a5e0a6a152de..66eaf456bc89c31031abc74762874d09131a9ed0 100644 (file)
 
 #include <asm/vr41xx/mpc30x.h>
 
-static const int internal_func_irqs[] __initconst = {
+static const int internal_func_irqs[] = {
        VRC4173_CASCADE_IRQ,
        VRC4173_AC97_IRQ,
        VRC4173_USB_IRQ,
 };
 
-static const int irq_tab_mpc30x[] __initconst = {
+static const int irq_tab_mpc30x[] = {
  [12] = VRC4173_PCMCIA1_IRQ,
  [13] = VRC4173_PCMCIA2_IRQ,
  [29] = MQ200_IRQ,
 };
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        if (slot == 30)
                return internal_func_irqs[PCI_FUNC(dev->devfn)];
index fab405c21c2f76f856af004fab4e88e68f81cef2..4ad2ef02087bc0cf8bec3b19835ce88aec73b1f7 100644 (file)
@@ -47,7 +47,7 @@
 
 #if defined(CONFIG_PMC_MSP7120_GW)
 /* Garibaldi Board IRQ wiring to PCI slots */
-static char irq_tab[][5] __initdata = {
+static char irq_tab[][5] = {
        /* INTA    INTB    INTC    INTD */
        {0,     0,      0,      0,      0 },    /*    (AD[0]): Unused */
        {0,     0,      0,      0,      0 },    /*    (AD[1]): Unused */
@@ -86,7 +86,7 @@ static char irq_tab[][5] __initdata = {
 #elif defined(CONFIG_PMC_MSP7120_EVAL)
 
 /* MSP7120 Eval Board IRQ wiring to PCI slots */
-static char irq_tab[][5] __initdata = {
+static char irq_tab[][5] = {
        /* INTA    INTB    INTC    INTD */
        {0,     0,      0,      0,      0 },    /*    (AD[0]): Unused */
        {0,     0,      0,      0,      0 },    /*    (AD[1]): Unused */
@@ -125,7 +125,7 @@ static char irq_tab[][5] __initdata = {
 #else
 
 /* Unknown board -- don't assign any IRQs */
-static char irq_tab[][5] __initdata = {
+static char irq_tab[][5] = {
        /* INTA    INTB    INTC    INTD */
        {0,     0,      0,      0,      0 },    /*    (AD[0]): Unused */
        {0,     0,      0,      0,      0 },    /*    (AD[1]): Unused */
@@ -202,7 +202,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
  *  RETURNS:    IRQ number
  *
  ****************************************************************************/
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 #if !defined(CONFIG_PMC_MSP7120_GW) && !defined(CONFIG_PMC_MSP7120_EVAL)
        printk(KERN_WARNING "PCI: unknown board, no PCI IRQs assigned.\n");
index 321db265829c0c0e88cd83790edc028d2bec77d0..d6aaed1d6be9ed3f10d9843b068661b3dcc2e18d 100644 (file)
@@ -36,7 +36,7 @@
 #include <asm/txx9/pci.h>
 #include <asm/txx9/rbtx4927.h>
 
-int __init rbtx4927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int rbtx4927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        unsigned char irq = pin;
 
index a80579af609bc7155f86836a3e4f0e8ed457e7d3..ff22a22db73ee618a30cb7af4e906ce060da7d92 100644 (file)
@@ -13,7 +13,7 @@
 #include <asm/txx9/pci.h>
 #include <asm/txx9/rbtx4938.h>
 
-int __init rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        int irq = tx4938_pcic1_map_irq(dev, slot);
 
index f67ebeeb42000bb5d4d73961d6bcc4d632375298..adb9a58641e8a1891e409b078e256d846ffa5d14 100644 (file)
@@ -40,7 +40,7 @@
  * seem to be a documentation error.  At least on my RM200C the Cirrus
  * Logic CL-GD5434 VGA is device 3.
  */
-static char irq_tab_rm200[8][5] __initdata = {
+static char irq_tab_rm200[8][5] = {
        /*       INTA  INTB  INTC  INTD */
        {     0,    0,    0,    0,    0 },      /* EISA bridge */
        {  SCSI, SCSI, SCSI, SCSI, SCSI },      /* SCSI */
@@ -57,7 +57,7 @@ static char irq_tab_rm200[8][5] __initdata = {
  *
  * The VGA card is optional for RM300 systems.
  */
-static char irq_tab_rm300d[8][5] __initdata = {
+static char irq_tab_rm300d[8][5] = {
        /*       INTA  INTB  INTC  INTD */
        {     0,    0,    0,    0,    0 },      /* EISA bridge */
        {  SCSI, SCSI, SCSI, SCSI, SCSI },      /* SCSI */
@@ -69,7 +69,7 @@ static char irq_tab_rm300d[8][5] __initdata = {
        {     0, INTD, INTA, INTB, INTC },      /* Slot 4 */
 };
 
-static char irq_tab_rm300e[5][5] __initdata = {
+static char irq_tab_rm300e[5][5] = {
        /*       INTA  INTB  INTC  INTD */
        {     0,    0,    0,    0,    0 },      /* HOST bridge */
        {  SCSI, SCSI, SCSI, SCSI, SCSI },      /* SCSI */
@@ -96,7 +96,7 @@ static char irq_tab_rm300e[5][5] __initdata = {
 #define INTC   PCIT_IRQ_INTC
 #define INTD   PCIT_IRQ_INTD
 
-static char irq_tab_pcit[13][5] __initdata = {
+static char irq_tab_pcit[13][5] = {
        /*       INTA  INTB  INTC  INTD */
        {     0,     0,     0,     0,     0 },  /* HOST bridge */
        { SCSI0, SCSI0, SCSI0, SCSI0, SCSI0 },  /* SCSI */
@@ -113,7 +113,7 @@ static char irq_tab_pcit[13][5] __initdata = {
        {     0,  INTA,  INTB,  INTC,  INTD },  /* Slot 5 */
 };
 
-static char irq_tab_pcit_cplus[13][5] __initdata = {
+static char irq_tab_pcit_cplus[13][5] = {
        /*       INTA  INTB  INTC  INTD */
        {     0,     0,     0,     0,     0 },  /* HOST bridge */
        {     0,  INTB,  INTC,  INTD,  INTA },  /* PCI Slot 9 */
@@ -130,7 +130,7 @@ static inline int is_rm300_revd(void)
        return (csmsr & 0xa0) == 0x20;
 }
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        switch (sni_brd_type) {
        case SNI_BRD_PCI_TOWER_CPLUS:
index d0b0083fbd278a529209d41c7259e645d2c37f6c..cc581535f25772daf542445e1e4e59318e94a56f 100644 (file)
@@ -23,7 +23,7 @@
 
 #include <asm/vr41xx/tb0219.h>
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        int irq = -1;
 
index 4196ccf3ea3da5e6783de7acd913f68d2fc218bc..b827b5cad5fd80eb8e4fde0a0e66bfbed524751c 100644 (file)
@@ -23,7 +23,7 @@
 #include <asm/vr41xx/giu.h>
 #include <asm/vr41xx/tb0226.h>
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        int irq = -1;
 
index 8c5039ed75d74c40df1b739ad37edf177dfad5bf..98f26285f2e3a904c7bff305d5a1cc9d65389f26 100644 (file)
@@ -22,7 +22,7 @@
 
 #include <asm/vr41xx/tb0287.h>
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        unsigned char bus;
        int irq = -1;
index e99ca7702d8ad81660ab011ae4c990c0fb954f96..f15ec98de2de5b5569f051705b74985693406ea5 100644 (file)
@@ -522,7 +522,7 @@ static int __init alchemy_pci_init(void)
 arch_initcall(alchemy_pci_init);
 
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        struct alchemy_pci_context *ctx = dev->sysdata;
        if (ctx && ctx->board_map_irq)
index 76f16eaed0ad3e9851d854d940e871c8c5f69aa9..230d7dd273e21af0668062c648ee2b6962383de1 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/bcma/bcma.h>
 #include <bcm47xx.h>
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        return 0;
 }
index 40d2797d2bc43b255edd050cf17d270cdbc4325d..47f4ee6bbb3bf9bb9874d6d26510d7e1f13cf94e 100644 (file)
@@ -61,7 +61,7 @@ arch_initcall(lasat_pci_setup);
 #define LASAT_IRQ_PCIC  (LASAT_IRQ_BASE + 7)
 #define LASAT_IRQ_PCID  (LASAT_IRQ_BASE + 8)
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        switch (slot) {
        case 1:
index 4e633c1e7ff3e7834b93195ee2f1a829fbed3971..90fba9bf98da7104ee5ef74bd0f259653d279aa3 100644 (file)
@@ -361,7 +361,7 @@ static int mt7620_pci_probe(struct platform_device *pdev)
        return 0;
 }
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        u16 cmd;
        u32 val;
index 9ee01936862ee1dfe11d8a21754bb16572230aac..3e92a06fa77288c2705e8435e9352b6bf8c5e368 100644 (file)
@@ -59,8 +59,7 @@ union octeon_pci_address {
        } s;
 };
 
-int __initconst (*octeon_pcibios_map_irq)(const struct pci_dev *dev,
-                                        u8 slot, u8 pin);
+int (*octeon_pcibios_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
 enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
 
 /**
@@ -74,7 +73,7 @@ enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
  *              as it goes through each bridge.
  * Returns Interrupt number for the device
  */
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        if (octeon_pcibios_map_irq)
                return octeon_pcibios_map_irq(dev, slot, pin);
index d6360fe73d058c5733274fb1b163c393a3f0e14c..711cdccdf65ba7df9647f72bd11952432086e33b 100644 (file)
@@ -181,7 +181,7 @@ static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
        spin_unlock_irqrestore(&rt2880_pci_lock, flags);
 }
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        u16 cmd;
        int irq = -1;
index 04f8ea953297ff96dfef3fedebc031075c31251d..958899ffe99c14b3e149a88e303fb4cd3d497d81 100644 (file)
@@ -564,7 +564,7 @@ err_put_intc_node:
        return err;
 }
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        return of_irq_parse_and_map_pci(dev, slot, pin);
 }
index 000c0e1f9ef869d884d8c9a0cbb286ba87bae97b..a6418460e3c4447e62028b63655e29233a166c42 100644 (file)
@@ -112,7 +112,7 @@ int __init tx4938_pciclk66_setup(void)
        return pciclk;
 }
 
-int __init tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
+int tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
 {
        if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4938_pcic1ptr) {
                switch (slot) {
index 9d6acc00f3489ddbf4558aa1d67568899e7eea9f..09a65f7dbe7ca001d55780d0bf7c2008a9320cdf 100644 (file)
@@ -48,7 +48,7 @@ void __init tx4939_report_pci1clk(void)
                ((pciclk + 50000) / 100000) % 10);
 }
 
-int __init tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
+int tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
 {
        if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4939_pcic1ptr) {
                switch (slot) {
@@ -68,7 +68,7 @@ int __init tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
        return -1;
 }
 
-int __init tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        int irq = tx4939_pcic1_map_irq(dev, slot);
 
index 7babf01600cb0fd9cbb97d4bb40f079693b2e8ed..9eff9137f78e52dc60bd4c031e42f1db4001ab6e 100644 (file)
@@ -205,7 +205,7 @@ int xlp_socdev_to_node(const struct pci_dev *lnkdev)
                return PCI_SLOT(lnkdev->devfn) / 8;
 }
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        struct pci_dev *lnkdev;
        int lnkfunc, node;
index 26d2dabef28152541b709b11d2257cb563fe0349..2a1c81a129ba31ae9a6757ef819f2cab3fe2f93a 100644 (file)
@@ -315,7 +315,7 @@ static void xls_pcie_ack_b(struct irq_data *d)
        }
 }
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        return get_irq_vector(dev);
 }
index ad3584dbc9d72bc1d0b845a1089ce41cec494737..fd2887415bc8935acf86c79a864b78dbd5878a03 100644 (file)
@@ -1464,8 +1464,7 @@ static int cvmx_pcie_rc_initialize(int pcie_port)
  *              as it goes through each bridge.
  * Returns Interrupt number for the device
  */
-int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev,
-                                      u8 slot, u8 pin)
+int octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        /*
         * The EBH5600 board with the PCI to PCIe bridge mistakenly
index ffa0f7101a9773ec8e24813f37e3c270d912b5e2..2b08242ade628989e96aa3f24450b406ecdc7e7a 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 
+#include <asm/setup.h>
+
 #ifdef CONFIG_MIPS_MT_SMP
 #define MIPS_CPU_IPI_RESCHED_IRQ 0     /* SW int 0 for resched */
 #define MIPS_CPU_IPI_CALL_IRQ 1                /* SW int 1 for call */
index 5c4f936870391195da1d8de70d789650fc11acb5..654d652d7fa13adeac15390629d155597ee15296 100755 (executable)
@@ -30,8 +30,6 @@ cfg="$4"
 boards_origin="$5"
 shift 5
 
-cd "${srctree}"
-
 # Only print Skipping... lines if the user explicitly specified BOARDS=. In the
 # general case it only serves to obscure the useful output about what actually
 # was included.
@@ -48,7 +46,7 @@ environment*)
 esac
 
 for board in $@; do
-       board_cfg="arch/mips/configs/generic/board-${board}.config"
+       board_cfg="${srctree}/arch/mips/configs/generic/board-${board}.config"
        if [ ! -f "${board_cfg}" ]; then
                echo "WARNING: Board config '${board_cfg}' not found"
                continue
@@ -84,7 +82,7 @@ for board in $@; do
        done || continue
 
        # Merge this board config fragment into our final config file
-       ./scripts/kconfig/merge_config.sh \
+       ${srctree}/scripts/kconfig/merge_config.sh \
                -m -O ${objtree} ${cfg} ${board_cfg} \
                | grep -Ev '^(#|Using)'
 done
index 0bd2a1e1ff9ab8e0f0d8c56e31872ac08404c7b6..fb998726bd5d58b31e520d8ac4f8290835db473a 100644 (file)
@@ -386,9 +386,10 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
        return 0;
 }
 
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int (*txx9_pci_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       return txx9_board_vec->pci_map_irq(dev, slot, pin);
+       return txx9_pci_map_irq(dev, slot, pin);
 }
 
 char * (*txx9_board_pcibios_setup)(char *str) __initdata;
@@ -424,5 +425,8 @@ char *__init txx9_pcibios_setup(char *str)
                        txx9_pci_err_action = TXX9_PCI_ERR_IGNORE;
                return NULL;
        }
+
+       txx9_pci_map_irq = txx9_board_vec->pci_map_irq;
+
        return str;
 }
index 89e8027e07fb327d39de0170c0da61af177c7160..7c475fd99c468396122e77c8f1dbe738ae368bc0 100644 (file)
@@ -59,10 +59,6 @@ void arch_cpu_idle(void)
 }
 #endif
 
-void release_segments(struct mm_struct *mm)
-{
-}
-
 void machine_restart(char *cmd)
 {
 #ifdef CONFIG_KERNEL_DEBUGGER
@@ -112,14 +108,6 @@ void release_thread(struct task_struct *dead_task)
 {
 }
 
-/*
- * we do not have to muck with descriptors here, that is
- * done in switch_mm() as needed.
- */
-void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
-{
-}
-
 /*
  * this gets called so that we can store lazy state into memory and copy the
  * current task into the new thread.
index ba7b7ddc38442dab7e9b0679bba03478bf20096c..a57dedbfc7b77802e09104ee115702a9fe6fee13 100644 (file)
@@ -257,6 +257,18 @@ config PARISC_PAGE_SIZE_64KB
 
 endchoice
 
+config PARISC_SELF_EXTRACT
+       bool "Build kernel as self-extracting executable"
+       default y
+       help
+         Say Y if you want to build the parisc kernel as a kind of
+         self-extracting executable.
+
+         If you say N here, the kernel will be compressed with gzip
+         which can be loaded by the palo bootloader directly too.
+
+         If you don't know what to do here, say Y.
+
 config SMP
        bool "Symmetric multi-processing support"
        ---help---
index 58fae5d2449daae762fbe09f54a52561e29832f9..01946ebaff72814435cf6128665bd778eb217fb1 100644 (file)
@@ -129,8 +129,13 @@ Image: vmlinux
 bzImage: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
+ifdef CONFIG_PARISC_SELF_EXTRACT
 vmlinuz: bzImage
        $(OBJCOPY) $(boot)/bzImage $@
+else
+vmlinuz: vmlinux
+       @gzip -cf -9 $< > $@
+endif
 
 install:
        $(CONFIG_SHELL) $(src)/arch/parisc/install.sh \
index 5450a11c9d108cf1bfeefef0af4f943e29551a41..7d7e594bda36f0403aef33577c9e75c1817011d4 100644 (file)
@@ -15,7 +15,7 @@ targets += misc.o piggy.o sizes.h head.o real2.o firmware.o
 KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
-KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs
+KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os
 ifndef CONFIG_64BIT
 KBUILD_CFLAGS += -mfast-indirect-calls
 endif
index 13a4bf9ac4dae5eb626c5c9d01c7e6448a232df0..9345b44b86f036572e33721eb80e9bbbe4493aa4 100644 (file)
@@ -24,7 +24,8 @@
 /* Symbols defined by linker scripts */
 extern char input_data[];
 extern int input_len;
-extern __le32 output_len;      /* at unaligned address, little-endian */
+/* output_len is inserted by the linker possibly at an unaligned address */
+extern __le32 output_len __aligned(1);
 extern char _text, _end;
 extern char _bss, _ebss;
 extern char _startcode_end;
index 26b4455baa8370560bb572d7cb550cdbd17f1101..510341f62d979e0ced75108c4b771380e0866c57 100644 (file)
@@ -280,6 +280,7 @@ void setup_pdc(void);               /* in inventory.c */
 /* wrapper-functions from pdc.c */
 
 int pdc_add_valid(unsigned long address);
+int pdc_instr(unsigned int *instr);
 int pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len);
 int pdc_chassis_disp(unsigned long disp);
 int pdc_chassis_warn(unsigned long *warn);
index a5dc9066c6d8d50cb35f2e4cf88509fa4621b84b..ad9c9c3b4136da6633e5d0c72fb117465376993e 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
+extern int init_per_cpu(int cpuid);
 
 #if defined(CONFIG_SMP)
 
index ab80e5c6f6517c4f165ab9ce8c3863b2d94e7c05..6d471c00c71af12f6a48c4deebb5d260071d5dd3 100644 (file)
@@ -232,6 +232,26 @@ int pdc_add_valid(unsigned long address)
 }
 EXPORT_SYMBOL(pdc_add_valid);
 
+/**
+ * pdc_instr - Get instruction that invokes PDCE_CHECK in HPMC handler.
+ * @instr: Pointer to variable which will get instruction opcode.
+ *
+ * The return value is PDC_OK (0) in case call succeeded.
+ */
+int __init pdc_instr(unsigned int *instr)
+{
+       int retval;
+       unsigned long flags;
+
+       spin_lock_irqsave(&pdc_lock, flags);
+       retval = mem_pdc_call(PDC_INSTR, 0UL, __pa(pdc_result));
+       convert_to_wide(pdc_result);
+       *instr = pdc_result[0];
+       spin_unlock_irqrestore(&pdc_lock, flags);
+
+       return retval;
+}
+
 /**
  * pdc_chassis_info - Return chassis information.
  * @result: The return buffer.
index c6d6272a934f03823b655cf07b38e7bbc01ca12e..7baa2265d43927fd7e5a24269e627486c60c6b35 100644 (file)
@@ -35,12 +35,12 @@ EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(__xchg8);
 EXPORT_SYMBOL(__xchg32);
 EXPORT_SYMBOL(__cmpxchg_u32);
+EXPORT_SYMBOL(__cmpxchg_u64);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(__atomic_hash);
 #endif
 #ifdef CONFIG_64BIT
 EXPORT_SYMBOL(__xchg64);
-EXPORT_SYMBOL(__cmpxchg_u64);
 #endif
 
 #include <linux/uaccess.h>
index 05730a83895c7f5700760e880a460257e8c372ba..00aed082969b126719a988bbe7c63a210c10ed90 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/memblock.h>
 #include <linux/seq_file.h>
 #include <linux/kthread.h>
+#include <linux/initrd.h>
 
 #include <asm/pdc.h>
 #include <asm/pdcpat.h>
@@ -216,8 +217,16 @@ void __init pdc_pdt_init(void)
        }
 
        for (i = 0; i < pdt_status.pdt_entries; i++) {
+               unsigned long addr;
+
                report_mem_err(pdt_entry[i]);
 
+               addr = pdt_entry[i] & PDT_ADDR_PHYS_MASK;
+               if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
+                       addr >= initrd_start && addr < initrd_end)
+                       pr_crit("CRITICAL: initrd possibly broken "
+                               "due to bad memory!\n");
+
                /* mark memory page bad */
                memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE);
        }
index a45a67d526f8ca8001fd1d06625b3b233d5a3835..30f92391a93ef6d5a90970b81921a2133d5e2eb0 100644 (file)
@@ -146,7 +146,7 @@ void machine_power_off(void)
 
        /* prevent soft lockup/stalled CPU messages for endless loop. */
        rcu_sysrq_start();
-       lockup_detector_suspend();
+       lockup_detector_soft_poweroff();
        for (;;);
 }
 
index a778bd3c107c507e5a76d99e06813876aad64bd7..e120d63c1b285ad545c2281de0abbbcc2dde6e08 100644 (file)
@@ -317,7 +317,7 @@ void __init collect_boot_cpu_data(void)
  *
  * o Enable CPU profiling hooks.
  */
-int init_per_cpu(int cpunum)
+int __init init_per_cpu(int cpunum)
 {
        int ret;
        struct pdc_coproc_cfg coproc_cfg;
index dee6f9d6a153ce461ec816e6caeb5a38dd3bb9f2..f7d0c3b33d70a53b5350729f1e3d59b2e803749b 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
+#include <linux/start_kernel.h>
 
 #include <asm/processor.h>
 #include <asm/sections.h>
@@ -48,6 +49,7 @@
 #include <asm/io.h>
 #include <asm/setup.h>
 #include <asm/unwind.h>
+#include <asm/smp.h>
 
 static char __initdata command_line[COMMAND_LINE_SIZE];
 
@@ -115,7 +117,6 @@ void __init dma_ops_init(void)
 }
 #endif
 
-extern int init_per_cpu(int cpuid);
 extern void collect_boot_cpu_data(void);
 
 void __init setup_arch(char **cmdline_p)
@@ -398,9 +399,8 @@ static int __init parisc_init(void)
 }
 arch_initcall(parisc_init);
 
-void start_parisc(void)
+void __init start_parisc(void)
 {
-       extern void start_kernel(void);
        extern void early_trap_init(void);
 
        int ret, cpunum;
index 63365106ea1907589bbdee1688a1a9be54b1a74d..30c28ab145409b5966f7237ec2b6ca07121adc10 100644 (file)
@@ -255,12 +255,11 @@ void arch_send_call_function_single_ipi(int cpu)
 static void __init
 smp_cpu_init(int cpunum)
 {
-       extern int init_per_cpu(int);  /* arch/parisc/kernel/processor.c */
        extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */
        extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */
 
        /* Set modes and Enable floating point coprocessor */
-       (void) init_per_cpu(cpunum);
+       init_per_cpu(cpunum);
 
        disable_sr_hashing();
 
index 23de307c3052aa9ecac21fd6c294657fb53de447..41e60a9c7db23b8384b18bf8ddd45f188ef4a0c3 100644 (file)
@@ -742,7 +742,7 @@ lws_compare_and_swap_2:
 10:    ldd     0(%r25), %r25
 11:    ldd     0(%r24), %r24
 #else
-       /* Load new value into r22/r23 - high/low */
+       /* Load old value into r22/r23 - high/low */
 10:    ldw     0(%r25), %r22
 11:    ldw     4(%r25), %r23
        /* Load new value into fr4 for atomic store later */
@@ -834,11 +834,11 @@ cas2_action:
        copy    %r0, %r28
 #else
        /* Compare first word */
-19:    ldw,ma  0(%r26), %r29
+19:    ldw     0(%r26), %r29
        sub,=   %r29, %r22, %r0
        b,n     cas2_end
        /* Compare second word */
-20:    ldw,ma  4(%r26), %r29
+20:    ldw     4(%r26), %r29
        sub,=   %r29, %r23, %r0
        b,n     cas2_end
        /* Perform the store */
index 2d956aa0a38abbc3829757bab4749dd6a0037490..8c0105a49839cf018a80108f76dadccb5e793ee1 100644 (file)
@@ -253,7 +253,10 @@ static int __init init_cr16_clocksource(void)
                cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
 
                for_each_online_cpu(cpu) {
-                       if (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)
+                       if (cpu == 0)
+                               continue;
+                       if ((cpu0_loc != 0) &&
+                           (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
                                continue;
 
                        clocksource_cr16.name = "cr16_unstable";
index 991654c88eec8635056bef3ae9f536a6e8ff8862..230333157fe384c4171749e2b4eab43e3070a89d 100644 (file)
@@ -817,7 +817,7 @@ void __init initialize_ivt(const void *iva)
        u32 check = 0;
        u32 *ivap;
        u32 *hpmcp;
-       u32 length;
+       u32 length, instr;
 
        if (strcmp((const char *)iva, "cows can fly"))
                panic("IVT invalid");
@@ -827,6 +827,14 @@ void __init initialize_ivt(const void *iva)
        for (i = 0; i < 8; i++)
            *ivap++ = 0;
 
+       /*
+        * Use PDC_INSTR firmware function to get instruction that invokes
+        * PDCE_CHECK in HPMC handler.  See programming note at page 1-31 of
+        * the PA 1.1 Firmware Architecture document.
+        */
+       if (pdc_instr(&instr) == PDC_OK)
+               ivap[0] = instr;
+
        /* Compute Checksum for HPMC handler */
        length = os_hpmc_size;
        ivap[7] = length;
index 48dc7d4d20bba5e6fdb8669ae6d03b3ed3cb27c4..caab39dfa95d606564af62d3fd565eef6fc41757 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/kallsyms.h>
 #include <linux/sort.h>
+#include <linux/sched.h>
 
 #include <linux/uaccess.h>
 #include <asm/assembly.h>
@@ -279,6 +280,17 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
 
                        info->prev_sp = sp - 64;
                        info->prev_ip = 0;
+
+                       /* The stack is at the end inside the thread_union
+                        * struct. If we reach data, we have reached the
+                        * beginning of the stack and should stop unwinding. */
+                       if (info->prev_sp >= (unsigned long) task_thread_info(info->t) &&
+                           info->prev_sp < ((unsigned long) task_thread_info(info->t)
+                                               + THREAD_SZ_ALGN)) {
+                               info->prev_sp = 0;
+                               break;
+                       }
+
                        if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET))) 
                                break;
                        info->prev_ip = tmp;
index 5b101f6a5607dc1ef9e229b6f2421a78d35355ce..e247edbca68ecd2f56500e467776c9d2fa1f1c11 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/extable.h>
 #include <linux/uaccess.h>
+#include <linux/hugetlb.h>
 
 #include <asm/traps.h>
 
@@ -261,7 +262,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
        struct task_struct *tsk;
        struct mm_struct *mm;
        unsigned long acc_type;
-       int fault;
+       int fault = 0;
        unsigned int flags;
 
        if (faulthandler_disabled())
@@ -315,7 +316,8 @@ good_area:
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
-               else if (fault & VM_FAULT_SIGBUS)
+               else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+                                 VM_FAULT_HWPOISON_LARGE))
                        goto bad_area;
                BUG();
        }
@@ -352,8 +354,7 @@ bad_area:
 
        if (user_mode(regs)) {
                struct siginfo si;
-
-               show_signal_msg(regs, code, address, tsk, vma);
+               unsigned int lsb = 0;
 
                switch (code) {
                case 15:        /* Data TLB miss fault/Data page fault */
@@ -386,6 +387,30 @@ bad_area:
                        si.si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR;
                        break;
                }
+
+#ifdef CONFIG_MEMORY_FAILURE
+               if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+                       printk(KERN_ERR
+       "MCE: Killing %s:%d due to hardware memory corruption fault at %08lx\n",
+                       tsk->comm, tsk->pid, address);
+                       si.si_signo = SIGBUS;
+                       si.si_code = BUS_MCEERR_AR;
+               }
+#endif
+
+               /*
+                * Either small page or large page may be poisoned.
+                * In other words, VM_FAULT_HWPOISON_LARGE and
+                * VM_FAULT_HWPOISON are mutually exclusive.
+                */
+               if (fault & VM_FAULT_HWPOISON_LARGE)
+                       lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
+               else if (fault & VM_FAULT_HWPOISON)
+                       lsb = PAGE_SHIFT;
+               else
+                       show_signal_msg(regs, code, address, tsk, vma);
+               si.si_addr_lsb = lsb;
+
                si.si_errno = 0;
                si.si_addr = (void __user *) address;
                force_sig_info(si.si_signo, &si, current);
index e084fa548d73d217b72ca96dbe0783dbf1b2105d..063817fee61cfeea164555a99139de3ffda3aedf 100644 (file)
@@ -138,10 +138,11 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_SOUND=m
 CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
+CONFIG_SND_OSSEMUL=y
 CONFIG_SND_MIXER_OSS=m
 CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQUENCER_OSS=m
 CONFIG_SND_POWERMAC=m
 CONFIG_SND_AOA=m
 CONFIG_SND_AOA_FABRIC_LAYOUT=m
index 79bbc8238b325aa5a637b7bc79913ebbc835b3be..805b0f87653c1939f2722929689db2ebb128930f 100644 (file)
@@ -64,11 +64,12 @@ CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_CLUT224 is not set
 CONFIG_SOUND=y
 CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
+CONFIG_SND_OSSEMUL=y
 CONFIG_SND_MIXER_OSS=y
 CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
 # CONFIG_SND_VERBOSE_PROCFS is not set
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_SEQUENCER_OSS=y
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=y
index 8cf4a46bef86b00c0d2f5562d3f30262be4f5ecd..6daa56f8895cb22bcd37011328c68932abe50074 100644 (file)
@@ -115,9 +115,10 @@ CONFIG_VGACON_SOFT_SCROLLBACK=y
 CONFIG_LOGO=y
 CONFIG_SOUND=y
 CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
+CONFIG_SND_OSSEMUL=y
 CONFIG_SND_MIXER_OSS=y
 CONFIG_SND_PCM_OSS=y
+CONFIG_SND_SEQUENCER=y
 CONFIG_SND_SEQUENCER_OSS=y
 CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_USB_USX2Y=y
index 8e798b1fbc9900eba3830878e77ea809a8b56b90..1aab9a62a681d4856aff8f7afaa0f9435832b4d9 100644 (file)
@@ -227,11 +227,12 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_SOUND=m
 CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
 CONFIG_SND_MIXER_OSS=m
 CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_SEQUENCER_OSS=m
 CONFIG_SND_DUMMY=m
 CONFIG_SND_POWERMAC=m
 CONFIG_SND_AOA=m
index 791db775a09cac4e695fd199f9f9ed060337ea5c..6ddca80c52c3b4c1fef2e1442a5c1d2a31f24e2e 100644 (file)
@@ -222,11 +222,12 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_SOUND=m
 CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
 CONFIG_SND_MIXER_OSS=m
 CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_SEQUENCER_OSS=m
 CONFIG_SND_POWERMAC=m
 CONFIG_SND_AOA=m
 CONFIG_SND_AOA_FABRIC_LAYOUT=m
index d0fe0f8f77c236f03470abb7fc039861e6587a12..41d85cb3c9a2fb65fae203654e0759dde8099080 100644 (file)
@@ -141,11 +141,12 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_SOUND=m
 CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
 CONFIG_SND_MIXER_OSS=m
 CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_SEQUENCER_OSS=m
 CONFIG_HID_DRAGONRISE=y
 CONFIG_HID_GYRATION=y
 CONFIG_HID_TWINHAN=y
index ae6eba482d75da632b90be24bb83a89cb7140e78..da0e8d535eb8889efbd937a1d2ba62aac6236400 100644 (file)
@@ -789,17 +789,18 @@ CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_VGA16 is not set
 CONFIG_SOUND=m
 CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
 CONFIG_SND_MIXER_OSS=m
 CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
 CONFIG_SND_DYNAMIC_MINORS=y
 # CONFIG_SND_SUPPORT_OLD_API is not set
 CONFIG_SND_VERBOSE_PRINTK=y
 CONFIG_SND_DEBUG=y
 CONFIG_SND_DEBUG_VERBOSE=y
 CONFIG_SND_PCM_XRUN_DEBUG=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_SEQUENCER_OSS=m
 CONFIG_SND_DUMMY=m
 CONFIG_SND_VIRMIDI=m
 CONFIG_SND_MTPAV=m
index aef41b17a8bc02d0445bd8f145904f0b9c9454b0..9c7400a19e9df2aca8b005bb658d802c48eb88af 100644 (file)
@@ -79,11 +79,12 @@ CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_SOUND=y
 CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
+CONFIG_SND_OSSEMUL=y
 CONFIG_SND_MIXER_OSS=y
 CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
 # CONFIG_SND_VERBOSE_PROCFS is not set
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_SEQUENCER_OSS=y
 CONFIG_HID_APPLE=m
 CONFIG_HID_WACOM=m
 CONFIG_MMC=y
index 1df770e8cbe03194f31576cff264fdedbb1aa517..7275fed271afa601711b2f43b0da2c2377c68eff 100644 (file)
@@ -102,10 +102,10 @@ static void cpufeatures_flush_tlb(void)
        case PVR_POWER8:
        case PVR_POWER8E:
        case PVR_POWER8NVL:
-               __flush_tlb_power8(POWER8_TLB_SETS);
+               __flush_tlb_power8(TLB_INVAL_SCOPE_GLOBAL);
                break;
        case PVR_POWER9:
-               __flush_tlb_power9(POWER9_TLB_SETS_HASH);
+               __flush_tlb_power9(TLB_INVAL_SCOPE_GLOBAL);
                break;
        default:
                pr_err("unknown CPU version for boot TLB flush\n");
index 9e816787c0d49fd31bc153debe1235e01281a5fa..116000b4553195f44adabf2894b002fcd5e6d6e1 100644 (file)
@@ -1019,6 +1019,10 @@ int eeh_init(void)
        } else if ((ret = eeh_ops->init()))
                return ret;
 
+       /* Initialize PHB PEs */
+       list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
+               eeh_dev_phb_init_dynamic(hose);
+
        /* Initialize EEH event */
        ret = eeh_event_init();
        if (ret)
index ad04ecd63c207def1a1dbb33172c4acc933a2a39..a34e6912c15e2a027c55b3ef1802d22792cac5f9 100644 (file)
@@ -78,21 +78,3 @@ void eeh_dev_phb_init_dynamic(struct pci_controller *phb)
        /* EEH PE for PHB */
        eeh_phb_pe_create(phb);
 }
-
-/**
- * eeh_dev_phb_init - Create EEH devices for devices included in existing PHBs
- *
- * Scan all the existing PHBs and create EEH devices for their OF
- * nodes and their children OF nodes
- */
-static int __init eeh_dev_phb_init(void)
-{
-       struct pci_controller *phb, *tmp;
-
-       list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
-               eeh_dev_phb_init_dynamic(phb);
-
-       return 0;
-}
-
-core_initcall(eeh_dev_phb_init);
index 48da0f5d2f7fe0a4745bce864795df16d612a89e..b82586c535604158986bc1d3c1abb481ff736dd6 100644 (file)
@@ -734,7 +734,29 @@ EXC_REAL(program_check, 0x700, 0x100)
 EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
 TRAMP_KVM(PACA_EXGEN, 0x700)
 EXC_COMMON_BEGIN(program_check_common)
-       EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+       /*
+        * It's possible to receive a TM Bad Thing type program check with
+        * userspace register values (in particular r1), but with SRR1 reporting
+        * that we came from the kernel. Normally that would confuse the bad
+        * stack logic, and we would report a bad kernel stack pointer. Instead
+        * we switch to the emergency stack if we're taking a TM Bad Thing from
+        * the kernel.
+        */
+       li      r10,MSR_PR              /* Build a mask of MSR_PR ..    */
+       oris    r10,r10,0x200000@h      /* .. and SRR1_PROGTM           */
+       and     r10,r10,r12             /* Mask SRR1 with that.         */
+       srdi    r10,r10,8               /* Shift it so we can compare   */
+       cmpldi  r10,(0x200000 >> 8)     /* .. with an immediate.        */
+       bne 1f                          /* If != go to normal path.     */
+
+       /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack  */
+       andi.   r10,r12,MSR_PR;         /* Set CR0 correctly for label  */
+                                       /* 3 in EXCEPTION_PROLOG_COMMON */
+       mr      r10,r1                  /* Save r1                      */
+       ld      r1,PACAEMERGSP(r13)     /* Use emergency stack          */
+       subi    r1,r1,INT_FRAME_SIZE    /* alloc stack frame            */
+       b 3f                            /* Jump into the macro !!       */
+1:     EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
        bl      save_nvgprs
        RECONCILE_IRQ_STATE(r10, r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
index b76ca198e09c186e2d90e9b3eb83c7765c6c1cdf..72f153c6f3facea2bda06b64e90b24a6fed253d5 100644 (file)
@@ -624,5 +624,18 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
 
 long __machine_check_early_realmode_p9(struct pt_regs *regs)
 {
+       /*
+        * On POWER9 DD2.1 and below, it's possible to get a machine check
+        * caused by a paste instruction where only DSISR bit 25 is set. This
+        * will result in the MCE handler seeing an unknown event and the kernel
+        * crashing. An MCE that occurs like this is spurious, so we don't need
+        * to do anything in terms of servicing it. If there is something that
+        * needs to be serviced, the CPU will raise the MCE again with the
+        * correct DSISR so that it can be serviced properly. So detect this
+        * case and mark it as handled.
+        */
+       if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
+               return 1;
+
        return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
 }
index 6f8273f5e988b2cca6f320299ca6c67445ebca66..91e037ab20a197de398161c057d86bffabc3a68a 100644 (file)
@@ -104,8 +104,10 @@ static unsigned long can_optimize(struct kprobe *p)
         * and that can be emulated.
         */
        if (!is_conditional_branch(*p->ainsn.insn) &&
-                       analyse_instr(&op, &regs, *p->ainsn.insn))
+                       analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
+               emulate_update_regs(&regs, &op);
                nip = regs.nip;
+       }
 
        return nip;
 }
index 07cd22e354053597d990bf57457164bbb6f02b9c..f52ad5bb710960906b8ae61400688845e2811dd5 100644 (file)
@@ -131,7 +131,7 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
         * in the appropriate thread structures from live.
         */
 
-       if (tsk != current)
+       if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
                return;
 
        if (MSR_TM_SUSPENDED(mfmsr())) {
index 0ac741fae90ea5c1cffc227323fe40cbb5319188..2e3bc16d02b289b5d03906010518bc7005233452 100644 (file)
@@ -904,9 +904,6 @@ void __init setup_arch(char **cmdline_p)
 #endif
 #endif
 
-#ifdef CONFIG_PPC_64K_PAGES
-       init_mm.context.pte_frag = NULL;
-#endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
        mm_iommu_init(&init_mm);
 #endif
index c83c115858c1909218c5897a23f15f0bbd7b10ed..b2c002993d78d340db6ef882d5eeec6692577475 100644 (file)
@@ -452,9 +452,20 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
        if (MSR_TM_RESV(msr))
                return -EINVAL;
 
-       /* pull in MSR TM from user context */
+       /* pull in MSR TS bits from user context */
        regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
 
+       /*
+        * Ensure that TM is enabled in regs->msr before we leave the signal
+        * handler. It could be the case that (a) user disabled the TM bit
+        * through the manipulation of the MSR bits in uc_mcontext or (b) the
+        * TM bit was disabled because a sufficient number of context switches
+        * happened whilst in the signal handler and load_tm overflowed,
+        * disabling the TM bit. In either case we can end up with an illegal
+        * TM state leading to a TM Bad Thing when we return to userspace.
+        */
+       regs->msr |= MSR_TM;
+
        /* pull in MSR LE from user context */
        regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
 
index c98e90b4ea7b1f15a2dd7157300376e370774cf2..b4e2b7165f79b0d45da686ea8fe77da4dca109ca 100644 (file)
@@ -181,34 +181,25 @@ _GLOBAL(ftrace_stub)
         *  - we have no stack frame and can not allocate one
         *  - LR points back to the original caller (in A)
         *  - CTR holds the new NIP in C
-        *  - r0 & r12 are free
-        *
-        * r0 can't be used as the base register for a DS-form load or store, so
-        * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
+        *  - r0, r11 & r12 are free
         */
 livepatch_handler:
        CURRENT_THREAD_INFO(r12, r1)
 
-       /* Save stack pointer into r0 */
-       mr      r0, r1
-
        /* Allocate 3 x 8 bytes */
-       ld      r1, TI_livepatch_sp(r12)
-       addi    r1, r1, 24
-       std     r1, TI_livepatch_sp(r12)
+       ld      r11, TI_livepatch_sp(r12)
+       addi    r11, r11, 24
+       std     r11, TI_livepatch_sp(r12)
 
        /* Save toc & real LR on livepatch stack */
-       std     r2,  -24(r1)
+       std     r2,  -24(r11)
        mflr    r12
-       std     r12, -16(r1)
+       std     r12, -16(r11)
 
        /* Store stack end marker */
        lis     r12, STACK_END_MAGIC@h
        ori     r12, r12, STACK_END_MAGIC@l
-       std     r12, -8(r1)
-
-       /* Restore real stack pointer */
-       mr      r1, r0
+       std     r12, -8(r11)
 
        /* Put ctr in r12 for global entry and branch there */
        mfctr   r12
@@ -216,36 +207,30 @@ livepatch_handler:
 
        /*
         * Now we are returning from the patched function to the original
-        * caller A. We are free to use r0 and r12, and we can use r2 until we
+        * caller A. We are free to use r11, r12 and we can use r2 until we
         * restore it.
         */
 
        CURRENT_THREAD_INFO(r12, r1)
 
-       /* Save stack pointer into r0 */
-       mr      r0, r1
-
-       ld      r1, TI_livepatch_sp(r12)
+       ld      r11, TI_livepatch_sp(r12)
 
        /* Check stack marker hasn't been trashed */
        lis     r2,  STACK_END_MAGIC@h
        ori     r2,  r2, STACK_END_MAGIC@l
-       ld      r12, -8(r1)
+       ld      r12, -8(r11)
 1:     tdne    r12, r2
        EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
 
        /* Restore LR & toc from livepatch stack */
-       ld      r12, -16(r1)
+       ld      r12, -16(r11)
        mtlr    r12
-       ld      r2,  -24(r1)
+       ld      r2,  -24(r11)
 
        /* Pop livepatch stack frame */
-       CURRENT_THREAD_INFO(r12, r0)
-       subi    r1, r1, 24
-       std     r1, TI_livepatch_sp(r12)
-
-       /* Restore real stack pointer */
-       mr      r1, r0
+       CURRENT_THREAD_INFO(r12, r1)
+       subi    r11, r11, 24
+       std     r11, TI_livepatch_sp(r12)
 
        /* Return to original caller of live patched function */
        blr
index ec74e203ee04e5b8e7e307df28c133b6226cd487..13c9dcdcba6922e32d4c00267201f47e9498f805 100644 (file)
@@ -437,6 +437,7 @@ static inline int check_io_access(struct pt_regs *regs)
 int machine_check_e500mc(struct pt_regs *regs)
 {
        unsigned long mcsr = mfspr(SPRN_MCSR);
+       unsigned long pvr = mfspr(SPRN_PVR);
        unsigned long reason = mcsr;
        int recoverable = 1;
 
@@ -478,8 +479,15 @@ int machine_check_e500mc(struct pt_regs *regs)
                 * may still get logged and cause a machine check.  We should
                 * only treat the non-write shadow case as non-recoverable.
                 */
-               if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
-                       recoverable = 0;
+               /* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
+                * is not implemented but L1 data cache always runs in write
+                * shadow mode. Hence on data cache parity errors HW will
+                * automatically invalidate the L1 Data Cache.
+                */
+               if (PVR_VER(pvr) != PVR_VER_E6500) {
+                       if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
+                               recoverable = 0;
+               }
        }
 
        if (reason & MCSR_L2MMU_MHIT) {
index 2f6eadd9408d9490b171ca20f937c1af79096acd..c702a898145250aa4e4da3a88bd32a960ad4fb78 100644 (file)
@@ -310,9 +310,6 @@ static int start_wd_on_cpu(unsigned int cpu)
        if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
                return 0;
 
-       if (watchdog_suspended)
-               return 0;
-
        if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
                return 0;
 
@@ -358,36 +355,39 @@ static void watchdog_calc_timeouts(void)
        wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
 }
 
-void watchdog_nmi_reconfigure(void)
+void watchdog_nmi_stop(void)
 {
        int cpu;
 
-       watchdog_calc_timeouts();
-
        for_each_cpu(cpu, &wd_cpus_enabled)
                stop_wd_on_cpu(cpu);
+}
 
+void watchdog_nmi_start(void)
+{
+       int cpu;
+
+       watchdog_calc_timeouts();
        for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
                start_wd_on_cpu(cpu);
 }
 
 /*
- * This runs after lockup_detector_init() which sets up watchdog_cpumask.
+ * Invoked from core watchdog init.
  */
-static int __init powerpc_watchdog_init(void)
+int __init watchdog_nmi_probe(void)
 {
        int err;
 
-       watchdog_calc_timeouts();
-
-       err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online",
-                               start_wd_on_cpu, stop_wd_on_cpu);
-       if (err < 0)
+       err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+                                       "powerpc/watchdog:online",
+                                       start_wd_on_cpu, stop_wd_on_cpu);
+       if (err < 0) {
                pr_warn("Watchdog could not be initialized");
-
+               return err;
+       }
        return 0;
 }
-arch_initcall(powerpc_watchdog_init);
 
 static void handle_backtrace_ipi(struct pt_regs *regs)
 {
index 8f2da8bba737b066488a5c0b3909a02ede239ba6..4dffa611376d67850ac4ef8730a547fcccf63491 100644 (file)
@@ -478,28 +478,30 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                return ret;
 
        dir = iommu_tce_direction(tce);
+
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
+
        if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
-                       tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
-               return H_PARAMETER;
+                       tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
+               ret = H_PARAMETER;
+               goto unlock_exit;
+       }
 
        entry = ioba >> stt->page_shift;
 
        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
-               if (dir == DMA_NONE) {
+               if (dir == DMA_NONE)
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
                                        stit->tbl, entry);
-               } else {
-                       idx = srcu_read_lock(&vcpu->kvm->srcu);
+               else
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
                                        entry, ua, dir);
-                       srcu_read_unlock(&vcpu->kvm->srcu, idx);
-               }
 
                if (ret == H_SUCCESS)
                        continue;
 
                if (ret == H_TOO_HARD)
-                       return ret;
+                       goto unlock_exit;
 
                WARN_ON_ONCE(1);
                kvmppc_clear_tce(stit->tbl, entry);
@@ -507,7 +509,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 
        kvmppc_tce_put(stt, entry, tce);
 
-       return H_SUCCESS;
+unlock_exit:
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
 
index 17936f82d3c787c38327f41186f634d043d843a9..42639fba89e881c12e0d9ba5af3bfa1139565688 100644 (file)
@@ -989,13 +989,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
        beq     no_xive
        ld      r11, VCPU_XIVE_SAVED_STATE(r4)
        li      r9, TM_QW1_OS
-       stdcix  r11,r9,r10
        eieio
+       stdcix  r11,r9,r10
        lwz     r11, VCPU_XIVE_CAM_WORD(r4)
        li      r9, TM_QW1_OS + TM_WORD2
        stwcix  r11,r9,r10
        li      r9, 1
        stw     r9, VCPU_XIVE_PUSHED(r4)
+       eieio
 no_xive:
 #endif /* CONFIG_KVM_XICS */
 
@@ -1121,6 +1122,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 BEGIN_FTR_SECTION
        mtspr   SPRN_PPR, r0
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+/* Move canary into DSISR to check for later */
+BEGIN_FTR_SECTION
+       li      r0, 0x7fff
+       mtspr   SPRN_HDSISR, r0
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
        ld      r0, VCPU_GPR(R0)(r4)
        ld      r4, VCPU_GPR(R4)(r4)
 
@@ -1303,6 +1311,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        bne     3f
 BEGIN_FTR_SECTION
        PPC_MSGSYNC
+       lwsync
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
        lbz     r0, HSTATE_HOST_IPI(r13)
        cmpwi   r0, 0
@@ -1393,8 +1402,8 @@ guest_exit_cont:          /* r9 = vcpu, r12 = trap, r13 = paca */
        cmpldi  cr0, r10, 0
        beq     1f
        /* First load to pull the context, we ignore the value */
-       lwzx    r11, r7, r10
        eieio
+       lwzx    r11, r7, r10
        /* Second load to recover the context state (Words 0 and 1) */
        ldx     r11, r6, r10
        b       3f
@@ -1402,8 +1411,8 @@ guest_exit_cont:          /* r9 = vcpu, r12 = trap, r13 = paca */
        cmpldi  cr0, r10, 0
        beq     1f
        /* First load to pull the context, we ignore the value */
-       lwzcix  r11, r7, r10
        eieio
+       lwzcix  r11, r7, r10
        /* Second load to recover the context state (Words 0 and 1) */
        ldcix   r11, r6, r10
 3:     std     r11, VCPU_XIVE_SAVED_STATE(r9)
@@ -1413,6 +1422,7 @@ guest_exit_cont:          /* r9 = vcpu, r12 = trap, r13 = paca */
        stw     r10, VCPU_XIVE_PUSHED(r9)
        stb     r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
        stb     r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
+       eieio
 1:
 #endif /* CONFIG_KVM_XICS */
        /* Save more register state  */
@@ -1956,9 +1966,14 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 kvmppc_hdsi:
        ld      r3, VCPU_KVM(r9)
        lbz     r0, KVM_RADIX(r3)
-       cmpwi   r0, 0
        mfspr   r4, SPRN_HDAR
        mfspr   r6, SPRN_HDSISR
+BEGIN_FTR_SECTION
+       /* Look for DSISR canary. If we find it, retry instruction */
+       cmpdi   r6, 0x7fff
+       beq     6f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+       cmpwi   r0, 0
        bne     .Lradix_hdsi            /* on radix, just save DAR/DSISR/ASDR */
        /* HPTE not found fault or protection fault? */
        andis.  r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
@@ -2776,6 +2791,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        PPC_MSGCLR(6)
        /* see if it's a host IPI */
        li      r3, 1
+BEGIN_FTR_SECTION
+       PPC_MSGSYNC
+       lwsync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
        lbz     r0, HSTATE_HOST_IPI(r13)
        cmpwi   r0, 0
        bnelr
index 13304622ab1c78682fa18f06120f5eb0970f57fb..bf457843e03217b9aa02815d7791f0fce72aea2b 100644 (file)
@@ -622,7 +622,7 @@ int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                return -EINVAL;
        state = &sb->irq_state[idx];
        arch_spin_lock(&sb->lock);
-       *server = state->guest_server;
+       *server = state->act_server;
        *priority = state->guest_priority;
        arch_spin_unlock(&sb->lock);
 
@@ -1331,7 +1331,7 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
        xive->saved_src_count++;
 
        /* Convert saved state into something compatible with xics */
-       val = state->guest_server;
+       val = state->act_server;
        prio = state->saved_scan_prio;
 
        if (prio == MASKED) {
@@ -1507,7 +1507,6 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
        /* First convert prio and mark interrupt as untargetted */
        act_prio = xive_prio_from_guest(guest_prio);
        state->act_priority = MASKED;
-       state->guest_server = server;
 
        /*
         * We need to drop the lock due to the mutex below. Hopefully
index 5938f7644dc17587dc811132459cf8b71425856a..6ba63f8e8a614ed4aad2a5890236834f3e08c0a7 100644 (file)
@@ -35,7 +35,6 @@ struct kvmppc_xive_irq_state {
        struct xive_irq_data *pt_data;  /* XIVE Pass-through associated data */
 
        /* Targetting as set by guest */
-       u32 guest_server;               /* Current guest selected target */
        u8 guest_priority;              /* Guest set priority */
        u8 saved_priority;              /* Saved priority when masking */
 
index 3480faaf1ef886118ba1ee26389ea25c4c3d8a69..ee279c7f48021e0b43c658d7529b9160061b5415 100644 (file)
@@ -644,8 +644,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                break;
 #endif
        case KVM_CAP_PPC_HTM:
-               r = cpu_has_feature(CPU_FTR_TM_COMP) &&
-                   is_kvmppc_hv_enabled(kvm);
+               r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
                break;
        default:
                r = 0;
index fb9f58b868e76ef0692e45fe874b6546cf74de37..f208f560aecd086b579ace960a78f25123974abd 100644 (file)
@@ -944,9 +944,9 @@ NOKPROBE_SYMBOL(emulate_dcbz);
                : "r" (addr), "i" (-EFAULT), "0" (err))
 
 static nokprobe_inline void set_cr0(const struct pt_regs *regs,
-                                   struct instruction_op *op, int rd)
+                                   struct instruction_op *op)
 {
-       long val = regs->gpr[rd];
+       long val = op->val;
 
        op->type |= SETCC;
        op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
@@ -1326,7 +1326,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
        case 13:        /* addic. */
                imm = (short) instr;
                add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
-               set_cr0(regs, op, rd);
+               set_cr0(regs, op);
                return 1;
 
        case 14:        /* addi */
@@ -1397,13 +1397,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 
        case 28:        /* andi. */
                op->val = regs->gpr[rd] & (unsigned short) instr;
-               set_cr0(regs, op, ra);
+               set_cr0(regs, op);
                goto logical_done_nocc;
 
        case 29:        /* andis. */
                imm = (unsigned short) instr;
                op->val = regs->gpr[rd] & (imm << 16);
-               set_cr0(regs, op, ra);
+               set_cr0(regs, op);
                goto logical_done_nocc;
 
 #ifdef __powerpc64__
@@ -1513,10 +1513,10 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
                        op->type = COMPUTE + SETCC;
                        imm = 0xf0000000UL;
                        val = regs->gpr[rd];
-                       op->val = regs->ccr;
+                       op->ccval = regs->ccr;
                        for (sh = 0; sh < 8; ++sh) {
                                if (instr & (0x80000 >> sh))
-                                       op->val = (op->val & ~imm) |
+                                       op->ccval = (op->ccval & ~imm) |
                                                (val & imm);
                                imm >>= 4;
                        }
@@ -1651,8 +1651,9 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
                        goto arith_done;
 
                case 235:       /* mullw */
-                       op->val = (unsigned int) regs->gpr[ra] *
-                               (unsigned int) regs->gpr[rb];
+                       op->val = (long)(int) regs->gpr[ra] *
+                               (int) regs->gpr[rb];
+
                        goto arith_done;
 
                case 266:       /* add */
@@ -1683,11 +1684,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
  * Logical instructions
  */
                case 26:        /* cntlzw */
-                       op->val = __builtin_clz((unsigned int) regs->gpr[rd]);
+                       val = (unsigned int) regs->gpr[rd];
+                       op->val = ( val ? __builtin_clz(val) : 32 );
                        goto logical_done;
 #ifdef __powerpc64__
                case 58:        /* cntlzd */
-                       op->val = __builtin_clzl(regs->gpr[rd]);
+                       val = regs->gpr[rd];
+                       op->val = ( val ? __builtin_clzl(val) : 64 );
                        goto logical_done;
 #endif
                case 28:        /* and */
@@ -2526,7 +2529,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 
  logical_done:
        if (instr & 1)
-               set_cr0(regs, op, ra);
+               set_cr0(regs, op);
  logical_done_nocc:
        op->reg = ra;
        op->type |= SETREG;
@@ -2534,7 +2537,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 
  arith_done:
        if (instr & 1)
-               set_cr0(regs, op, rd);
+               set_cr0(regs, op);
  compute_done:
        op->reg = rd;
        op->type |= SETREG;
index b95c584ce19d33b658a9a546ecb3de64ff93d23a..a51df9ef529d9356cb54c75721510e50cfa69b8d 100644 (file)
@@ -1438,7 +1438,6 @@ out:
 
 int arch_update_cpu_topology(void)
 {
-       lockdep_assert_cpus_held();
        return numa_update_cpu_topology(true);
 }
 
index 65eda1997c3f8750c899294bc1d9bb15215d9426..f6c7f54c05157e226c5426798b6fa0f379f8becb 100644 (file)
@@ -361,9 +361,9 @@ static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
                        break;
        }
        wmb();
+       local_irq_restore(flags);
        flush_tlb_kernel_range((unsigned long)page_address(start),
                               (unsigned long)page_address(page));
-       local_irq_restore(flags);
        return err;
 }
 
index 2e3eb7431571603fc9cd2452df25a32c6cd4c6da..9e3da168d54cdcd36e3911ff040ff2ad187c92b7 100644 (file)
@@ -793,6 +793,11 @@ void perf_event_print_debug(void)
        u32 pmcs[MAX_HWEVENTS];
        int i;
 
+       if (!ppmu) {
+               pr_info("Performance monitor hardware not registered.\n");
+               return;
+       }
+
        if (!ppmu->n_counter)
                return;
 
index 9ccac86f346385eaa624930f3050cdae62b6b09d..88126245881b3f8e500ab099bbfa4ebba8bf03d9 100644 (file)
@@ -399,6 +399,20 @@ static void nest_imc_counters_release(struct perf_event *event)
 
        /* Take the mutex lock for this node and then decrement the reference count */
        mutex_lock(&ref->lock);
+       if (ref->refc == 0) {
+               /*
+                * The scenario where this is true is, when perf session is
+                * started, followed by offlining of all cpus in a given node.
+                *
+                * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
+                * function set the ref->count to zero, if the cpu which is
+                * about to offline is the last cpu in a given node and make
+                * an OPAL call to disable the engine in that node.
+                *
+                */
+               mutex_unlock(&ref->lock);
+               return;
+       }
        ref->refc--;
        if (ref->refc == 0) {
                rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
@@ -523,8 +537,8 @@ static int core_imc_mem_init(int cpu, int size)
 
        /* We need only vbase for core counters */
        mem_info->vbase = page_address(alloc_pages_node(phys_id,
-                                         GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-                                         get_order(size)));
+                                         GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+                                         __GFP_NOWARN, get_order(size)));
        if (!mem_info->vbase)
                return -ENOMEM;
 
@@ -646,6 +660,20 @@ static void core_imc_counters_release(struct perf_event *event)
                return;
 
        mutex_lock(&ref->lock);
+       if (ref->refc == 0) {
+               /*
+                * The scenario where this is true is, when perf session is
+                * started, followed by offlining of all cpus in a given core.
+                *
+                * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
+                * function set the ref->count to zero, if the cpu which is
+                * about to offline is the last cpu in a given core and make
+                * an OPAL call to disable the engine in that core.
+                *
+                */
+               mutex_unlock(&ref->lock);
+               return;
+       }
        ref->refc--;
        if (ref->refc == 0) {
                rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
@@ -763,8 +791,8 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
                 * free the memory in cpu offline path.
                 */
                local_mem = page_address(alloc_pages_node(phys_id,
-                                 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-                                 get_order(size)));
+                                 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+                                 __GFP_NOWARN, get_order(size)));
                if (!local_mem)
                        return -ENOMEM;
 
@@ -1148,7 +1176,8 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
        }
 
        /* Only free the attr_groups which are dynamically allocated  */
-       kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
+       if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
+               kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
        kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
        kfree(pmu_ptr);
        return;
index 9f59041a172bbfdea270ccb4139e66948bc3354f..443d5ca719958e5170374edbcaaf7d2cc52b8104 100644 (file)
@@ -393,7 +393,13 @@ static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
        u64 pir = get_hard_smp_processor_id(cpu);
 
        mtspr(SPRN_LPCR, lpcr_val);
-       opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
+
+       /*
+        * Program the LPCR via stop-api only if the deepest stop state
+        * can lose hypervisor context.
+        */
+       if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
+               opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
 }
 
 /*
index 897aa1400eb833e944fabbc65840904006267a1f..bbb73aa0eb8f041630110c795828215471cc5f2d 100644 (file)
@@ -272,7 +272,15 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 static unsigned long pnv_memory_block_size(void)
 {
-       return 256UL * 1024 * 1024;
+       /*
+        * We map the kernel linear region with 1GB large pages on radix. For
+        * memory hot unplug to work our memory block size must be at least
+        * this size.
+        */
+       if (radix_enabled())
+               return 1UL * 1024 * 1024 * 1024;
+       else
+               return 256UL * 1024 * 1024;
 }
 #endif
 
index 783f3636469093034ae26a23856fef6aaf1922a6..e45b5f10645ae54b6610298a0a03c07f7e5d0966 100644 (file)
@@ -266,7 +266,6 @@ int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
                return rc;
        }
 
-       of_node_put(dn->parent);
        return 0;
 }
 
index fc0d8f97c03a2f243a51c9ba27c7043626bd95cc..fadb95efbb9e1856ed8a7182cf870dcf3a5f5331 100644 (file)
@@ -462,15 +462,19 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
        }
 
        dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
-       of_node_put(parent);
        if (!dn) {
                pr_warn("Failed call to configure-connector, drc index: %x\n",
                        drc_index);
                dlpar_release_drc(drc_index);
+               of_node_put(parent);
                return -EINVAL;
        }
 
        rc = dlpar_attach_node(dn, parent);
+
+       /* Regardless we are done with parent now */
+       of_node_put(parent);
+
        if (rc) {
                saved_rc = rc;
                pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n",
index 210ce632d63e1e47c2796acd767e3bf610da7439..f7042ad492bafba5ac21e3db9ce24977fd527169 100644 (file)
@@ -226,8 +226,10 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
                return -ENOENT;
 
        dn = dlpar_configure_connector(drc_index, parent_dn);
-       if (!dn)
+       if (!dn) {
+               of_node_put(parent_dn);
                return -ENOENT;
+       }
 
        rc = dlpar_attach_node(dn, parent_dn);
        if (rc)
index 9234be1e66f556b129148efec300c1f5a4f24239..5011ffea4e4b3a8d3c69160bb1bb896cdeb8cb1f 100644 (file)
@@ -71,6 +71,8 @@
 #define RIWAR_WRTYP_ALLOC      0x00006000
 #define RIWAR_SIZE_MASK                0x0000003F
 
+static DEFINE_SPINLOCK(fsl_rio_config_lock);
+
 #define __fsl_read_rio_config(x, addr, err, op)                \
        __asm__ __volatile__(                           \
                "1:     "op" %1,0(%2)\n"                \
@@ -184,6 +186,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
                        u8 hopcount, u32 offset, int len, u32 *val)
 {
        struct rio_priv *priv = mport->priv;
+       unsigned long flags;
        u8 *data;
        u32 rval, err = 0;
 
@@ -197,6 +200,8 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
        if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
                return -EINVAL;
 
+       spin_lock_irqsave(&fsl_rio_config_lock, flags);
+
        out_be32(&priv->maint_atmu_regs->rowtar,
                 (destid << 22) | (hopcount << 12) | (offset >> 12));
        out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
@@ -213,6 +218,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
                __fsl_read_rio_config(rval, data, err, "lwz");
                break;
        default:
+               spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
                return -EINVAL;
        }
 
@@ -221,6 +227,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
                         err, destid, hopcount, offset);
        }
 
+       spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
        *val = rval;
 
        return err;
@@ -244,7 +251,10 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
                        u8 hopcount, u32 offset, int len, u32 val)
 {
        struct rio_priv *priv = mport->priv;
+       unsigned long flags;
        u8 *data;
+       int ret = 0;
+
        pr_debug
                ("fsl_rio_config_write:"
                " index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
@@ -255,6 +265,8 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
        if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
                return -EINVAL;
 
+       spin_lock_irqsave(&fsl_rio_config_lock, flags);
+
        out_be32(&priv->maint_atmu_regs->rowtar,
                 (destid << 22) | (hopcount << 12) | (offset >> 12));
        out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
@@ -271,10 +283,11 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
                out_be32((u32 *) data, val);
                break;
        default:
-               return -EINVAL;
+               ret = -EINVAL;
        }
+       spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
 
-       return 0;
+       return ret;
 }
 
 static void fsl_rio_inbound_mem_init(struct rio_priv *priv)
index ab7a74c75be8f6a8da71c77ad027cf16bb5eac3b..88b35a3dcdc599b01c37da944ba3f8481215d6cc 100644 (file)
 
 #define DOORBELL_MESSAGE_SIZE  0x08
 
+static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);
+
 struct rio_msg_regs {
        u32 omr;
        u32 osr;
@@ -626,9 +628,13 @@ err_out:
 int fsl_rio_doorbell_send(struct rio_mport *mport,
                                int index, u16 destid, u16 data)
 {
+       unsigned long flags;
+
        pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
                 index, destid, data);
 
+       spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);
+
        /* In the serial version silicons, such as MPC8548, MPC8641,
         * below operations is must be.
         */
@@ -638,6 +644,8 @@ int fsl_rio_doorbell_send(struct rio_mport *mport,
        out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
        out_be32(&dbell->dbell_regs->odmr, 0x00000001);
 
+       spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags);
+
        return 0;
 }
 
index f387318678b97abdfa8e57fe46eade5a707e566d..a3b8d7d1316eb1863f19ffa19ff34f0c761ada33 100644 (file)
@@ -1402,6 +1402,14 @@ void xive_teardown_cpu(void)
 
        if (xive_ops->teardown_cpu)
                xive_ops->teardown_cpu(cpu, xc);
+
+#ifdef CONFIG_SMP
+       /* Get rid of IPI */
+       xive_cleanup_cpu_ipi(cpu, xc);
+#endif
+
+       /* Disable and free the queues */
+       xive_cleanup_cpu_queues(cpu, xc);
 }
 
 void xive_kexec_teardown_cpu(int secondary)
index f24a70bc6855d54cf86a50d17fefebc9777e2ed0..d9c4c93660491849029044d37ab8e60160b0d7de 100644 (file)
@@ -431,7 +431,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
 
 static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
 {
+       if (!xc->hw_ipi)
+               return;
+
        xive_irq_bitmap_free(xc->hw_ipi);
+       xc->hw_ipi = 0;
 }
 #endif /* CONFIG_SMP */
 
index afa46a7406eaeddbbf70bbaf384f49ffa0535874..04e042edbab760f13a2da86a0cab071346bf2ab5 100644 (file)
@@ -27,6 +27,7 @@ CONFIG_NET=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_RAM=y
 # CONFIG_BLK_DEV_XPRAM is not set
 # CONFIG_DCSSBLK is not set
 # CONFIG_DASD is not set
@@ -59,6 +60,7 @@ CONFIG_CONFIGFS_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
index dce708e061eac79a56b655eb71bb6bb63f3ce978..20e75a2ca93a283f6413ca6a8ee40ff280e3c74d 100644 (file)
@@ -1507,7 +1507,9 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
 static inline void pmdp_invalidate(struct vm_area_struct *vma,
                                   unsigned long addr, pmd_t *pmdp)
 {
-       pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+       pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
+
+       pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
 }
 
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
index ca8cd80e8feb7c6f01be173a76471420020e2bb6..60181caf8e8ad332029212e82b07c561cb44395a 100644 (file)
@@ -404,18 +404,6 @@ static inline void save_vector_registers(void)
 #endif
 }
 
-static int __init topology_setup(char *str)
-{
-       bool enabled;
-       int rc;
-
-       rc = kstrtobool(str, &enabled);
-       if (!rc && !enabled)
-               S390_lowcore.machine_flags &= ~MACHINE_FLAG_TOPOLOGY;
-       return rc;
-}
-early_param("topology", topology_setup);
-
 static int __init disable_vector_extension(char *str)
 {
        S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
index 21900e1cee9c2e4f35c5bfc5bcb125b303a0d067..d185aa3965bfd0652dfb83f8e07f33939526eb30 100644 (file)
@@ -521,12 +521,15 @@ ENTRY(pgm_check_handler)
        tmhh    %r8,0x0001              # test problem state bit
        jnz     2f                      # -> fault in user space
 #if IS_ENABLED(CONFIG_KVM)
-       # cleanup critical section for sie64a
+       # cleanup critical section for program checks in sie64a
        lgr     %r14,%r9
        slg     %r14,BASED(.Lsie_critical_start)
        clg     %r14,BASED(.Lsie_critical_length)
        jhe     0f
-       brasl   %r14,.Lcleanup_sie
+       lg      %r14,__SF_EMPTY(%r15)           # get control block pointer
+       ni      __SIE_PROG0C+3(%r14),0xfe       # no longer in SIE
+       lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
+       larl    %r9,sie_exit                    # skip forward to sie_exit
 #endif
 0:     tmhh    %r8,0x4000              # PER bit set in old PSW ?
        jnz     1f                      # -> enabled, can't be a double fault
index c1bf75ffb8756549b1c6a5c97908ff49dd2e79aa..7e1e40323b78e1bb7a3910e1191dfbe40027b748 100644 (file)
@@ -823,9 +823,12 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
        }
 
        /* Check online status of the CPU to which the event is pinned */
-       if ((unsigned int)event->cpu >= nr_cpumask_bits ||
-           (event->cpu >= 0 && !cpu_online(event->cpu)))
-               return -ENODEV;
+       if (event->cpu >= 0) {
+               if ((unsigned int)event->cpu >= nr_cpumask_bits)
+                       return -ENODEV;
+               if (!cpu_online(event->cpu))
+                       return -ENODEV;
+       }
 
        /* Force reset of idle/hv excludes regardless of what the
         * user requested.
index 1cee6753d47a5cba115730cb73cef4324e8d3ed1..495ff6959dec76a904ee2f6ccc30f8ef7809960d 100644 (file)
@@ -293,7 +293,10 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
        lc->lpp = LPP_MAGIC;
        lc->current_pid = tsk->pid;
        lc->user_timer = tsk->thread.user_timer;
+       lc->guest_timer = tsk->thread.guest_timer;
        lc->system_timer = tsk->thread.system_timer;
+       lc->hardirq_timer = tsk->thread.hardirq_timer;
+       lc->softirq_timer = tsk->thread.softirq_timer;
        lc->steal_timer = 0;
 }
 
index bb47c92476f0d852ded0b9b50f9756e46480e9b9..ed0bdd220e1a643788655fbefaef328dd38c7951 100644 (file)
@@ -8,6 +8,8 @@
 
 #include <linux/workqueue.h>
 #include <linux/bootmem.h>
+#include <linux/uaccess.h>
+#include <linux/sysctl.h>
 #include <linux/cpuset.h>
 #include <linux/device.h>
 #include <linux/export.h>
 #define PTF_VERTICAL   (1UL)
 #define PTF_CHECK      (2UL)
 
+enum {
+       TOPOLOGY_MODE_HW,
+       TOPOLOGY_MODE_SINGLE,
+       TOPOLOGY_MODE_PACKAGE,
+       TOPOLOGY_MODE_UNINITIALIZED
+};
+
 struct mask_info {
        struct mask_info *next;
        unsigned char id;
        cpumask_t mask;
 };
 
+static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
 static void set_topology_timer(void);
 static void topology_work_fn(struct work_struct *work);
 static struct sysinfo_15_1_x *tl_info;
@@ -59,11 +69,26 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
        cpumask_t mask;
 
        cpumask_copy(&mask, cpumask_of(cpu));
-       if (!MACHINE_HAS_TOPOLOGY)
-               return mask;
-       for (; info; info = info->next) {
-               if (cpumask_test_cpu(cpu, &info->mask))
-                       return info->mask;
+       switch (topology_mode) {
+       case TOPOLOGY_MODE_HW:
+               while (info) {
+                       if (cpumask_test_cpu(cpu, &info->mask)) {
+                               mask = info->mask;
+                               break;
+                       }
+                       info = info->next;
+               }
+               if (cpumask_empty(&mask))
+                       cpumask_copy(&mask, cpumask_of(cpu));
+               break;
+       case TOPOLOGY_MODE_PACKAGE:
+               cpumask_copy(&mask, cpu_present_mask);
+               break;
+       default:
+               /* fallthrough */
+       case TOPOLOGY_MODE_SINGLE:
+               cpumask_copy(&mask, cpumask_of(cpu));
+               break;
        }
        return mask;
 }
@@ -74,7 +99,7 @@ static cpumask_t cpu_thread_map(unsigned int cpu)
        int i;
 
        cpumask_copy(&mask, cpumask_of(cpu));
-       if (!MACHINE_HAS_TOPOLOGY)
+       if (topology_mode != TOPOLOGY_MODE_HW)
                return mask;
        cpu -= cpu % (smp_cpu_mtid + 1);
        for (i = 0; i <= smp_cpu_mtid; i++)
@@ -184,10 +209,8 @@ static void topology_update_polarization_simple(void)
 {
        int cpu;
 
-       mutex_lock(&smp_cpu_state_mutex);
        for_each_possible_cpu(cpu)
                smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
-       mutex_unlock(&smp_cpu_state_mutex);
 }
 
 static int ptf(unsigned long fc)
@@ -223,7 +246,7 @@ int topology_set_cpu_management(int fc)
 static void update_cpu_masks(void)
 {
        struct cpu_topology_s390 *topo;
-       int cpu;
+       int cpu, id;
 
        for_each_possible_cpu(cpu) {
                topo = &cpu_topology[cpu];
@@ -231,12 +254,13 @@ static void update_cpu_masks(void)
                topo->core_mask = cpu_group_map(&socket_info, cpu);
                topo->book_mask = cpu_group_map(&book_info, cpu);
                topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
-               if (!MACHINE_HAS_TOPOLOGY) {
+               if (topology_mode != TOPOLOGY_MODE_HW) {
+                       id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
                        topo->thread_id = cpu;
                        topo->core_id = cpu;
-                       topo->socket_id = cpu;
-                       topo->book_id = cpu;
-                       topo->drawer_id = cpu;
+                       topo->socket_id = id;
+                       topo->book_id = id;
+                       topo->drawer_id = id;
                        if (cpu_present(cpu))
                                cpumask_set_cpu(cpu, &cpus_with_topology);
                }
@@ -254,6 +278,7 @@ static int __arch_update_cpu_topology(void)
        struct sysinfo_15_1_x *info = tl_info;
        int rc = 0;
 
+       mutex_lock(&smp_cpu_state_mutex);
        cpumask_clear(&cpus_with_topology);
        if (MACHINE_HAS_TOPOLOGY) {
                rc = 1;
@@ -263,6 +288,7 @@ static int __arch_update_cpu_topology(void)
        update_cpu_masks();
        if (!MACHINE_HAS_TOPOLOGY)
                topology_update_polarization_simple();
+       mutex_unlock(&smp_cpu_state_mutex);
        return rc;
 }
 
@@ -289,6 +315,11 @@ void topology_schedule_update(void)
        schedule_work(&topology_work);
 }
 
+static void topology_flush_work(void)
+{
+       flush_work(&topology_work);
+}
+
 static void topology_timer_fn(unsigned long ignored)
 {
        if (ptf(PTF_CHECK))
@@ -459,6 +490,12 @@ void __init topology_init_early(void)
        struct sysinfo_15_1_x *info;
 
        set_sched_topology(s390_topology);
+       if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
+               if (MACHINE_HAS_TOPOLOGY)
+                       topology_mode = TOPOLOGY_MODE_HW;
+               else
+                       topology_mode = TOPOLOGY_MODE_SINGLE;
+       }
        if (!MACHINE_HAS_TOPOLOGY)
                goto out;
        tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
@@ -474,12 +511,97 @@ out:
        __arch_update_cpu_topology();
 }
 
+static inline int topology_get_mode(int enabled)
+{
+       if (!enabled)
+               return TOPOLOGY_MODE_SINGLE;
+       return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
+}
+
+static inline int topology_is_enabled(void)
+{
+       return topology_mode != TOPOLOGY_MODE_SINGLE;
+}
+
+static int __init topology_setup(char *str)
+{
+       bool enabled;
+       int rc;
+
+       rc = kstrtobool(str, &enabled);
+       if (rc)
+               return rc;
+       topology_mode = topology_get_mode(enabled);
+       return 0;
+}
+early_param("topology", topology_setup);
+
+static int topology_ctl_handler(struct ctl_table *ctl, int write,
+                               void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       unsigned int len;
+       int new_mode;
+       char buf[2];
+
+       if (!*lenp || *ppos) {
+               *lenp = 0;
+               return 0;
+       }
+       if (!write) {
+               strncpy(buf, topology_is_enabled() ? "1\n" : "0\n",
+                       ARRAY_SIZE(buf));
+               len = strnlen(buf, ARRAY_SIZE(buf));
+               if (len > *lenp)
+                       len = *lenp;
+               if (copy_to_user(buffer, buf, len))
+                       return -EFAULT;
+               goto out;
+       }
+       len = *lenp;
+       if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
+               return -EFAULT;
+       if (buf[0] != '0' && buf[0] != '1')
+               return -EINVAL;
+       mutex_lock(&smp_cpu_state_mutex);
+       new_mode = topology_get_mode(buf[0] == '1');
+       if (topology_mode != new_mode) {
+               topology_mode = new_mode;
+               topology_schedule_update();
+       }
+       mutex_unlock(&smp_cpu_state_mutex);
+       topology_flush_work();
+out:
+       *lenp = len;
+       *ppos += len;
+       return 0;
+}
+
+static struct ctl_table topology_ctl_table[] = {
+       {
+               .procname       = "topology",
+               .mode           = 0644,
+               .proc_handler   = topology_ctl_handler,
+       },
+       { },
+};
+
+static struct ctl_table topology_dir_table[] = {
+       {
+               .procname       = "s390",
+               .maxlen         = 0,
+               .mode           = 0555,
+               .child          = topology_ctl_table,
+       },
+       { },
+};
+
 static int __init topology_init(void)
 {
        if (MACHINE_HAS_TOPOLOGY)
                set_topology_timer();
        else
                topology_update_polarization_simple();
+       register_sysctl_table(topology_dir_table);
        return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
 }
 device_initcall(topology_init);
index 8ecc25e760fa6dfba0f082cfd8dff38b8c33d9e9..98ffe3ee9411ad23798c7ef896783ebd4d6edcfc 100644 (file)
@@ -56,13 +56,12 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
 {
-       unsigned long mask, result;
        struct page *head, *page;
+       unsigned long mask;
        int refs;
 
-       result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
-       mask = result | _SEGMENT_ENTRY_INVALID;
-       if ((pmd_val(pmd) & mask) != result)
+       mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
+       if ((pmd_val(pmd) & mask) != 0)
                return 0;
        VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
 
index 18e0377f72bbb9ad8f559b00fc2452532d0ad312..88ce1e22237b2b9a22f100e753d00456699ece47 100644 (file)
@@ -136,10 +136,6 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned lo
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/* Copy and release all segment info associated with a VM */
-#define copy_segments(p, mm)   do { } while(0)
-#define release_segments(mm)   do { } while(0)
-
 /*
  * FPU lazy state save handling.
  */
index eedd4f625d07650e1f392a37b45e22d765e51602..777a16318aff1fd41df535ccc91453d7d91d318f 100644 (file)
@@ -170,10 +170,6 @@ struct mm_struct;
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/* Copy and release all segment info associated with a VM */
-#define copy_segments(p, mm)   do { } while (0)
-#define release_segments(mm)   do { } while (0)
-#define forget_segments()      do { } while (0)
 /*
  * FPU lazy state save handling.
  */
index 4d1ef6d74bd6050cb34d0f2fdd91d114fb0baa96..2ae0e938b657a109963dc2059b401f02a673d4fc 100644 (file)
@@ -43,9 +43,7 @@ enum {
        GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4,
        GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0,
 
-       /* Port H */
-       GPIO_PH7, GPIO_PH6, GPIO_PH5, GPIO_PH4,
-       GPIO_PH3, GPIO_PH2, GPIO_PH1, GPIO_PH0,
+       /* Port H - Port H does not have a Data Register */
 
        /* Port I - not on device */
 
index 2a0ca8780f0d8c343fb87e434cf6acec73acaddb..13c495a9fc0007b414bb8f8e36c0e9618e356c3e 100644 (file)
@@ -45,9 +45,7 @@ enum {
        GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4,
        GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0,
 
-       /* Port H */
-       GPIO_PH7, GPIO_PH6, GPIO_PH5, GPIO_PH4,
-       GPIO_PH3, GPIO_PH2, GPIO_PH1, GPIO_PH0,
+       /* Port H - Port H does not have a Data Register */
 
        /* Port I - not on device */
 
index 3bb74e534d0f8ca44208827d762e9dec03b2525a..78961ab78a5a9c83320706171123e59115d00ba5 100644 (file)
@@ -67,7 +67,7 @@ enum {
        GPIO_PTN3, GPIO_PTN2, GPIO_PTN1, GPIO_PTN0,
 
        /* PTQ */
-       GPIO_PTQ7, GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4,
+       GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4,
        GPIO_PTQ3, GPIO_PTQ2, GPIO_PTQ1, GPIO_PTQ0,
 
        /* PTR */
index 5340f3bc1863c3890dde9a8286e05d111a8393ed..b40fb541e72a78ae18002aa79702fa2e72e298b7 100644 (file)
@@ -40,7 +40,7 @@ enum {
 
        /* PTJ */
        GPIO_PTJ0, GPIO_PTJ1, GPIO_PTJ2, GPIO_PTJ3,
-       GPIO_PTJ4, GPIO_PTJ5, GPIO_PTJ6, GPIO_PTJ7_RESV,
+       GPIO_PTJ4, GPIO_PTJ5, GPIO_PTJ6,
 
        /* PTK */
        GPIO_PTK0, GPIO_PTK1, GPIO_PTK2, GPIO_PTK3,
@@ -48,7 +48,7 @@ enum {
 
        /* PTL */
        GPIO_PTL0, GPIO_PTL1, GPIO_PTL2, GPIO_PTL3,
-       GPIO_PTL4, GPIO_PTL5, GPIO_PTL6, GPIO_PTL7_RESV,
+       GPIO_PTL4, GPIO_PTL5, GPIO_PTL6,
 
        /* PTM */
        GPIO_PTM0, GPIO_PTM1, GPIO_PTM2, GPIO_PTM3,
@@ -56,7 +56,7 @@ enum {
 
        /* PTN */
        GPIO_PTN0, GPIO_PTN1, GPIO_PTN2, GPIO_PTN3,
-       GPIO_PTN4, GPIO_PTN5, GPIO_PTN6, GPIO_PTN7_RESV,
+       GPIO_PTN4, GPIO_PTN5, GPIO_PTN6,
 
        /* PTO */
        GPIO_PTO0, GPIO_PTO1, GPIO_PTO2, GPIO_PTO3,
@@ -68,7 +68,7 @@ enum {
 
        /* PTQ */
        GPIO_PTQ0, GPIO_PTQ1, GPIO_PTQ2, GPIO_PTQ3,
-       GPIO_PTQ4, GPIO_PTQ5, GPIO_PTQ6, GPIO_PTQ7_RESV,
+       GPIO_PTQ4, GPIO_PTQ5, GPIO_PTQ6,
 
        /* PTR */
        GPIO_PTR0, GPIO_PTR1, GPIO_PTR2, GPIO_PTR3,
index 0be3828752e5bc9ddf33ba74c9e5b9efd03691e4..4e83f950713e9f9837da01406586613a9c07cb3f 100644 (file)
@@ -44,7 +44,6 @@ config SPARC
        select ARCH_HAS_SG_CHAIN
        select CPU_NO_EFFICIENT_FFS
        select LOCKDEP_SMALL if LOCKDEP
-       select ARCH_WANT_RELAX_ORDER
 
 config SPARC32
        def_bool !64BIT
index 0d925fa0f0c1f270c7bb6ffcacd8f548ede225ec..9f94435cc44f455597fad4afda30fca3e5238675 100644 (file)
@@ -409,5 +409,4 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
index 149d8e8eacb83c223d186f86d342de9db21f6063..1c5bd4f8ffca34110fe68fd3ffd96e98743aef2f 100644 (file)
@@ -189,7 +189,6 @@ CONFIG_IP_NF_MATCH_ECN=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=y
 CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_TARGET_ULOG=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_ECN=m
 CONFIG_IP_NF_TARGET_TTL=m
@@ -521,7 +520,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRC_CCITT=m
 CONFIG_CRC7=m
index 6becb96c60a03c5515cb443b5cdc7c683e259433..ad83c1e66dbd23c5ba46e20c8e5deefdf6f3655e 100644 (file)
@@ -140,7 +140,7 @@ static int __init setup_maxnodemem(char *str)
 {
        char *endp;
        unsigned long long maxnodemem;
-       long node;
+       unsigned long node;
 
        node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
        if (node >= MAX_NUMNODES || *endp != ':')
index f6d1a3f747a9b58b3f33ce0ee25c3bf889985c0b..86942a492454c296bcc5cfc7f5fdd23544a46dc9 100644 (file)
@@ -58,11 +58,6 @@ static inline void release_thread(struct task_struct *task)
 {
 }
 
-static inline void mm_copy_segments(struct mm_struct *from_mm,
-                                   struct mm_struct *new_mm)
-{
-}
-
 #define init_stack     (init_thread_union.stack)
 
 /*
index 0b034ebbda2a1c2dd0125a6d4c2135dd886eb440..7f69d17de3540ca8491270408946a00654493cd2 100644 (file)
@@ -98,7 +98,7 @@ static struct clocksource timer_clocksource = {
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static void __init timer_setup(void)
+static void __init um_timer_setup(void)
 {
        int err;
 
@@ -132,5 +132,5 @@ void read_persistent_clock(struct timespec *ts)
 void __init time_init(void)
 {
        timer_set_signal_handler();
-       late_time_init = timer_setup;
+       late_time_init = um_timer_setup;
 }
index 246c67006ed06ad84516b3a56ceac06e05734a1c..8c1fcb6bad21f91604f0b61357db834f560b7fdb 100644 (file)
@@ -33,7 +33,7 @@
 #define s3     ((16 + 2 + (3 * 256)) * 4)
 
 /* register macros */
-#define CTX %rdi
+#define CTX %r12
 #define RIO %rsi
 
 #define RX0 %rax
 #define RX2bh %ch
 #define RX3bh %dh
 
-#define RT0 %rbp
+#define RT0 %rdi
 #define RT1 %rsi
 #define RT2 %r8
 #define RT3 %r9
 
-#define RT0d %ebp
+#define RT0d %edi
 #define RT1d %esi
 #define RT2d %r8d
 #define RT3d %r9d
 
 ENTRY(__blowfish_enc_blk)
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         *      %rcx: bool, if true: xor output
         */
-       movq %rbp, %r11;
+       movq %r12, %r11;
 
+       movq %rdi, CTX;
        movq %rsi, %r10;
        movq %rdx, RIO;
 
@@ -142,7 +143,7 @@ ENTRY(__blowfish_enc_blk)
        round_enc(14);
        add_roundkey_enc(16);
 
-       movq %r11, %rbp;
+       movq %r11, %r12;
 
        movq %r10, RIO;
        test %cl, %cl;
@@ -157,12 +158,13 @@ ENDPROC(__blowfish_enc_blk)
 
 ENTRY(blowfish_dec_blk)
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         */
-       movq %rbp, %r11;
+       movq %r12, %r11;
 
+       movq %rdi, CTX;
        movq %rsi, %r10;
        movq %rdx, RIO;
 
@@ -181,7 +183,7 @@ ENTRY(blowfish_dec_blk)
        movq %r10, RIO;
        write_block();
 
-       movq %r11, %rbp;
+       movq %r11, %r12;
 
        ret;
 ENDPROC(blowfish_dec_blk)
@@ -298,20 +300,21 @@ ENDPROC(blowfish_dec_blk)
 
 ENTRY(__blowfish_enc_blk_4way)
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         *      %rcx: bool, if true: xor output
         */
-       pushq %rbp;
+       pushq %r12;
        pushq %rbx;
        pushq %rcx;
 
-       preload_roundkey_enc(0);
-
+       movq %rdi, CTX
        movq %rsi, %r11;
        movq %rdx, RIO;
 
+       preload_roundkey_enc(0);
+
        read_block4();
 
        round_enc4(0);
@@ -324,39 +327,40 @@ ENTRY(__blowfish_enc_blk_4way)
        round_enc4(14);
        add_preloaded_roundkey4();
 
-       popq %rbp;
+       popq %r12;
        movq %r11, RIO;
 
-       test %bpl, %bpl;
+       test %r12b, %r12b;
        jnz .L__enc_xor4;
 
        write_block4();
 
        popq %rbx;
-       popq %rbp;
+       popq %r12;
        ret;
 
 .L__enc_xor4:
        xor_block4();
 
        popq %rbx;
-       popq %rbp;
+       popq %r12;
        ret;
 ENDPROC(__blowfish_enc_blk_4way)
 
 ENTRY(blowfish_dec_blk_4way)
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         */
-       pushq %rbp;
+       pushq %r12;
        pushq %rbx;
-       preload_roundkey_dec(17);
 
-       movq %rsi, %r11;
+       movq %rdi, CTX;
+       movq %rsi, %r11
        movq %rdx, RIO;
 
+       preload_roundkey_dec(17);
        read_block4();
 
        round_dec4(17);
@@ -373,7 +377,7 @@ ENTRY(blowfish_dec_blk_4way)
        write_block4();
 
        popq %rbx;
-       popq %rbp;
+       popq %r12;
 
        ret;
 ENDPROC(blowfish_dec_blk_4way)
index 310319c601ede2884b2d031ca4603593f304e4b3..95ba6956a7f6a1fc0b01e2938f94cee0ff6a2ac3 100644 (file)
 #define RCD1bh %dh
 
 #define RT0 %rsi
-#define RT1 %rbp
+#define RT1 %r12
 #define RT2 %r8
 
 #define RT0d %esi
-#define RT1d %ebp
+#define RT1d %r12d
 #define RT2d %r8d
 
 #define RT2bl %r8b
 
 #define RXOR %r9
-#define RRBP %r10
+#define RR12 %r10
 #define RDST %r11
 
 #define RXORd %r9d
@@ -197,7 +197,7 @@ ENTRY(__camellia_enc_blk)
         *      %rdx: src
         *      %rcx: bool xor
         */
-       movq %rbp, RRBP;
+       movq %r12, RR12;
 
        movq %rcx, RXOR;
        movq %rsi, RDST;
@@ -227,13 +227,13 @@ ENTRY(__camellia_enc_blk)
 
        enc_outunpack(mov, RT1);
 
-       movq RRBP, %rbp;
+       movq RR12, %r12;
        ret;
 
 .L__enc_xor:
        enc_outunpack(xor, RT1);
 
-       movq RRBP, %rbp;
+       movq RR12, %r12;
        ret;
 ENDPROC(__camellia_enc_blk)
 
@@ -248,7 +248,7 @@ ENTRY(camellia_dec_blk)
        movl $24, RXORd;
        cmovel RXORd, RT2d; /* max */
 
-       movq %rbp, RRBP;
+       movq %r12, RR12;
        movq %rsi, RDST;
        movq %rdx, RIO;
 
@@ -271,7 +271,7 @@ ENTRY(camellia_dec_blk)
 
        dec_outunpack();
 
-       movq RRBP, %rbp;
+       movq RR12, %r12;
        ret;
 ENDPROC(camellia_dec_blk)
 
@@ -433,7 +433,7 @@ ENTRY(__camellia_enc_blk_2way)
         */
        pushq %rbx;
 
-       movq %rbp, RRBP;
+       movq %r12, RR12;
        movq %rcx, RXOR;
        movq %rsi, RDST;
        movq %rdx, RIO;
@@ -461,14 +461,14 @@ ENTRY(__camellia_enc_blk_2way)
 
        enc_outunpack2(mov, RT2);
 
-       movq RRBP, %rbp;
+       movq RR12, %r12;
        popq %rbx;
        ret;
 
 .L__enc2_xor:
        enc_outunpack2(xor, RT2);
 
-       movq RRBP, %rbp;
+       movq RR12, %r12;
        popq %rbx;
        ret;
 ENDPROC(__camellia_enc_blk_2way)
@@ -485,7 +485,7 @@ ENTRY(camellia_dec_blk_2way)
        cmovel RXORd, RT2d; /* max */
 
        movq %rbx, RXOR;
-       movq %rbp, RRBP;
+       movq %r12, RR12;
        movq %rsi, RDST;
        movq %rdx, RIO;
 
@@ -508,7 +508,7 @@ ENTRY(camellia_dec_blk_2way)
 
        dec_outunpack2();
 
-       movq RRBP, %rbp;
+       movq RR12, %r12;
        movq RXOR, %rbx;
        ret;
 ENDPROC(camellia_dec_blk_2way)
index b4a8806234ea1c5f61f46898900a84035af72763..86107c961bb4627f08f6c7eac252589218c6c7e3 100644 (file)
@@ -47,7 +47,7 @@
 /**********************************************************************
   16-way AVX cast5
  **********************************************************************/
-#define CTX %rdi
+#define CTX %r15
 
 #define RL1 %xmm0
 #define RR1 %xmm1
@@ -70,8 +70,8 @@
 
 #define RTMP %xmm15
 
-#define RID1  %rbp
-#define RID1d %ebp
+#define RID1  %rdi
+#define RID1d %edi
 #define RID2  %rsi
 #define RID2d %esi
 
 .align 16
 __cast5_enc_blk16:
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      RL1: blocks 1 and 2
         *      RR1: blocks 3 and 4
         *      RL2: blocks 5 and 6
@@ -246,9 +246,11 @@ __cast5_enc_blk16:
         *      RR4: encrypted blocks 15 and 16
         */
 
-       pushq %rbp;
+       pushq %r15;
        pushq %rbx;
 
+       movq %rdi, CTX;
+
        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;
@@ -283,7 +285,7 @@ __cast5_enc_blk16:
 
 .L__skip_enc:
        popq %rbx;
-       popq %rbp;
+       popq %r15;
 
        vmovdqa .Lbswap_mask, RKM;
 
@@ -298,7 +300,7 @@ ENDPROC(__cast5_enc_blk16)
 .align 16
 __cast5_dec_blk16:
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      RL1: encrypted blocks 1 and 2
         *      RR1: encrypted blocks 3 and 4
         *      RL2: encrypted blocks 5 and 6
@@ -318,9 +320,11 @@ __cast5_dec_blk16:
         *      RR4: decrypted blocks 15 and 16
         */
 
-       pushq %rbp;
+       pushq %r15;
        pushq %rbx;
 
+       movq %rdi, CTX;
+
        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;
@@ -356,7 +360,7 @@ __cast5_dec_blk16:
 
        vmovdqa .Lbswap_mask, RKM;
        popq %rbx;
-       popq %rbp;
+       popq %r15;
 
        outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
        outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
@@ -372,12 +376,14 @@ ENDPROC(__cast5_dec_blk16)
 
 ENTRY(cast5_ecb_enc_16way)
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         */
        FRAME_BEGIN
+       pushq %r15;
 
+       movq %rdi, CTX;
        movq %rsi, %r11;
 
        vmovdqu (0*4*4)(%rdx), RL1;
@@ -400,18 +406,22 @@ ENTRY(cast5_ecb_enc_16way)
        vmovdqu RR4, (6*4*4)(%r11);
        vmovdqu RL4, (7*4*4)(%r11);
 
+       popq %r15;
        FRAME_END
        ret;
 ENDPROC(cast5_ecb_enc_16way)
 
 ENTRY(cast5_ecb_dec_16way)
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         */
 
        FRAME_BEGIN
+       pushq %r15;
+
+       movq %rdi, CTX;
        movq %rsi, %r11;
 
        vmovdqu (0*4*4)(%rdx), RL1;
@@ -434,20 +444,22 @@ ENTRY(cast5_ecb_dec_16way)
        vmovdqu RR4, (6*4*4)(%r11);
        vmovdqu RL4, (7*4*4)(%r11);
 
+       popq %r15;
        FRAME_END
        ret;
 ENDPROC(cast5_ecb_dec_16way)
 
 ENTRY(cast5_cbc_dec_16way)
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         */
        FRAME_BEGIN
-
        pushq %r12;
+       pushq %r15;
 
+       movq %rdi, CTX;
        movq %rsi, %r11;
        movq %rdx, %r12;
 
@@ -483,23 +495,24 @@ ENTRY(cast5_cbc_dec_16way)
        vmovdqu RR4, (6*16)(%r11);
        vmovdqu RL4, (7*16)(%r11);
 
+       popq %r15;
        popq %r12;
-
        FRAME_END
        ret;
 ENDPROC(cast5_cbc_dec_16way)
 
 ENTRY(cast5_ctr_16way)
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         *      %rcx: iv (big endian, 64bit)
         */
        FRAME_BEGIN
-
        pushq %r12;
+       pushq %r15;
 
+       movq %rdi, CTX;
        movq %rsi, %r11;
        movq %rdx, %r12;
 
@@ -558,8 +571,8 @@ ENTRY(cast5_ctr_16way)
        vmovdqu RR4, (6*16)(%r11);
        vmovdqu RL4, (7*16)(%r11);
 
+       popq %r15;
        popq %r12;
-
        FRAME_END
        ret;
 ENDPROC(cast5_ctr_16way)
index 952d3156a93312ff8c5a5c2c193132a89f7413da..7f30b6f0d72c15f402a69d621ecde1c283780266 100644 (file)
@@ -47,7 +47,7 @@
 /**********************************************************************
   8-way AVX cast6
  **********************************************************************/
-#define CTX %rdi
+#define CTX %r15
 
 #define RA1 %xmm0
 #define RB1 %xmm1
@@ -70,8 +70,8 @@
 
 #define RTMP %xmm15
 
-#define RID1  %rbp
-#define RID1d %ebp
+#define RID1  %rdi
+#define RID1d %edi
 #define RID2  %rsi
 #define RID2d %esi
 
 .align 8
 __cast6_enc_blk8:
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
         * output:
         *      RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
         */
 
-       pushq %rbp;
+       pushq %r15;
        pushq %rbx;
 
+       movq %rdi, CTX;
+
        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;
@@ -297,7 +299,7 @@ __cast6_enc_blk8:
        QBAR(11);
 
        popq %rbx;
-       popq %rbp;
+       popq %r15;
 
        vmovdqa .Lbswap_mask, RKM;
 
@@ -310,15 +312,17 @@ ENDPROC(__cast6_enc_blk8)
 .align 8
 __cast6_dec_blk8:
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
         * output:
         *      RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
         */
 
-       pushq %rbp;
+       pushq %r15;
        pushq %rbx;
 
+       movq %rdi, CTX;
+
        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;
@@ -343,7 +347,7 @@ __cast6_dec_blk8:
        QBAR(0);
 
        popq %rbx;
-       popq %rbp;
+       popq %r15;
 
        vmovdqa .Lbswap_mask, RKM;
        outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
@@ -354,12 +358,14 @@ ENDPROC(__cast6_dec_blk8)
 
 ENTRY(cast6_ecb_enc_8way)
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         */
        FRAME_BEGIN
+       pushq %r15;
 
+       movq %rdi, CTX;
        movq %rsi, %r11;
 
        load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
@@ -368,18 +374,21 @@ ENTRY(cast6_ecb_enc_8way)
 
        store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       popq %r15;
        FRAME_END
        ret;
 ENDPROC(cast6_ecb_enc_8way)
 
 ENTRY(cast6_ecb_dec_8way)
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         */
        FRAME_BEGIN
+       pushq %r15;
 
+       movq %rdi, CTX;
        movq %rsi, %r11;
 
        load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
@@ -388,20 +397,22 @@ ENTRY(cast6_ecb_dec_8way)
 
        store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       popq %r15;
        FRAME_END
        ret;
 ENDPROC(cast6_ecb_dec_8way)
 
 ENTRY(cast6_cbc_dec_8way)
        /* input:
-        *      %rdi: ctx, CTX
+        *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         */
        FRAME_BEGIN
-
        pushq %r12;
+       pushq %r15;
 
+       movq %rdi, CTX;
        movq %rsi, %r11;
        movq %rdx, %r12;
 
@@ -411,8 +422,8 @@ ENTRY(cast6_cbc_dec_8way)
 
        store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       popq %r15;
        popq %r12;
-
        FRAME_END
        ret;
 ENDPROC(cast6_cbc_dec_8way)
@@ -425,9 +436,10 @@ ENTRY(cast6_ctr_8way)
         *      %rcx: iv (little endian, 128bit)
         */
        FRAME_BEGIN
-
        pushq %r12;
+       pushq %r15
 
+       movq %rdi, CTX;
        movq %rsi, %r11;
        movq %rdx, %r12;
 
@@ -438,8 +450,8 @@ ENTRY(cast6_ctr_8way)
 
        store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       popq %r15;
        popq %r12;
-
        FRAME_END
        ret;
 ENDPROC(cast6_ctr_8way)
@@ -452,7 +464,9 @@ ENTRY(cast6_xts_enc_8way)
         *      %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
         */
        FRAME_BEGIN
+       pushq %r15;
 
+       movq %rdi, CTX
        movq %rsi, %r11;
 
        /* regs <= src, dst <= IVs, regs <= regs xor IVs */
@@ -464,6 +478,7 @@ ENTRY(cast6_xts_enc_8way)
        /* dst <= regs xor IVs(in dst) */
        store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       popq %r15;
        FRAME_END
        ret;
 ENDPROC(cast6_xts_enc_8way)
@@ -476,7 +491,9 @@ ENTRY(cast6_xts_dec_8way)
         *      %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
         */
        FRAME_BEGIN
+       pushq %r15;
 
+       movq %rdi, CTX
        movq %rsi, %r11;
 
        /* regs <= src, dst <= IVs, regs <= regs xor IVs */
@@ -488,6 +505,7 @@ ENTRY(cast6_xts_dec_8way)
        /* dst <= regs xor IVs(in dst) */
        store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       popq %r15;
        FRAME_END
        ret;
 ENDPROC(cast6_xts_dec_8way)
index f3e91647ca274a4b20d4b18f0d7232a3c8ca4567..8e49ce1174947ea612fca65a6d143cdc2386534e 100644 (file)
 #define RW2bh %ch
 
 #define RT0 %r15
-#define RT1 %rbp
+#define RT1 %rsi
 #define RT2 %r14
 #define RT3 %rdx
 
 #define RT0d %r15d
-#define RT1d %ebp
+#define RT1d %esi
 #define RT2d %r14d
 #define RT3d %edx
 
@@ -177,13 +177,14 @@ ENTRY(des3_ede_x86_64_crypt_blk)
         *      %rsi: dst
         *      %rdx: src
         */
-       pushq %rbp;
        pushq %rbx;
        pushq %r12;
        pushq %r13;
        pushq %r14;
        pushq %r15;
 
+       pushq %rsi; /* dst */
+
        read_block(%rdx, RL0, RR0);
        initial_permutation(RL0, RR0);
 
@@ -241,6 +242,8 @@ ENTRY(des3_ede_x86_64_crypt_blk)
        round1(32+15, RL0, RR0, dummy2);
 
        final_permutation(RR0, RL0);
+
+       popq %rsi /* dst */
        write_block(%rsi, RR0, RL0);
 
        popq %r15;
@@ -248,7 +251,6 @@ ENTRY(des3_ede_x86_64_crypt_blk)
        popq %r13;
        popq %r12;
        popq %rbx;
-       popq %rbp;
 
        ret;
 ENDPROC(des3_ede_x86_64_crypt_blk)
@@ -432,13 +434,14 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way)
         *      %rdx: src (3 blocks)
         */
 
-       pushq %rbp;
        pushq %rbx;
        pushq %r12;
        pushq %r13;
        pushq %r14;
        pushq %r15;
 
+       pushq %rsi /* dst */
+
        /* load input */
        movl 0 * 4(%rdx), RL0d;
        movl 1 * 4(%rdx), RR0d;
@@ -520,6 +523,7 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way)
        bswapl RR2d;
        bswapl RL2d;
 
+       popq %rsi /* dst */
        movl RR0d, 0 * 4(%rsi);
        movl RL0d, 1 * 4(%rsi);
        movl RR1d, 2 * 4(%rsi);
@@ -532,7 +536,6 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way)
        popq %r13;
        popq %r12;
        popq %rbx;
-       popq %rbp;
 
        ret;
 ENDPROC(des3_ede_x86_64_crypt_blk_3way)
index 1eab79c9ac484172a63d9cc0c4a409b7fffe7e8b..9f712a7dfd797cc499e1ef52ba7999078530494a 100644 (file)
@@ -89,7 +89,7 @@
 #define        REG_RE  %rdx
 #define        REG_RTA %r12
 #define        REG_RTB %rbx
-#define        REG_T1  %ebp
+#define        REG_T1  %r11d
 #define        xmm_mov vmovups
 #define        avx2_zeroupper  vzeroupper
 #define        RND_F1  1
@@ -637,7 +637,6 @@ _loop3:
        ENTRY(\name)
 
        push    %rbx
-       push    %rbp
        push    %r12
        push    %r13
        push    %r14
@@ -673,7 +672,6 @@ _loop3:
        pop     %r14
        pop     %r13
        pop     %r12
-       pop     %rbp
        pop     %rbx
 
        ret
index a4109506a5e8884ba54562aed09bc36e5e800d12..6204bd53528c65c0d4a70f05e76a60bcc476499e 100644 (file)
@@ -37,7 +37,7 @@
 #define REG_A  %ecx
 #define REG_B  %esi
 #define REG_C  %edi
-#define REG_D  %ebp
+#define REG_D  %r12d
 #define REG_E  %edx
 
 #define REG_T1 %eax
        ENTRY(\name)
 
        push    %rbx
-       push    %rbp
        push    %r12
+       push    %rbp
+       mov     %rsp, %rbp
 
-       mov     %rsp, %r12
        sub     $64, %rsp               # allocate workspace
        and     $~15, %rsp              # align stack
 
        xor     %rax, %rax
        rep stosq
 
-       mov     %r12, %rsp              # deallocate workspace
-
-       pop     %r12
+       mov     %rbp, %rsp              # deallocate workspace
        pop     %rbp
+       pop     %r12
        pop     %rbx
        ret
 
index e08888a1a5f2cf5f8a6809d86d497664cece52a1..001bbcf93c79ba08628abcf30dd49ad77158420d 100644 (file)
@@ -103,7 +103,7 @@ SRND = %rsi       # clobbers INP
 c = %ecx
 d = %r8d
 e = %edx
-TBL = %rbp
+TBL = %r12
 a = %eax
 b = %ebx
 
@@ -350,13 +350,13 @@ a = TMP_
 ENTRY(sha256_transform_avx)
 .align 32
        pushq   %rbx
-       pushq   %rbp
+       pushq   %r12
        pushq   %r13
        pushq   %r14
        pushq   %r15
-       pushq   %r12
+       pushq   %rbp
+       movq    %rsp, %rbp
 
-       mov     %rsp, %r12
        subq    $STACK_SIZE, %rsp       # allocate stack space
        and     $~15, %rsp              # align stack pointer
 
@@ -452,13 +452,12 @@ loop2:
 
 done_hash:
 
-       mov     %r12, %rsp
-
-       popq    %r12
+       mov     %rbp, %rsp
+       popq    %rbp
        popq    %r15
        popq    %r14
        popq    %r13
-       popq    %rbp
+       popq    %r12
        popq    %rbx
        ret
 ENDPROC(sha256_transform_avx)
index 89c8f09787d20552c12230ded345282bb2f2bf42..1420db15dcddc8505b57faad5e6e6701f972b517 100644 (file)
@@ -98,8 +98,6 @@ d     = %r8d
 e       = %edx # clobbers NUM_BLKS
 y3     = %esi  # clobbers INP
 
-
-TBL    = %rbp
 SRND   = CTX   # SRND is same register as CTX
 
 a = %eax
@@ -531,7 +529,6 @@ STACK_SIZE  = _RSP      + _RSP_SIZE
 ENTRY(sha256_transform_rorx)
 .align 32
        pushq   %rbx
-       pushq   %rbp
        pushq   %r12
        pushq   %r13
        pushq   %r14
@@ -568,8 +565,6 @@ ENTRY(sha256_transform_rorx)
        mov     CTX, _CTX(%rsp)
 
 loop0:
-       lea     K256(%rip), TBL
-
        ## Load first 16 dwords from two blocks
        VMOVDQ  0*32(INP),XTMP0
        VMOVDQ  1*32(INP),XTMP1
@@ -597,19 +592,19 @@ last_block_enter:
 
 .align 16
 loop1:
-       vpaddd  0*32(TBL, SRND), X0, XFER
+       vpaddd  K256+0*32(SRND), X0, XFER
        vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
        FOUR_ROUNDS_AND_SCHED   _XFER + 0*32
 
-       vpaddd  1*32(TBL, SRND), X0, XFER
+       vpaddd  K256+1*32(SRND), X0, XFER
        vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
        FOUR_ROUNDS_AND_SCHED   _XFER + 1*32
 
-       vpaddd  2*32(TBL, SRND), X0, XFER
+       vpaddd  K256+2*32(SRND), X0, XFER
        vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
        FOUR_ROUNDS_AND_SCHED   _XFER + 2*32
 
-       vpaddd  3*32(TBL, SRND), X0, XFER
+       vpaddd  K256+3*32(SRND), X0, XFER
        vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
        FOUR_ROUNDS_AND_SCHED   _XFER + 3*32
 
@@ -619,10 +614,11 @@ loop1:
 
 loop2:
        ## Do last 16 rounds with no scheduling
-       vpaddd  0*32(TBL, SRND), X0, XFER
+       vpaddd  K256+0*32(SRND), X0, XFER
        vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
        DO_4ROUNDS      _XFER + 0*32
-       vpaddd  1*32(TBL, SRND), X1, XFER
+
+       vpaddd  K256+1*32(SRND), X1, XFER
        vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
        DO_4ROUNDS      _XFER + 1*32
        add     $2*32, SRND
@@ -676,9 +672,6 @@ loop3:
        ja      done_hash
 
 do_last_block:
-       #### do last block
-       lea     K256(%rip), TBL
-
        VMOVDQ  0*16(INP),XWORD0
        VMOVDQ  1*16(INP),XWORD1
        VMOVDQ  2*16(INP),XWORD2
@@ -718,7 +711,6 @@ done_hash:
        popq    %r14
        popq    %r13
        popq    %r12
-       popq    %rbp
        popq    %rbx
        ret
 ENDPROC(sha256_transform_rorx)
index 39b83c93e7fd6cdbc658a35ef4698756cccef612..c6c05ed2c16a593390ddf7617070f45e58a40ad4 100644 (file)
@@ -95,7 +95,7 @@ SRND = %rsi       # clobbers INP
 c = %ecx
 d = %r8d
 e = %edx
-TBL = %rbp
+TBL = %r12
 a = %eax
 b = %ebx
 
@@ -356,13 +356,13 @@ a = TMP_
 ENTRY(sha256_transform_ssse3)
 .align 32
        pushq   %rbx
-       pushq   %rbp
+       pushq   %r12
        pushq   %r13
        pushq   %r14
        pushq   %r15
-       pushq   %r12
+       pushq   %rbp
+       mov     %rsp, %rbp
 
-       mov     %rsp, %r12
        subq    $STACK_SIZE, %rsp
        and     $~15, %rsp
 
@@ -462,13 +462,12 @@ loop2:
 
 done_hash:
 
-       mov     %r12, %rsp
-
-       popq    %r12
+       mov     %rbp, %rsp
+       popq    %rbp
        popq    %r15
        popq    %r14
        popq    %r13
-       popq    %rbp
+       popq    %r12
        popq    %rbx
 
        ret
index 7f5f6c6ec72e9cb0a47c2b5babbbedff487fbbf3..b16d560051629a1c6d518321b93be2672b861c9e 100644 (file)
@@ -69,8 +69,9 @@ XFER  = YTMP0
 
 BYTE_FLIP_MASK  = %ymm9
 
-# 1st arg
-CTX         = %rdi
+# 1st arg is %rdi, which is saved to the stack and accessed later via %r12
+CTX1        = %rdi
+CTX2        = %r12
 # 2nd arg
 INP         = %rsi
 # 3rd arg
@@ -81,7 +82,7 @@ d           = %r8
 e           = %rdx
 y3          = %rsi
 
-TBL   = %rbp
+TBL   = %rdi # clobbers CTX1
 
 a     = %rax
 b     = %rbx
@@ -91,26 +92,26 @@ g     = %r10
 h     = %r11
 old_h = %r11
 
-T1    = %r12
+T1    = %r12 # clobbers CTX2
 y0    = %r13
 y1    = %r14
 y2    = %r15
 
-y4    = %r12
-
 # Local variables (stack frame)
 XFER_SIZE = 4*8
 SRND_SIZE = 1*8
 INP_SIZE = 1*8
 INPEND_SIZE = 1*8
+CTX_SIZE = 1*8
 RSPSAVE_SIZE = 1*8
-GPRSAVE_SIZE = 6*8
+GPRSAVE_SIZE = 5*8
 
 frame_XFER = 0
 frame_SRND = frame_XFER + XFER_SIZE
 frame_INP = frame_SRND + SRND_SIZE
 frame_INPEND = frame_INP + INP_SIZE
-frame_RSPSAVE = frame_INPEND + INPEND_SIZE
+frame_CTX = frame_INPEND + INPEND_SIZE
+frame_RSPSAVE = frame_CTX + CTX_SIZE
 frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE
 frame_size = frame_GPRSAVE + GPRSAVE_SIZE
 
@@ -576,12 +577,11 @@ ENTRY(sha512_transform_rorx)
        mov     %rax, frame_RSPSAVE(%rsp)
 
        # Save GPRs
-       mov     %rbp, frame_GPRSAVE(%rsp)
-       mov     %rbx, 8*1+frame_GPRSAVE(%rsp)
-       mov     %r12, 8*2+frame_GPRSAVE(%rsp)
-       mov     %r13, 8*3+frame_GPRSAVE(%rsp)
-       mov     %r14, 8*4+frame_GPRSAVE(%rsp)
-       mov     %r15, 8*5+frame_GPRSAVE(%rsp)
+       mov     %rbx, 8*0+frame_GPRSAVE(%rsp)
+       mov     %r12, 8*1+frame_GPRSAVE(%rsp)
+       mov     %r13, 8*2+frame_GPRSAVE(%rsp)
+       mov     %r14, 8*3+frame_GPRSAVE(%rsp)
+       mov     %r15, 8*4+frame_GPRSAVE(%rsp)
 
        shl     $7, NUM_BLKS    # convert to bytes
        jz      done_hash
@@ -589,14 +589,17 @@ ENTRY(sha512_transform_rorx)
        mov     NUM_BLKS, frame_INPEND(%rsp)
 
        ## load initial digest
-       mov     8*0(CTX),a
-       mov     8*1(CTX),b
-       mov     8*2(CTX),c
-       mov     8*3(CTX),d
-       mov     8*4(CTX),e
-       mov     8*5(CTX),f
-       mov     8*6(CTX),g
-       mov     8*7(CTX),h
+       mov     8*0(CTX1), a
+       mov     8*1(CTX1), b
+       mov     8*2(CTX1), c
+       mov     8*3(CTX1), d
+       mov     8*4(CTX1), e
+       mov     8*5(CTX1), f
+       mov     8*6(CTX1), g
+       mov     8*7(CTX1), h
+
+       # save %rdi (CTX) before it gets clobbered
+       mov     %rdi, frame_CTX(%rsp)
 
        vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
 
@@ -652,14 +655,15 @@ loop2:
        subq    $1, frame_SRND(%rsp)
        jne     loop2
 
-       addm    8*0(CTX),a
-       addm    8*1(CTX),b
-       addm    8*2(CTX),c
-       addm    8*3(CTX),d
-       addm    8*4(CTX),e
-       addm    8*5(CTX),f
-       addm    8*6(CTX),g
-       addm    8*7(CTX),h
+       mov     frame_CTX(%rsp), CTX2
+       addm    8*0(CTX2), a
+       addm    8*1(CTX2), b
+       addm    8*2(CTX2), c
+       addm    8*3(CTX2), d
+       addm    8*4(CTX2), e
+       addm    8*5(CTX2), f
+       addm    8*6(CTX2), g
+       addm    8*7(CTX2), h
 
        mov     frame_INP(%rsp), INP
        add     $128, INP
@@ -669,12 +673,11 @@ loop2:
 done_hash:
 
 # Restore GPRs
-       mov     frame_GPRSAVE(%rsp)     ,%rbp
-       mov     8*1+frame_GPRSAVE(%rsp) ,%rbx
-       mov     8*2+frame_GPRSAVE(%rsp) ,%r12
-       mov     8*3+frame_GPRSAVE(%rsp) ,%r13
-       mov     8*4+frame_GPRSAVE(%rsp) ,%r14
-       mov     8*5+frame_GPRSAVE(%rsp) ,%r15
+       mov     8*0+frame_GPRSAVE(%rsp), %rbx
+       mov     8*1+frame_GPRSAVE(%rsp), %r12
+       mov     8*2+frame_GPRSAVE(%rsp), %r13
+       mov     8*3+frame_GPRSAVE(%rsp), %r14
+       mov     8*4+frame_GPRSAVE(%rsp), %r15
 
        # Restore Stack Pointer
        mov     frame_RSPSAVE(%rsp), %rsp
index b3f49d2863480334e8b2d12828e4ca20045b8be9..73b471da36226386112a23c0036b7f5da14b3266 100644 (file)
@@ -76,8 +76,8 @@
 #define RT %xmm14
 #define RR %xmm15
 
-#define RID1  %rbp
-#define RID1d %ebp
+#define RID1  %r13
+#define RID1d %r13d
 #define RID2  %rsi
 #define RID2d %esi
 
@@ -259,7 +259,7 @@ __twofish_enc_blk8:
 
        vmovdqu w(CTX), RK1;
 
-       pushq %rbp;
+       pushq %r13;
        pushq %rbx;
        pushq %rcx;
 
@@ -282,7 +282,7 @@ __twofish_enc_blk8:
 
        popq %rcx;
        popq %rbx;
-       popq %rbp;
+       popq %r13;
 
        outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
        outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
@@ -301,7 +301,7 @@ __twofish_dec_blk8:
 
        vmovdqu (w+4*4)(CTX), RK1;
 
-       pushq %rbp;
+       pushq %r13;
        pushq %rbx;
 
        inpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
@@ -322,7 +322,7 @@ __twofish_dec_blk8:
        vmovdqu (w)(CTX), RK1;
 
        popq %rbx;
-       popq %rbp;
+       popq %r13;
 
        outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
        outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
index 8a13d468635a8f56f772a6879d3e907a1945dd6a..50e0d2bc45288cd8d741af14c23b2387be5eedfe 100644 (file)
 /*
  * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
  * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
- * is just setting the LSB, which makes it an invalid stack address and is also
+ * is just clearing the MSB, which makes it an invalid stack address and is also
  * a signal to the unwinder that it's a pt_regs pointer in disguise.
  *
  * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
 .macro ENCODE_FRAME_POINTER
 #ifdef CONFIG_FRAME_POINTER
        mov %esp, %ebp
-       orl $0x1, %ebp
+       andl $0x7fffffff, %ebp
 #endif
 .endm
 
index 49167258d587570673c5e515cb00b4d8f26263b7..f6cdb7a1455e82cce2c568e2add3564835a5352f 100644 (file)
@@ -808,7 +808,7 @@ apicinterrupt IRQ_WORK_VECTOR                       irq_work_interrupt              smp_irq_work_interrupt
 
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 ENTRY(\sym)
-       UNWIND_HINT_IRET_REGS offset=8
+       UNWIND_HINT_IRET_REGS offset=\has_error_code*8
 
        /* Sanity check */
        .if \shift_ist != -1 && \paranoid == 0
index 16076eb34699691ce8d150ae82fc93b34d46b895..141e07b0621689e745582599c009f4af1d053c6c 100644 (file)
@@ -546,9 +546,6 @@ static int bts_event_init(struct perf_event *event)
        if (event->attr.type != bts_pmu.type)
                return -ENOENT;
 
-       if (x86_add_exclusive(x86_lbr_exclusive_bts))
-               return -EBUSY;
-
        /*
         * BTS leaks kernel addresses even when CPL0 tracing is
         * disabled, so disallow intel_bts driver for unprivileged
@@ -562,6 +559,9 @@ static int bts_event_init(struct perf_event *event)
            !capable(CAP_SYS_ADMIN))
                return -EACCES;
 
+       if (x86_add_exclusive(x86_lbr_exclusive_bts))
+               return -EBUSY;
+
        ret = x86_reserve_hardware();
        if (ret) {
                x86_del_exclusive(x86_lbr_exclusive_bts);
index 829e89cfcee2deff8b937806b16dd573d736a895..9fb9a1f1e47bd0d0db3f9be9d4722f956d48dcfe 100644 (file)
@@ -4409,10 +4409,9 @@ static __init int fixup_ht_bug(void)
                return 0;
        }
 
-       if (lockup_detector_suspend() != 0) {
-               pr_debug("failed to disable PMU erratum BJ122, BV98, HSD29 workaround\n");
-               return 0;
-       }
+       cpus_read_lock();
+
+       hardlockup_detector_perf_stop();
 
        x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
 
@@ -4420,9 +4419,7 @@ static __init int fixup_ht_bug(void)
        x86_pmu.commit_scheduling = NULL;
        x86_pmu.stop_scheduling = NULL;
 
-       lockup_detector_resume();
-
-       cpus_read_lock();
+       hardlockup_detector_perf_restart();
 
        for_each_online_cpu(c)
                free_excl_cntrs(c);
index 4cf100ff2a3746f440049dd5c27c254e4dcc1776..72db0664a53dfd6fe64512ebda83caaa97db1abd 100644 (file)
@@ -552,6 +552,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 
        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
 
        X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
@@ -560,6 +561,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
 
        X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_ATOM_DENVERTON, glm_cstates),
+
+       X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GEMINI_LAKE, glm_cstates),
        { },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
index 8e2457cb6b4a416e1c84d9baa990ea512ec74ba0..005908ee9333f0e87cdd4db7ca14a278d52fa773 100644 (file)
@@ -775,6 +775,9 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init),
 
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_DENVERTON, hsw_rapl_init),
+
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GEMINI_LAKE, hsw_rapl_init),
        {},
 };
 
index 1c5390f1cf0992787afa8d34bb361f622b00dcb2..d45e06346f14d8636f1b4348a84a6e503012c686 100644 (file)
@@ -822,7 +822,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
                pmus[i].type    = type;
                pmus[i].boxes   = kzalloc(size, GFP_KERNEL);
                if (!pmus[i].boxes)
-                       return -ENOMEM;
+                       goto err;
        }
 
        type->pmus = pmus;
@@ -836,7 +836,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
                attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
                                        sizeof(*attr_group), GFP_KERNEL);
                if (!attr_group)
-                       return -ENOMEM;
+                       goto err;
 
                attrs = (struct attribute **)(attr_group + 1);
                attr_group->name = "events";
@@ -849,7 +849,15 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
        }
 
        type->pmu_group = &uncore_pmu_attr_group;
+
        return 0;
+
+err:
+       for (i = 0; i < type->num_boxes; i++)
+               kfree(pmus[i].boxes);
+       kfree(pmus);
+
+       return -ENOMEM;
 }
 
 static int __init
index db1fe377e6dd9ddfcfbc006a7346879f263d408a..a7196818416a57381b513254628206a9660ed578 100644 (file)
@@ -3462,7 +3462,7 @@ static struct intel_uncore_ops skx_uncore_iio_ops = {
 static struct intel_uncore_type skx_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
-       .num_boxes              = 5,
+       .num_boxes              = 6,
        .perf_ctr_bits          = 48,
        .event_ctl              = SKX_IIO0_MSR_PMON_CTL0,
        .perf_ctr               = SKX_IIO0_MSR_PMON_CTR0,
@@ -3492,7 +3492,7 @@ static const struct attribute_group skx_uncore_format_group = {
 static struct intel_uncore_type skx_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 2,
-       .num_boxes              = 5,
+       .num_boxes              = 6,
        .perf_ctr_bits          = 48,
        .event_ctl              = SKX_IRP0_MSR_PMON_CTL0,
        .perf_ctr               = SKX_IRP0_MSR_PMON_CTR0,
index 4bb3ec69e8ea10537c25ef2d792be94f1d7a4127..06723671ae4e91d53b7327b44fe96b588826d838 100644 (file)
@@ -63,6 +63,14 @@ static bool test_intel(int idx)
        case INTEL_FAM6_ATOM_SILVERMONT1:
        case INTEL_FAM6_ATOM_SILVERMONT2:
        case INTEL_FAM6_ATOM_AIRMONT:
+
+       case INTEL_FAM6_ATOM_GOLDMONT:
+       case INTEL_FAM6_ATOM_DENVERTON:
+
+       case INTEL_FAM6_ATOM_GEMINI_LAKE:
+
+       case INTEL_FAM6_XEON_PHI_KNL:
+       case INTEL_FAM6_XEON_PHI_KNM:
                if (idx == PERF_MSR_SMI)
                        return true;
                break;
index 1a8eb550c40f13d8f4f4f5d049e09d323a2bb185..a5db63f728a2f985bde0f1b98f87be4537913cdc 100644 (file)
@@ -85,6 +85,8 @@ EXPORT_SYMBOL_GPL(hyperv_cs);
 u32 *hv_vp_index;
 EXPORT_SYMBOL_GPL(hv_vp_index);
 
+u32 hv_max_vp_index;
+
 static int hv_cpu_init(unsigned int cpu)
 {
        u64 msr_vp_index;
@@ -93,6 +95,9 @@ static int hv_cpu_init(unsigned int cpu)
 
        hv_vp_index[smp_processor_id()] = msr_vp_index;
 
+       if (msr_vp_index > hv_max_vp_index)
+               hv_max_vp_index = msr_vp_index;
+
        return 0;
 }
 
index 39e7f6e50919117087fe663e013cd77d12b16b7b..9cc9e1c1e2dbcf6047c9ebd37f280c66b9e7e29a 100644 (file)
@@ -36,9 +36,9 @@ struct hv_flush_pcpu_ex {
 /* Each gva in gva_list encodes up to 4096 pages to flush */
 #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
 
-static struct hv_flush_pcpu __percpu *pcpu_flush;
+static struct hv_flush_pcpu __percpu **pcpu_flush;
 
-static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex;
+static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;
 
 /*
  * Fills in gva_list starting from offset. Returns the number of items added.
@@ -76,6 +76,18 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
 {
        int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
 
+       /* valid_bank_mask can represent up to 64 banks */
+       if (hv_max_vp_index / 64 >= 64)
+               return 0;
+
+       /*
+        * Clear all banks up to the maximum possible bank as hv_flush_pcpu_ex
+        * structs are not cleared between calls, we risk flushing unneeded
+        * vCPUs otherwise.
+        */
+       for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
+               flush->hv_vp_set.bank_contents[vcpu_bank] = 0;
+
        /*
         * Some banks may end up being empty but this is acceptable.
         */
@@ -83,11 +95,6 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
                vcpu = hv_cpu_number_to_vp_number(cpu);
                vcpu_bank = vcpu / 64;
                vcpu_offset = vcpu % 64;
-
-               /* valid_bank_mask can represent up to 64 banks */
-               if (vcpu_bank >= 64)
-                       return 0;
-
                __set_bit(vcpu_offset, (unsigned long *)
                          &flush->hv_vp_set.bank_contents[vcpu_bank]);
                if (vcpu_bank >= nr_bank)
@@ -102,6 +109,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
                                    const struct flush_tlb_info *info)
 {
        int cpu, vcpu, gva_n, max_gvas;
+       struct hv_flush_pcpu **flush_pcpu;
        struct hv_flush_pcpu *flush;
        u64 status = U64_MAX;
        unsigned long flags;
@@ -116,7 +124,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 
        local_irq_save(flags);
 
-       flush = this_cpu_ptr(pcpu_flush);
+       flush_pcpu = this_cpu_ptr(pcpu_flush);
+
+       if (unlikely(!*flush_pcpu))
+               *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+       flush = *flush_pcpu;
+
+       if (unlikely(!flush)) {
+               local_irq_restore(flags);
+               goto do_native;
+       }
 
        if (info->mm) {
                flush->address_space = virt_to_phys(info->mm->pgd);
@@ -173,6 +191,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
                                       const struct flush_tlb_info *info)
 {
        int nr_bank = 0, max_gvas, gva_n;
+       struct hv_flush_pcpu_ex **flush_pcpu;
        struct hv_flush_pcpu_ex *flush;
        u64 status = U64_MAX;
        unsigned long flags;
@@ -187,7 +206,17 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 
        local_irq_save(flags);
 
-       flush = this_cpu_ptr(pcpu_flush_ex);
+       flush_pcpu = this_cpu_ptr(pcpu_flush_ex);
+
+       if (unlikely(!*flush_pcpu))
+               *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+       flush = *flush_pcpu;
+
+       if (unlikely(!flush)) {
+               local_irq_restore(flags);
+               goto do_native;
+       }
 
        if (info->mm) {
                flush->address_space = virt_to_phys(info->mm->pgd);
@@ -222,18 +251,18 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
                flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
                status = hv_do_rep_hypercall(
                        HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
-                       0, nr_bank + 2, flush, NULL);
+                       0, nr_bank, flush, NULL);
        } else if (info->end &&
                   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
                status = hv_do_rep_hypercall(
                        HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
-                       0, nr_bank + 2, flush, NULL);
+                       0, nr_bank, flush, NULL);
        } else {
                gva_n = fill_gva_list(flush->gva_list, nr_bank,
                                      info->start, info->end);
                status = hv_do_rep_hypercall(
                        HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
-                       gva_n, nr_bank + 2, flush, NULL);
+                       gva_n, nr_bank, flush, NULL);
        }
 
        local_irq_restore(flags);
@@ -266,7 +295,7 @@ void hyper_alloc_mmu(void)
                return;
 
        if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
-               pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+               pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
        else
-               pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+               pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
 }
index e0bb46c0285752e73c8eb55420e29f97d88a818e..0e2a5edbce00111f6a41f1b3070610b5316d1e71 100644 (file)
@@ -231,7 +231,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
                 ksig->ka.sa.sa_restorer)
                sp = (unsigned long) ksig->ka.sa.sa_restorer;
 
-       if (fpu->fpstate_active) {
+       if (fpu->initialized) {
                unsigned long fx_aligned, math_size;
 
                sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
index e7636bac7372d41d4b8077f4d7df7d81de32ee32..6c98821fef5ed9f0b953141a905b99893663652e 100644 (file)
 #define new_len2               145f-144f
 
 /*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
  * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ *
+ * The additional "-" is needed because gas uses a "true" value of -1.
  */
 #define alt_max_short(a, b)    ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
 
index 1b020381ab38965ae2a80d2d86df32c66c92e2e8..ccbe24e697c46cee2890d00fda413b9f31b41af6 100644 (file)
@@ -103,12 +103,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
        alt_end_marker ":\n"
 
 /*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
  * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
  *
- * The additional "-" is needed because gas works with s32s.
+ * The additional "-" is needed because gas uses a "true" value of -1.
  */
-#define alt_max_short(a, b)    "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))"
+#define alt_max_short(a, b)    "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
 
 /*
  * Pad the second replacement alternative with additional NOPs if it is
@@ -218,10 +218,9 @@ static inline int alternatives_text_reserved(void *start, void *end)
 #define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2,   \
                           output, input...)                                  \
 {                                                                            \
-       register void *__sp asm(_ASM_SP);                                     \
        asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
                "call %P[new2]", feature2)                                    \
-               : output, "+r" (__sp)                                         \
+               : output, ASM_CALL_CONSTRAINT                                 \
                : [old] "i" (oldfunc), [new1] "i" (newfunc1),                 \
                  [new2] "i" (newfunc2), ## input);                           \
 }
index 676ee5807d864d94538bf16d2ed7535655045128..b0dc91f4bedc680ac69d7ecc481a71c863bade77 100644 (file)
 # define __ASM_FORM_COMMA(x) " " #x ","
 #endif
 
-#ifdef CONFIG_X86_32
+#ifndef __x86_64__
+/* 32 bit */
 # define __ASM_SEL(a,b) __ASM_FORM(a)
 # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
 #else
+/* 64 bit */
 # define __ASM_SEL(a,b) __ASM_FORM(b)
 # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
 #endif
 /* For C file, we already have NOKPROBE_SYMBOL macro */
 #endif
 
+#ifndef __ASSEMBLY__
+/*
+ * This output constraint should be used for any inline asm which has a "call"
+ * instruction.  Otherwise the asm may be inserted before the frame pointer
+ * gets set up by the containing function.  If you forget to do this, objtool
+ * may print a "call without frame pointer save/setup" warning.
+ */
+register unsigned long current_stack_pointer asm(_ASM_SP);
+#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
+#endif
+
 #endif /* _ASM_X86_ASM_H */
index 554cdb205d17586887e6b599c991b2fc090ba38a..e3221ffa304e301858db746a03d6cf8600790004 100644 (file)
 /*
  * High level FPU state handling functions:
  */
-extern void fpu__activate_curr(struct fpu *fpu);
-extern void fpu__activate_fpstate_read(struct fpu *fpu);
-extern void fpu__activate_fpstate_write(struct fpu *fpu);
-extern void fpu__current_fpstate_write_begin(void);
-extern void fpu__current_fpstate_write_end(void);
+extern void fpu__initialize(struct fpu *fpu);
+extern void fpu__prepare_read(struct fpu *fpu);
+extern void fpu__prepare_write(struct fpu *fpu);
 extern void fpu__save(struct fpu *fpu);
 extern void fpu__restore(struct fpu *fpu);
 extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
@@ -120,20 +118,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
        err;                                                            \
 })
 
-#define check_insn(insn, output, input...)                             \
-({                                                                     \
-       int err;                                                        \
+#define kernel_insn(insn, output, input...)                            \
        asm volatile("1:" #insn "\n\t"                                  \
                     "2:\n"                                             \
-                    ".section .fixup,\"ax\"\n"                         \
-                    "3:  movl $-1,%[err]\n"                            \
-                    "    jmp  2b\n"                                    \
-                    ".previous\n"                                      \
-                    _ASM_EXTABLE(1b, 3b)                               \
-                    : [err] "=r" (err), output                         \
-                    : "0"(0), input);                                  \
-       err;                                                            \
-})
+                    _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)  \
+                    : output : input)
 
 static inline int copy_fregs_to_user(struct fregs_state __user *fx)
 {
@@ -153,20 +142,16 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
 
 static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
 {
-       int err;
-
        if (IS_ENABLED(CONFIG_X86_32)) {
-               err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+               kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        } else {
                if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
-                       err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+                       kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
                } else {
                        /* See comment in copy_fxregs_to_kernel() below. */
-                       err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
+                       kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
                }
        }
-       /* Copying from a kernel buffer to FPU registers should never fail: */
-       WARN_ON_FPU(err);
 }
 
 static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
@@ -183,9 +168,7 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
 
 static inline void copy_kernel_to_fregs(struct fregs_state *fx)
 {
-       int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-
-       WARN_ON_FPU(err);
+       kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
 static inline int copy_user_to_fregs(struct fregs_state __user *fx)
@@ -281,18 +264,13 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
  * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
  * XSAVE area format.
  */
-#define XSTATE_XRESTORE(st, lmask, hmask, err)                         \
+#define XSTATE_XRESTORE(st, lmask, hmask)                              \
        asm volatile(ALTERNATIVE(XRSTOR,                                \
                                 XRSTORS, X86_FEATURE_XSAVES)           \
                     "\n"                                               \
-                    "xor %[err], %[err]\n"                             \
                     "3:\n"                                             \
-                    ".pushsection .fixup,\"ax\"\n"                     \
-                    "4: movl $-2, %[err]\n"                            \
-                    "jmp 3b\n"                                         \
-                    ".popsection\n"                                    \
-                    _ASM_EXTABLE(661b, 4b)                             \
-                    : [err] "=r" (err)                                 \
+                    _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
+                    :                                                  \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
                     : "memory")
 
@@ -336,7 +314,10 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
 
-       /* We should never fault when copying from a kernel buffer: */
+       /*
+        * We should never fault when copying from a kernel buffer, and the FPU
+        * state we set at boot time should be valid.
+        */
        WARN_ON_FPU(err);
 }
 
@@ -350,7 +331,7 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
        u32 hmask = mask >> 32;
        int err;
 
-       WARN_ON(!alternatives_patched);
+       WARN_ON_FPU(!alternatives_patched);
 
        XSTATE_XSAVE(xstate, lmask, hmask, err);
 
@@ -365,12 +346,8 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
 {
        u32 lmask = mask;
        u32 hmask = mask >> 32;
-       int err;
-
-       XSTATE_XRESTORE(xstate, lmask, hmask, err);
 
-       /* We should never fault when copying from a kernel buffer: */
-       WARN_ON_FPU(err);
+       XSTATE_XRESTORE(xstate, lmask, hmask);
 }
 
 /*
@@ -526,37 +503,16 @@ static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
  */
 static inline void fpregs_deactivate(struct fpu *fpu)
 {
-       WARN_ON_FPU(!fpu->fpregs_active);
-
-       fpu->fpregs_active = 0;
        this_cpu_write(fpu_fpregs_owner_ctx, NULL);
        trace_x86_fpu_regs_deactivated(fpu);
 }
 
 static inline void fpregs_activate(struct fpu *fpu)
 {
-       WARN_ON_FPU(fpu->fpregs_active);
-
-       fpu->fpregs_active = 1;
        this_cpu_write(fpu_fpregs_owner_ctx, fpu);
        trace_x86_fpu_regs_activated(fpu);
 }
 
-/*
- * The question "does this thread have fpu access?"
- * is slightly racy, since preemption could come in
- * and revoke it immediately after the test.
- *
- * However, even in that very unlikely scenario,
- * we can just assume we have FPU access - typically
- * to save the FP state - we'll just take a #NM
- * fault and get the FPU access back.
- */
-static inline int fpregs_active(void)
-{
-       return current->thread.fpu.fpregs_active;
-}
-
 /*
  * FPU state switching for scheduling.
  *
@@ -571,14 +527,13 @@ static inline int fpregs_active(void)
 static inline void
 switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-       if (old_fpu->fpregs_active) {
+       if (old_fpu->initialized) {
                if (!copy_fpregs_to_fpstate(old_fpu))
                        old_fpu->last_cpu = -1;
                else
                        old_fpu->last_cpu = cpu;
 
                /* But leave fpu_fpregs_owner_ctx! */
-               old_fpu->fpregs_active = 0;
                trace_x86_fpu_regs_deactivated(old_fpu);
        } else
                old_fpu->last_cpu = -1;
@@ -595,7 +550,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
 {
        bool preload = static_cpu_has(X86_FEATURE_FPU) &&
-                      new_fpu->fpstate_active;
+                      new_fpu->initialized;
 
        if (preload) {
                if (!fpregs_state_valid(new_fpu, cpu))
@@ -617,8 +572,7 @@ static inline void user_fpu_begin(void)
        struct fpu *fpu = &current->thread.fpu;
 
        preempt_disable();
-       if (!fpregs_active())
-               fpregs_activate(fpu);
+       fpregs_activate(fpu);
        preempt_enable();
 }
 
index 3c80f5b9c09d8cf5407378a39ba27cfa380bafbf..a1520575d86b81fd2521d7e4013856b7e70d4db2 100644 (file)
@@ -68,6 +68,9 @@ struct fxregs_state {
 /* Default value for fxregs_state.mxcsr: */
 #define MXCSR_DEFAULT          0x1f80
 
+/* Copy both mxcsr & mxcsr_flags with a single u64 memcpy: */
+#define MXCSR_AND_FLAGS_SIZE sizeof(u64)
+
 /*
  * Software based FPU emulation state. This is arbitrary really,
  * it matches the x87 format to make it easier to understand:
@@ -290,36 +293,13 @@ struct fpu {
        unsigned int                    last_cpu;
 
        /*
-        * @fpstate_active:
+        * @initialized:
         *
-        * This flag indicates whether this context is active: if the task
+        * This flag indicates whether this context is initialized: if the task
         * is not running then we can restore from this context, if the task
         * is running then we should save into this context.
         */
-       unsigned char                   fpstate_active;
-
-       /*
-        * @fpregs_active:
-        *
-        * This flag determines whether a given context is actively
-        * loaded into the FPU's registers and that those registers
-        * represent the task's current FPU state.
-        *
-        * Note the interaction with fpstate_active:
-        *
-        *   # task does not use the FPU:
-        *   fpstate_active == 0
-        *
-        *   # task uses the FPU and regs are active:
-        *   fpstate_active == 1 && fpregs_active == 1
-        *
-        *   # the regs are inactive but still match fpstate:
-        *   fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
-        *
-        * The third state is what we use for the lazy restore optimization
-        * on lazy-switching CPUs.
-        */
-       unsigned char                   fpregs_active;
+       unsigned char                   initialized;
 
        /*
         * @state:
index 1b2799e0699a4d47a3ef191f6177f0a4abc13ceb..83fee2469eb76079771c668545db95e929d1446c 100644 (file)
@@ -48,8 +48,12 @@ void fpu__xstate_clear_all_cpu_caps(void);
 void *get_xsave_addr(struct xregs_state *xsave, int xstate);
 const void *get_xsave_field_ptr(int xstate_field);
 int using_compacted_format(void);
-int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
-                       void __user *ubuf, struct xregs_state *xsave);
-int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
-                    struct xregs_state *xsave);
+int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
+int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
+int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
+int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);
+
+/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
+extern int validate_xstate_header(const struct xstate_header *hdr);
+
 #endif
index bc62e7cbf1b1f883fc9acb0a145305384a3bf062..59ad3d132353280cae5a1ba4f3a8ae0efe996331 100644 (file)
@@ -88,7 +88,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 bool kvm_para_available(void);
 unsigned int kvm_arch_para_features(void);
 void __init kvm_guest_init(void);
-void kvm_async_pf_task_wait(u32 token);
+void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
 void kvm_async_pf_task_wake(u32 token);
 u32 kvm_read_and_reset_pf_reason(void);
 extern void kvm_disable_steal_time(void);
@@ -103,7 +103,7 @@ static inline void kvm_spinlock_init(void)
 
 #else /* CONFIG_KVM_GUEST */
 #define kvm_guest_init() do {} while (0)
-#define kvm_async_pf_task_wait(T) do {} while(0)
+#define kvm_async_pf_task_wait(T, I) do {} while(0)
 #define kvm_async_pf_task_wake(T) do {} while(0)
 
 static inline bool kvm_para_available(void)
index 181264989db572a8533c00bbe6392c880c0875e5..8edac1de2e356dde61029939adbaa535aa2fa793 100644 (file)
@@ -187,7 +187,6 @@ struct mca_msr_regs {
 
 extern struct mce_vendor_flags mce_flags;
 
-extern struct mca_config mca_cfg;
 extern struct mca_msr_regs msr_ops;
 
 enum mce_notifier_prios {
index 7ae318c340d9b5d0fca2697dbb7e977b0bda3892..3c856a15b98e8edda98cc5ba7e40fc67f49be230 100644 (file)
@@ -126,13 +126,7 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
        DEBUG_LOCKS_WARN_ON(preemptible());
 }
 
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-       int cpu = smp_processor_id();
-
-       if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
-               cpumask_clear_cpu(cpu, mm_cpumask(mm));
-}
+void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 
 static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
@@ -286,6 +280,32 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
        return __pkru_allows_pkey(vma_pkey(vma), write);
 }
 
+/*
+ * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
+ * bits.  This serves two purposes.  It prevents a nasty situation in
+ * which PCID-unaware code saves CR3, loads some other value (with PCID
+ * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
+ * the saved ASID was nonzero.  It also means that any bugs involving
+ * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
+ * deterministically.
+ */
+
+static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
+{
+       if (static_cpu_has(X86_FEATURE_PCID)) {
+               VM_WARN_ON_ONCE(asid > 4094);
+               return __sme_pa(mm->pgd) | (asid + 1);
+       } else {
+               VM_WARN_ON_ONCE(asid != 0);
+               return __sme_pa(mm->pgd);
+       }
+}
+
+static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
+{
+       VM_WARN_ON_ONCE(asid > 4094);
+       return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
+}
 
 /*
  * This can be used from process context to figure out what the value of
@@ -296,10 +316,8 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
  */
 static inline unsigned long __get_current_cr3_fast(void)
 {
-       unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);
-
-       if (static_cpu_has(X86_FEATURE_PCID))
-               cr3 |= this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+       unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
+               this_cpu_read(cpu_tlbstate.loaded_mm_asid));
 
        /* For now, be very restrictive about when this can be called. */
        VM_WARN_ON(in_nmi() || preemptible());
index 63cc96f064dc4987813352eb8ceba869d3738856..530f448fddafff3297f0e97d697309f1a2cf15bf 100644 (file)
@@ -179,7 +179,6 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
        u64 input_address = input ? virt_to_phys(input) : 0;
        u64 output_address = output ? virt_to_phys(output) : 0;
        u64 hv_status;
-       register void *__sp asm(_ASM_SP);
 
 #ifdef CONFIG_X86_64
        if (!hv_hypercall_pg)
@@ -187,7 +186,7 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
 
        __asm__ __volatile__("mov %4, %%r8\n"
                             "call *%5"
-                            : "=a" (hv_status), "+r" (__sp),
+                            : "=a" (hv_status), ASM_CALL_CONSTRAINT,
                               "+c" (control), "+d" (input_address)
                             :  "r" (output_address), "m" (hv_hypercall_pg)
                             : "cc", "memory", "r8", "r9", "r10", "r11");
@@ -202,7 +201,7 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
 
        __asm__ __volatile__("call *%7"
                             : "=A" (hv_status),
-                              "+c" (input_address_lo), "+r" (__sp)
+                              "+c" (input_address_lo), ASM_CALL_CONSTRAINT
                             : "A" (control),
                               "b" (input_address_hi),
                               "D"(output_address_hi), "S"(output_address_lo),
@@ -224,12 +223,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
 static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
 {
        u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;
-       register void *__sp asm(_ASM_SP);
 
 #ifdef CONFIG_X86_64
        {
                __asm__ __volatile__("call *%4"
-                                    : "=a" (hv_status), "+r" (__sp),
+                                    : "=a" (hv_status), ASM_CALL_CONSTRAINT,
                                       "+c" (control), "+d" (input1)
                                     : "m" (hv_hypercall_pg)
                                     : "cc", "r8", "r9", "r10", "r11");
@@ -242,7 +240,7 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
                __asm__ __volatile__ ("call *%5"
                                      : "=A"(hv_status),
                                        "+c"(input1_lo),
-                                       "+r"(__sp)
+                                       ASM_CALL_CONSTRAINT
                                      : "A" (control),
                                        "b" (input1_hi),
                                        "m" (hv_hypercall_pg)
@@ -291,6 +289,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
  * to this information.
  */
 extern u32 *hv_vp_index;
+extern u32 hv_max_vp_index;
 
 /**
  * hv_cpu_number_to_vp_number() - Map CPU to VP.
index 42873edd9f9d20cda2d1cfcb92b3798c4a75b3aa..280d94c36dada184fce42cd03d5d05fe7ebc8698 100644 (file)
@@ -459,8 +459,8 @@ int paravirt_disable_iospace(void);
  */
 #ifdef CONFIG_X86_32
 #define PVOP_VCALL_ARGS                                                        \
-       unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;      \
-       register void *__sp asm("esp")
+       unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;
+
 #define PVOP_CALL_ARGS                 PVOP_VCALL_ARGS
 
 #define PVOP_CALL_ARG1(x)              "a" ((unsigned long)(x))
@@ -480,8 +480,8 @@ int paravirt_disable_iospace(void);
 /* [re]ax isn't an arg, but the return val */
 #define PVOP_VCALL_ARGS                                                \
        unsigned long __edi = __edi, __esi = __esi,             \
-               __edx = __edx, __ecx = __ecx, __eax = __eax;    \
-       register void *__sp asm("rsp")
+               __edx = __edx, __ecx = __ecx, __eax = __eax;
+
 #define PVOP_CALL_ARGS         PVOP_VCALL_ARGS
 
 #define PVOP_CALL_ARG1(x)              "D" ((unsigned long)(x))
@@ -532,7 +532,7 @@ int paravirt_disable_iospace(void);
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
-                                    : call_clbr, "+r" (__sp)           \
+                                    : call_clbr, ASM_CALL_CONSTRAINT   \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
@@ -542,7 +542,7 @@ int paravirt_disable_iospace(void);
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
-                                    : call_clbr, "+r" (__sp)           \
+                                    : call_clbr, ASM_CALL_CONSTRAINT   \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
@@ -569,7 +569,7 @@ int paravirt_disable_iospace(void);
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
-                            : call_clbr, "+r" (__sp)                   \
+                            : call_clbr, ASM_CALL_CONSTRAINT           \
                             : paravirt_type(op),                       \
                               paravirt_clobber(clbr),                  \
                               ##__VA_ARGS__                            \
index ec1f3c6511506ee1f0ff5240a9ff95d0e6fa68c1..4f44505dbf870e79d4689af9db2f537fe764b831 100644 (file)
@@ -100,19 +100,14 @@ static __always_inline bool should_resched(int preempt_offset)
 
 #ifdef CONFIG_PREEMPT
   extern asmlinkage void ___preempt_schedule(void);
-# define __preempt_schedule()                                  \
-({                                                             \
-       register void *__sp asm(_ASM_SP);                       \
-       asm volatile ("call ___preempt_schedule" : "+r"(__sp)); \
-})
+# define __preempt_schedule() \
+       asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)
 
   extern asmlinkage void preempt_schedule(void);
   extern asmlinkage void ___preempt_schedule_notrace(void);
-# define __preempt_schedule_notrace()                                  \
-({                                                                     \
-       register void *__sp asm(_ASM_SP);                               \
-       asm volatile ("call ___preempt_schedule_notrace" : "+r"(__sp)); \
-})
+# define __preempt_schedule_notrace() \
+       asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT)
+
   extern asmlinkage void preempt_schedule_notrace(void);
 #endif
 
index 3fa26a61eabcef0dc95b19221ca5e76a6ba2a01a..b390ff76e58fd9f28c62ba5e3a8ab169ac18ad62 100644 (file)
@@ -677,8 +677,6 @@ static inline void sync_core(void)
         * Like all of Linux's memory ordering operations, this is a
         * compiler barrier as well.
         */
-       register void *__sp asm(_ASM_SP);
-
 #ifdef CONFIG_X86_32
        asm volatile (
                "pushfl\n\t"
@@ -686,7 +684,7 @@ static inline void sync_core(void)
                "pushl $1f\n\t"
                "iret\n\t"
                "1:"
-               : "+r" (__sp) : : "memory");
+               : ASM_CALL_CONSTRAINT : : "memory");
 #else
        unsigned int tmp;
 
@@ -703,7 +701,7 @@ static inline void sync_core(void)
                "iretq\n\t"
                UNWIND_HINT_RESTORE
                "1:"
-               : "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
+               : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
 #endif
 }
 
index a34e0d4b957d639afb5978863e57fefb74a173f2..7116b7931c7b807766fef6d820ec929da43940c7 100644 (file)
@@ -103,7 +103,6 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
 ({                                                     \
        long tmp;                                       \
        struct rw_semaphore* ret;                       \
-       register void *__sp asm(_ASM_SP);               \
                                                        \
        asm volatile("# beginning down_write\n\t"       \
                     LOCK_PREFIX "  xadd      %1,(%4)\n\t"      \
@@ -114,7 +113,8 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
                     "  call " slow_path "\n"           \
                     "1:\n"                             \
                     "# ending down_write"              \
-                    : "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \
+                    : "+m" (sem->count), "=d" (tmp),   \
+                      "=a" (ret), ASM_CALL_CONSTRAINT  \
                     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
                     : "memory", "cc");                 \
        ret;                                            \
index 5161da1a0fa0a522c80b93f758a84914a88fad46..89e7eeb5cec1dfa6804d5bba2dfe010e6ad8f455 100644 (file)
@@ -158,17 +158,6 @@ struct thread_info {
  */
 #ifndef __ASSEMBLY__
 
-static inline unsigned long current_stack_pointer(void)
-{
-       unsigned long sp;
-#ifdef CONFIG_X86_64
-       asm("mov %%rsp,%0" : "=g" (sp));
-#else
-       asm("mov %%esp,%0" : "=g" (sp));
-#endif
-       return sp;
-}
-
 /*
  * Walks up the stack frames to make sure that the specified object is
  * entirely contained by a single stack frame.
index 4893abf7f74f9e264f5aaaec5a58211b6d324b3c..c4aed0de565ed891283b6e214069b2611374947e 100644 (file)
@@ -82,6 +82,22 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
 #endif
 
+static inline bool tlb_defer_switch_to_init_mm(void)
+{
+       /*
+        * If we have PCID, then switching to init_mm is reasonably
+        * fast.  If we don't have PCID, then switching to init_mm is
+        * quite slow, so we try to defer it in the hopes that we can
+        * avoid it entirely.  The latter approach runs the risk of
+        * receiving otherwise unnecessary IPIs.
+        *
+        * This choice is just a heuristic.  The tlb code can handle this
+        * function returning true or false regardless of whether we have
+        * PCID.
+        */
+       return !static_cpu_has(X86_FEATURE_PCID);
+}
+
 /*
  * 6 because 6 should be plenty and struct tlb_state will fit in
  * two cache lines.
@@ -104,6 +120,23 @@ struct tlb_state {
        u16 loaded_mm_asid;
        u16 next_asid;
 
+       /*
+        * We can be in one of several states:
+        *
+        *  - Actively using an mm.  Our CPU's bit will be set in
+        *    mm_cpumask(loaded_mm) and is_lazy == false;
+        *
+        *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
+        *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
+        *
+        *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
+        *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
+        *    We're heuristically guessing that the CR3 load we
+        *    skipped more than makes up for the overhead added by
+        *    lazy mode.
+        */
+       bool is_lazy;
+
        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
index 342e59789fcdc2e62ce1fd94df32f99f12359bd0..39f7a27bef130fbb6c83a881951e440091ed9168 100644 (file)
@@ -12,25 +12,22 @@ DECLARE_EVENT_CLASS(x86_fpu,
 
        TP_STRUCT__entry(
                __field(struct fpu *, fpu)
-               __field(bool, fpregs_active)
-               __field(bool, fpstate_active)
+               __field(bool, initialized)
                __field(u64, xfeatures)
                __field(u64, xcomp_bv)
                ),
 
        TP_fast_assign(
                __entry->fpu            = fpu;
-               __entry->fpregs_active  = fpu->fpregs_active;
-               __entry->fpstate_active = fpu->fpstate_active;
+               __entry->initialized    = fpu->initialized;
                if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
                        __entry->xfeatures = fpu->state.xsave.header.xfeatures;
                        __entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
                }
        ),
-       TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
+       TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx",
                        __entry->fpu,
-                       __entry->fpregs_active,
-                       __entry->fpstate_active,
+                       __entry->initialized,
                        __entry->xfeatures,
                        __entry->xcomp_bv
        )
index 184eb9894dae3f2cca10bfe1813a348614830305..4b892917edeb787ce6380fd78a0d1c82f2299982 100644 (file)
@@ -166,11 +166,11 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 ({                                                                     \
        int __ret_gu;                                                   \
        register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);            \
-       register void *__sp asm(_ASM_SP);                               \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
        asm volatile("call __get_user_%P4"                              \
-                    : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)    \
+                    : "=a" (__ret_gu), "=r" (__val_gu),                \
+                       ASM_CALL_CONSTRAINT                             \
                     : "0" (ptr), "i" (sizeof(*(ptr))));                \
        (x) = (__force __typeof__(*(ptr))) __val_gu;                    \
        __builtin_expect(__ret_gu, 0);                                  \
@@ -337,7 +337,7 @@ do {                                                                        \
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (retval), "=&A"(x)                          \
-                    : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \
+                    : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1),  \
                       "i" (errret), "0" (retval));                     \
 })
 
index 9606688caa4bea8db0175fe71fe61d9706108aba..7cb282e9e58777aeb2ffd86b344d39a5189c5f84 100644 (file)
@@ -113,10 +113,9 @@ extern struct { char _entry[32]; } hypercall_page[];
        register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
        register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
        register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
-       register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5; \
-       register void *__sp asm(_ASM_SP);
+       register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;
 
-#define __HYPERCALL_0PARAM     "=r" (__res), "+r" (__sp)
+#define __HYPERCALL_0PARAM     "=r" (__res), ASM_CALL_CONSTRAINT
 #define __HYPERCALL_1PARAM     __HYPERCALL_0PARAM, "+r" (__arg1)
 #define __HYPERCALL_2PARAM     __HYPERCALL_1PARAM, "+r" (__arg2)
 #define __HYPERCALL_3PARAM     __HYPERCALL_2PARAM, "+r" (__arg3)
@@ -552,13 +551,13 @@ static inline void
 MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
                        struct desc_struct desc)
 {
-       u32 *p = (u32 *) &desc;
-
        mcl->op = __HYPERVISOR_update_descriptor;
        if (sizeof(maddr) == sizeof(long)) {
                mcl->args[0] = maddr;
                mcl->args[1] = *(unsigned long *)&desc;
        } else {
+               u32 *p = (u32 *)&desc;
+
                mcl->args[0] = maddr;
                mcl->args[1] = maddr >> 32;
                mcl->args[2] = *p++;
index 458da8509b75ecf9fb46769cc16965b02d7e4902..6db28f17ff2884e01122f2689b117e8ae63f9ec4 100644 (file)
@@ -27,6 +27,8 @@ static const struct pci_device_id amd_root_ids[] = {
        {}
 };
 
+#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704
+
 const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
@@ -37,6 +39,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
        {}
 };
 EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
@@ -48,6 +51,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
        {}
 };
 
@@ -402,11 +406,48 @@ void amd_flush_garts(void)
 }
 EXPORT_SYMBOL_GPL(amd_flush_garts);
 
+static void __fix_erratum_688(void *info)
+{
+#define MSR_AMD64_IC_CFG 0xC0011021
+
+       msr_set_bit(MSR_AMD64_IC_CFG, 3);
+       msr_set_bit(MSR_AMD64_IC_CFG, 14);
+}
+
+/* Apply erratum 688 fix so machines without a BIOS fix work. */
+static __init void fix_erratum_688(void)
+{
+       struct pci_dev *F4;
+       u32 val;
+
+       if (boot_cpu_data.x86 != 0x14)
+               return;
+
+       if (!amd_northbridges.num)
+               return;
+
+       F4 = node_to_amd_nb(0)->link;
+       if (!F4)
+               return;
+
+       if (pci_read_config_dword(F4, 0x164, &val))
+               return;
+
+       if (val & BIT(2))
+               return;
+
+       on_each_cpu(__fix_erratum_688, NULL, 0);
+
+       pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
+}
+
 static __init int init_amd_nbs(void)
 {
        amd_cache_northbridges();
        amd_cache_gart();
 
+       fix_erratum_688();
+
        return 0;
 }
 
index d705c769f77d52ce55e4f7d5d32b9853ceb40394..ff891772c9f86492d7ca2721b66619b2be61ed9d 100644 (file)
@@ -573,11 +573,21 @@ static u32 bdx_deadline_rev(void)
        return ~0U;
 }
 
+static u32 skx_deadline_rev(void)
+{
+       switch (boot_cpu_data.x86_mask) {
+       case 0x03: return 0x01000136;
+       case 0x04: return 0x02000014;
+       }
+
+       return ~0U;
+}
+
 static const struct x86_cpu_id deadline_match[] = {
        DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X,        hsx_deadline_rev),
        DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X,      0x0b000020),
        DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev),
-       DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_X,        0x02000014),
+       DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X,        skx_deadline_rev),
 
        DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE,     0x22),
        DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT,      0x20),
@@ -600,7 +610,8 @@ static void apic_check_deadline_errata(void)
        const struct x86_cpu_id *m;
        u32 rev;
 
-       if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+       if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
+           boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return;
 
        m = x86_match_cpu(deadline_match);
index 9862e2cd6d93da331052f664f49fd4ce1edb8d6b..d58184b7cd4438144e2d0ac3f4744d19ff4ffb31 100644 (file)
@@ -763,6 +763,16 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
        }
 }
 
+static void init_amd_zn(struct cpuinfo_x86 *c)
+{
+       /*
+        * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
+        * all up to and including B1.
+        */
+       if (c->x86_model <= 1 && c->x86_mask <= 1)
+               set_cpu_cap(c, X86_FEATURE_CPB);
+}
+
 static void init_amd(struct cpuinfo_x86 *c)
 {
        early_init_amd(c);
@@ -791,6 +801,7 @@ static void init_amd(struct cpuinfo_x86 *c)
        case 0x10: init_amd_gh(c); break;
        case 0x12: init_amd_ln(c); break;
        case 0x15: init_amd_bd(c); break;
+       case 0x17: init_amd_zn(c); break;
        }
 
        /* Enable workaround for FXSAVE leak */
index db684880d74ae47fbff37888ff31dd13b9a4653b..0af86d9242da0f6882f1f5252dfa659038c627ac 100644 (file)
 
 void __init check_bugs(void)
 {
-#ifdef CONFIG_X86_32
-       /*
-        * Regardless of whether PCID is enumerated, the SDM says
-        * that it can't be enabled in 32-bit mode.
-        */
-       setup_clear_cpu_cap(X86_FEATURE_PCID);
-#endif
-
        identify_boot_cpu();
 
        if (!IS_ENABLED(CONFIG_SMP)) {
index 775f10100d7febac27a84bb9c8deab24e119a8c8..c9176bae7fd8cdb0e85f6d60766a809b9b36aa42 100644 (file)
@@ -904,6 +904,14 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
        fpu__init_system(c);
+
+#ifdef CONFIG_X86_32
+       /*
+        * Regardless of whether PCID is enumerated, the SDM says
+        * that it can't be enabled in 32-bit mode.
+        */
+       setup_clear_cpu_cap(X86_FEATURE_PCID);
+#endif
 }
 
 void __init early_cpu_init(void)
index 24f749324c0f0f5d6f63b6810c71143f452d0fac..9990a71e311fbe321ce071a2b9870ef3098e3440 100644 (file)
@@ -831,7 +831,6 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
        } else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                unsigned int apicid, nshared, first, last;
 
-               this_leaf = this_cpu_ci->info_list + index;
                nshared = base->eax.split.num_threads_sharing + 1;
                apicid = cpu_data(cpu).apicid;
                first = apicid - (apicid % nshared);
index 098530a93bb7cc4e451687ae29a0b86447105861..debb974fd17d0badaa7d1bbf3e0c671a2909b583 100644 (file)
@@ -1,3 +1,6 @@
+#ifndef __X86_MCE_INTERNAL_H__
+#define __X86_MCE_INTERNAL_H__
+
 #include <linux/device.h>
 #include <asm/mce.h>
 
@@ -108,3 +111,7 @@ static inline void mce_work_trigger(void)   { }
 static inline void mce_register_injector_chain(struct notifier_block *nb)      { }
 static inline void mce_unregister_injector_chain(struct notifier_block *nb)    { }
 #endif
+
+extern struct mca_config mca_cfg;
+
+#endif /* __X86_MCE_INTERNAL_H__ */
index 40e28ed77fbf017aee3ddb5c8dd128ff8ac2b826..486f640b02efd1313911c494588bb7875e7e5759 100644 (file)
@@ -28,6 +28,8 @@
 #include <asm/msr.h>
 #include <asm/trace/irq_vectors.h>
 
+#include "mce-internal.h"
+
 #define NR_BLOCKS         5
 #define THRESHOLD_MAX     0xFFF
 #define INT_TYPE_APIC     0x00020000
index 86e8f0b2537b3eaedd2da204d67c94947a1b16f1..c4fa4a85d4cb6f4e0cb4b34cd20467879167463d 100644 (file)
@@ -122,9 +122,6 @@ static bool __init check_loader_disabled_bsp(void)
        bool *res = &dis_ucode_ldr;
 #endif
 
-       if (!have_cpuid_p())
-               return *res;
-
        /*
         * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
         * completely accurate as xen pv guests don't see that CPUID bit set but
@@ -166,24 +163,36 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
 void __init load_ucode_bsp(void)
 {
        unsigned int cpuid_1_eax;
+       bool intel = true;
 
-       if (check_loader_disabled_bsp())
+       if (!have_cpuid_p())
                return;
 
        cpuid_1_eax = native_cpuid_eax(1);
 
        switch (x86_cpuid_vendor()) {
        case X86_VENDOR_INTEL:
-               if (x86_family(cpuid_1_eax) >= 6)
-                       load_ucode_intel_bsp();
+               if (x86_family(cpuid_1_eax) < 6)
+                       return;
                break;
+
        case X86_VENDOR_AMD:
-               if (x86_family(cpuid_1_eax) >= 0x10)
-                       load_ucode_amd_bsp(cpuid_1_eax);
+               if (x86_family(cpuid_1_eax) < 0x10)
+                       return;
+               intel = false;
                break;
+
        default:
-               break;
+               return;
        }
+
+       if (check_loader_disabled_bsp())
+               return;
+
+       if (intel)
+               load_ucode_intel_bsp();
+       else
+               load_ucode_amd_bsp(cpuid_1_eax);
 }
 
 static bool check_loader_disabled_ap(void)
index 8f7a9bbad514efbec73ed1beb167c7d8432521b2..7dbcb7adf7975f7f29c38651c23c478ad315a34c 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/mm.h>
 
 #include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/setup.h>
@@ -918,6 +919,18 @@ static int get_ucode_fw(void *to, const void *from, size_t n)
        return 0;
 }
 
+static bool is_blacklisted(unsigned int cpu)
+{
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+       if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
+               pr_err_once("late loading on model 79 is disabled.\n");
+               return true;
+       }
+
+       return false;
+}
+
 static enum ucode_state request_microcode_fw(int cpu, struct device *device,
                                             bool refresh_fw)
 {
@@ -926,6 +939,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
        const struct firmware *firmware;
        enum ucode_state ret;
 
+       if (is_blacklisted(cpu))
+               return UCODE_NFOUND;
+
        sprintf(name, "intel-ucode/%02x-%02x-%02x",
                c->x86, c->x86_model, c->x86_mask);
 
@@ -950,6 +966,9 @@ static int get_ucode_user(void *to, const void *from, size_t n)
 static enum ucode_state
 request_microcode_user(int cpu, const void __user *buf, size_t size)
 {
+       if (is_blacklisted(cpu))
+               return UCODE_NFOUND;
+
        return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
 }
 
index e1114f070c2dfdedf9911cd587afb7cca1769785..f92a6593de1ec651d244d43d16acbd8d49959e4b 100644 (file)
@@ -100,7 +100,7 @@ void __kernel_fpu_begin(void)
 
        kernel_fpu_disable();
 
-       if (fpu->fpregs_active) {
+       if (fpu->initialized) {
                /*
                 * Ignore return value -- we don't care if reg state
                 * is clobbered.
@@ -116,7 +116,7 @@ void __kernel_fpu_end(void)
 {
        struct fpu *fpu = &current->thread.fpu;
 
-       if (fpu->fpregs_active)
+       if (fpu->initialized)
                copy_kernel_to_fpregs(&fpu->state);
 
        kernel_fpu_enable();
@@ -148,7 +148,7 @@ void fpu__save(struct fpu *fpu)
 
        preempt_disable();
        trace_x86_fpu_before_save(fpu);
-       if (fpu->fpregs_active) {
+       if (fpu->initialized) {
                if (!copy_fpregs_to_fpstate(fpu)) {
                        copy_kernel_to_fpregs(&fpu->state);
                }
@@ -189,10 +189,9 @@ EXPORT_SYMBOL_GPL(fpstate_init);
 
 int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
-       dst_fpu->fpregs_active = 0;
        dst_fpu->last_cpu = -1;
 
-       if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
+       if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
                return 0;
 
        WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -206,26 +205,14 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
        /*
         * Save current FPU registers directly into the child
         * FPU context, without any memory-to-memory copying.
-        * In lazy mode, if the FPU context isn't loaded into
-        * fpregs, CR0.TS will be set and do_device_not_available
-        * will load the FPU context.
         *
-        * We have to do all this with preemption disabled,
-        * mostly because of the FNSAVE case, because in that
-        * case we must not allow preemption in the window
-        * between the FNSAVE and us marking the context lazy.
-        *
-        * It shouldn't be an issue as even FNSAVE is plenty
-        * fast in terms of critical section length.
+        * ( The function 'fails' in the FNSAVE case, which destroys
+        *   register contents so we have to copy them back. )
         */
-       preempt_disable();
        if (!copy_fpregs_to_fpstate(dst_fpu)) {
-               memcpy(&src_fpu->state, &dst_fpu->state,
-                      fpu_kernel_xstate_size);
-
+               memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size);
                copy_kernel_to_fpregs(&src_fpu->state);
        }
-       preempt_enable();
 
        trace_x86_fpu_copy_src(src_fpu);
        trace_x86_fpu_copy_dst(dst_fpu);
@@ -237,45 +224,48 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
  * Activate the current task's in-memory FPU context,
  * if it has not been used before:
  */
-void fpu__activate_curr(struct fpu *fpu)
+void fpu__initialize(struct fpu *fpu)
 {
        WARN_ON_FPU(fpu != &current->thread.fpu);
 
-       if (!fpu->fpstate_active) {
+       if (!fpu->initialized) {
                fpstate_init(&fpu->state);
                trace_x86_fpu_init_state(fpu);
 
                trace_x86_fpu_activate_state(fpu);
                /* Safe to do for the current task: */
-               fpu->fpstate_active = 1;
+               fpu->initialized = 1;
        }
 }
-EXPORT_SYMBOL_GPL(fpu__activate_curr);
+EXPORT_SYMBOL_GPL(fpu__initialize);
 
 /*
  * This function must be called before we read a task's fpstate.
  *
- * If the task has not used the FPU before then initialize its
- * fpstate.
+ * There's two cases where this gets called:
+ *
+ * - for the current task (when coredumping), in which case we have
+ *   to save the latest FPU registers into the fpstate,
+ *
+ * - or it's called for stopped tasks (ptrace), in which case the
+ *   registers were already saved by the context-switch code when
+ *   the task scheduled out - we only have to initialize the registers
+ *   if they've never been initialized.
  *
  * If the task has used the FPU before then save it.
  */
-void fpu__activate_fpstate_read(struct fpu *fpu)
+void fpu__prepare_read(struct fpu *fpu)
 {
-       /*
-        * If fpregs are active (in the current CPU), then
-        * copy them to the fpstate:
-        */
-       if (fpu->fpregs_active) {
+       if (fpu == &current->thread.fpu) {
                fpu__save(fpu);
        } else {
-               if (!fpu->fpstate_active) {
+               if (!fpu->initialized) {
                        fpstate_init(&fpu->state);
                        trace_x86_fpu_init_state(fpu);
 
                        trace_x86_fpu_activate_state(fpu);
                        /* Safe to do for current and for stopped child tasks: */
-                       fpu->fpstate_active = 1;
+                       fpu->initialized = 1;
                }
        }
 }
@@ -283,17 +273,17 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
 /*
  * This function must be called before we write a task's fpstate.
  *
- * If the task has used the FPU before then unlazy it.
+ * If the task has used the FPU before then invalidate any cached FPU registers.
  * If the task has not used the FPU before then initialize its fpstate.
  *
  * After this function call, after registers in the fpstate are
  * modified and the child task has woken up, the child task will
  * restore the modified FPU state from the modified context. If we
- * didn't clear its lazy status here then the lazy in-registers
+ * didn't clear its cached status here then the cached in-registers
  * state pending on its former CPU could be restored, corrupting
  * the modifications.
  */
-void fpu__activate_fpstate_write(struct fpu *fpu)
+void fpu__prepare_write(struct fpu *fpu)
 {
        /*
         * Only stopped child tasks can be used to modify the FPU
@@ -301,8 +291,8 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
         */
        WARN_ON_FPU(fpu == &current->thread.fpu);
 
-       if (fpu->fpstate_active) {
-               /* Invalidate any lazy state: */
+       if (fpu->initialized) {
+               /* Invalidate any cached state: */
                __fpu_invalidate_fpregs_state(fpu);
        } else {
                fpstate_init(&fpu->state);
@@ -310,73 +300,10 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
 
                trace_x86_fpu_activate_state(fpu);
                /* Safe to do for stopped child tasks: */
-               fpu->fpstate_active = 1;
+               fpu->initialized = 1;
        }
 }
 
-/*
- * This function must be called before we write the current
- * task's fpstate.
- *
- * This call gets the current FPU register state and moves
- * it in to the 'fpstate'.  Preemption is disabled so that
- * no writes to the 'fpstate' can occur from context
- * swiches.
- *
- * Must be followed by a fpu__current_fpstate_write_end().
- */
-void fpu__current_fpstate_write_begin(void)
-{
-       struct fpu *fpu = &current->thread.fpu;
-
-       /*
-        * Ensure that the context-switching code does not write
-        * over the fpstate while we are doing our update.
-        */
-       preempt_disable();
-
-       /*
-        * Move the fpregs in to the fpu's 'fpstate'.
-        */
-       fpu__activate_fpstate_read(fpu);
-
-       /*
-        * The caller is about to write to 'fpu'.  Ensure that no
-        * CPU thinks that its fpregs match the fpstate.  This
-        * ensures we will not be lazy and skip a XRSTOR in the
-        * future.
-        */
-       __fpu_invalidate_fpregs_state(fpu);
-}
-
-/*
- * This function must be paired with fpu__current_fpstate_write_begin()
- *
- * This will ensure that the modified fpstate gets placed back in
- * the fpregs if necessary.
- *
- * Note: This function may be called whether or not an _actual_
- * write to the fpstate occurred.
- */
-void fpu__current_fpstate_write_end(void)
-{
-       struct fpu *fpu = &current->thread.fpu;
-
-       /*
-        * 'fpu' now has an updated copy of the state, but the
-        * registers may still be out of date.  Update them with
-        * an XRSTOR if they are active.
-        */
-       if (fpregs_active())
-               copy_kernel_to_fpregs(&fpu->state);
-
-       /*
-        * Our update is done and the fpregs/fpstate are in sync
-        * if necessary.  Context switches can happen again.
-        */
-       preempt_enable();
-}
-
 /*
  * 'fpu__restore()' is called to copy FPU registers from
  * the FPU fpstate to the live hw registers and to activate
@@ -389,7 +316,7 @@ void fpu__current_fpstate_write_end(void)
  */
 void fpu__restore(struct fpu *fpu)
 {
-       fpu__activate_curr(fpu);
+       fpu__initialize(fpu);
 
        /* Avoid __kernel_fpu_begin() right after fpregs_activate() */
        kernel_fpu_disable();
@@ -414,15 +341,17 @@ void fpu__drop(struct fpu *fpu)
 {
        preempt_disable();
 
-       if (fpu->fpregs_active) {
-               /* Ignore delayed exceptions from user space */
-               asm volatile("1: fwait\n"
-                            "2:\n"
-                            _ASM_EXTABLE(1b, 2b));
-               fpregs_deactivate(fpu);
+       if (fpu == &current->thread.fpu) {
+               if (fpu->initialized) {
+                       /* Ignore delayed exceptions from user space */
+                       asm volatile("1: fwait\n"
+                                    "2:\n"
+                                    _ASM_EXTABLE(1b, 2b));
+                       fpregs_deactivate(fpu);
+               }
        }
 
-       fpu->fpstate_active = 0;
+       fpu->initialized = 0;
 
        trace_x86_fpu_dropped(fpu);
 
@@ -462,9 +391,11 @@ void fpu__clear(struct fpu *fpu)
         * Make sure fpstate is cleared and initialized.
         */
        if (static_cpu_has(X86_FEATURE_FPU)) {
-               fpu__activate_curr(fpu);
+               preempt_disable();
+               fpu__initialize(fpu);
                user_fpu_begin();
                copy_init_fpstate_to_fpregs();
+               preempt_enable();
        }
 }
 
index d5d44c452624c88e3abb5c75e68d00b55fee6019..7affb7e3d9a5b94326b51528119787f4f956640b 100644 (file)
@@ -240,7 +240,7 @@ static void __init fpu__init_system_ctx_switch(void)
        WARN_ON_FPU(!on_boot_cpu);
        on_boot_cpu = 0;
 
-       WARN_ON_FPU(current->thread.fpu.fpstate_active);
+       WARN_ON_FPU(current->thread.fpu.initialized);
 }
 
 /*
index b188b16841e376574c5f39e55b7b687ffdd09495..3ea15137238964cc6f972b2d11c83cb619de55bd 100644 (file)
@@ -16,14 +16,14 @@ int regset_fpregs_active(struct task_struct *target, const struct user_regset *r
 {
        struct fpu *target_fpu = &target->thread.fpu;
 
-       return target_fpu->fpstate_active ? regset->n : 0;
+       return target_fpu->initialized ? regset->n : 0;
 }
 
 int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
 {
        struct fpu *target_fpu = &target->thread.fpu;
 
-       if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->fpstate_active)
+       if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized)
                return regset->n;
        else
                return 0;
@@ -38,7 +38,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
        if (!boot_cpu_has(X86_FEATURE_FXSR))
                return -ENODEV;
 
-       fpu__activate_fpstate_read(fpu);
+       fpu__prepare_read(fpu);
        fpstate_sanitize_xstate(fpu);
 
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -55,7 +55,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
        if (!boot_cpu_has(X86_FEATURE_FXSR))
                return -ENODEV;
 
-       fpu__activate_fpstate_write(fpu);
+       fpu__prepare_write(fpu);
        fpstate_sanitize_xstate(fpu);
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -89,10 +89,13 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
 
        xsave = &fpu->state.xsave;
 
-       fpu__activate_fpstate_read(fpu);
+       fpu__prepare_read(fpu);
 
        if (using_compacted_format()) {
-               ret = copyout_from_xsaves(pos, count, kbuf, ubuf, xsave);
+               if (kbuf)
+                       ret = copy_xstate_to_kernel(kbuf, xsave, pos, count);
+               else
+                       ret = copy_xstate_to_user(ubuf, xsave, pos, count);
        } else {
                fpstate_sanitize_xstate(fpu);
                /*
@@ -129,28 +132,29 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
 
        xsave = &fpu->state.xsave;
 
-       fpu__activate_fpstate_write(fpu);
+       fpu__prepare_write(fpu);
 
-       if (boot_cpu_has(X86_FEATURE_XSAVES))
-               ret = copyin_to_xsaves(kbuf, ubuf, xsave);
-       else
+       if (using_compacted_format()) {
+               if (kbuf)
+                       ret = copy_kernel_to_xstate(xsave, kbuf);
+               else
+                       ret = copy_user_to_xstate(xsave, ubuf);
+       } else {
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
-
-       /*
-        * In case of failure, mark all states as init:
-        */
-       if (ret)
-               fpstate_init(&fpu->state);
+               if (!ret)
+                       ret = validate_xstate_header(&xsave->header);
+       }
 
        /*
         * mxcsr reserved bits must be masked to zero for security reasons.
         */
        xsave->i387.mxcsr &= mxcsr_feature_mask;
-       xsave->header.xfeatures &= xfeatures_mask;
+
        /*
-        * These bits must be zero.
+        * In case of failure, mark all states as init:
         */
-       memset(&xsave->header.reserved, 0, 48);
+       if (ret)
+               fpstate_init(&fpu->state);
 
        return ret;
 }
@@ -299,7 +303,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
        struct fpu *fpu = &target->thread.fpu;
        struct user_i387_ia32_struct env;
 
-       fpu__activate_fpstate_read(fpu);
+       fpu__prepare_read(fpu);
 
        if (!boot_cpu_has(X86_FEATURE_FPU))
                return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
@@ -329,7 +333,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
        struct user_i387_ia32_struct env;
        int ret;
 
-       fpu__activate_fpstate_write(fpu);
+       fpu__prepare_write(fpu);
        fpstate_sanitize_xstate(fpu);
 
        if (!boot_cpu_has(X86_FEATURE_FPU))
@@ -369,7 +373,7 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
        struct fpu *fpu = &tsk->thread.fpu;
        int fpvalid;
 
-       fpvalid = fpu->fpstate_active;
+       fpvalid = fpu->initialized;
        if (fpvalid)
                fpvalid = !fpregs_get(tsk, NULL,
                                      0, sizeof(struct user_i387_ia32_struct),
index 83c23c230b4c4fc78664ba08663a04c684c2105b..fb639e70048f58dfaef5576ab73e6998fa8ebbd1 100644 (file)
@@ -155,7 +155,8 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
  */
 int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
 {
-       struct xregs_state *xsave = &current->thread.fpu.state.xsave;
+       struct fpu *fpu = &current->thread.fpu;
+       struct xregs_state *xsave = &fpu->state.xsave;
        struct task_struct *tsk = current;
        int ia32_fxstate = (buf != buf_fx);
 
@@ -170,13 +171,13 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
                        sizeof(struct user_i387_ia32_struct), NULL,
                        (struct _fpstate_32 __user *) buf) ? -1 : 1;
 
-       if (fpregs_active() || using_compacted_format()) {
+       if (fpu->initialized || using_compacted_format()) {
                /* Save the live register state to the user directly. */
                if (copy_fpregs_to_sigframe(buf_fx))
                        return -1;
                /* Update the thread's fxstate to save the fsave header. */
                if (ia32_fxstate)
-                       copy_fxregs_to_kernel(&tsk->thread.fpu);
+                       copy_fxregs_to_kernel(fpu);
        } else {
                /*
                 * It is a *bug* if kernel uses compacted-format for xsave
@@ -189,7 +190,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
                        return -1;
                }
 
-               fpstate_sanitize_xstate(&tsk->thread.fpu);
+               fpstate_sanitize_xstate(fpu);
                if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
                        return -1;
        }
@@ -213,8 +214,11 @@ sanitize_restored_xstate(struct task_struct *tsk,
        struct xstate_header *header = &xsave->header;
 
        if (use_xsave()) {
-               /* These bits must be zero. */
-               memset(header->reserved, 0, 48);
+               /*
+                * Note: we don't need to zero the reserved bits in the
+                * xstate_header here because we either didn't copy them at all,
+                * or we checked earlier that they aren't set.
+                */
 
                /*
                 * Init the state that is not present in the memory
@@ -223,7 +227,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
                if (fx_only)
                        header->xfeatures = XFEATURE_MASK_FPSSE;
                else
-                       header->xfeatures &= (xfeatures_mask & xfeatures);
+                       header->xfeatures &= xfeatures;
        }
 
        if (use_fxsr()) {
@@ -279,7 +283,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
        if (!access_ok(VERIFY_READ, buf, size))
                return -EACCES;
 
-       fpu__activate_curr(fpu);
+       fpu__initialize(fpu);
 
        if (!static_cpu_has(X86_FEATURE_FPU))
                return fpregs_soft_set(current, NULL,
@@ -307,28 +311,29 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                /*
                 * For 32-bit frames with fxstate, copy the user state to the
                 * thread's fpu state, reconstruct fxstate from the fsave
-                * header. Sanitize the copied state etc.
+                * header. Validate and sanitize the copied state.
                 */
                struct fpu *fpu = &tsk->thread.fpu;
                struct user_i387_ia32_struct env;
                int err = 0;
 
                /*
-                * Drop the current fpu which clears fpu->fpstate_active. This ensures
+                * Drop the current fpu which clears fpu->initialized. This ensures
                 * that any context-switch during the copy of the new state,
                 * avoids the intermediate state from getting restored/saved.
                 * Thus avoiding the new restored state from getting corrupted.
                 * We will be ready to restore/save the state only after
-                * fpu->fpstate_active is again set.
+                * fpu->initialized is again set.
                 */
                fpu__drop(fpu);
 
                if (using_compacted_format()) {
-                       err = copyin_to_xsaves(NULL, buf_fx,
-                                              &fpu->state.xsave);
+                       err = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
                } else {
-                       err = __copy_from_user(&fpu->state.xsave,
-                                              buf_fx, state_size);
+                       err = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
+
+                       if (!err && state_size > offsetof(struct xregs_state, header))
+                               err = validate_xstate_header(&fpu->state.xsave.header);
                }
 
                if (err || __copy_from_user(&env, buf, sizeof(env))) {
@@ -339,7 +344,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                        sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
                }
 
-               fpu->fpstate_active = 1;
+               fpu->initialized = 1;
                preempt_disable();
                fpu__restore(fpu);
                preempt_enable();
index c24ac1efb12d7a1574450a1359699309148afcd6..f1d5476c902209eebeae83d3cd30fe5b6226921c 100644 (file)
@@ -483,6 +483,30 @@ int using_compacted_format(void)
        return boot_cpu_has(X86_FEATURE_XSAVES);
 }
 
+/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
+int validate_xstate_header(const struct xstate_header *hdr)
+{
+       /* No unknown or supervisor features may be set */
+       if (hdr->xfeatures & (~xfeatures_mask | XFEATURE_MASK_SUPERVISOR))
+               return -EINVAL;
+
+       /* Userspace must use the uncompacted format */
+       if (hdr->xcomp_bv)
+               return -EINVAL;
+
+       /*
+        * If 'reserved' is shrunken to add a new field, make sure to validate
+        * that new field here!
+        */
+       BUILD_BUG_ON(sizeof(hdr->reserved) != 48);
+
+       /* No reserved bits may be set */
+       if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
+               return -EINVAL;
+
+       return 0;
+}
+
 static void __xstate_dump_leaves(void)
 {
        int i;
@@ -867,7 +891,7 @@ const void *get_xsave_field_ptr(int xsave_state)
 {
        struct fpu *fpu = &current->thread.fpu;
 
-       if (!fpu->fpstate_active)
+       if (!fpu->initialized)
                return NULL;
        /*
         * fpu__save() takes the CPU's xstate registers
@@ -920,39 +944,130 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 }
 #endif /* ! CONFIG_ARCH_HAS_PKEYS */
 
+/*
+ * Weird legacy quirk: SSE and YMM states store information in the
+ * MXCSR and MXCSR_FLAGS fields of the FP area. That means if the FP
+ * area is marked as unused in the xfeatures header, we need to copy
+ * MXCSR and MXCSR_FLAGS if either SSE or YMM are in use.
+ */
+static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
+{
+       if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM)))
+               return false;
+
+       if (xfeatures & XFEATURE_MASK_FP)
+               return false;
+
+       return true;
+}
+
 /*
  * This is similar to user_regset_copyout(), but will not add offset to
  * the source data pointer or increment pos, count, kbuf, and ubuf.
  */
-static inline int xstate_copyout(unsigned int pos, unsigned int count,
-                                void *kbuf, void __user *ubuf,
-                                const void *data, const int start_pos,
-                                const int end_pos)
+static inline void
+__copy_xstate_to_kernel(void *kbuf, const void *data,
+                       unsigned int offset, unsigned int size, unsigned int size_total)
 {
-       if ((count == 0) || (pos < start_pos))
-               return 0;
+       if (offset < size_total) {
+               unsigned int copy = min(size, size_total - offset);
 
-       if (end_pos < 0 || pos < end_pos) {
-               unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos));
+               memcpy(kbuf + offset, data, copy);
+       }
+}
 
-               if (kbuf) {
-                       memcpy(kbuf + pos, data, copy);
-               } else {
-                       if (__copy_to_user(ubuf + pos, data, copy))
-                               return -EFAULT;
+/*
+ * Convert from kernel XSAVES compacted format to standard format and copy
+ * to a kernel-space ptrace buffer.
+ *
+ * It supports partial copy but pos always starts from zero. This is called
+ * from xstateregs_get() and there we check the CPU has XSAVES.
+ */
+int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
+{
+       unsigned int offset, size;
+       struct xstate_header header;
+       int i;
+
+       /*
+        * Currently copy_regset_to_user() starts from pos 0:
+        */
+       if (unlikely(offset_start != 0))
+               return -EFAULT;
+
+       /*
+        * The destination is a ptrace buffer; we put in only user xstates:
+        */
+       memset(&header, 0, sizeof(header));
+       header.xfeatures = xsave->header.xfeatures;
+       header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
+
+       /*
+        * Copy xregs_state->header:
+        */
+       offset = offsetof(struct xregs_state, header);
+       size = sizeof(header);
+
+       __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total);
+
+       for (i = 0; i < XFEATURE_MAX; i++) {
+               /*
+                * Copy only in-use xstates:
+                */
+               if ((header.xfeatures >> i) & 1) {
+                       void *src = __raw_xsave_addr(xsave, 1 << i);
+
+                       offset = xstate_offsets[i];
+                       size = xstate_sizes[i];
+
+                       /* The next component has to fit fully into the output buffer: */
+                       if (offset + size > size_total)
+                               break;
+
+                       __copy_xstate_to_kernel(kbuf, src, offset, size, size_total);
                }
+
+       }
+
+       if (xfeatures_mxcsr_quirk(header.xfeatures)) {
+               offset = offsetof(struct fxregs_state, mxcsr);
+               size = MXCSR_AND_FLAGS_SIZE;
+               __copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total);
+       }
+
+       /*
+        * Fill xsave->i387.sw_reserved value for ptrace frame:
+        */
+       offset = offsetof(struct fxregs_state, sw_reserved);
+       size = sizeof(xstate_fx_sw_bytes);
+
+       __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total);
+
+       return 0;
+}
+
+static inline int
+__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int offset, unsigned int size, unsigned int size_total)
+{
+       if (!size)
+               return 0;
+
+       if (offset < size_total) {
+               unsigned int copy = min(size, size_total - offset);
+
+               if (__copy_to_user(ubuf + offset, data, copy))
+                       return -EFAULT;
        }
        return 0;
 }
 
 /*
  * Convert from kernel XSAVES compacted format to standard format and copy
- * to a ptrace buffer. It supports partial copy but pos always starts from
+ * to a user-space buffer. It supports partial copy but pos always starts from
  * zero. This is called from xstateregs_get() and there we check the CPU
  * has XSAVES.
  */
-int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
-                       void __user *ubuf, struct xregs_state *xsave)
+int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
 {
        unsigned int offset, size;
        int ret, i;
@@ -961,7 +1076,7 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
        /*
         * Currently copy_regset_to_user() starts from pos 0:
         */
-       if (unlikely(pos != 0))
+       if (unlikely(offset_start != 0))
                return -EFAULT;
 
        /*
@@ -977,8 +1092,7 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
        offset = offsetof(struct xregs_state, header);
        size = sizeof(header);
 
-       ret = xstate_copyout(offset, size, kbuf, ubuf, &header, 0, count);
-
+       ret = __copy_xstate_to_user(ubuf, &header, offset, size, size_total);
        if (ret)
                return ret;
 
@@ -992,25 +1106,30 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
                        offset = xstate_offsets[i];
                        size = xstate_sizes[i];
 
-                       ret = xstate_copyout(offset, size, kbuf, ubuf, src, 0, count);
+                       /* The next component has to fit fully into the output buffer: */
+                       if (offset + size > size_total)
+                               break;
 
+                       ret = __copy_xstate_to_user(ubuf, src, offset, size, size_total);
                        if (ret)
                                return ret;
-
-                       if (offset + size >= count)
-                               break;
                }
 
        }
 
+       if (xfeatures_mxcsr_quirk(header.xfeatures)) {
+               offset = offsetof(struct fxregs_state, mxcsr);
+               size = MXCSR_AND_FLAGS_SIZE;
+               __copy_xstate_to_user(ubuf, &xsave->i387.mxcsr, offset, size, size_total);
+       }
+
        /*
         * Fill xsave->i387.sw_reserved value for ptrace frame:
         */
        offset = offsetof(struct fxregs_state, sw_reserved);
        size = sizeof(xstate_fx_sw_bytes);
 
-       ret = xstate_copyout(offset, size, kbuf, ubuf, xstate_fx_sw_bytes, 0, count);
-
+       ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, size_total);
        if (ret)
                return ret;
 
@@ -1018,55 +1137,98 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
 }
 
 /*
- * Convert from a ptrace standard-format buffer to kernel XSAVES format
- * and copy to the target thread. This is called from xstateregs_set() and
- * there we check the CPU has XSAVES and a whole standard-sized buffer
- * exists.
+ * Convert from a ptrace standard-format kernel buffer to kernel XSAVES format
+ * and copy to the target thread. This is called from xstateregs_set().
  */
-int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
-                    struct xregs_state *xsave)
+int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
 {
        unsigned int offset, size;
        int i;
-       u64 xfeatures;
-       u64 allowed_features;
+       struct xstate_header hdr;
 
        offset = offsetof(struct xregs_state, header);
-       size = sizeof(xfeatures);
+       size = sizeof(hdr);
 
-       if (kbuf) {
-               memcpy(&xfeatures, kbuf + offset, size);
-       } else {
-               if (__copy_from_user(&xfeatures, ubuf + offset, size))
-                       return -EFAULT;
+       memcpy(&hdr, kbuf + offset, size);
+
+       if (validate_xstate_header(&hdr))
+               return -EINVAL;
+
+       for (i = 0; i < XFEATURE_MAX; i++) {
+               u64 mask = ((u64)1 << i);
+
+               if (hdr.xfeatures & mask) {
+                       void *dst = __raw_xsave_addr(xsave, 1 << i);
+
+                       offset = xstate_offsets[i];
+                       size = xstate_sizes[i];
+
+                       memcpy(dst, kbuf + offset, size);
+               }
+       }
+
+       if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
+               offset = offsetof(struct fxregs_state, mxcsr);
+               size = MXCSR_AND_FLAGS_SIZE;
+               memcpy(&xsave->i387.mxcsr, kbuf + offset, size);
        }
 
        /*
-        * Reject if the user sets any disabled or supervisor features:
+        * The state that came in from userspace was user-state only.
+        * Mask all the user states out of 'xfeatures':
+        */
+       xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR;
+
+       /*
+        * Add back in the features that came in from userspace:
         */
-       allowed_features = xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR;
+       xsave->header.xfeatures |= hdr.xfeatures;
 
-       if (xfeatures & ~allowed_features)
+       return 0;
+}
+
+/*
+ * Convert from a ptrace or sigreturn standard-format user-space buffer to
+ * kernel XSAVES format and copy to the target thread. This is called from
+ * xstateregs_set(), as well as potentially from the sigreturn() and
+ * rt_sigreturn() system calls.
+ */
+int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
+{
+       unsigned int offset, size;
+       int i;
+       struct xstate_header hdr;
+
+       offset = offsetof(struct xregs_state, header);
+       size = sizeof(hdr);
+
+       if (__copy_from_user(&hdr, ubuf + offset, size))
+               return -EFAULT;
+
+       if (validate_xstate_header(&hdr))
                return -EINVAL;
 
        for (i = 0; i < XFEATURE_MAX; i++) {
                u64 mask = ((u64)1 << i);
 
-               if (xfeatures & mask) {
+               if (hdr.xfeatures & mask) {
                        void *dst = __raw_xsave_addr(xsave, 1 << i);
 
                        offset = xstate_offsets[i];
                        size = xstate_sizes[i];
 
-                       if (kbuf) {
-                               memcpy(dst, kbuf + offset, size);
-                       } else {
-                               if (__copy_from_user(dst, ubuf + offset, size))
-                                       return -EFAULT;
-                       }
+                       if (__copy_from_user(dst, ubuf + offset, size))
+                               return -EFAULT;
                }
        }
 
+       if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
+               offset = offsetof(struct fxregs_state, mxcsr);
+               size = MXCSR_AND_FLAGS_SIZE;
+               if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size))
+                       return -EFAULT;
+       }
+
        /*
         * The state that came in from userspace was user-state only.
         * Mask all the user states out of 'xfeatures':
@@ -1076,7 +1238,7 @@ int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
        /*
         * Add back in the features that came in from userspace:
         */
-       xsave->header.xfeatures |= xfeatures;
+       xsave->header.xfeatures |= hdr.xfeatures;
 
        return 0;
 }
index cf2ce063f65aff6166d51aa74e56d1ebdd22bb41..2902ca4d5993e4194db0959c223a6d2d4dd3b98c 100644 (file)
@@ -30,10 +30,11 @@ static void __init i386_default_early_setup(void)
 
 asmlinkage __visible void __init i386_start_kernel(void)
 {
-       cr4_init_shadow();
-
+       /* Make sure IDT is set up before any exception happens */
        idt_setup_early_handler();
 
+       cr4_init_shadow();
+
        sanitize_boot_params(&boot_params);
 
        x86_early_init_platform_quirks();
index 1f38d9a4d9deaf707af2b7e658bd9e9022ba8d75..d4eb450144fdb42c4f667b9f5b1a69d91aa78a2e 100644 (file)
@@ -64,7 +64,7 @@ static void call_on_stack(void *func, void *stack)
 
 static inline void *current_stack(void)
 {
-       return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
+       return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
 }
 
 static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
@@ -88,7 +88,7 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
 
        /* Save the next esp at the bottom of the stack */
        prev_esp = (u32 *)irqstk;
-       *prev_esp = current_stack_pointer();
+       *prev_esp = current_stack_pointer;
 
        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);
@@ -139,7 +139,7 @@ void do_softirq_own_stack(void)
 
        /* Push the previous esp onto the stack */
        prev_esp = (u32 *)irqstk;
-       *prev_esp = current_stack_pointer();
+       *prev_esp = current_stack_pointer;
 
        call_on_stack(__do_softirq, isp);
 }
index db2182d63ed0c40ae9057ea100314d0a7c7de8fb..3fc0f9a794cbdecb3b8749766af6255ec156bd71 100644 (file)
@@ -3,6 +3,15 @@
 
 /* Kprobes and Optprobes common header */
 
+#include <asm/asm.h>
+
+#ifdef CONFIG_FRAME_POINTER
+# define SAVE_RBP_STRING "     push %" _ASM_BP "\n" \
+                        "      mov  %" _ASM_SP ", %" _ASM_BP "\n"
+#else
+# define SAVE_RBP_STRING "     push %" _ASM_BP "\n"
+#endif
+
 #ifdef CONFIG_X86_64
 #define SAVE_REGS_STRING                       \
        /* Skip cs, ip, orig_ax. */             \
@@ -17,7 +26,7 @@
        "       pushq %r10\n"                   \
        "       pushq %r11\n"                   \
        "       pushq %rbx\n"                   \
-       "       pushq %rbp\n"                   \
+       SAVE_RBP_STRING                         \
        "       pushq %r12\n"                   \
        "       pushq %r13\n"                   \
        "       pushq %r14\n"                   \
@@ -48,7 +57,7 @@
        "       pushl %es\n"                    \
        "       pushl %ds\n"                    \
        "       pushl %eax\n"                   \
-       "       pushl %ebp\n"                   \
+       SAVE_RBP_STRING                         \
        "       pushl %edi\n"                   \
        "       pushl %esi\n"                   \
        "       pushl %edx\n"                   \
index f0153714ddac6b2305645ef5c0e35fd3a6c9fb2c..0742491cbb734d29e1be790d890aeb5271ea6eb4 100644 (file)
@@ -1080,8 +1080,6 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
         * raw stack chunk with redzones:
         */
        __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
-       regs->flags &= ~X86_EFLAGS_IF;
-       trace_hardirqs_off();
        regs->ip = (unsigned long)(jp->entry);
 
        /*
index 4b0592ca9e47b332d0ce67f8bcf5f555653587b2..8c1cc08f514f4362bdaefa933fda3cd3769b04f9 100644 (file)
@@ -299,7 +299,7 @@ static int __init create_setup_data_nodes(struct kobject *parent)
        return 0;
 
 out_clean_nodes:
-       for (j = i - 1; j > 0; j--)
+       for (j = i - 1; j >= 0; j--)
                cleanup_setup_data_node(*(kobjp + j));
        kfree(kobjp);
 out_setup_data_kobj:
index aa60a08b65b1090392b542ec7dc642e9827b7606..8bb9594d076166ee0f6bd4f70350fe3ecf8c3d8b 100644 (file)
@@ -117,7 +117,11 @@ static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
        return NULL;
 }
 
-void kvm_async_pf_task_wait(u32 token)
+/*
+ * @interrupt_kernel: Is this called from a routine which interrupts the kernel
+ *                   (other than user space)?
+ */
+void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
 {
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
@@ -140,7 +144,10 @@ void kvm_async_pf_task_wait(u32 token)
 
        n.token = token;
        n.cpu = smp_processor_id();
-       n.halted = is_idle_task(current) || preempt_count() > 1;
+       n.halted = is_idle_task(current) ||
+                  (IS_ENABLED(CONFIG_PREEMPT_COUNT)
+                   ? preempt_count() > 1 || rcu_preempt_depth()
+                   : interrupt_kernel);
        init_swait_queue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        raw_spin_unlock(&b->lock);
@@ -268,7 +275,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                prev_state = exception_enter();
-               kvm_async_pf_task_wait((u32)read_cr2());
+               kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
index 54180fa6f66fa8fe04c7528f054d642db522b617..add33f600531d7ca01c41d79be31383a49464f5e 100644 (file)
@@ -105,6 +105,10 @@ void __noreturn machine_real_restart(unsigned int type)
        load_cr3(initial_page_table);
 #else
        write_cr3(real_mode_header->trampoline_pgd);
+
+       /* Exiting long mode will fail if CR4.PCIDE is set. */
+       if (static_cpu_has(X86_FEATURE_PCID))
+               cr4_clear_bits(X86_CR4_PCIDE);
 #endif
 
        /* Jump to the identity-mapped low memory code */
index e04442345fc0977cf73f2573f77b3df71310f0a8..4e188fda59612ed70b98342e8580cd1f311ed141 100644 (file)
@@ -263,7 +263,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
                sp = (unsigned long) ka->sa.sa_restorer;
        }
 
-       if (fpu->fpstate_active) {
+       if (fpu->initialized) {
                sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
                                          &buf_fx, &math_size);
                *fpstate = (void __user *)sp;
@@ -279,7 +279,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
                return (void __user *)-1L;
 
        /* save i387 and extended state */
-       if (fpu->fpstate_active &&
+       if (fpu->initialized &&
            copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
                return (void __user *)-1L;
 
@@ -755,7 +755,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
                /*
                 * Ensure the signal handler starts with the new fpu state.
                 */
-               if (fpu->fpstate_active)
+               if (fpu->initialized)
                        fpu__clear(fpu);
        }
        signal_setup_done(failed, ksig, stepping);
index 0854ff1692745adf4831e2deea0cb203977847d9..ad59edd84de70cfb978b8c0bc2ac38b892418b71 100644 (file)
@@ -232,12 +232,6 @@ static void notrace start_secondary(void *unused)
         */
        if (boot_cpu_has(X86_FEATURE_PCID))
                __write_cr4(__read_cr4() | X86_CR4_PCIDE);
-       cpu_init();
-       x86_cpuinit.early_percpu_clock_init();
-       preempt_disable();
-       smp_callin();
-
-       enable_start_cpu0 = 0;
 
 #ifdef CONFIG_X86_32
        /* switch away from the initial page table */
@@ -245,6 +239,13 @@ static void notrace start_secondary(void *unused)
        __flush_tlb_all();
 #endif
 
+       cpu_init();
+       x86_cpuinit.early_percpu_clock_init();
+       preempt_disable();
+       smp_callin();
+
+       enable_start_cpu0 = 0;
+
        /* otherwise gcc will move up smp_processor_id before the cpu_init */
        barrier();
        /*
index 34ea3651362ef8383fb456f24125ec4881f42668..67db4f43309ecadc86f4d7e95c6a0db0650a0d18 100644 (file)
@@ -142,7 +142,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
         * from double_fault.
         */
        BUG_ON((unsigned long)(current_top_of_stack() -
-                              current_stack_pointer()) >= THREAD_SIZE);
+                              current_stack_pointer) >= THREAD_SIZE);
 
        preempt_enable_no_resched();
 }
index d145a0b1f529877b67e16992ca4c3830869f9950..3dc26f95d46e8a1ea439dba8ae144bae0dc9444d 100644 (file)
@@ -44,7 +44,8 @@ static void unwind_dump(struct unwind_state *state)
                        state->stack_info.type, state->stack_info.next_sp,
                        state->stack_mask, state->graph_idx);
 
-       for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+       for (sp = PTR_ALIGN(state->orig_sp, sizeof(long)); sp;
+            sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
                if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
                        break;
 
@@ -174,6 +175,7 @@ static bool is_last_task_frame(struct unwind_state *state)
  * This determines if the frame pointer actually contains an encoded pointer to
  * pt_regs on the stack.  See ENCODE_FRAME_POINTER.
  */
+#ifdef CONFIG_X86_64
 static struct pt_regs *decode_frame_pointer(unsigned long *bp)
 {
        unsigned long regs = (unsigned long)bp;
@@ -183,6 +185,23 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp)
 
        return (struct pt_regs *)(regs & ~0x1);
 }
+#else
+static struct pt_regs *decode_frame_pointer(unsigned long *bp)
+{
+       unsigned long regs = (unsigned long)bp;
+
+       if (regs & 0x80000000)
+               return NULL;
+
+       return (struct pt_regs *)(regs | 0x80000000);
+}
+#endif
+
+#ifdef CONFIG_X86_32
+#define KERNEL_REGS_SIZE (sizeof(struct pt_regs) - 2*sizeof(long))
+#else
+#define KERNEL_REGS_SIZE (sizeof(struct pt_regs))
+#endif
 
 static bool update_stack_state(struct unwind_state *state,
                               unsigned long *next_bp)
@@ -202,7 +221,7 @@ static bool update_stack_state(struct unwind_state *state,
        regs = decode_frame_pointer(next_bp);
        if (regs) {
                frame = (unsigned long *)regs;
-               len = regs_size(regs);
+               len = KERNEL_REGS_SIZE;
                state->got_irq = true;
        } else {
                frame = next_bp;
@@ -226,6 +245,14 @@ static bool update_stack_state(struct unwind_state *state,
            frame < prev_frame_end)
                return false;
 
+       /*
+        * On 32-bit with user mode regs, make sure the last two regs are safe
+        * to access:
+        */
+       if (IS_ENABLED(CONFIG_X86_32) && regs && user_mode(regs) &&
+           !on_stack(info, frame, len + 2*sizeof(long)))
+               return false;
+
        /* Move state to the next frame: */
        if (regs) {
                state->regs = regs;
@@ -328,6 +355,13 @@ bad_address:
            state->regs->sp < (unsigned long)task_pt_regs(state->task))
                goto the_end;
 
+       /*
+        * There are some known frame pointer issues on 32-bit.  Disable
+        * unwinder warnings on 32-bit until it gets objtool support.
+        */
+       if (IS_ENABLED(CONFIG_X86_32))
+               goto the_end;
+
        if (state->regs) {
                printk_deferred_once(KERN_WARNING
                        "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
index 570b70d3f604a12ebb06a77c6290ba0d72824ee5..b95007e7c1b305e24ee63e728e003d53ff7a5c31 100644 (file)
@@ -86,8 +86,8 @@ static struct orc_entry *orc_find(unsigned long ip)
                idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;
 
                if (unlikely((idx >= lookup_num_blocks-1))) {
-                       orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%lx\n",
-                                idx, lookup_num_blocks, ip);
+                       orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
+                                idx, lookup_num_blocks, (void *)ip);
                        return NULL;
                }
 
@@ -96,8 +96,8 @@ static struct orc_entry *orc_find(unsigned long ip)
 
                if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
                             (__start_orc_unwind + stop > __stop_orc_unwind))) {
-                       orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%lx\n",
-                                idx, lookup_num_blocks, start, stop, ip);
+                       orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
+                                idx, lookup_num_blocks, start, stop, (void *)ip);
                        return NULL;
                }
 
@@ -373,7 +373,7 @@ bool unwind_next_frame(struct unwind_state *state)
 
        case ORC_REG_R10:
                if (!state->regs || !state->full_regs) {
-                       orc_warn("missing regs for base reg R10 at ip %p\n",
+                       orc_warn("missing regs for base reg R10 at ip %pB\n",
                                 (void *)state->ip);
                        goto done;
                }
@@ -382,7 +382,7 @@ bool unwind_next_frame(struct unwind_state *state)
 
        case ORC_REG_R13:
                if (!state->regs || !state->full_regs) {
-                       orc_warn("missing regs for base reg R13 at ip %p\n",
+                       orc_warn("missing regs for base reg R13 at ip %pB\n",
                                 (void *)state->ip);
                        goto done;
                }
@@ -391,7 +391,7 @@ bool unwind_next_frame(struct unwind_state *state)
 
        case ORC_REG_DI:
                if (!state->regs || !state->full_regs) {
-                       orc_warn("missing regs for base reg DI at ip %p\n",
+                       orc_warn("missing regs for base reg DI at ip %pB\n",
                                 (void *)state->ip);
                        goto done;
                }
@@ -400,7 +400,7 @@ bool unwind_next_frame(struct unwind_state *state)
 
        case ORC_REG_DX:
                if (!state->regs || !state->full_regs) {
-                       orc_warn("missing regs for base reg DX at ip %p\n",
+                       orc_warn("missing regs for base reg DX at ip %pB\n",
                                 (void *)state->ip);
                        goto done;
                }
@@ -408,7 +408,7 @@ bool unwind_next_frame(struct unwind_state *state)
                break;
 
        default:
-               orc_warn("unknown SP base reg %d for ip %p\n",
+               orc_warn("unknown SP base reg %d for ip %pB\n",
                         orc->sp_reg, (void *)state->ip);
                goto done;
        }
@@ -436,7 +436,7 @@ bool unwind_next_frame(struct unwind_state *state)
 
        case ORC_TYPE_REGS:
                if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) {
-                       orc_warn("can't dereference registers at %p for ip %p\n",
+                       orc_warn("can't dereference registers at %p for ip %pB\n",
                                 (void *)sp, (void *)orig_ip);
                        goto done;
                }
@@ -448,7 +448,7 @@ bool unwind_next_frame(struct unwind_state *state)
 
        case ORC_TYPE_REGS_IRET:
                if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) {
-                       orc_warn("can't dereference iret registers at %p for ip %p\n",
+                       orc_warn("can't dereference iret registers at %p for ip %pB\n",
                                 (void *)sp, (void *)orig_ip);
                        goto done;
                }
@@ -465,7 +465,8 @@ bool unwind_next_frame(struct unwind_state *state)
                break;
 
        default:
-               orc_warn("unknown .orc_unwind entry type %d\n", orc->type);
+               orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
+                        orc->type, (void *)orig_ip);
                break;
        }
 
@@ -487,7 +488,7 @@ bool unwind_next_frame(struct unwind_state *state)
                break;
 
        default:
-               orc_warn("unknown BP base reg %d for ip %p\n",
+               orc_warn("unknown BP base reg %d for ip %pB\n",
                         orc->bp_reg, (void *)orig_ip);
                goto done;
        }
@@ -496,7 +497,7 @@ bool unwind_next_frame(struct unwind_state *state)
        if (state->stack_info.type == prev_type &&
            on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
            state->sp <= prev_sp) {
-               orc_warn("stack going in the wrong direction? ip=%p\n",
+               orc_warn("stack going in the wrong direction? ip=%pB\n",
                         (void *)orig_ip);
                goto done;
        }
index 3ea624452f9327d252dda724100bd6e97f8c6e0c..3c48bc8bf08c7bc6afe896a2dae21d4f22d8f58e 100644 (file)
@@ -23,6 +23,7 @@ config KVM
        depends on HIGH_RES_TIMERS
        # for TASKSTATS/TASK_DELAY_ACCT:
        depends on NET && MULTIUSER
+       depends on X86_LOCAL_APIC
        select PREEMPT_NOTIFIERS
        select MMU_NOTIFIER
        select ANON_INODES
index 16bf6655aa858e2815135ada11a462329a0df386..d90cdc77e077354f1407235e6b73f2fda21c430c 100644 (file)
@@ -425,8 +425,10 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
        #op " %al \n\t" \
        FOP_RET
 
-asm(".global kvm_fastop_exception \n"
-    "kvm_fastop_exception: xor %esi, %esi; ret");
+asm(".pushsection .fixup, \"ax\"\n"
+    ".global kvm_fastop_exception \n"
+    "kvm_fastop_exception: xor %esi, %esi; ret\n"
+    ".popsection");
 
 FOP_START(setcc)
 FOP_SETCC(seto)
@@ -4102,10 +4104,12 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
                ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                if (efer & EFER_LMA) {
                        u64 maxphyaddr;
-                       u32 eax = 0x80000008;
+                       u32 eax, ebx, ecx, edx;
 
-                       if (ctxt->ops->get_cpuid(ctxt, &eax, NULL, NULL,
-                                                NULL, false))
+                       eax = 0x80000008;
+                       ecx = 0;
+                       if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
+                                                &edx, false))
                                maxphyaddr = eax & 0xff;
                        else
                                maxphyaddr = 36;
@@ -5296,7 +5300,6 @@ static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
 
 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
 {
-       register void *__sp asm(_ASM_SP);
        ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
 
        if (!(ctxt->d & ByteOp))
@@ -5304,7 +5307,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
 
        asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
            : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
-             [fastop]"+S"(fop), "+r"(__sp)
+             [fastop]"+S"(fop), ASM_CALL_CONSTRAINT
            : "c"(ctxt->src2.val));
 
        ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
index eca30c1eb1d97cd367c6b2b76396991b79328b1a..7a69cf053711197df9a0f2ec284ef5a436c42514 100644 (file)
@@ -3837,7 +3837,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                vcpu->arch.apf.host_apf_reason = 0;
                local_irq_disable();
-               kvm_async_pf_task_wait(fault_address);
+               kvm_async_pf_task_wait(fault_address, 0);
                local_irq_enable();
                break;
        case KVM_PV_REASON_PAGE_READY:
@@ -3973,13 +3973,6 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 static inline bool is_last_gpte(struct kvm_mmu *mmu,
                                unsigned level, unsigned gpte)
 {
-       /*
-        * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
-        * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
-        * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
-        */
-       gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
-
        /*
         * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
         * If it is clear, there are no large pages at this level, so clear
@@ -3987,6 +3980,13 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
         */
        gpte &= level - mmu->last_nonleaf_level;
 
+       /*
+        * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
+        * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
+        * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
+        */
+       gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
+
        return gpte & PT_PAGE_SIZE_MASK;
 }
 
@@ -4555,6 +4555,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 
        update_permission_bitmask(vcpu, context, true);
        update_pkru_bitmask(vcpu, context, true);
+       update_last_nonleaf_level(vcpu, context);
        reset_rsvds_bits_mask_ept(vcpu, context, execonly);
        reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }
index 86b68dc5a6493152b9032f1fda970b31ca8d755a..f18d1f8d332b8a2b0512c1f6d8d85fe4ca303ef3 100644 (file)
@@ -334,10 +334,11 @@ retry_walk:
                --walker->level;
 
                index = PT_INDEX(addr, walker->level);
-
                table_gfn = gpte_to_gfn(pte);
                offset    = index * sizeof(pt_element_t);
                pte_gpa   = gfn_to_gpa(table_gfn) + offset;
+
+               BUG_ON(walker->level < 1);
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;
 
index 06c0c6d0541e9bf95eabbcaa8d20c8ec45f19496..95a01609d7eea13633a6d53383a6e2859463c3e5 100644 (file)
@@ -200,6 +200,8 @@ struct loaded_vmcs {
        int cpu;
        bool launched;
        bool nmi_known_unmasked;
+       unsigned long vmcs_host_cr3;    /* May not match real cr3 */
+       unsigned long vmcs_host_cr4;    /* May not match real cr4 */
        struct list_head loaded_vmcss_on_cpu_link;
 };
 
@@ -600,8 +602,6 @@ struct vcpu_vmx {
                int           gs_ldt_reload_needed;
                int           fs_reload_needed;
                u64           msr_host_bndcfgs;
-               unsigned long vmcs_host_cr3;    /* May not match real cr3 */
-               unsigned long vmcs_host_cr4;    /* May not match real cr4 */
        } host_state;
        struct {
                int vm86_active;
@@ -2202,46 +2202,44 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
        struct pi_desc old, new;
        unsigned int dest;
 
-       if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
-               !kvm_vcpu_apicv_active(vcpu))
+       /*
+        * In case of hot-plug or hot-unplug, we may have to undo
+        * vmx_vcpu_pi_put even if there is no assigned device.  And we
+        * always keep PI.NDST up to date for simplicity: it makes the
+        * code easier, and CPU migration is not a fast path.
+        */
+       if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
+               return;
+
+       /*
+        * First handle the simple case where no cmpxchg is necessary; just
+        * allow posting non-urgent interrupts.
+        *
+        * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
+        * PI.NDST: pi_post_block will do it for us and the wakeup_handler
+        * expects the VCPU to be on the blocked_vcpu_list that matches
+        * PI.NDST.
+        */
+       if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
+           vcpu->cpu == cpu) {
+               pi_clear_sn(pi_desc);
                return;
+       }
 
+       /* The full case.  */
        do {
                old.control = new.control = pi_desc->control;
 
-               /*
-                * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there
-                * are two possible cases:
-                * 1. After running 'pre_block', context switch
-                *    happened. For this case, 'sn' was set in
-                *    vmx_vcpu_put(), so we need to clear it here.
-                * 2. After running 'pre_block', we were blocked,
-                *    and woken up by some other guy. For this case,
-                *    we don't need to do anything, 'pi_post_block'
-                *    will do everything for us. However, we cannot
-                *    check whether it is case #1 or case #2 here
-                *    (maybe, not needed), so we also clear sn here,
-                *    I think it is not a big deal.
-                */
-               if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
-                       if (vcpu->cpu != cpu) {
-                               dest = cpu_physical_id(cpu);
-
-                               if (x2apic_enabled())
-                                       new.ndst = dest;
-                               else
-                                       new.ndst = (dest << 8) & 0xFF00;
-                       }
+               dest = cpu_physical_id(cpu);
 
-                       /* set 'NV' to 'notification vector' */
-                       new.nv = POSTED_INTR_VECTOR;
-               }
+               if (x2apic_enabled())
+                       new.ndst = dest;
+               else
+                       new.ndst = (dest << 8) & 0xFF00;
 
-               /* Allow posting non-urgent interrupts */
                new.sn = 0;
-       } while (cmpxchg(&pi_desc->control, old.control,
-                       new.control) != old.control);
+       } while (cmpxchg64(&pi_desc->control, old.control,
+                          new.control) != old.control);
 }
 
 static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
@@ -5077,21 +5075,30 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
        int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;
 
        if (vcpu->mode == IN_GUEST_MODE) {
-               struct vcpu_vmx *vmx = to_vmx(vcpu);
-
                /*
-                * Currently, we don't support urgent interrupt,
-                * all interrupts are recognized as non-urgent
-                * interrupt, so we cannot post interrupts when
-                * 'SN' is set.
+                * The vector of interrupt to be delivered to vcpu had
+                * been set in PIR before this function.
+                *
+                * Following cases will be reached in this block, and
+                * we always send a notification event in all cases as
+                * explained below.
+                *
+                * Case 1: vcpu keeps in non-root mode. Sending a
+                * notification event posts the interrupt to vcpu.
                 *
-                * If the vcpu is in guest mode, it means it is
-                * running instead of being scheduled out and
-                * waiting in the run queue, and that's the only
-                * case when 'SN' is set currently, warning if
-                * 'SN' is set.
+                * Case 2: vcpu exits to root mode and is still
+                * runnable. PIR will be synced to vIRR before the
+                * next vcpu entry. Sending a notification event in
+                * this case has no effect, as vcpu is not in root
+                * mode.
+                *
+                * Case 3: vcpu exits to root mode and is blocked.
+                * vcpu_block() has already synced PIR to vIRR and
+                * never blocks vcpu if vIRR is not cleared. Therefore,
+                * a blocked vcpu here does not wait for any requested
+                * interrupts in PIR, and sending a notification event
+                * which has no effect is safe here.
                 */
-               WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));
 
                apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
                return true;
@@ -5169,12 +5176,12 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
         */
        cr3 = __read_cr3();
        vmcs_writel(HOST_CR3, cr3);             /* 22.2.3  FIXME: shadow tables */
-       vmx->host_state.vmcs_host_cr3 = cr3;
+       vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
 
        /* Save the most likely value for this task's CR4 in the VMCS. */
        cr4 = cr4_read_shadow();
        vmcs_writel(HOST_CR4, cr4);                     /* 22.2.3, 22.2.5 */
-       vmx->host_state.vmcs_host_cr4 = cr4;
+       vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
 
        vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
 #ifdef CONFIG_X86_64
@@ -9036,7 +9043,6 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 {
        u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-       register void *__sp asm(_ASM_SP);
 
        if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
                        == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
@@ -9065,7 +9071,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_X86_64
                        [sp]"=&r"(tmp),
 #endif
-                       "+r"(__sp)
+                       ASM_CALL_CONSTRAINT
                        :
                        [entry]"r"(entry),
                        [ss]"i"(__KERNEL_DS),
@@ -9265,15 +9271,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
 
        cr3 = __get_current_cr3_fast();
-       if (unlikely(cr3 != vmx->host_state.vmcs_host_cr3)) {
+       if (unlikely(cr3 != vmx->loaded_vmcs->vmcs_host_cr3)) {
                vmcs_writel(HOST_CR3, cr3);
-               vmx->host_state.vmcs_host_cr3 = cr3;
+               vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
        }
 
        cr4 = cr4_read_shadow();
-       if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
+       if (unlikely(cr4 != vmx->loaded_vmcs->vmcs_host_cr4)) {
                vmcs_writel(HOST_CR4, cr4);
-               vmx->host_state.vmcs_host_cr4 = cr4;
+               vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
        }
 
        /* When single-stepping over STI and MOV SS, we must clear the
@@ -9583,6 +9589,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
        vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
 
+       /*
+        * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
+        * or POSTED_INTR_WAKEUP_VECTOR.
+        */
+       vmx->pi_desc.nv = POSTED_INTR_VECTOR;
+       vmx->pi_desc.sn = 1;
+
        return &vmx->vcpu;
 
 free_vmcs:
@@ -9831,7 +9844,8 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
 
        WARN_ON(!is_guest_mode(vcpu));
 
-       if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code)) {
+       if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
+               !to_vmx(vcpu)->nested.nested_run_pending) {
                vmcs12->vm_exit_intr_error_code = fault->error_code;
                nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
                                  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
@@ -11283,7 +11297,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 
        /* Same as above - no reason to call set_cr4_guest_host_mask().  */
        vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
-       kvm_set_cr4(vcpu, vmcs12->host_cr4);
+       vmx_set_cr4(vcpu, vmcs12->host_cr4);
 
        nested_ept_uninit_mmu_context(vcpu);
 
@@ -11696,6 +11710,37 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
        kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
 }
 
+static void __pi_post_block(struct kvm_vcpu *vcpu)
+{
+       struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+       struct pi_desc old, new;
+       unsigned int dest;
+
+       do {
+               old.control = new.control = pi_desc->control;
+               WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
+                    "Wakeup handler not enabled while the VCPU is blocked\n");
+
+               dest = cpu_physical_id(vcpu->cpu);
+
+               if (x2apic_enabled())
+                       new.ndst = dest;
+               else
+                       new.ndst = (dest << 8) & 0xFF00;
+
+               /* set 'NV' to 'notification vector' */
+               new.nv = POSTED_INTR_VECTOR;
+       } while (cmpxchg64(&pi_desc->control, old.control,
+                          new.control) != old.control);
+
+       if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
+               spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+               list_del(&vcpu->blocked_vcpu_list);
+               spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+               vcpu->pre_pcpu = -1;
+       }
+}
+
 /*
  * This routine does the following things for vCPU which is going
  * to be blocked if VT-d PI is enabled.
@@ -11711,7 +11756,6 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
  */
 static int pi_pre_block(struct kvm_vcpu *vcpu)
 {
-       unsigned long flags;
        unsigned int dest;
        struct pi_desc old, new;
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
@@ -11721,34 +11765,20 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
                !kvm_vcpu_apicv_active(vcpu))
                return 0;
 
-       vcpu->pre_pcpu = vcpu->cpu;
-       spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
-                         vcpu->pre_pcpu), flags);
-       list_add_tail(&vcpu->blocked_vcpu_list,
-                     &per_cpu(blocked_vcpu_on_cpu,
-                     vcpu->pre_pcpu));
-       spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock,
-                              vcpu->pre_pcpu), flags);
+       WARN_ON(irqs_disabled());
+       local_irq_disable();
+       if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
+               vcpu->pre_pcpu = vcpu->cpu;
+               spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+               list_add_tail(&vcpu->blocked_vcpu_list,
+                             &per_cpu(blocked_vcpu_on_cpu,
+                                      vcpu->pre_pcpu));
+               spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+       }
 
        do {
                old.control = new.control = pi_desc->control;
 
-               /*
-                * We should not block the vCPU if
-                * an interrupt is posted for it.
-                */
-               if (pi_test_on(pi_desc) == 1) {
-                       spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
-                                         vcpu->pre_pcpu), flags);
-                       list_del(&vcpu->blocked_vcpu_list);
-                       spin_unlock_irqrestore(
-                                       &per_cpu(blocked_vcpu_on_cpu_lock,
-                                       vcpu->pre_pcpu), flags);
-                       vcpu->pre_pcpu = -1;
-
-                       return 1;
-               }
-
                WARN((pi_desc->sn == 1),
                     "Warning: SN field of posted-interrupts "
                     "is set before blocking\n");
@@ -11770,10 +11800,15 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
 
                /* set 'NV' to 'wakeup vector' */
                new.nv = POSTED_INTR_WAKEUP_VECTOR;
-       } while (cmpxchg(&pi_desc->control, old.control,
-                       new.control) != old.control);
+       } while (cmpxchg64(&pi_desc->control, old.control,
+                          new.control) != old.control);
 
-       return 0;
+       /* We should not block the vCPU if an interrupt is posted for it.  */
+       if (pi_test_on(pi_desc) == 1)
+               __pi_post_block(vcpu);
+
+       local_irq_enable();
+       return (vcpu->pre_pcpu == -1);
 }
 
 static int vmx_pre_block(struct kvm_vcpu *vcpu)
@@ -11789,44 +11824,13 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
 
 static void pi_post_block(struct kvm_vcpu *vcpu)
 {
-       struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
-       struct pi_desc old, new;
-       unsigned int dest;
-       unsigned long flags;
-
-       if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
-               !kvm_vcpu_apicv_active(vcpu))
+       if (vcpu->pre_pcpu == -1)
                return;
 
-       do {
-               old.control = new.control = pi_desc->control;
-
-               dest = cpu_physical_id(vcpu->cpu);
-
-               if (x2apic_enabled())
-                       new.ndst = dest;
-               else
-                       new.ndst = (dest << 8) & 0xFF00;
-
-               /* Allow posting non-urgent interrupts */
-               new.sn = 0;
-
-               /* set 'NV' to 'notification vector' */
-               new.nv = POSTED_INTR_VECTOR;
-       } while (cmpxchg(&pi_desc->control, old.control,
-                       new.control) != old.control);
-
-       if(vcpu->pre_pcpu != -1) {
-               spin_lock_irqsave(
-                       &per_cpu(blocked_vcpu_on_cpu_lock,
-                       vcpu->pre_pcpu), flags);
-               list_del(&vcpu->blocked_vcpu_list);
-               spin_unlock_irqrestore(
-                       &per_cpu(blocked_vcpu_on_cpu_lock,
-                       vcpu->pre_pcpu), flags);
-               vcpu->pre_pcpu = -1;
-       }
+       WARN_ON(irqs_disabled());
+       local_irq_disable();
+       __pi_post_block(vcpu);
+       local_irq_enable();
 }
 
 static void vmx_post_block(struct kvm_vcpu *vcpu)
@@ -11911,12 +11915,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
 
                if (set)
                        ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
-               else {
-                       /* suppress notification event before unposting */
-                       pi_set_sn(vcpu_to_pi_desc(vcpu));
+               else
                        ret = irq_set_vcpu_affinity(host_irq, NULL);
-                       pi_clear_sn(vcpu_to_pi_desc(vcpu));
-               }
 
                if (ret < 0) {
                        printk(KERN_INFO "%s: failed to update PI IRTE\n",
index cd17b7d9a1076c1d28904bc4cd1ea06af5e0f2c0..03869eb7fcd67b64e54dcb67acd559a70fd61139 100644 (file)
@@ -7225,7 +7225,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        int r;
        sigset_t sigsaved;
 
-       fpu__activate_curr(fpu);
+       fpu__initialize(fpu);
 
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
index d4a7df2205b8de2576bc7cdbf8455f2fa9987b66..220638a4cb94ea3065e1aaca9caaad7d447c8560 100644 (file)
@@ -114,7 +114,7 @@ void math_emulate(struct math_emu_info *info)
        struct desc_struct code_descriptor;
        struct fpu *fpu = &current->thread.fpu;
 
-       fpu__activate_curr(fpu);
+       fpu__initialize(fpu);
 
 #ifdef RE_ENTRANT_CHECKING
        if (emulating) {
index 72bf8c01c6e3a58254cc915aded88eea8146f41a..e1f095884386d40f4a8dcc6a8500be6d67fd6dbd 100644 (file)
@@ -1,5 +1,12 @@
-# Kernel does not boot with instrumentation of tlb.c.
-KCOV_INSTRUMENT_tlb.o  := n
+# Kernel does not boot with instrumentation of tlb.c and mem_encrypt.c
+KCOV_INSTRUMENT_tlb.o          := n
+KCOV_INSTRUMENT_mem_encrypt.o  := n
+
+KASAN_SANITIZE_mem_encrypt.o   := n
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_mem_encrypt.o    = -pg
+endif
 
 obj-y  :=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
            pat.o pgtable.o physaddr.o setup_nx.o tlb.o
index c076f710de4cb4e283125f706226fc059fbebb5b..c3521e2be39610c3d932c34126fadd5203f28958 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/uaccess.h>
 #include <linux/sched/debug.h>
 
+#include <asm/fpu/internal.h>
 #include <asm/traps.h>
 #include <asm/kdebug.h>
 
@@ -78,6 +79,29 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup,
 }
 EXPORT_SYMBOL_GPL(ex_handler_refcount);
 
+/*
+ * Handler for when we fail to restore a task's FPU state.  We should never get
+ * here because the FPU state of a task using the FPU (task->thread.fpu.state)
+ * should always be valid.  However, past bugs have allowed userspace to set
+ * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
+ * These caused XRSTOR to fail when switching to the task, leaking the FPU
+ * registers of the task previously executing on the CPU.  Mitigate this class
+ * of vulnerability by restoring from the initial state (essentially, zeroing
+ * out all the FPU registers) if we can't restore from the task's FPU state.
+ */
+bool ex_handler_fprestore(const struct exception_table_entry *fixup,
+                         struct pt_regs *regs, int trapnr)
+{
+       regs->ip = ex_fixup_addr(fixup);
+
+       WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
+                 (void *)instruction_pointer(regs));
+
+       __copy_kernel_to_fpregs(&init_fpstate, -1);
+       return true;
+}
+EXPORT_SYMBOL_GPL(ex_handler_fprestore);
+
 bool ex_handler_ext(const struct exception_table_entry *fixup,
                   struct pt_regs *regs, int trapnr)
 {
index b836a7274e123af88edd7b4f261da88609778be9..e2baeaa053a5b9feb76a1587fb4756786ffd76db 100644 (file)
@@ -192,8 +192,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
  * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
  *          faulted on a pte with its pkey=4.
  */
-static void fill_sig_info_pkey(int si_code, siginfo_t *info,
-               struct vm_area_struct *vma)
+static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
 {
        /* This is effectively an #ifdef */
        if (!boot_cpu_has(X86_FEATURE_OSPKE))
@@ -209,7 +208,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
         * valid VMA, so we should never reach this without a
         * valid VMA.
         */
-       if (!vma) {
+       if (!pkey) {
                WARN_ONCE(1, "PKU fault with no VMA passed in");
                info->si_pkey = 0;
                return;
@@ -219,13 +218,12 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
         * absolutely guranteed to be 100% accurate because of
         * the race explained above.
         */
-       info->si_pkey = vma_pkey(vma);
+       info->si_pkey = *pkey;
 }
 
 static void
 force_sig_info_fault(int si_signo, int si_code, unsigned long address,
-                    struct task_struct *tsk, struct vm_area_struct *vma,
-                    int fault)
+                    struct task_struct *tsk, u32 *pkey, int fault)
 {
        unsigned lsb = 0;
        siginfo_t info;
@@ -240,7 +238,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
                lsb = PAGE_SHIFT;
        info.si_addr_lsb = lsb;
 
-       fill_sig_info_pkey(si_code, &info, vma);
+       fill_sig_info_pkey(si_code, &info, pkey);
 
        force_sig_info(si_signo, &info, tsk);
 }
@@ -762,8 +760,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
        struct task_struct *tsk = current;
        unsigned long flags;
        int sig;
-       /* No context means no VMA to pass down */
-       struct vm_area_struct *vma = NULL;
 
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs, X86_TRAP_PF)) {
@@ -788,7 +784,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 
                        /* XXX: hwpoison faults will set the wrong code. */
                        force_sig_info_fault(signal, si_code, address,
-                                            tsk, vma, 0);
+                                            tsk, NULL, 0);
                }
 
                /*
@@ -806,7 +802,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
        if (is_vmalloc_addr((void *)address) &&
            (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
             address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
-               register void *__sp asm("rsp");
                unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *);
                /*
                 * We're likely to be running with very little stack space
@@ -821,7 +816,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
                asm volatile ("movq %[stack], %%rsp\n\t"
                              "call handle_stack_overflow\n\t"
                              "1: jmp 1b"
-                             : "+r" (__sp)
+                             : ASM_CALL_CONSTRAINT
                              : "D" ("kernel stack overflow (page fault)"),
                                "S" (regs), "d" (address),
                                [stack] "rm" (stack));
@@ -897,8 +892,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 
 static void
 __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
-                      unsigned long address, struct vm_area_struct *vma,
-                      int si_code)
+                      unsigned long address, u32 *pkey, int si_code)
 {
        struct task_struct *tsk = current;
 
@@ -946,7 +940,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                tsk->thread.error_code  = error_code;
                tsk->thread.trap_nr     = X86_TRAP_PF;
 
-               force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);
+               force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0);
 
                return;
        }
@@ -959,9 +953,9 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 
 static noinline void
 bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
-                    unsigned long address, struct vm_area_struct *vma)
+                    unsigned long address, u32 *pkey)
 {
-       __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
+       __bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR);
 }
 
 static void
@@ -969,6 +963,10 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
           unsigned long address,  struct vm_area_struct *vma, int si_code)
 {
        struct mm_struct *mm = current->mm;
+       u32 pkey;
+
+       if (vma)
+               pkey = vma_pkey(vma);
 
        /*
         * Something tried to access memory that isn't in our memory map..
@@ -976,7 +974,8 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
         */
        up_read(&mm->mmap_sem);
 
-       __bad_area_nosemaphore(regs, error_code, address, vma, si_code);
+       __bad_area_nosemaphore(regs, error_code, address,
+                              (vma) ? &pkey : NULL, si_code);
 }
 
 static noinline void
@@ -1019,7 +1018,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 
 static void
 do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
-         struct vm_area_struct *vma, unsigned int fault)
+         u32 *pkey, unsigned int fault)
 {
        struct task_struct *tsk = current;
        int code = BUS_ADRERR;
@@ -1046,13 +1045,12 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
                code = BUS_MCEERR_AR;
        }
 #endif
-       force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
+       force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault);
 }
 
 static noinline void
 mm_fault_error(struct pt_regs *regs, unsigned long error_code,
-              unsigned long address, struct vm_area_struct *vma,
-              unsigned int fault)
+              unsigned long address, u32 *pkey, unsigned int fault)
 {
        if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
                no_context(regs, error_code, address, 0, 0);
@@ -1076,9 +1074,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
        } else {
                if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
                             VM_FAULT_HWPOISON_LARGE))
-                       do_sigbus(regs, error_code, address, vma, fault);
+                       do_sigbus(regs, error_code, address, pkey, fault);
                else if (fault & VM_FAULT_SIGSEGV)
-                       bad_area_nosemaphore(regs, error_code, address, vma);
+                       bad_area_nosemaphore(regs, error_code, address, pkey);
                else
                        BUG();
        }
@@ -1268,6 +1266,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
        struct mm_struct *mm;
        int fault, major = 0;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+       u32 pkey;
 
        tsk = current;
        mm = tsk->mm;
@@ -1468,9 +1467,10 @@ good_area:
                return;
        }
 
+       pkey = vma_pkey(vma);
        up_read(&mm->mmap_sem);
        if (unlikely(fault & VM_FAULT_ERROR)) {
-               mm_fault_error(regs, error_code, address, vma, fault);
+               mm_fault_error(regs, error_code, address, &pkey, fault);
                return;
        }
 
index 3fcc8e01683bef96b219d65dbdd0315db1f60605..16c5f37933a2ae2d120402f1871af93872aebb07 100644 (file)
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
  */
 
+#define DISABLE_BRANCH_PROFILING
+
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <linux/mm.h>
index 2dab69a706ec062affe14eb7d9f10fd8538a9018..d7bc0eea20a5ed2fc8ec43ebc06429517cbb362b 100644 (file)
@@ -18,7 +18,6 @@
 
 #include <asm/cpufeature.h>             /* boot_cpu_has, ...            */
 #include <asm/mmu_context.h>            /* vma_pkey()                   */
-#include <asm/fpu/internal.h>           /* fpregs_active()              */
 
 int __execute_only_pkey(struct mm_struct *mm)
 {
@@ -45,7 +44,7 @@ int __execute_only_pkey(struct mm_struct *mm)
         */
        preempt_disable();
        if (!need_to_set_mm_pkey &&
-           fpregs_active() &&
+           current->thread.fpu.initialized &&
            !__pkru_allows_read(read_pkru(), execute_only_pkey)) {
                preempt_enable();
                return execute_only_pkey;
index 1ab3821f9e2629df571544077d63be950361bc20..0f3d0cea4d00ca253ee5b46ed78158d460797a56 100644 (file)
@@ -30,6 +30,7 @@
 
 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
 
+
 static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
                            u16 *new_asid, bool *need_flush)
 {
@@ -80,7 +81,7 @@ void leave_mm(int cpu)
                return;
 
        /* Warn if we're not lazy. */
-       WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm)));
+       WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
 
        switch_mm(NULL, &init_mm, NULL);
 }
@@ -126,8 +127,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
         * isn't free.
         */
 #ifdef CONFIG_DEBUG_VM
-       if (WARN_ON_ONCE(__read_cr3() !=
-                        (__sme_pa(real_prev->pgd) | prev_asid))) {
+       if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) {
                /*
                 * If we were to BUG here, we'd be very likely to kill
                 * the system so hard that we don't see the call trace.
@@ -143,45 +143,24 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                __flush_tlb_all();
        }
 #endif
+       this_cpu_write(cpu_tlbstate.is_lazy, false);
 
        if (real_prev == next) {
-               VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
-                         next->context.ctx_id);
-
-               if (cpumask_test_cpu(cpu, mm_cpumask(next))) {
-                       /*
-                        * There's nothing to do: we weren't lazy, and we
-                        * aren't changing our mm.  We don't need to flush
-                        * anything, nor do we need to update CR3, CR4, or
-                        * LDTR.
-                        */
-                       return;
-               }
-
-               /* Resume remote flushes and then read tlb_gen. */
-               cpumask_set_cpu(cpu, mm_cpumask(next));
-               next_tlb_gen = atomic64_read(&next->context.tlb_gen);
-
-               if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) <
-                   next_tlb_gen) {
-                       /*
-                        * Ideally, we'd have a flush_tlb() variant that
-                        * takes the known CR3 value as input.  This would
-                        * be faster on Xen PV and on hypothetical CPUs
-                        * on which INVPCID is fast.
-                        */
-                       this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
-                                      next_tlb_gen);
-                       write_cr3(__sme_pa(next->pgd) | prev_asid);
-                       trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
-                                       TLB_FLUSH_ALL);
-               }
+               VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
+                          next->context.ctx_id);
 
                /*
-                * We just exited lazy mode, which means that CR4 and/or LDTR
-                * may be stale.  (Changes to the required CR4 and LDTR states
-                * are not reflected in tlb_gen.)
+                * We don't currently support having a real mm loaded without
+                * our cpu set in mm_cpumask().  We have all the bookkeeping
+                * in place to figure out whether we would need to flush
+                * if our cpu were cleared in mm_cpumask(), but we don't
+                * currently use it.
                 */
+               if (WARN_ON_ONCE(real_prev != &init_mm &&
+                                !cpumask_test_cpu(cpu, mm_cpumask(next))))
+                       cpumask_set_cpu(cpu, mm_cpumask(next));
+
+               return;
        } else {
                u16 new_asid;
                bool need_flush;
@@ -192,7 +171,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                         * mapped in the new pgd, we'll double-fault.  Forcibly
                         * map it.
                         */
-                       unsigned int index = pgd_index(current_stack_pointer());
+                       unsigned int index = pgd_index(current_stack_pointer);
                        pgd_t *pgd = next->pgd + index;
 
                        if (unlikely(pgd_none(*pgd)))
@@ -200,10 +179,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                }
 
                /* Stop remote flushes for the previous mm */
-               if (cpumask_test_cpu(cpu, mm_cpumask(real_prev)))
-                       cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
-
-               VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
+               VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
+                               real_prev != &init_mm);
+               cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
 
                /*
                 * Start remote flushes and then read tlb_gen.
@@ -216,12 +194,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                if (need_flush) {
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
-                       write_cr3(__sme_pa(next->pgd) | new_asid);
+                       write_cr3(build_cr3(next, new_asid));
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
                                        TLB_FLUSH_ALL);
                } else {
                        /* The new ASID is already up to date. */
-                       write_cr3(__sme_pa(next->pgd) | new_asid | CR3_NOFLUSH);
+                       write_cr3(build_cr3_noflush(next, new_asid));
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
                }
 
@@ -233,6 +211,40 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
        switch_ldt(real_prev, next);
 }
 
+/*
+ * Please ignore the name of this function.  It should be called
+ * switch_to_kernel_thread().
+ *
+ * enter_lazy_tlb() is a hint from the scheduler that we are entering a
+ * kernel thread or other context without an mm.  Acceptable implementations
+ * include doing nothing whatsoever, switching to init_mm, or various clever
+ * lazy tricks to try to minimize TLB flushes.
+ *
+ * The scheduler reserves the right to call enter_lazy_tlb() several times
+ * in a row.  It will notify us that we're going back to a real mm by
+ * calling switch_mm_irqs_off().
+ */
+void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+       if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
+               return;
+
+       if (tlb_defer_switch_to_init_mm()) {
+               /*
+                * There's a significant optimization that may be possible
+                * here.  We have accurate enough TLB flush tracking that we
+                * don't need to maintain coherence of TLB per se when we're
+                * lazy.  We do, however, need to maintain coherence of
+                * paging-structure caches.  We could, in principle, leave our
+                * old mm loaded and only switch to init_mm when
+                * tlb_remove_page() happens.
+                */
+               this_cpu_write(cpu_tlbstate.is_lazy, true);
+       } else {
+               switch_mm(NULL, &init_mm, NULL);
+       }
+}
+
 /*
  * Call this when reinitializing a CPU.  It fixes the following potential
  * problems:
@@ -265,7 +277,7 @@ void initialize_tlbstate_and_flush(void)
                !(cr4_read_shadow() & X86_CR4_PCIDE));
 
        /* Force ASID 0 and force a TLB flush. */
-       write_cr3(cr3 & ~CR3_PCID_MASK);
+       write_cr3(build_cr3(mm, 0));
 
        /* Reinitialize tlbstate. */
        this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
@@ -304,16 +316,20 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
        /* This code cannot presently handle being reentered. */
        VM_WARN_ON(!irqs_disabled());
 
+       if (unlikely(loaded_mm == &init_mm))
+               return;
+
        VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
                   loaded_mm->context.ctx_id);
 
-       if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))) {
+       if (this_cpu_read(cpu_tlbstate.is_lazy)) {
                /*
-                * We're in lazy mode -- don't flush.  We can get here on
-                * remote flushes due to races and on local flushes if a
-                * kernel thread coincidentally flushes the mm it's lazily
-                * still using.
+                * We're in lazy mode.  We need to at least flush our
+                * paging-structure cache to avoid speculatively reading
+                * garbage into our TLB.  Since switching to init_mm is barely
+                * slower than a minimal flush, just switch to init_mm.
                 */
+               switch_mm_irqs_off(NULL, &init_mm, NULL);
                return;
        }
 
index 8c9573660d51e6b0297e4464b3af2056d93b9f3c..0554e8aef4d54522393f78b8b7973f8dfac4750c 100644 (file)
@@ -284,9 +284,9 @@ static void emit_bpf_tail_call(u8 **pprog)
        /* if (index >= array->map.max_entries)
         *   goto out;
         */
-       EMIT4(0x48, 0x8B, 0x46,                   /* mov rax, qword ptr [rsi + 16] */
+       EMIT2(0x89, 0xD2);                        /* mov edx, edx */
+       EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
              offsetof(struct bpf_array, map.max_entries));
-       EMIT3(0x48, 0x39, 0xD0);                  /* cmp rax, rdx */
 #define OFFSET1 43 /* number of bytes to jump */
        EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
        label1 = cnt;
index 0e7ef69e853151207c985d5306927c09d335acdc..d669e9d890017770456abe458f1161eb2509c09e 100644 (file)
@@ -93,11 +93,11 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
        int rc;
 
        rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
-                                      "x86/xen/hvm_guest:prepare",
+                                      "x86/xen/guest:prepare",
                                       cpu_up_prepare_cb, cpu_dead_cb);
        if (rc >= 0) {
                rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-                                              "x86/xen/hvm_guest:online",
+                                              "x86/xen/guest:online",
                                               xen_cpu_up_online, NULL);
                if (rc < 0)
                        cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
index 509f560bd0c6d4731cac96fc64296184e6818b9c..71495f1a86d72f7df1464ce3ffe805d880948bbd 100644 (file)
@@ -1238,21 +1238,16 @@ static void __init xen_pagetable_cleanhighmap(void)
         * from _brk_limit way up to the max_pfn_mapped (which is the end of
         * the ramdisk). We continue on, erasing PMD entries that point to page
         * tables - do note that they are accessible at this stage via __va.
-        * For good measure we also round up to the PMD - which means that if
+        * As Xen is aligning the memory end to a 4MB boundary, for good
+        * measure we also round up to PMD_SIZE * 2 - which means that if
         * anybody is using __ka address to the initial boot-stack - and try
         * to use it - they are going to crash. The xen_start_info has been
         * taken care of already in xen_setup_kernel_pagetable. */
        addr = xen_start_info->pt_base;
-       size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
+       size = xen_start_info->nr_pt_frames * PAGE_SIZE;
 
-       xen_cleanhighmap(addr, addr + size);
+       xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
        xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
-#ifdef DEBUG
-       /* This is superfluous and is not necessary, but you know what
-        * lets do it. The MODULES_VADDR -> MODULES_END should be clear of
-        * anything at this stage. */
-       xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
-#endif
 }
 #endif
 
@@ -2220,7 +2215,7 @@ static void __init xen_write_cr3_init(unsigned long cr3)
  * not the first page table in the page table pool.
  * Iterate through the initial page tables to find the real page table base.
  */
-static phys_addr_t xen_find_pt_base(pmd_t *pmd)
+static phys_addr_t __init xen_find_pt_base(pmd_t *pmd)
 {
        phys_addr_t pt_base, paddr;
        unsigned pmdidx;
index 30ee8c608853d4fb4b238a01319589d38ce018b7..5b0027d4ecc05cff651baec243a612d8deea20bc 100644 (file)
@@ -208,11 +208,6 @@ struct mm_struct;
 /* Free all resources held by a thread. */
 #define release_thread(thread) do { } while(0)
 
-/* Copy and release all segment info associated with a VM */
-#define copy_segments(p, mm)   do { } while(0)
-#define release_segments(mm)   do { } while(0)
-#define forget_segments()      do { } while (0)
-
 extern unsigned long get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk)          (task_pt_regs(tsk)->pc)
index b38e962fa83e774c5c9121bbbe277124d13dd793..101c2a9b548150cd3f7bb5b28a460ef82c9e4a75 100644 (file)
@@ -1239,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
         */
        bmd->is_our_pages = map_data ? 0 : 1;
        memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
-       iov_iter_init(&bmd->iter, iter->type, bmd->iov,
-                       iter->nr_segs, iter->count);
+       bmd->iter = *iter;
+       bmd->iter.iov = bmd->iov;
 
        ret = -ENOMEM;
        bio = bio_kmalloc(gfp_mask, nr_pages);
@@ -1331,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
        int ret, offset;
        struct iov_iter i;
        struct iovec iov;
+       struct bio_vec *bvec;
 
        iov_for_each(iov, i, *iter) {
                unsigned long uaddr = (unsigned long) iov.iov_base;
@@ -1375,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
                ret = get_user_pages_fast(uaddr, local_nr_pages,
                                (iter->type & WRITE) != WRITE,
                                &pages[cur_page]);
-               if (ret < local_nr_pages) {
+               if (unlikely(ret < local_nr_pages)) {
+                       for (j = cur_page; j < page_limit; j++) {
+                               if (!pages[j])
+                                       break;
+                               put_page(pages[j]);
+                       }
                        ret = -EFAULT;
                        goto out_unmap;
                }
@@ -1383,6 +1389,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
                offset = offset_in_page(uaddr);
                for (j = cur_page; j < page_limit; j++) {
                        unsigned int bytes = PAGE_SIZE - offset;
+                       unsigned short prev_bi_vcnt = bio->bi_vcnt;
 
                        if (len <= 0)
                                break;
@@ -1397,6 +1404,13 @@ struct bio *bio_map_user_iov(struct request_queue *q,
                                            bytes)
                                break;
 
+                       /*
+                        * check if vector was merged with previous
+                        * drop page reference if needed
+                        */
+                       if (bio->bi_vcnt == prev_bi_vcnt)
+                               put_page(pages[j]);
+
                        len -= bytes;
                        offset = 0;
                }
@@ -1423,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
        return bio;
 
  out_unmap:
-       for (j = 0; j < nr_pages; j++) {
-               if (!pages[j])
-                       break;
-               put_page(pages[j]);
+       bio_for_each_segment_all(bvec, bio, j) {
+               put_page(bvec->bv_page);
        }
  out:
        kfree(pages);
index aebe676225e6fdf360f39760cacf7326cf78cd9f..048be4aa602446f93e3e981f1ece5659e3d7287b 100644 (file)
@@ -854,6 +854,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
        kobject_init(&q->kobj, &blk_queue_ktype);
 
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+       mutex_init(&q->blk_trace_mutex);
+#endif
        mutex_init(&q->sysfs_lock);
        spin_lock_init(&q->__queue_lock);
 
index 980e7309564332ee1c23e0d96b13996f27c0aeca..de294d775acfa413854c109eeaa08b9b6bdfd354 100644 (file)
@@ -815,10 +815,14 @@ int blk_mq_debugfs_register(struct request_queue *q)
                goto err;
 
        /*
-        * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir
+        * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
         * didn't exist yet (because we don't know what to name the directory
         * until the queue is registered to a gendisk).
         */
+       if (q->elevator && !q->sched_debugfs_dir)
+               blk_mq_debugfs_register_sched(q);
+
+       /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
                        goto err;
index 0fea76aa0f3ff0093c4867f10139d331589df53a..17816a028dcbb6fb7f7fe4e068fb90f313448ebf 100644 (file)
@@ -1911,11 +1911,11 @@ static void throtl_upgrade_state(struct throtl_data *td)
 
                tg->disptime = jiffies - 1;
                throtl_select_dispatch(sq);
-               throtl_schedule_next_dispatch(sq, false);
+               throtl_schedule_next_dispatch(sq, true);
        }
        rcu_read_unlock();
        throtl_select_dispatch(&td->service_queue);
-       throtl_schedule_next_dispatch(&td->service_queue, false);
+       throtl_schedule_next_dispatch(&td->service_queue, true);
        queue_work(kthrotld_workqueue, &td->dispatch_work);
 }
 
index c82408c7cc3c91181f91d32c0147e18781506691..15d25ccd51a5cad7c9505427888f88c15a9e5d99 100644 (file)
@@ -154,7 +154,6 @@ static int bsg_prepare_job(struct device *dev, struct request *req)
 failjob_rls_rqst_payload:
        kfree(job->request_payload.sg_list);
 failjob_rls_job:
-       kfree(job);
        return -ENOMEM;
 }
 
@@ -208,20 +207,34 @@ static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
        struct bsg_job *job = blk_mq_rq_to_pdu(req);
        struct scsi_request *sreq = &job->sreq;
 
+       /* called right after the request is allocated for the request_queue */
+
+       sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
+       if (!sreq->sense)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void bsg_initialize_rq(struct request *req)
+{
+       struct bsg_job *job = blk_mq_rq_to_pdu(req);
+       struct scsi_request *sreq = &job->sreq;
+       void *sense = sreq->sense;
+
+       /* called right before the request is given to the request_queue user */
+
        memset(job, 0, sizeof(*job));
 
        scsi_req_init(sreq);
+
+       sreq->sense = sense;
        sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
-       sreq->sense = kzalloc(sreq->sense_len, gfp);
-       if (!sreq->sense)
-               return -ENOMEM;
 
        job->req = req;
-       job->reply = sreq->sense;
+       job->reply = sense;
        job->reply_len = sreq->sense_len;
        job->dd_data = job + 1;
-
-       return 0;
 }
 
 static void bsg_exit_rq(struct request_queue *q, struct request *req)
@@ -252,6 +265,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
        q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
        q->init_rq_fn = bsg_init_rq;
        q->exit_rq_fn = bsg_exit_rq;
+       q->initialize_rq_fn = bsg_initialize_rq;
        q->request_fn = bsg_request_fn;
 
        ret = blk_init_allocated_queue(q);
index 86e8fe1adcdb7f1b10fd5acd8f5c36f712736971..88c555db4e5deee60d852b78b70a20589cc2813f 100644 (file)
@@ -112,7 +112,7 @@ ssize_t part_stat_show(struct device *dev,
                       struct device_attribute *attr, char *buf)
 {
        struct hd_struct *p = dev_to_part(dev);
-       struct request_queue *q = dev_to_disk(dev)->queue;
+       struct request_queue *q = part_to_disk(p)->queue;
        unsigned int inflight[2];
        int cpu;
 
index ffa9f4ccd9b455ef36c48d3ba57d4256574cd450..337cf382718ee3cd81ffe3cdc480052a6f634798 100644 (file)
@@ -619,14 +619,14 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
        struct af_alg_ctx *ctx = ask->private;
        struct af_alg_tsgl *sgl;
        struct scatterlist *sg;
-       unsigned int i, j;
+       unsigned int i, j = 0;
 
        while (!list_empty(&ctx->tsgl_list)) {
                sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl,
                                       list);
                sg = sgl->sg;
 
-               for (i = 0, j = 0; i < sgl->cur; i++) {
+               for (i = 0; i < sgl->cur; i++) {
                        size_t plen = min_t(size_t, used, sg[i].length);
                        struct page *page = sg_page(sg + i);
 
index e4b0ed386bc82f339829fea2dcedd42fe2f0aff5..39aecad286fe482ff3f44fe08b286c2edbf3553b 100644 (file)
@@ -57,6 +57,8 @@ struct key *find_asymmetric_key(struct key *keyring,
        char *req, *p;
        int len;
 
+       BUG_ON(!id_0 && !id_1);
+
        if (id_0) {
                lookup = id_0->data;
                len = id_0->len;
@@ -105,7 +107,7 @@ struct key *find_asymmetric_key(struct key *keyring,
        if (id_0 && id_1) {
                const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
 
-               if (!kids->id[0]) {
+               if (!kids->id[1]) {
                        pr_debug("First ID matches, but second is missing\n");
                        goto reject;
                }
index af4cd864911752478ba5f3c2732273f9624d434f..d140d8bb2c96140c408b1e3450f288e562372743 100644 (file)
@@ -88,6 +88,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg)
        bool want = false;
 
        sinfo = msg->signed_infos;
+       if (!sinfo)
+               goto inconsistent;
+
        if (sinfo->authattrs) {
                want = true;
                msg->have_authattrs = true;
index 633a88e93ab0c421b7b01a041726ad9563ce2d49..70018397e59abf10d125864f5770c41cd13f2021 100644 (file)
@@ -1133,10 +1133,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
 {
        if (!drbg)
                return;
-       kzfree(drbg->V);
-       drbg->Vbuf = NULL;
-       kzfree(drbg->C);
-       drbg->Cbuf = NULL;
+       kzfree(drbg->Vbuf);
+       drbg->V = NULL;
+       kzfree(drbg->Cbuf);
+       drbg->C = NULL;
        kzfree(drbg->scratchpadbuf);
        drbg->scratchpadbuf = NULL;
        drbg->reseed_ctr = 0;
index 5e31c8d776dfc8a144f25e70122ee77d335e196a..325a14da58278f01b8c1ffd92bdd8990db2860c4 100644 (file)
@@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
        int err;
 
        absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-       buffer = kmalloc(absize, GFP_KERNEL);
+       buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;
 
@@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req)
 
 int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 {
-       struct scatterlist *sg = req->src;
-       unsigned int offset = sg->offset;
        unsigned int nbytes = req->nbytes;
+       struct scatterlist *sg;
+       unsigned int offset;
        int err;
 
-       if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
+       if (nbytes &&
+           (sg = req->src, offset = sg->offset,
+            nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
                void *data;
 
                data = kmap_atomic(sg_page(sg));
index 4faa0fd53b0c120d39022ad726dbbe2c74f787bd..d5692e35fab1f069376f7c54358ff5e5f0cb352e 100644 (file)
@@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
 
 static int skcipher_walk_first(struct skcipher_walk *walk)
 {
-       walk->nbytes = 0;
-
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;
 
-       if (unlikely(!walk->total))
-               return 0;
-
        walk->buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
                int err = skcipher_copy_iv(walk);
@@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 
+       walk->total = req->cryptlen;
+       walk->nbytes = 0;
+
+       if (unlikely(!walk->total))
+               return 0;
+
        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);
 
-       walk->total = req->cryptlen;
        walk->iv = req->iv;
        walk->oiv = req->iv;
 
@@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int err;
 
+       walk->nbytes = 0;
+
+       if (unlikely(!walk->total))
+               return 0;
+
        walk->flags &= ~SKCIPHER_WALK_PHYS;
 
        scatterwalk_start(&walk->in, req->src);
index d86c11a8c882c37ee7a7c2c18e41d54aaec25af8..e31828ed00466cc08e8ee2e73bacb2d99c1e0a34 100644 (file)
@@ -554,8 +554,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
                ctx->name[len - 1] = 0;
 
                if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-                            "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME)
-                       return -ENAMETOOLONG;
+                            "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
+                       err = -ENAMETOOLONG;
+                       goto err_drop_spawn;
+               }
        } else
                goto err_drop_spawn;
 
index bf22c29d25179e937523c026c8028f644f3379f6..11b113f8e36741aeb00e921ee64a5ae871f8d55f 100644 (file)
@@ -66,7 +66,7 @@ void __init acpi_watchdog_init(void)
        for (i = 0; i < wdat->entries; i++) {
                const struct acpi_generic_address *gas;
                struct resource_entry *rentry;
-               struct resource res;
+               struct resource res = {};
                bool found;
 
                gas = &entries[i].register_region;
index 077f9bad6f44a57bd01e930f9dd740001eadcd76..3c3a37b8503bd43db4200230309ddeff96eb4e00 100644 (file)
@@ -743,17 +743,19 @@ static int ghes_proc(struct ghes *ghes)
        }
        ghes_do_proc(ghes, ghes->estatus);
 
+out:
+       ghes_clear_estatus(ghes);
+
+       if (rc == -ENOENT)
+               return rc;
+
        /*
         * GHESv2 type HEST entries introduce support for error acknowledgment,
         * so only acknowledge the error if this support is present.
         */
-       if (is_hest_type_generic_v2(ghes)) {
-               rc = ghes_ack_error(ghes->generic_v2);
-               if (rc)
-                       return rc;
-       }
-out:
-       ghes_clear_estatus(ghes);
+       if (is_hest_type_generic_v2(ghes))
+               return ghes_ack_error(ghes->generic_v2);
+
        return rc;
 }
 
index 9565d572f8dd2f3d65c0e16b8257f61100bbad50..de56394dd161f7813b7f5bce64321d3196bc4904 100644 (file)
@@ -1178,12 +1178,44 @@ dev_put:
        return ret;
 }
 
+static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
+{
+       if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
+               struct acpi_iort_node *parent;
+               struct acpi_iort_id_mapping *map;
+               int i;
+
+               map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
+                                  iort_node->mapping_offset);
+
+               for (i = 0; i < iort_node->mapping_count; i++, map++) {
+                       if (!map->output_reference)
+                               continue;
+
+                       parent = ACPI_ADD_PTR(struct acpi_iort_node,
+                                       iort_table,  map->output_reference);
+                       /*
+                        * If we detect a RC->SMMU mapping, make sure
+                        * we enable ACS on the system.
+                        */
+                       if ((parent->type == ACPI_IORT_NODE_SMMU) ||
+                               (parent->type == ACPI_IORT_NODE_SMMU_V3)) {
+                               pci_request_acs();
+                               return true;
+                       }
+               }
+       }
+
+       return false;
+}
+
 static void __init iort_init_platform_devices(void)
 {
        struct acpi_iort_node *iort_node, *iort_end;
        struct acpi_table_iort *iort;
        struct fwnode_handle *fwnode;
        int i, ret;
+       bool acs_enabled = false;
 
        /*
         * iort_table and iort both point to the start of IORT table, but
@@ -1203,6 +1235,9 @@ static void __init iort_init_platform_devices(void)
                        return;
                }
 
+               if (!acs_enabled)
+                       acs_enabled = iort_enable_acs(iort_node);
+
                if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
                        (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
 
index c1c216163de3f0ab5b044c52527ed10e3090a9e5..e26ea209b63ef1b8f89a6112de5c983db3f4feee 100644 (file)
@@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
  *     }
  * }
  *
- * Calling this function with index %2 return %-ENOENT and with index %3
- * returns the last entry. If the property does not contain any more values
- * %-ENODATA is returned. The NULL entry must be single integer and
- * preferably contain value %0.
+ * Calling this function with index %2 or index %3 return %-ENOENT. If the
+ * property does not contain any more values %-ENOENT is returned. The NULL
+ * entry must be single integer and preferably contain value %0.
  *
  * Return: %0 on success, negative error code on failure.
  */
@@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 
        data = acpi_device_data_of_node(fwnode);
        if (!data)
-               return -EINVAL;
+               return -ENOENT;
 
        ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
        if (ret)
-               return ret;
+               return ret == -EINVAL ? -ENOENT : -EINVAL;
 
        /*
         * The simplest case is when the value is a single reference.  Just
@@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 
                ret = acpi_bus_get_device(obj->reference.handle, &device);
                if (ret)
-                       return ret;
+                       return ret == -ENODEV ? -EINVAL : ret;
 
                args->adev = device;
                args->nargs = 0;
@@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
         * The index argument is then used to determine which reference
         * the caller wants (along with the arguments).
         */
-       if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count)
-               return -EPROTO;
+       if (obj->type != ACPI_TYPE_PACKAGE)
+               return -EINVAL;
+       if (index >= obj->package.count)
+               return -ENOENT;
 
        element = obj->package.elements;
        end = element + obj->package.count;
@@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
                        ret = acpi_bus_get_device(element->reference.handle,
                                                  &device);
                        if (ret)
-                               return -ENODEV;
+                               return -EINVAL;
 
                        nargs = 0;
                        element++;
@@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
                                else if (type == ACPI_TYPE_LOCAL_REFERENCE)
                                        break;
                                else
-                                       return -EPROTO;
+                                       return -EINVAL;
                        }
 
                        if (nargs > MAX_ACPI_REFERENCE_ARGS)
-                               return -EPROTO;
+                               return -EINVAL;
 
                        if (idx == index) {
                                args->adev = device;
@@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
                                return -ENOENT;
                        element++;
                } else {
-                       return -EPROTO;
+                       return -EINVAL;
                }
 
                idx++;
        }
 
-       return -ENODATA;
+       return -ENOENT;
 }
 EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
 
@@ -908,11 +909,12 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
                                            struct fwnode_handle *child)
 {
        const struct acpi_device *adev = to_acpi_device_node(fwnode);
-       struct acpi_device *child_adev = NULL;
        const struct list_head *head;
        struct list_head *next;
 
        if (!child || is_acpi_device_node(child)) {
+               struct acpi_device *child_adev;
+
                if (adev)
                        head = &adev->children;
                else
@@ -922,8 +924,8 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
                        goto nondev;
 
                if (child) {
-                       child_adev = to_acpi_device_node(child);
-                       next = child_adev->node.next;
+                       adev = to_acpi_device_node(child);
+                       next = adev->node.next;
                        if (next == head) {
                                child = NULL;
                                goto nondev;
@@ -941,8 +943,8 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
                const struct acpi_data_node *data = to_acpi_data_node(fwnode);
                struct acpi_data_node *dn;
 
-               if (child_adev)
-                       head = &child_adev->data.subnodes;
+               if (adev)
+                       head = &adev->data.subnodes;
                else if (data)
                        head = &data->data.subnodes;
                else
@@ -1293,3 +1295,16 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
 DECLARE_ACPI_FWNODE_OPS(acpi_device_fwnode_ops);
 DECLARE_ACPI_FWNODE_OPS(acpi_data_fwnode_ops);
 const struct fwnode_operations acpi_static_fwnode_ops;
+
+bool is_acpi_device_node(const struct fwnode_handle *fwnode)
+{
+       return !IS_ERR_OR_NULL(fwnode) &&
+               fwnode->ops == &acpi_device_fwnode_ops;
+}
+EXPORT_SYMBOL(is_acpi_device_node);
+
+bool is_acpi_data_node(const struct fwnode_handle *fwnode)
+{
+       return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &acpi_data_fwnode_ops;
+}
+EXPORT_SYMBOL(is_acpi_data_node);
index d055b3f2a2078cedb078b3359777146c66c910a9..fddf76ef5bd6d824e2017fedb9110418b45270de 100644 (file)
@@ -2217,7 +2217,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
                                       debug_id, (u64)fda->num_fds);
                                continue;
                        }
-                       fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+                       fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
                        for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
                                task_close_fd(proc, fd_array[fd_index]);
                } break;
@@ -2326,7 +2326,6 @@ static int binder_translate_handle(struct flat_binder_object *fp,
                             (u64)node->ptr);
                binder_node_unlock(node);
        } else {
-               int ret;
                struct binder_ref_data dest_rdata;
 
                binder_node_unlock(node);
@@ -2442,7 +2441,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
         */
        parent_buffer = parent->buffer -
                binder_alloc_get_user_buffer_offset(&target_proc->alloc);
-       fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+       fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
        if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
                binder_user_error("%d:%d parent offset not aligned correctly.\n",
                                  proc->pid, thread->pid);
@@ -2508,7 +2507,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
                                  proc->pid, thread->pid);
                return -EINVAL;
        }
-       parent_buffer = (u8 *)(parent->buffer -
+       parent_buffer = (u8 *)((uintptr_t)parent->buffer -
                        binder_alloc_get_user_buffer_offset(
                                &target_proc->alloc));
        *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
@@ -2583,6 +2582,48 @@ static bool binder_proc_transaction(struct binder_transaction *t,
        return true;
 }
 
+/**
+ * binder_get_node_refs_for_txn() - Get required refs on node for txn
+ * @node:         struct binder_node for which to get refs
+ * @proc:         returns @node->proc if valid
+ * @error:        if no @proc then returns BR_DEAD_REPLY
+ *
+ * User-space normally keeps the node alive when creating a transaction
+ * since it has a reference to the target. The local strong ref keeps it
+ * alive if the sending process dies before the target process processes
+ * the transaction. If the source process is malicious or has a reference
+ * counting bug, relying on the local strong ref can fail.
+ *
+ * Since user-space can cause the local strong ref to go away, we also take
+ * a tmpref on the node to ensure it survives while we are constructing
+ * the transaction. We also need a tmpref on the proc while we are
+ * constructing the transaction, so we take that here as well.
+ *
+ * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
+ * Also sets @proc if valid. If the @node->proc is NULL indicating that the
+ * target proc has died, @error is set to BR_DEAD_REPLY
+ */
+static struct binder_node *binder_get_node_refs_for_txn(
+               struct binder_node *node,
+               struct binder_proc **procp,
+               uint32_t *error)
+{
+       struct binder_node *target_node = NULL;
+
+       binder_node_inner_lock(node);
+       if (node->proc) {
+               target_node = node;
+               binder_inc_node_nilocked(node, 1, 0, NULL);
+               binder_inc_node_tmpref_ilocked(node);
+               node->proc->tmp_ref++;
+               *procp = node->proc;
+       } else
+               *error = BR_DEAD_REPLY;
+       binder_node_inner_unlock(node);
+
+       return target_node;
+}
+
 static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply,
@@ -2686,43 +2727,35 @@ static void binder_transaction(struct binder_proc *proc,
                        ref = binder_get_ref_olocked(proc, tr->target.handle,
                                                     true);
                        if (ref) {
-                               binder_inc_node(ref->node, 1, 0, NULL);
-                               target_node = ref->node;
-                       }
-                       binder_proc_unlock(proc);
-                       if (target_node == NULL) {
+                               target_node = binder_get_node_refs_for_txn(
+                                               ref->node, &target_proc,
+                                               &return_error);
+                       } else {
                                binder_user_error("%d:%d got transaction to invalid handle\n",
-                                       proc->pid, thread->pid);
+                                                 proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
-                               return_error_param = -EINVAL;
-                               return_error_line = __LINE__;
-                               goto err_invalid_target_handle;
                        }
+                       binder_proc_unlock(proc);
                } else {
                        mutex_lock(&context->context_mgr_node_lock);
                        target_node = context->binder_context_mgr_node;
-                       if (target_node == NULL) {
+                       if (target_node)
+                               target_node = binder_get_node_refs_for_txn(
+                                               target_node, &target_proc,
+                                               &return_error);
+                       else
                                return_error = BR_DEAD_REPLY;
-                               mutex_unlock(&context->context_mgr_node_lock);
-                               return_error_line = __LINE__;
-                               goto err_no_context_mgr_node;
-                       }
-                       binder_inc_node(target_node, 1, 0, NULL);
                        mutex_unlock(&context->context_mgr_node_lock);
                }
-               e->to_node = target_node->debug_id;
-               binder_node_lock(target_node);
-               target_proc = target_node->proc;
-               if (target_proc == NULL) {
-                       binder_node_unlock(target_node);
-                       return_error = BR_DEAD_REPLY;
+               if (!target_node) {
+                       /*
+                        * return_error is set above
+                        */
+                       return_error_param = -EINVAL;
                        return_error_line = __LINE__;
                        goto err_dead_binder;
                }
-               binder_inner_proc_lock(target_proc);
-               target_proc->tmp_ref++;
-               binder_inner_proc_unlock(target_proc);
-               binder_node_unlock(target_node);
+               e->to_node = target_node->debug_id;
                if (security_binder_transaction(proc->tsk,
                                                target_proc->tsk) < 0) {
                        return_error = BR_FAILED_REPLY;
@@ -3072,6 +3105,8 @@ static void binder_transaction(struct binder_proc *proc,
        if (target_thread)
                binder_thread_dec_tmpref(target_thread);
        binder_proc_dec_tmpref(target_proc);
+       if (target_node)
+               binder_dec_node_tmpref(target_node);
        /*
         * write barrier to synchronize with initialization
         * of log entry
@@ -3083,6 +3118,7 @@ static void binder_transaction(struct binder_proc *proc,
 err_dead_proc_or_thread:
        return_error = BR_DEAD_REPLY;
        return_error_line = __LINE__;
+       binder_dequeue_work(proc, tcomplete);
 err_translate_failed:
 err_bad_object_type:
 err_bad_offset:
@@ -3090,6 +3126,8 @@ err_bad_parent:
 err_copy_data_failed:
        trace_binder_transaction_failed_buffer_release(t->buffer);
        binder_transaction_buffer_release(target_proc, t->buffer, offp);
+       if (target_node)
+               binder_dec_node_tmpref(target_node);
        target_node = NULL;
        t->buffer->transaction = NULL;
        binder_alloc_free_buf(&target_proc->alloc, t->buffer);
@@ -3104,13 +3142,14 @@ err_bad_call_stack:
 err_empty_call_stack:
 err_dead_binder:
 err_invalid_target_handle:
-err_no_context_mgr_node:
        if (target_thread)
                binder_thread_dec_tmpref(target_thread);
        if (target_proc)
                binder_proc_dec_tmpref(target_proc);
-       if (target_node)
+       if (target_node) {
                binder_dec_node(target_node, 1, 0);
+               binder_dec_node_tmpref(target_node);
+       }
 
        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
                     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
@@ -3623,12 +3662,6 @@ static void binder_stat_br(struct binder_proc *proc,
        }
 }
 
-static int binder_has_thread_work(struct binder_thread *thread)
-{
-       return !binder_worklist_empty(thread->proc, &thread->todo) ||
-               thread->looper_need_return;
-}
-
 static int binder_put_node_cmd(struct binder_proc *proc,
                               struct binder_thread *thread,
                               void __user **ptrp,
@@ -4258,12 +4291,9 @@ static unsigned int binder_poll(struct file *filp,
 
        binder_inner_proc_unlock(thread->proc);
 
-       if (binder_has_work(thread, wait_for_proc_work))
-               return POLLIN;
-
        poll_wait(filp, &thread->wait, wait);
 
-       if (binder_has_thread_work(thread))
+       if (binder_has_work(thread, wait_for_proc_work))
                return POLLIN;
 
        return 0;
index 8fe165844e4708a3646a69384fe55db592fbe87f..c2819a3d58a66e63e49cd7e91d3b7caa7a6551a1 100644 (file)
@@ -215,17 +215,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                }
        }
 
-       if (!vma && need_mm)
-               mm = get_task_mm(alloc->tsk);
+       if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm))
+               mm = alloc->vma_vm_mm;
 
        if (mm) {
                down_write(&mm->mmap_sem);
                vma = alloc->vma;
-               if (vma && mm != alloc->vma_vm_mm) {
-                       pr_err("%d: vma mm and task mm mismatch\n",
-                               alloc->pid);
-                       vma = NULL;
-               }
        }
 
        if (!vma && need_mm) {
@@ -565,7 +560,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
                                   alloc->pid, buffer->data,
-                                  prev->data, next->data);
+                                  prev->data, next ? next->data : NULL);
                binder_update_page_range(alloc, 0, buffer_start_page(buffer),
                                         buffer_start_page(buffer) + PAGE_SIZE,
                                         NULL);
@@ -720,6 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
        barrier();
        alloc->vma = vma;
        alloc->vma_vm_mm = vma->vm_mm;
+       mmgrab(alloc->vma_vm_mm);
 
        return 0;
 
@@ -795,6 +791,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
                vfree(alloc->buffer);
        }
        mutex_unlock(&alloc->mutex);
+       if (alloc->vma_vm_mm)
+               mmdrop(alloc->vma_vm_mm);
 
        binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
                     "%s: %d buffers %d, pages %d\n",
@@ -889,7 +887,6 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
        WRITE_ONCE(alloc->vma, NULL);
-       WRITE_ONCE(alloc->vma_vm_mm, NULL);
 }
 
 /**
@@ -913,6 +910,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
        struct binder_alloc *alloc;
        uintptr_t page_addr;
        size_t index;
+       struct vm_area_struct *vma;
 
        alloc = page->alloc;
        if (!mutex_trylock(&alloc->mutex))
@@ -923,16 +921,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-       if (alloc->vma) {
-               mm = get_task_mm(alloc->tsk);
-               if (!mm)
-                       goto err_get_task_mm_failed;
+       vma = alloc->vma;
+       if (vma) {
+               if (!mmget_not_zero(alloc->vma_vm_mm))
+                       goto err_mmget;
+               mm = alloc->vma_vm_mm;
                if (!down_write_trylock(&mm->mmap_sem))
                        goto err_down_write_mmap_sem_failed;
+       }
+
+       list_lru_isolate(lru, item);
+       spin_unlock(lock);
 
+       if (vma) {
                trace_binder_unmap_user_start(alloc, index);
 
-               zap_page_range(alloc->vma,
+               zap_page_range(vma,
                               page_addr + alloc->user_buffer_offset,
                               PAGE_SIZE);
 
@@ -950,14 +954,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        trace_binder_unmap_kernel_end(alloc, index);
 
-       list_lru_isolate(lru, item);
-
+       spin_lock(lock);
        mutex_unlock(&alloc->mutex);
-       return LRU_REMOVED;
+       return LRU_REMOVED_RETRY;
 
 err_down_write_mmap_sem_failed:
-       mmput(mm);
-err_get_task_mm_failed:
+       mmput_async(mm);
+err_mmget:
 err_page_already_freed:
        mutex_unlock(&alloc->mutex);
 err_get_alloc_mutex_failed:
@@ -996,7 +999,6 @@ struct shrinker binder_shrinker = {
  */
 void binder_alloc_init(struct binder_alloc *alloc)
 {
-       alloc->tsk = current->group_leader;
        alloc->pid = current->group_leader->pid;
        mutex_init(&alloc->mutex);
        INIT_LIST_HEAD(&alloc->buffers);
index a3a3602c689c9a905a05991fcdb121c9ac9446ba..2dd33b6df1044e64b785a6193bc30b84ddf9d1c5 100644 (file)
@@ -100,7 +100,6 @@ struct binder_lru_page {
  */
 struct binder_alloc {
        struct mutex mutex;
-       struct task_struct *tsk;
        struct vm_area_struct *vma;
        struct mm_struct *vma_vm_mm;
        void *buffer;
index cb9b0e9090e3b8ec7e32fff751b1e34332cb7068..9f78bb03bb763c8ba9577ffce47c405c9fa7fb27 100644 (file)
@@ -621,8 +621,11 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
 static int ahci_pci_reset_controller(struct ata_host *host)
 {
        struct pci_dev *pdev = to_pci_dev(host->dev);
+       int rc;
 
-       ahci_reset_controller(host);
+       rc = ahci_reset_controller(host);
+       if (rc)
+               return rc;
 
        if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                struct ahci_host_priv *hpriv = host->private_data;
index 8401c3b5be921335492ccc3f76f0a7d229306ec0..b702c20fbc2bbb2d9ebcb6188e5e1f522adf9271 100644 (file)
@@ -492,6 +492,7 @@ static const struct ich_laptop ich_laptop[] = {
        { 0x27DF, 0x152D, 0x0778 },     /* ICH7 on unknown Intel */
        { 0x24CA, 0x1025, 0x0061 },     /* ICH4 on ACER Aspire 2023WLMi */
        { 0x24CA, 0x1025, 0x003d },     /* ICH4 on ACER TM290 */
+       { 0x24CA, 0x10CF, 0x11AB },     /* ICH4M on Fujitsu-Siemens Lifebook S6120 */
        { 0x266F, 0x1025, 0x0066 },     /* ICH6 on ACER Aspire 1694WLMi */
        { 0x2653, 0x1043, 0x82D8 },     /* ICH6M on Asus Eee 701 */
        { 0x27df, 0x104d, 0x900e },     /* ICH7 on Sony TZ-90 */
index 1945a8ea20998490b48aa70dfe56c4fbaad744ea..ee4c1ec9dca0ef9e51f4abf56924aab2112d9f77 100644 (file)
@@ -3234,19 +3234,19 @@ static const struct ata_timing ata_timing[] = {
 };
 
 #define ENOUGH(v, unit)                (((v)-1)/(unit)+1)
-#define EZ(v, unit)            ((v)?ENOUGH(v, unit):0)
+#define EZ(v, unit)            ((v)?ENOUGH(((v) * 1000), unit):0)
 
 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
 {
-       q->setup        = EZ(t->setup      * 1000,  T);
-       q->act8b        = EZ(t->act8b      * 1000,  T);
-       q->rec8b        = EZ(t->rec8b      * 1000,  T);
-       q->cyc8b        = EZ(t->cyc8b      * 1000,  T);
-       q->active       = EZ(t->active     * 1000,  T);
-       q->recover      = EZ(t->recover    * 1000,  T);
-       q->dmack_hold   = EZ(t->dmack_hold * 1000,  T);
-       q->cycle        = EZ(t->cycle      * 1000,  T);
-       q->udma         = EZ(t->udma       * 1000, UT);
+       q->setup        = EZ(t->setup,       T);
+       q->act8b        = EZ(t->act8b,       T);
+       q->rec8b        = EZ(t->rec8b,       T);
+       q->cyc8b        = EZ(t->cyc8b,       T);
+       q->active       = EZ(t->active,      T);
+       q->recover      = EZ(t->recover,     T);
+       q->dmack_hold   = EZ(t->dmack_hold,  T);
+       q->cycle        = EZ(t->cycle,       T);
+       q->udma         = EZ(t->udma,       UT);
 }
 
 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
index cfeb049a01ef84425672682c50acb492cbfb04eb..642afd88870ba99ef1c79db74b457515079684b7 100644 (file)
@@ -647,18 +647,25 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
 static int charlcd_open(struct inode *inode, struct file *file)
 {
        struct charlcd_priv *priv = to_priv(the_charlcd);
+       int ret;
 
+       ret = -EBUSY;
        if (!atomic_dec_and_test(&charlcd_available))
-               return -EBUSY;  /* open only once at a time */
+               goto fail;      /* open only once at a time */
 
+       ret = -EPERM;
        if (file->f_mode & FMODE_READ)  /* device is write-only */
-               return -EPERM;
+               goto fail;
 
        if (priv->must_clear) {
                charlcd_clear_display(&priv->lcd);
                priv->must_clear = false;
        }
        return nonseekable_open(inode, file);
+
+ fail:
+       atomic_inc(&charlcd_available);
+       return ret;
 }
 
 static int charlcd_release(struct inode *inode, struct file *file)
index df126dcdaf18e1b84951426b6f501810036a64a3..6911acd896d935946b3c805d07c88bc03bf41918 100644 (file)
@@ -1105,14 +1105,21 @@ static ssize_t keypad_read(struct file *file,
 
 static int keypad_open(struct inode *inode, struct file *file)
 {
+       int ret;
+
+       ret = -EBUSY;
        if (!atomic_dec_and_test(&keypad_available))
-               return -EBUSY;  /* open only once at a time */
+               goto fail;      /* open only once at a time */
 
+       ret = -EPERM;
        if (file->f_mode & FMODE_WRITE) /* device is read-only */
-               return -EPERM;
+               goto fail;
 
        keypad_buflen = 0;      /* flush the buffer on opening */
        return 0;
+ fail:
+       atomic_inc(&keypad_available);
+       return ret;
 }
 
 static int keypad_release(struct inode *inode, struct file *file)
index 41be9ff7d70a96d7d901918c6186ea3be035f88c..6df7d6676a48104267b5e739c6e58b85a00724e1 100644 (file)
@@ -166,11 +166,11 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 }
 
 #ifdef CONFIG_CPU_FREQ
-static cpumask_var_t cpus_to_visit;
-static void parsing_done_workfn(struct work_struct *work);
-static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+static cpumask_var_t cpus_to_visit __initdata;
+static void __init parsing_done_workfn(struct work_struct *work);
+static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn);
 
-static int
+static int __init
 init_cpu_capacity_callback(struct notifier_block *nb,
                           unsigned long val,
                           void *data)
@@ -206,7 +206,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
        return 0;
 }
 
-static struct notifier_block init_cpu_capacity_notifier = {
+static struct notifier_block init_cpu_capacity_notifier __initdata = {
        .notifier_call = init_cpu_capacity_callback,
 };
 
@@ -232,7 +232,7 @@ static int __init register_cpufreq_notifier(void)
 }
 core_initcall(register_cpufreq_notifier);
 
-static void parsing_done_workfn(struct work_struct *work)
+static void __init parsing_done_workfn(struct work_struct *work)
 {
        cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
                                         CPUFREQ_POLICY_NOTIFIER);
index 321cd7b4d817fd6ffd9323362041c7d4cb29b7e9..227bac5f1191ff11ff8e96e0243a0df0af740be7 100644 (file)
@@ -377,7 +377,8 @@ int register_cpu(struct cpu *cpu, int num)
 
        per_cpu(cpu_sys_devices, num) = &cpu->dev;
        register_cpu_under_node(num, cpu_to_node(num));
-       dev_pm_qos_expose_latency_limit(&cpu->dev, 0);
+       dev_pm_qos_expose_latency_limit(&cpu->dev,
+                                       PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
 
        return 0;
 }
index a39b2166b145616d2a6261da8da7252aa7e28b4d..744f64f43454314c35acc4341def004968aeff87 100644 (file)
@@ -348,16 +348,15 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
        struct dma_coherent_mem *mem = rmem->priv;
        int ret;
 
-       if (!mem)
-               return -ENODEV;
-
-       ret = dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
-                                      DMA_MEMORY_EXCLUSIVE, &mem);
-
-       if (ret) {
-               pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
-                       &rmem->base, (unsigned long)rmem->size / SZ_1M);
-               return ret;
+       if (!mem) {
+               ret = dma_init_coherent_memory(rmem->base, rmem->base,
+                                              rmem->size,
+                                              DMA_MEMORY_EXCLUSIVE, &mem);
+               if (ret) {
+                       pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
+                               &rmem->base, (unsigned long)rmem->size / SZ_1M);
+                       return ret;
+               }
        }
        mem->use_dev_dma_pfn_offset = true;
        rmem->priv = mem;
index 3855902f2c5b369dc538759950f5b4c951c484d6..aae2402f3791dbc12174cce7ae0552a3c59e5b02 100644 (file)
@@ -27,13 +27,21 @@ static struct bus_type node_subsys = {
 
 static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
 {
+       ssize_t n;
+       cpumask_var_t mask;
        struct node *node_dev = to_node(dev);
-       const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
 
        /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
        BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
 
-       return cpumap_print_to_pagebuf(list, buf, mask);
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+               return 0;
+
+       cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
+       n = cpumap_print_to_pagebuf(list, buf, mask);
+       free_cpumask_var(mask);
+
+       return n;
 }
 
 static inline ssize_t node_read_cpumask(struct device *dev,
index d1bd9927106638d7ad94b5cec29650942453b98a..9045c5f3734e8df88d19ec58106e01efbe8f5a31 100644 (file)
@@ -868,7 +868,8 @@ static ssize_t driver_override_store(struct device *dev,
        struct platform_device *pdev = to_platform_device(dev);
        char *driver_override, *old, *cp;
 
-       if (count > PATH_MAX)
+       /* We need to keep extra room for a newline */
+       if (count >= (PAGE_SIZE - 1))
                return -EINVAL;
 
        driver_override = kstrndup(buf, count, GFP_KERNEL);
index 281f949c5ffeb22828e0363c6c4c302c7635eb92..51751cc8c9e62c6f234039a5eea86f136d9eceda 100644 (file)
 static int dev_update_qos_constraint(struct device *dev, void *data)
 {
        s64 *constraint_ns_p = data;
-       s32 constraint_ns = -1;
+       s64 constraint_ns = -1;
 
        if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
                constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
 
-       if (constraint_ns < 0) {
+       if (constraint_ns < 0)
                constraint_ns = dev_pm_qos_read_value(dev);
-               constraint_ns *= NSEC_PER_USEC;
-       }
-       if (constraint_ns == 0)
+
+       if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
                return 0;
 
-       /*
-        * constraint_ns cannot be negative here, because the device has been
-        * suspended.
-        */
-       if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0)
+       constraint_ns *= NSEC_PER_USEC;
+
+       if (constraint_ns < *constraint_ns_p || *constraint_ns_p < 0)
                *constraint_ns_p = constraint_ns;
 
        return 0;
@@ -63,10 +60,14 @@ static bool default_suspend_ok(struct device *dev)
 
        spin_unlock_irqrestore(&dev->power.lock, flags);
 
-       if (constraint_ns < 0)
+       if (constraint_ns == 0)
                return false;
 
-       constraint_ns *= NSEC_PER_USEC;
+       if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+               constraint_ns = -1;
+       else
+               constraint_ns *= NSEC_PER_USEC;
+
        /*
         * We can walk the children without any additional locking, because
         * they all have been suspended at this point and their
@@ -76,14 +77,19 @@ static bool default_suspend_ok(struct device *dev)
                device_for_each_child(dev, &constraint_ns,
                                      dev_update_qos_constraint);
 
-       if (constraint_ns > 0) {
-               constraint_ns -= td->suspend_latency_ns +
-                               td->resume_latency_ns;
-               if (constraint_ns == 0)
-                       return false;
+       if (constraint_ns < 0) {
+               /* The children have no constraints. */
+               td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+               td->cached_suspend_ok = true;
+       } else {
+               constraint_ns -= td->suspend_latency_ns + td->resume_latency_ns;
+               if (constraint_ns > 0) {
+                       td->effective_constraint_ns = constraint_ns;
+                       td->cached_suspend_ok = true;
+               } else {
+                       td->effective_constraint_ns = 0;
+               }
        }
-       td->effective_constraint_ns = constraint_ns;
-       td->cached_suspend_ok = constraint_ns >= 0;
 
        /*
         * The children have been suspended already, so we don't need to take
@@ -145,13 +151,14 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
                td = &to_gpd_data(pdd)->td;
                constraint_ns = td->effective_constraint_ns;
                /* default_suspend_ok() need not be called before us. */
-               if (constraint_ns < 0) {
+               if (constraint_ns < 0)
                        constraint_ns = dev_pm_qos_read_value(pdd->dev);
-                       constraint_ns *= NSEC_PER_USEC;
-               }
-               if (constraint_ns == 0)
+
+               if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
                        continue;
 
+               constraint_ns *= NSEC_PER_USEC;
+
                /*
                 * constraint_ns cannot be negative here, because the device has
                 * been suspended.
index ea1732ed7a9d9071db73784c7e37d505ed8dea0a..770b1539a083d111ba1a65b569d3a75eaa81dd38 100644 (file)
@@ -1860,10 +1860,13 @@ void device_pm_check_callbacks(struct device *dev)
 {
        spin_lock_irq(&dev->power.lock);
        dev->power.no_pm_callbacks =
-               (!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
-               (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
+               (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
+                !dev->bus->suspend && !dev->bus->resume)) &&
+               (!dev->class || (pm_ops_is_empty(dev->class->pm) &&
+                !dev->class->suspend && !dev->class->resume)) &&
                (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
                (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
-               (!dev->driver || pm_ops_is_empty(dev->driver->pm));
+               (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
+                !dev->driver->suspend && !dev->driver->resume));
        spin_unlock_irq(&dev->power.lock);
 }
index a8cc14fd8ae49ff92cb2fc3dd593273aefd6e10d..a6de325306933b57928b8d16d741247e93274102 100644 (file)
@@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
 
        opp->available = availability_req;
 
+       dev_pm_opp_get(opp);
+       mutex_unlock(&opp_table->lock);
+
        /* Notify the change of the OPP availability */
        if (availability_req)
                blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
@@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
                blocking_notifier_call_chain(&opp_table->head,
                                             OPP_EVENT_DISABLE, opp);
 
+       dev_pm_opp_put(opp);
+       goto put_table;
+
 unlock:
        mutex_unlock(&opp_table->lock);
+put_table:
        dev_pm_opp_put_opp_table(opp_table);
        return r;
 }
index f850daeffba4417ae6b2cc69a0f4aa5bf065987e..7d29286d9313ba5ee219d552f7cbe18eb124468a 100644 (file)
@@ -189,7 +189,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
        plist_head_init(&c->list);
        c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
        c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
-       c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+       c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
        c->type = PM_QOS_MIN;
        c->notifiers = n;
 
@@ -277,11 +277,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
        mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 
-static bool dev_pm_qos_invalid_request(struct device *dev,
-                                      struct dev_pm_qos_request *req)
+static bool dev_pm_qos_invalid_req_type(struct device *dev,
+                                       enum dev_pm_qos_req_type type)
 {
-       return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
-                       && !dev->power.set_latency_tolerance);
+       return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
+              !dev->power.set_latency_tolerance;
 }
 
 static int __dev_pm_qos_add_request(struct device *dev,
@@ -290,7 +290,7 @@ static int __dev_pm_qos_add_request(struct device *dev,
 {
        int ret = 0;
 
-       if (!dev || dev_pm_qos_invalid_request(dev, req))
+       if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
                return -EINVAL;
 
        if (WARN(dev_pm_qos_request_active(req),
index 7bcf80fa9adad4d45b42d3c0eec10d9425dabbe0..13e0159055431b1988ad941d7afe7b34bcc3cef7 100644 (file)
@@ -253,7 +253,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
            || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
-       else if (__dev_pm_qos_read_value(dev) < 0)
+       else if (__dev_pm_qos_read_value(dev) == 0)
                retval = -EPERM;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;
index 156ab57bca7715238571165b14e23d54ebbbd23a..632077f05c5743cc528195666e4ca0c3faeaa077 100644 (file)
@@ -218,7 +218,14 @@ static ssize_t pm_qos_resume_latency_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
 {
-       return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
+       s32 value = dev_pm_qos_requested_resume_latency(dev);
+
+       if (value == 0)
+               return sprintf(buf, "n/a\n");
+       else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+               value = 0;
+
+       return sprintf(buf, "%d\n", value);
 }
 
 static ssize_t pm_qos_resume_latency_store(struct device *dev,
@@ -228,11 +235,21 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
        s32 value;
        int ret;
 
-       if (kstrtos32(buf, 0, &value))
-               return -EINVAL;
+       if (!kstrtos32(buf, 0, &value)) {
+               /*
+                * Prevent users from writing negative or "no constraint" values
+                * directly.
+                */
+               if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+                       return -EINVAL;
 
-       if (value < 0)
+               if (value == 0)
+                       value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+       } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
+               value = 0;
+       } else {
                return -EINVAL;
+       }
 
        ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
                                        value);
index d0b65bbe7e1513f140f198aa839fd03ef925e5e6..7ed99c1b2a8b99cab9b5bc6bd002933615e9d00f 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/phy.h>
 
 struct property_set {
+       struct device *dev;
        struct fwnode_handle fwnode;
        const struct property_entry *properties;
 };
@@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
  * Caller is responsible to call fwnode_handle_put() on the returned
  * args->fwnode pointer.
  *
+ * Returns: %0 on success
+ *         %-ENOENT when the index is out of bounds, the index has an empty
+ *                  reference or the property was not found
+ *         %-EINVAL on parse error
  */
 int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
                                       const char *prop, const char *nargs_prop,
@@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
 void device_remove_properties(struct device *dev)
 {
        struct fwnode_handle *fwnode;
+       struct property_set *pset;
 
        fwnode = dev_fwnode(dev);
        if (!fwnode)
@@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev)
         * the pset. If there is no real firmware node (ACPI/DT) primary
         * will hold the pset.
         */
-       if (is_pset_node(fwnode)) {
+       pset = to_pset_node(fwnode);
+       if (pset) {
                set_primary_fwnode(dev, NULL);
-               pset_free_set(to_pset_node(fwnode));
        } else {
-               fwnode = fwnode->secondary;
-               if (!IS_ERR(fwnode) && is_pset_node(fwnode)) {
+               pset = to_pset_node(fwnode->secondary);
+               if (pset && dev == pset->dev)
                        set_secondary_fwnode(dev, NULL);
-                       pset_free_set(to_pset_node(fwnode));
-               }
        }
+       if (pset && dev == pset->dev)
+               pset_free_set(pset);
 }
 EXPORT_SYMBOL_GPL(device_remove_properties);
 
@@ -938,6 +944,7 @@ int device_add_properties(struct device *dev,
 
        p->fwnode.ops = &pset_fwnode_ops;
        set_secondary_fwnode(dev, &p->fwnode);
+       p->dev = dev;
        return 0;
 }
 EXPORT_SYMBOL_GPL(device_add_properties);
index 4a438b8abe27ad9c1774c89f608bd47fecc295a3..2dfe99b328f88e35d812b34bd4a48e3392fe5819 100644 (file)
@@ -17,7 +17,7 @@ if BLK_DEV
 
 config BLK_DEV_NULL_BLK
        tristate "Null test block driver"
-       depends on CONFIGFS_FS
+       select CONFIGFS_FS
 
 config BLK_DEV_FD
        tristate "Normal floppy disk support"
index bbd0d186cfc00ff89f9378b5816ff15ee90b1005..2d7178f7754edddf06278e46e87cc9aefc6d4427 100644 (file)
@@ -342,7 +342,7 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
 
        if (!brd)
                return -ENODEV;
-       page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512);
+       page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT);
        if (!page)
                return -ENOSPC;
        *kaddr = page_address(page);
index f68c1d50802fddb3289480124fc1459c9b66ea9e..1f3956702993883859070b2ef39a9b164978a213 100644 (file)
@@ -67,10 +67,8 @@ struct loop_device {
 struct loop_cmd {
        struct kthread_work work;
        struct request *rq;
-       union {
-               bool use_aio; /* use AIO interface to handle I/O */
-               atomic_t ref; /* only for aio */
-       };
+       bool use_aio; /* use AIO interface to handle I/O */
+       atomic_t ref; /* only for aio */
        long ret;
        struct kiocb iocb;
        struct bio_vec *bvec;
index 2aa87cbdede0ed19b77c8e4aaffea2c2d8146758..9adfb5445f8dca5a88a4ffe59d3573ed5b854e02 100644 (file)
@@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
        struct nbd_config *config = nbd->config;
        config->blksize = blocksize;
        config->bytesize = blocksize * nr_blocks;
-       nbd_size_update(nbd);
 }
 
 static void nbd_complete_rq(struct request *req)
@@ -387,6 +386,15 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
        return result;
 }
 
+/*
+ * Different settings for sk->sk_sndtimeo can result in different return values
+ * if there is a signal pending when we enter sendmsg, because reasons?
+ */
+static inline int was_interrupted(int result)
+{
+       return result == -ERESTARTSYS || result == -EINTR;
+}
+
 /* always call with the tx_lock held */
 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 {
@@ -459,7 +467,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
        result = sock_xmit(nbd, index, 1, &from,
                        (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
        if (result <= 0) {
-               if (result == -ERESTARTSYS) {
+               if (was_interrupted(result)) {
                        /* If we havne't sent anything we can just return BUSY,
                         * however if we have sent something we need to make
                         * sure we only allow this req to be sent until we are
@@ -503,7 +511,7 @@ send_pages:
                        }
                        result = sock_xmit(nbd, index, 1, &from, flags, &sent);
                        if (result <= 0) {
-                               if (result == -ERESTARTSYS) {
+                               if (was_interrupted(result)) {
                                        /* We've already sent the header, we
                                         * have no choice but to set pending and
                                         * return BUSY.
@@ -820,9 +828,13 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
         * appropriate.
         */
        ret = nbd_handle_cmd(cmd, hctx->queue_num);
+       if (ret < 0)
+               ret = BLK_STS_IOERR;
+       else if (!ret)
+               ret = BLK_STS_OK;
        complete(&cmd->send_complete);
 
-       return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
+       return ret;
 }
 
 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
@@ -1090,6 +1102,7 @@ static int nbd_start_device(struct nbd_device *nbd)
                args->index = i;
                queue_work(recv_workqueue, &args->work);
        }
+       nbd_size_update(nbd);
        return error;
 }
 
@@ -1194,6 +1207,12 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       /* The block layer will pass back some non-nbd ioctls in case we have
+        * special handling for them, but we don't so just return an error.
+        */
+       if (_IOC_TYPE(cmd) != 0xab)
+               return -EINVAL;
+
        mutex_lock(&nbd->config_lock);
 
        /* Don't allow ioctl operations on a nbd device that was created with
index 7cedb4295e9d325343e296b8b299cb407b7b2a55..64d0fc17c1742ab74aa232da503d08e344b594b2 100644 (file)
@@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
                return NULL;
        *dma_handle = dma_map_single(dev, buf, s->size, dir);
        if (dma_mapping_error(dev, *dma_handle)) {
-               kfree(buf);
+               kmem_cache_free(s, buf);
                buf = NULL;
        }
        return buf;
index 2981c27d3aae316fedd267c6c79d76eb68a9395e..f149d3e612341d1f5e70797a8f62d0330fa3973d 100644 (file)
@@ -766,27 +766,6 @@ static void zram_slot_unlock(struct zram *zram, u32 index)
        bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value);
 }
 
-static bool zram_same_page_read(struct zram *zram, u32 index,
-                               struct page *page,
-                               unsigned int offset, unsigned int len)
-{
-       zram_slot_lock(zram, index);
-       if (unlikely(!zram_get_handle(zram, index) ||
-                       zram_test_flag(zram, index, ZRAM_SAME))) {
-               void *mem;
-
-               zram_slot_unlock(zram, index);
-               mem = kmap_atomic(page);
-               zram_fill_page(mem + offset, len,
-                                       zram_get_element(zram, index));
-               kunmap_atomic(mem);
-               return true;
-       }
-       zram_slot_unlock(zram, index);
-
-       return false;
-}
-
 static void zram_meta_free(struct zram *zram, u64 disksize)
 {
        size_t num_pages = disksize >> PAGE_SHIFT;
@@ -884,11 +863,20 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
                zram_slot_unlock(zram, index);
        }
 
-       if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
-               return 0;
-
        zram_slot_lock(zram, index);
        handle = zram_get_handle(zram, index);
+       if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
+               unsigned long value;
+               void *mem;
+
+               value = handle ? zram_get_element(zram, index) : 0;
+               mem = kmap_atomic(page);
+               zram_fill_page(mem, PAGE_SIZE, value);
+               kunmap_atomic(mem);
+               zram_slot_unlock(zram, index);
+               return 0;
+       }
+
        size = zram_get_obj_size(zram, index);
 
        src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
index c7f39690318473d4b75bca11c0c3fd4830f4d084..70db4d5638a6338632a916e4ee07f0f4b3ca5e04 100644 (file)
@@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
                        if (mbus->hw_io_coherency)
                                w->mbus_attr |= ATTR_HW_COHERENCY;
                        w->base = base & DDR_BASE_CS_LOW_MASK;
-                       w->size = (size | ~DDR_SIZE_MASK) + 1;
+                       w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
                }
        }
        mvebu_mbus_dram_info.num_cs = cs;
index fe597e6c55c40ca34f070f83a2af53e9b42a66de..1d6729be4cd6376727b9b210319bd77f03ccfb02 100644 (file)
@@ -455,7 +455,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
                        goto out;
                }
 
-               msleep(TPM_TIMEOUT);    /* CHECK */
+               tpm_msleep(TPM_TIMEOUT);
                rmb();
        } while (time_before(jiffies, stop));
 
@@ -970,7 +970,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
                        dev_info(
                            &chip->dev, HW_ERR
                            "TPM command timed out during continue self test");
-                       msleep(delay_msec);
+                       tpm_msleep(delay_msec);
                        continue;
                }
 
@@ -985,7 +985,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
                }
                if (rc != TPM_WARN_DOING_SELFTEST)
                        return rc;
-               msleep(delay_msec);
+               tpm_msleep(delay_msec);
        } while (--loops > 0);
 
        return rc;
@@ -1085,7 +1085,7 @@ again:
                }
        } else {
                do {
-                       msleep(TPM_TIMEOUT);
+                       tpm_msleep(TPM_TIMEOUT);
                        status = chip->ops->status(chip);
                        if ((status & mask) == mask)
                                return 0;
@@ -1150,7 +1150,7 @@ int tpm_pm_suspend(struct device *dev)
                 */
                if (rc != TPM_WARN_RETRY)
                        break;
-               msleep(TPM_TIMEOUT_RETRY);
+               tpm_msleep(TPM_TIMEOUT_RETRY);
        }
 
        if (rc)
index 04fbff2edbf38ce19845e5c1fdd1cd619293419d..2d5466a72e40f82b3272b857b74a1f822f82b966 100644 (file)
@@ -50,7 +50,8 @@ enum tpm_const {
 
 enum tpm_timeout {
        TPM_TIMEOUT = 5,        /* msecs */
-       TPM_TIMEOUT_RETRY = 100 /* msecs */
+       TPM_TIMEOUT_RETRY = 100, /* msecs */
+       TPM_TIMEOUT_RANGE_US = 300      /* usecs */
 };
 
 /* TPM addresses */
@@ -527,6 +528,12 @@ int tpm_pm_resume(struct device *dev);
 int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
                      wait_queue_head_t *queue, bool check_cancel);
 
+static inline void tpm_msleep(unsigned int delay_msec)
+{
+       usleep_range(delay_msec * 1000,
+                    (delay_msec * 1000) + TPM_TIMEOUT_RANGE_US);
+};
+
 struct tpm_chip *tpm_chip_find_get(int chip_num);
 __must_check int tpm_try_get_ops(struct tpm_chip *chip);
 void tpm_put_ops(struct tpm_chip *chip);
index f7f34b2aa98190c7474b3dd6c0f02b2abc032ec9..e1a41b788f081c829ad0d0353ac7d3622752c82f 100644 (file)
@@ -899,7 +899,7 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
                if (rc != TPM2_RC_TESTING)
                        break;
 
-               msleep(delay_msec);
+               tpm_msleep(delay_msec);
        }
 
        return rc;
index a4ac63a21d8a051da3c336f5343b9dc72574b719..8f0a98dea327a5bc90d7fcf3529c3701d17abd6b 100644 (file)
@@ -665,7 +665,7 @@ static const struct dev_pm_ops crb_pm = {
        SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
 };
 
-static struct acpi_device_id crb_device_ids[] = {
+static const struct acpi_device_id crb_device_ids[] = {
        {"MSFT0101", 0},
        {"", 0},
 };
index f01d083eced2fc056d6d74e4423a62f28470e302..25f6e2665385d063d47c50ad9f21e25591f828c9 100644 (file)
 
 static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
 
-static struct vio_device_id tpm_ibmvtpm_device_table[] = {
+static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
        { "IBM,vtpm", "IBM,vtpm"},
        { "", "" }
 };
 MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
 
 /**
+ *
+ * ibmvtpm_send_crq_word - Send a CRQ request
+ * @vdev:      vio device struct
+ * @w1:                pre-constructed first word of tpm crq (second word is reserved)
+ *
+ * Return:
+ *     0 - Success
+ *     Non-zero - Failure
+ */
+static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
+{
+       return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
+}
+
+/**
+ *
  * ibmvtpm_send_crq - Send a CRQ request
  *
  * @vdev:      vio device struct
- * @w1:                first word
- * @w2:                second word
+ * @valid:     Valid field
+ * @msg:       Type field
+ * @len:       Length field
+ * @data:      Data field
+ *
+ * The ibmvtpm crq is defined as follows:
+ *
+ * Byte  |   0   |   1   |   2   |   3   |   4   |   5   |   6   |   7
+ * -----------------------------------------------------------------------
+ * Word0 | Valid | Type  |     Length    |              Data
+ * -----------------------------------------------------------------------
+ * Word1 |                Reserved
+ * -----------------------------------------------------------------------
+ *
+ * Which matches the following structure (on bigendian host):
+ *
+ * struct ibmvtpm_crq {
+ *         u8 valid;
+ *         u8 msg;
+ *         __be16 len;
+ *         __be32 data;
+ *         __be64 reserved;
+ * } __attribute__((packed, aligned(8)));
+ *
+ * However, the value is passed in a register so just compute the numeric value
+ * to load into the register avoiding byteswap altogether. Endian only affects
+ * memory loads and stores - registers are internally represented the same.
  *
  * Return:
- *     0 -Sucess
+ *     0 (H_SUCCESS) - Success
  *     Non-zero - Failure
  */
-static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
+static int ibmvtpm_send_crq(struct vio_dev *vdev,
+               u8 valid, u8 msg, u16 len, u32 data)
 {
-       return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
+       u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
+               (u64)data;
+       return ibmvtpm_send_crq_word(vdev, w1);
 }
 
 /**
@@ -109,8 +153,6 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 {
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
-       struct ibmvtpm_crq crq;
-       __be64 *word = (__be64 *)&crq;
        int rc, sig;
 
        if (!ibmvtpm->rtce_buf) {
@@ -137,10 +179,6 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
        spin_lock(&ibmvtpm->rtce_lock);
        ibmvtpm->res_len = 0;
        memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
-       crq.valid = (u8)IBMVTPM_VALID_CMD;
-       crq.msg = (u8)VTPM_TPM_COMMAND;
-       crq.len = cpu_to_be16(count);
-       crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
 
        /*
         * set the processing flag before the Hcall, since we may get the
@@ -148,8 +186,9 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
         */
        ibmvtpm->tpm_processing_cmd = true;
 
-       rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
-                             be64_to_cpu(word[1]));
+       rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+                       IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
+                       count, ibmvtpm->rtce_dma_handle);
        if (rc != H_SUCCESS) {
                dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
                rc = 0;
@@ -182,15 +221,10 @@ static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
  */
 static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
 {
-       struct ibmvtpm_crq crq;
-       u64 *buf = (u64 *) &crq;
        int rc;
 
-       crq.valid = (u8)IBMVTPM_VALID_CMD;
-       crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;
-
-       rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
-                             cpu_to_be64(buf[1]));
+       rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+                       IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
@@ -210,15 +244,10 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
  */
 static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
 {
-       struct ibmvtpm_crq crq;
-       u64 *buf = (u64 *) &crq;
        int rc;
 
-       crq.valid = (u8)IBMVTPM_VALID_CMD;
-       crq.msg = (u8)VTPM_GET_VERSION;
-
-       rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
-                             cpu_to_be64(buf[1]));
+       rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+                       IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_get_version failed rc=%d\n", rc);
@@ -238,7 +267,7 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
 {
        int rc;
 
-       rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
+       rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
@@ -258,7 +287,7 @@ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
 {
        int rc;
 
-       rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
+       rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_send_init failed rc=%d\n", rc);
@@ -340,15 +369,10 @@ static int tpm_ibmvtpm_suspend(struct device *dev)
 {
        struct tpm_chip *chip = dev_get_drvdata(dev);
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
-       struct ibmvtpm_crq crq;
-       u64 *buf = (u64 *) &crq;
        int rc = 0;
 
-       crq.valid = (u8)IBMVTPM_VALID_CMD;
-       crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;
-
-       rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
-                             cpu_to_be64(buf[1]));
+       rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+                       IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "tpm_ibmvtpm_suspend failed rc=%d\n", rc);
index 3b1b9f9322d5547120f35f490b7ec7b802d2b085..d8f10047fbbaf1f2f965a795358e60be859fb759 100644 (file)
@@ -191,7 +191,7 @@ static int wait(struct tpm_chip *chip, int wait_for_bit)
                /* check the status-register if wait_for_bit is set */
                if (status & 1 << wait_for_bit)
                        break;
-               msleep(TPM_MSLEEP_TIME);
+               tpm_msleep(TPM_MSLEEP_TIME);
        }
        if (i == TPM_MAX_TRIES) {       /* timeout occurs */
                if (wait_for_bit == STAT_XFE)
@@ -226,7 +226,7 @@ static void tpm_wtx(struct tpm_chip *chip)
        wait_and_send(chip, TPM_CTRL_WTX);
        wait_and_send(chip, 0x00);
        wait_and_send(chip, 0x00);
-       msleep(TPM_WTX_MSLEEP_TIME);
+       tpm_msleep(TPM_WTX_MSLEEP_TIME);
 }
 
 static void tpm_wtx_abort(struct tpm_chip *chip)
@@ -237,7 +237,7 @@ static void tpm_wtx_abort(struct tpm_chip *chip)
        wait_and_send(chip, 0x00);
        wait_and_send(chip, 0x00);
        number_of_wtx = 0;
-       msleep(TPM_WTX_MSLEEP_TIME);
+       tpm_msleep(TPM_WTX_MSLEEP_TIME);
 }
 
 static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count)
index b617b2eeb080ad41034b7b7cbd1b21aaed524a3a..63bc6c3b949e51406527a484d356e24959ed9615 100644 (file)
@@ -51,7 +51,7 @@ static int wait_startup(struct tpm_chip *chip, int l)
 
                if (access & TPM_ACCESS_VALID)
                        return 0;
-               msleep(TPM_TIMEOUT);
+               tpm_msleep(TPM_TIMEOUT);
        } while (time_before(jiffies, stop));
        return -1;
 }
@@ -117,7 +117,7 @@ again:
                do {
                        if (check_locality(chip, l))
                                return l;
-                       msleep(TPM_TIMEOUT);
+                       tpm_msleep(TPM_TIMEOUT);
                } while (time_before(jiffies, stop));
        }
        return -1;
@@ -164,7 +164,7 @@ static int get_burstcount(struct tpm_chip *chip)
                burstcnt = (value >> 8) & 0xFFFF;
                if (burstcnt)
                        return burstcnt;
-               msleep(TPM_TIMEOUT);
+               tpm_msleep(TPM_TIMEOUT);
        } while (time_before(jiffies, stop));
        return -EBUSY;
 }
@@ -396,7 +396,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
        priv->irq = irq;
        chip->flags |= TPM_CHIP_FLAG_IRQ;
        if (!priv->irq_tested)
-               msleep(1);
+               tpm_msleep(1);
        if (!priv->irq_tested)
                disable_interrupts(chip);
        priv->irq_tested = true;
index c834f5abfc497e8ce674de2a736b9c3103bf55fa..4c10456f8a32923c0dade56265e4eeef714df641 100644 (file)
@@ -105,6 +105,7 @@ err:
 
        return  ret;
 }
+EXPORT_SYMBOL_GPL(clk_bulk_prepare);
 
 #endif /* CONFIG_HAVE_CLK_PREPARE */
 
index 62d7854e4b873fe04c38dbeaaaf4e02f5031dc45..5970a50671b9a6638d637f8a017e1cc0cf063e97 100644 (file)
@@ -315,13 +315,13 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
                        RK2928_CLKGATE_CON(10), 8, GFLAGS),
 
        GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0,
-                       RK2928_CLKGATE_CON(10), 8, GFLAGS),
+                       RK2928_CLKGATE_CON(10), 0, GFLAGS),
        GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0,
-                       RK2928_CLKGATE_CON(10), 8, GFLAGS),
+                       RK2928_CLKGATE_CON(10), 1, GFLAGS),
        GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0,
-                       RK2928_CLKGATE_CON(10), 8, GFLAGS),
+                       RK2928_CLKGATE_CON(10), 2, GFLAGS),
        GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
-                       RK2928_CLKGATE_CON(10), 8, GFLAGS),
+                       RK2928_CLKGATE_CON(2), 15, GFLAGS),
 
        COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
                        RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
@@ -541,7 +541,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
        GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS),
        GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS),
 
-       GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 2, GFLAGS),
+       GATE(0, "pclk_pmu", "pclk_pmu_pre", 0, RK2928_CLKGATE_CON(9), 2, GFLAGS),
        GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS),
 
        /* PD_MMC */
@@ -577,6 +577,8 @@ static const char *const rk3128_critical_clocks[] __initconst = {
        "aclk_peri",
        "hclk_peri",
        "pclk_peri",
+       "pclk_pmu",
+       "sclk_timer5",
 };
 
 static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np)
index e40b77583c476f4cb413b34a7c6f2a734895efec..d8d3cb67b4029ac58c905fcae097a2c6957a9f22 100644 (file)
@@ -294,6 +294,18 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = {
 #define PLL_ENABLED    (1 << 31)
 #define PLL_LOCKED     (1 << 29)
 
+static void exynos4_clk_enable_pll(u32 reg)
+{
+       u32 pll_con = readl(reg_base + reg);
+       pll_con |= PLL_ENABLED;
+       writel(pll_con, reg_base + reg);
+
+       while (!(pll_con & PLL_LOCKED)) {
+               cpu_relax();
+               pll_con = readl(reg_base + reg);
+       }
+}
+
 static void exynos4_clk_wait_for_pll(u32 reg)
 {
        u32 pll_con;
@@ -315,6 +327,9 @@ static int exynos4_clk_suspend(void)
        samsung_clk_save(reg_base, exynos4_save_pll,
                                ARRAY_SIZE(exynos4_clk_pll_regs));
 
+       exynos4_clk_enable_pll(EPLL_CON0);
+       exynos4_clk_enable_pll(VPLL_CON0);
+
        if (exynos4_soc == EXYNOS4210) {
                samsung_clk_save(reg_base, exynos4_save_soc,
                                        ARRAY_SIZE(exynos4210_clk_save));
index a1df588343f2dac1fb1c6acb8f579f3688ad6938..1de8cac99a0e93b21d38f5c23f29739ec06caab7 100644 (file)
@@ -117,7 +117,8 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
        /* Turn off the clock (and clear the event) */
        disable_timer(cs5535_event_clock);
 
-       if (clockevent_state_shutdown(&cs5535_clockevent))
+       if (clockevent_state_detached(&cs5535_clockevent) ||
+           clockevent_state_shutdown(&cs5535_clockevent))
                return IRQ_HANDLED;
 
        /* Clear the counter */
index 6a20dc8b253f4c66981e39bec82f1e2634c50f41..9a7d7f0f23feae971f1f8d959b66578e9c59833e 100644 (file)
@@ -43,7 +43,7 @@ static int numachip2_set_next_event(unsigned long delta, struct clock_event_devi
        return 0;
 }
 
-static struct clock_event_device numachip2_clockevent = {
+static const struct clock_event_device numachip2_clockevent __initconst = {
        .name            = "numachip2",
        .rating          = 400,
        .set_next_event  = numachip2_set_next_event,
index 2ff64d9d4fb31a3a10a85361d8f5b3b33039f76d..62d24690ba0205df997f588207bfe3e9b649c325 100644 (file)
@@ -36,8 +36,8 @@ static u64 notrace integrator_read_sched_clock(void)
        return -readl(sched_clk_base + TIMER_VALUE);
 }
 
-static int integrator_clocksource_init(unsigned long inrate,
-                                      void __iomem *base)
+static int __init integrator_clocksource_init(unsigned long inrate,
+                                             void __iomem *base)
 {
        u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
        unsigned long rate = inrate;
index a020da7940d6395a326d5c2ddb6dd01c8f5835e3..a753c50e9e412ea96cb69fa454e81e084a3016be 100644 (file)
@@ -106,6 +106,22 @@ static const struct of_device_id whitelist[] __initconst = {
  * platforms using "operating-points-v2" property.
  */
 static const struct of_device_id blacklist[] __initconst = {
+       { .compatible = "calxeda,highbank", },
+       { .compatible = "calxeda,ecx-2000", },
+
+       { .compatible = "marvell,armadaxp", },
+
+       { .compatible = "nvidia,tegra124", },
+
+       { .compatible = "st,stih407", },
+       { .compatible = "st,stih410", },
+
+       { .compatible = "sigma,tango4", },
+
+       { .compatible = "ti,am33xx", },
+       { .compatible = "ti,am43", },
+       { .compatible = "ti,dra7", },
+
        { }
 };
 
index b29cd339846302402dcee8333765ad670555a2b3..4bf47de6101faca3896966cd4a3214d190568b5c 100644 (file)
@@ -190,7 +190,7 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data)
 
 static const struct of_device_id ti_cpufreq_of_match[] = {
        { .compatible = "ti,am33xx", .data = &am3x_soc_data, },
-       { .compatible = "ti,am4372", .data = &am4x_soc_data, },
+       { .compatible = "ti,am43", .data = &am4x_soc_data, },
        { .compatible = "ti,dra7", .data = &dra7_soc_data },
        {},
 };
index 7080c384ad5de656e8c345a336167624b236a2bb..52a75053ee0312146e0ab879bc351b46153faee7 100644 (file)
@@ -104,13 +104,13 @@ static int __init arm_idle_init(void)
                ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
                if (ret <= 0) {
                        ret = ret ? : -ENODEV;
-                       goto out_fail;
+                       goto init_fail;
                }
 
                ret = cpuidle_register_driver(drv);
                if (ret) {
                        pr_err("Failed to register cpuidle driver\n");
-                       goto out_fail;
+                       goto init_fail;
                }
 
                /*
@@ -149,6 +149,8 @@ static int __init arm_idle_init(void)
        }
 
        return 0;
+init_fail:
+       kfree(drv);
 out_fail:
        while (--cpu >= 0) {
                dev = per_cpu(cpuidle_devices, cpu);
index 48eaf2879228371fa92fa0f314d7e3407295b78d..aa390404e85f132705e4ca80506d15d292469c7e 100644 (file)
@@ -298,8 +298,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
                data->needs_update = 0;
        }
 
-       /* resume_latency is 0 means no restriction */
-       if (resume_latency && resume_latency < latency_req)
+       if (resume_latency < latency_req &&
+           resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
                latency_req = resume_latency;
 
        /* Special case when user has set very strict latency requirement */
index d9fbbf01062bc84f17001808ea6060c8fa7d7110..0f9754e077191e07f3ad985ad2897822f99a932c 100644 (file)
@@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx {
 /* The crypto framework makes it hard to avoid this global. */
 static struct device *artpec6_crypto_dev;
 
-static struct dentry *dbgfs_root;
-
 #ifdef CONFIG_FAULT_INJECTION
 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
@@ -2984,6 +2982,8 @@ struct dbgfs_u32 {
        char *desc;
 };
 
+static struct dentry *dbgfs_root;
+
 static void artpec6_crypto_init_debugfs(void)
 {
        dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
index e36aeacd763596e76ce9eac46e3a1edd5f5443c8..1eb852765469e6633a0af44e3d6ed26ad52234ec 100644 (file)
@@ -1,6 +1,7 @@
 config CRYPTO_DEV_FSL_CAAM
        tristate "Freescale CAAM-Multicore driver backend"
        depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+       select SOC_BUS
        help
          Enables the driver module for Freescale's Cryptographic Accelerator
          and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -141,10 +142,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
          To compile this as a module, choose M here: the module
          will be called caamrng.
 
-config CRYPTO_DEV_FSL_CAAM_IMX
-       def_bool SOC_IMX6 || SOC_IMX7D
-       depends on CRYPTO_DEV_FSL_CAAM
-
 config CRYPTO_DEV_FSL_CAAM_DEBUG
        bool "Enable debug output in CAAM driver"
        depends on CRYPTO_DEV_FSL_CAAM
index dacb53fb690e9583323500333de5d74be3af20f5..027e121c6f70aa9214a29692e8a3ef1e6d886496 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/device.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/sys_soc.h>
 
 #include "compat.h"
 #include "regs.h"
@@ -19,6 +20,8 @@ bool caam_little_end;
 EXPORT_SYMBOL(caam_little_end);
 bool caam_dpaa2;
 EXPORT_SYMBOL(caam_dpaa2);
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
 
 #ifdef CONFIG_CAAM_QI
 #include "qi.h"
@@ -28,19 +31,11 @@ EXPORT_SYMBOL(caam_dpaa2);
  * i.MX targets tend to have clock control subsystems that can
  * enable/disable clocking to our device.
  */
-#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
 static inline struct clk *caam_drv_identify_clk(struct device *dev,
                                                char *clk_name)
 {
-       return devm_clk_get(dev, clk_name);
+       return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
 }
-#else
-static inline struct clk *caam_drv_identify_clk(struct device *dev,
-                                               char *clk_name)
-{
-       return NULL;
-}
-#endif
 
 /*
  * Descriptor to instantiate RNG State Handle 0 in normal mode and
@@ -430,6 +425,10 @@ static int caam_probe(struct platform_device *pdev)
 {
        int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
        u64 caam_id;
+       static const struct soc_device_attribute imx_soc[] = {
+               {.family = "Freescale i.MX"},
+               {},
+       };
        struct device *dev;
        struct device_node *nprop, *np;
        struct caam_ctrl __iomem *ctrl;
@@ -451,6 +450,8 @@ static int caam_probe(struct platform_device *pdev)
        dev_set_drvdata(dev, ctrlpriv);
        nprop = pdev->dev.of_node;
 
+       caam_imx = (bool)soc_device_match(imx_soc);
+
        /* Enable clocking */
        clk = caam_drv_identify_clk(&pdev->dev, "ipg");
        if (IS_ERR(clk)) {
index 2b5efff9ec3caa69a49bd48b1121aa680e08a2c1..17cfd23a38faa3fbe3152d5925821082f2c2dfb3 100644 (file)
@@ -67,6 +67,7 @@
  */
 
 extern bool caam_little_end;
+extern bool caam_imx;
 
 #define caam_to_cpu(len)                               \
 static inline u##len caam##len ## _to_cpu(u##len val)  \
@@ -154,13 +155,10 @@ static inline u64 rd_reg64(void __iomem *reg)
 #else /* CONFIG_64BIT */
 static inline void wr_reg64(void __iomem *reg, u64 data)
 {
-#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
-       if (caam_little_end) {
+       if (!caam_imx && caam_little_end) {
                wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
                wr_reg32((u32 __iomem *)(reg), data);
-       } else
-#endif
-       {
+       } else {
                wr_reg32((u32 __iomem *)(reg), data >> 32);
                wr_reg32((u32 __iomem *)(reg) + 1, data);
        }
@@ -168,41 +166,40 @@ static inline void wr_reg64(void __iomem *reg, u64 data)
 
 static inline u64 rd_reg64(void __iomem *reg)
 {
-#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
-       if (caam_little_end)
+       if (!caam_imx && caam_little_end)
                return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
                        (u64)rd_reg32((u32 __iomem *)(reg)));
-       else
-#endif
-               return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
-                       (u64)rd_reg32((u32 __iomem *)(reg) + 1));
+
+       return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
+               (u64)rd_reg32((u32 __iomem *)(reg) + 1));
 }
 #endif /* CONFIG_64BIT  */
 
+static inline u64 cpu_to_caam_dma64(dma_addr_t value)
+{
+       if (caam_imx)
+               return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
+                        (u64)cpu_to_caam32(upper_32_bits(value)));
+
+       return cpu_to_caam64(value);
+}
+
+static inline u64 caam_dma64_to_cpu(u64 value)
+{
+       if (caam_imx)
+               return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
+                        (u64)caam32_to_cpu(upper_32_bits(value)));
+
+       return caam64_to_cpu(value);
+}
+
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-#ifdef CONFIG_SOC_IMX7D
-#define cpu_to_caam_dma(value) \
-               (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
-                 (u64)cpu_to_caam32(upper_32_bits(value)))
-#define caam_dma_to_cpu(value) \
-               (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
-                 (u64)caam32_to_cpu(upper_32_bits(value)))
-#else
-#define cpu_to_caam_dma(value) cpu_to_caam64(value)
-#define caam_dma_to_cpu(value) caam64_to_cpu(value)
-#endif /* CONFIG_SOC_IMX7D */
+#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
+#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
 #else
 #define cpu_to_caam_dma(value) cpu_to_caam32(value)
 #define caam_dma_to_cpu(value) caam32_to_cpu(value)
-#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT  */
-
-#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
-#define cpu_to_caam_dma64(value) \
-               (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
-                (u64)cpu_to_caam32(upper_32_bits(value)))
-#else
-#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
-#endif
+#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
 
 /*
  * jr_outentry
index d2207ac5ba19cdd357ab5bbae3a128d1fa92183e..5438552bc6d783b57a23763e64c59afb90c340ae 100644 (file)
@@ -386,7 +386,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct skcipher_request req;
-       struct safexcel_inv_result result = { 0 };
+       struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;
 
        memset(&req, 0, sizeof(struct skcipher_request));
index 3f819399cd95519a9956ed1d3ecba76fa2aa62b4..3980f946874fa08b64288b5d8b04bcac9134a0ad 100644 (file)
@@ -419,7 +419,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct ahash_request req;
-       struct safexcel_inv_result result = { 0 };
+       struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;
 
        memset(&req, 0, sizeof(struct ahash_request));
index b585ce54a8028ccfabd61c8a0e4a768f17328345..4835dd4a9e5075e3c652ea78072c515fc5537fd1 100644 (file)
@@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
 {
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
        struct scatterlist sg[1], *tsg;
-       int err = 0, len = 0, reg, ncp;
+       int err = 0, len = 0, reg, ncp = 0;
        unsigned int i;
-       const u32 *buffer = (const u32 *)rctx->buffer;
+       u32 *buffer = (void *)rctx->buffer;
 
        rctx->sg = hdev->req->src;
        rctx->total = hdev->req->nbytes;
@@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
                reg |= HASH_CR_DMAA;
                stm32_hash_write(hdev, HASH_CR, reg);
 
-               for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++)
-                       stm32_hash_write(hdev, HASH_DIN, buffer[i]);
-
-               stm32_hash_set_nblw(hdev, ncp);
+               if (ncp) {
+                       memset(buffer + ncp, 0,
+                              DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
+                       writesl(hdev->io_base + HASH_DIN, buffer,
+                               DIV_ROUND_UP(ncp, sizeof(u32)));
+               }
+               stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32)));
                reg = stm32_hash_read(hdev, HASH_STR);
                reg |= HASH_STR_DCAL;
                stm32_hash_write(hdev, HASH_STR, reg);
index 79791c690858fb1cea4aa057fb726efe46160df9..dff88838dce762c33ca7e898420f051c3a2f80cb 100644 (file)
@@ -1756,9 +1756,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
                req_ctx->swinit = 0;
        } else {
                desc->ptr[1] = zero_entry;
-               /* Indicate next op is not the first. */
-               req_ctx->first = 0;
        }
+       /* Indicate next op is not the first. */
+       req_ctx->first = 0;
 
        /* HMAC key */
        if (ctx->keylen)
@@ -1769,7 +1769,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
 
        sg_count = edesc->src_nents ?: 1;
        if (is_sec1 && sg_count > 1)
-               sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
+               sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
        else
                sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
                                      DMA_TO_DEVICE);
@@ -3057,7 +3057,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
                t_alg->algt.alg.hash.final = ahash_final;
                t_alg->algt.alg.hash.finup = ahash_finup;
                t_alg->algt.alg.hash.digest = ahash_digest;
-               t_alg->algt.alg.hash.setkey = ahash_setkey;
+               if (!strncmp(alg->cra_name, "hmac", 4))
+                       t_alg->algt.alg.hash.setkey = ahash_setkey;
                t_alg->algt.alg.hash.import = ahash_import;
                t_alg->algt.alg.hash.export = ahash_export;
 
index 66fb40d0ebdbbec521499cd58cf2e1d55c195878..03830634e141e70782c96ca42945f30babc0283a 100644 (file)
@@ -383,7 +383,7 @@ err_put_fd:
        return err;
 }
 
-static void sync_fill_fence_info(struct dma_fence *fence,
+static int sync_fill_fence_info(struct dma_fence *fence,
                                 struct sync_fence_info *info)
 {
        strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
@@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence,
                test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
                ktime_to_ns(fence->timestamp) :
                ktime_set(0, 0);
+
+       return info->status;
 }
 
 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
@@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
         * sync_fence_info and return the actual number of fences on
         * info->num_fences.
         */
-       if (!info.num_fences)
+       if (!info.num_fences) {
+               info.status = dma_fence_is_signaled(sync_file->fence);
                goto no_fences;
+       } else {
+               info.status = 1;
+       }
 
        if (info.num_fences < num_fences)
                return -EINVAL;
@@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
        if (!fence_info)
                return -ENOMEM;
 
-       for (i = 0; i < num_fences; i++)
-               sync_fill_fence_info(fences[i], &fence_info[i]);
+       for (i = 0; i < num_fences; i++) {
+               int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+               info.status = info.status <= 0 ? info.status : status;
+       }
 
        if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
                         size)) {
@@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 
 no_fences:
        sync_file_get_name(sync_file, info.name, sizeof(info.name));
-       info.status = dma_fence_is_signaled(sync_file->fence);
        info.num_fences = num_fences;
 
        if (copy_to_user((void __user *)arg, &info, sizeof(info)))
index 32905d5606ac8b90585148e26ace36c2b7352808..55f9c62ee54b063521bfbf0d4b19627b43e8c155 100644 (file)
@@ -212,11 +212,12 @@ struct msgdma_device {
 static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
 {
        struct msgdma_sw_desc *desc;
+       unsigned long flags;
 
-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);
        desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
        list_del(&desc->node);
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);
 
        INIT_LIST_HEAD(&desc->tx_list);
 
@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
        struct msgdma_device *mdev = to_mdev(tx->chan);
        struct msgdma_sw_desc *new;
        dma_cookie_t cookie;
+       unsigned long flags;
 
        new = tx_to_desc(tx);
-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);
        cookie = dma_cookie_assign(tx);
 
        list_add_tail(&new->node, &mdev->pending_list);
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);
 
        return cookie;
 }
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
        struct msgdma_extended_desc *desc;
        size_t copy;
        u32 desc_cnt;
+       unsigned long irqflags;
 
        desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
 
-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
-               spin_unlock_bh(&mdev->lock);
+               spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
        mdev->desc_free_cnt -= desc_cnt;
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, irqflags);
 
        do {
                /* Allocate and populate the descriptor */
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
        u32 desc_cnt = 0, i;
        struct scatterlist *sg;
        u32 stride;
+       unsigned long irqflags;
 
        for_each_sg(sgl, sg, sg_len, i)
                desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
 
-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
-               spin_unlock_bh(&mdev->lock);
+               spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
        mdev->desc_free_cnt -= desc_cnt;
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, irqflags);
 
        avail = sg_dma_len(sgl);
 
@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
 static void msgdma_issue_pending(struct dma_chan *chan)
 {
        struct msgdma_device *mdev = to_mdev(chan);
+       unsigned long flags;
 
-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);
        msgdma_start_transfer(mdev);
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);
 }
 
 /**
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
 static void msgdma_free_chan_resources(struct dma_chan *dchan)
 {
        struct msgdma_device *mdev = to_mdev(dchan);
+       unsigned long flags;
 
-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);
        msgdma_free_descriptors(mdev);
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);
        kfree(mdev->sw_desq);
 }
 
@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
        u32 count;
        u32 __maybe_unused size;
        u32 __maybe_unused status;
+       unsigned long flags;
 
-       spin_lock(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);
 
        /* Read number of responses that are available */
        count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data)
                 * bits. So we need to just drop these values.
                 */
                size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
-               status = ioread32(mdev->resp - MSGDMA_RESP_STATUS);
+               status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
 
                msgdma_complete_descriptor(mdev);
                msgdma_chan_desc_cleanup(mdev);
        }
 
-       spin_unlock(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);
 }
 
 /**
index 3879f80a4815cb27ba4329e29510752e34331ba4..a7ea20e7b8e94cd9527d73785578abd599e5029d 100644 (file)
@@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
        struct edma_desc *edesc;
        struct device *dev = chan->device->dev;
        struct edma_chan *echan = to_edma_chan(chan);
-       unsigned int width, pset_len;
+       unsigned int width, pset_len, array_size;
 
        if (unlikely(!echan || !len))
                return NULL;
 
+       /* Align the array size (acnt block) with the transfer properties */
+       switch (__ffs((src | dest | len))) {
+       case 0:
+               array_size = SZ_32K - 1;
+               break;
+       case 1:
+               array_size = SZ_32K - 2;
+               break;
+       default:
+               array_size = SZ_32K - 4;
+               break;
+       }
+
        if (len < SZ_64K) {
                /*
                 * Transfer size less than 64K can be handled with one paRAM
@@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
                 * When the full_length is multibple of 32767 one slot can be
                 * used to complete the transfer.
                 */
-               width = SZ_32K - 1;
+               width = array_size;
                pset_len = rounddown(len, width);
                /* One slot is enough for lengths multiple of (SZ_32K -1) */
                if (unlikely(pset_len == len))
@@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
                }
                dest += pset_len;
                src += pset_len;
-               pset_len = width = len % (SZ_32K - 1);
+               pset_len = width = len % array_size;
 
                ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
                                       width, pset_len, DMA_MEM_TO_MEM);
index 2f65a8fde21d4f5be2e2a623676047e4674da27f..f1d04b70ee672af4c99dcd21298d03c65898be96 100644 (file)
@@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
        mutex_lock(&xbar->mutex);
        map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
                                            xbar->dma_requests);
-       mutex_unlock(&xbar->mutex);
        if (map->xbar_out == xbar->dma_requests) {
+               mutex_unlock(&xbar->mutex);
                dev_err(&pdev->dev, "Run out of free DMA requests\n");
                kfree(map);
                return ERR_PTR(-ENOMEM);
        }
        set_bit(map->xbar_out, xbar->dma_inuse);
+       mutex_unlock(&xbar->mutex);
 
        map->xbar_in = (u16)dma_spec->args[0];
 
index 1cb2d1c070c31ba1a80e8e9460d0d102e9dddd74..a94601d5939e2f51c2ebfd542cbf90dcfc2f0b7c 100644 (file)
@@ -238,7 +238,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
 
        efi_random_get_seed(sys_table);
 
-       if (!nokaslr()) {
+       /* hibernation expects the runtime regions to stay in the same place */
+       if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) {
                /*
                 * Randomize the base of the UEFI runtime services region.
                 * Preserve the 2 MB alignment of the region by taking a
index 08129b7b80ab9bbf2320a4061b4059554d90973f..41c48a1e8baaa8e46e9609c0b76a2698b5ac5c49 100644 (file)
@@ -593,6 +593,9 @@ static long efi_runtime_query_capsulecaps(unsigned long arg)
        if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps)))
                return -EFAULT;
 
+       if (qcaps.capsule_count == ULONG_MAX)
+               return -EINVAL;
+
        capsules = kcalloc(qcaps.capsule_count + 1,
                           sizeof(efi_capsule_header_t), GFP_KERNEL);
        if (!capsules)
index 08629ee69d1121604ee74949f0bbe0dc39a229fd..00e73d28077cee3732b61973222422f152ebb033 100644 (file)
@@ -361,12 +361,12 @@ static const struct fpga_manager_ops altera_cvp_ops = {
        .write_complete = altera_cvp_write_complete,
 };
 
-static ssize_t show_chkcfg(struct device_driver *dev, char *buf)
+static ssize_t chkcfg_show(struct device_driver *dev, char *buf)
 {
        return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
 }
 
-static ssize_t store_chkcfg(struct device_driver *drv, const char *buf,
+static ssize_t chkcfg_store(struct device_driver *drv, const char *buf,
                            size_t count)
 {
        int ret;
@@ -378,7 +378,7 @@ static ssize_t store_chkcfg(struct device_driver *drv, const char *buf,
        return count;
 }
 
-static DRIVER_ATTR(chkcfg, 0600, show_chkcfg, store_chkcfg);
+static DRIVER_ATTR_RW(chkcfg);
 
 static int altera_cvp_probe(struct pci_dev *pdev,
                            const struct pci_device_id *dev_id);
index 3388d54ba11468a0ab013f1160b1ea2a258ca354..3f80f167ed56d917405aaad1dd5e09598c7fc9e6 100644 (file)
@@ -453,7 +453,8 @@ config GPIO_TS4800
 config GPIO_THUNDERX
        tristate "Cavium ThunderX/OCTEON-TX GPIO"
        depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
-       depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY
+       depends on PCI_MSI
+       select IRQ_DOMAIN_HIERARCHY
        select IRQ_FASTEOI_HIERARCHY_HANDLERS
        help
          Say yes here to support the on-chip GPIO lines on the ThunderX
index dbf869fb63ced2cb2884be234c5275f2893237b2..3233b72b682809e197ed2528f2c9bc634400d878 100644 (file)
@@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
        if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
                irq_set_handler_locked(d, handle_level_irq);
        else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-               irq_set_handler_locked(d, handle_edge_irq);
+               /*
+                * Edge IRQs are already cleared/acked in irq_handler and
+                * not need to be masked, as result handle_edge_irq()
+                * logic is excessed here and may cause lose of interrupts.
+                * So just use handle_simple_irq.
+                */
+               irq_set_handler_locked(d, handle_simple_irq);
 
        return 0;
 
@@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
 static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
 {
        void __iomem *isr_reg = NULL;
-       u32 isr;
+       u32 enabled, isr, level_mask;
        unsigned int bit;
        struct gpio_bank *bank = gpiobank;
        unsigned long wa_lock_flags;
@@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
        pm_runtime_get_sync(bank->chip.parent);
 
        while (1) {
-               u32 isr_saved, level_mask = 0;
-               u32 enabled;
-
                raw_spin_lock_irqsave(&bank->lock, lock_flags);
 
                enabled = omap_get_gpio_irqbank_mask(bank);
-               isr_saved = isr = readl_relaxed(isr_reg) & enabled;
+               isr = readl_relaxed(isr_reg) & enabled;
 
                if (bank->level_mask)
                        level_mask = bank->level_mask & enabled;
+               else
+                       level_mask = 0;
 
                /* clear edge sensitive interrupts before handler(s) are
                called so that we don't miss any interrupt occurred while
                executing them */
-               omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
-               omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
-               omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
+               if (isr & ~level_mask)
+                       omap_clear_gpio_irqbank(bank, isr & ~level_mask);
 
                raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
 
@@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 
 /*---------------------------------------------------------------------*/
 
-static void __init omap_gpio_show_rev(struct gpio_bank *bank)
+static void omap_gpio_show_rev(struct gpio_bank *bank)
 {
        static bool called;
        u32 rev;
index 4d2113530735185e5dcdc87553b2764df8121242..eb4528c87c0b3977420a2108c7feaaf9b2a95869 100644 (file)
@@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 
        if (pin <= 255) {
                char ev_name[5];
-               sprintf(ev_name, "_%c%02X",
+               sprintf(ev_name, "_%c%02hhX",
                        agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
                        pin);
                if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
index 7ef6c28a34d991a2bba6f20224284072330c22d1..bc746131987ffda438988f7b11bbb413b4c335c3 100644 (file)
@@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
        placement.busy_placement = &placements;
        placements.fpfn = 0;
        placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
-       placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+       placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
 
        r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
        if (unlikely(r))
index d228f5a990449f0b2c28314c972db2664c6bc1cd..dbbe986f90f29ffb6c77662866e336d8130d1af2 100644 (file)
@@ -636,7 +636,194 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
                                NUM_BANKS(ADDR_SURF_2_BANK);
                for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
                        WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
-       } else if (adev->asic_type == CHIP_OLAND || adev->asic_type == CHIP_HAINAN) {
+       } else if (adev->asic_type == CHIP_OLAND) {
+               tilemode[0] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+               tilemode[1] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+               tilemode[2] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+               tilemode[3] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+               tilemode[4] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[5] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(split_equal_to_row_size) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[6] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(split_equal_to_row_size) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[7] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(split_equal_to_row_size) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+               tilemode[8] =   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[9] =   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[10] =  MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+               tilemode[11] =  MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[12] =  MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[13] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[14] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[15] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[16] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[17] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                               TILE_SPLIT(split_equal_to_row_size) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[21] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[22] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+               tilemode[23] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[24] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                               NUM_BANKS(ADDR_SURF_16_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+               tilemode[25] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                               PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                               NUM_BANKS(ADDR_SURF_8_BANK) |
+                               BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1);
+               for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+                       WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
+       } else if (adev->asic_type == CHIP_HAINAN) {
                tilemode[0] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                PIPE_CONFIG(ADDR_SURF_P2) |
index 31db356476f8f1f72f488c54282a09409bcbb749..430a6b4dfac972f7780e04ac3fc8b1303086878e 100644 (file)
@@ -225,11 +225,7 @@ static int uvd_v6_0_suspend(void *handle)
        if (r)
                return r;
 
-       /* Skip this for APU for now */
-       if (!(adev->flags & AMD_IS_APU))
-               r = amdgpu_uvd_suspend(adev);
-
-       return r;
+       return amdgpu_uvd_suspend(adev);
 }
 
 static int uvd_v6_0_resume(void *handle)
@@ -237,12 +233,10 @@ static int uvd_v6_0_resume(void *handle)
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       /* Skip this for APU for now */
-       if (!(adev->flags & AMD_IS_APU)) {
-               r = amdgpu_uvd_resume(adev);
-               if (r)
-                       return r;
-       }
+       r = amdgpu_uvd_resume(adev);
+       if (r)
+               return r;
+
        return uvd_v6_0_hw_init(adev);
 }
 
index e4a8c2e52cb2c14ab566d015b178a2e6bd243cac..660b3fbade4194f796ebe0be8e4fc7f7e9c46109 100644 (file)
@@ -892,6 +892,8 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
        int err = 0;
 
        dev = kfd_device_by_id(args->gpu_id);
+       if (!dev)
+               return -EINVAL;
 
        dev->kfd2kgd->get_tile_config(dev->kgd, &config);
 
index 5979158c3f7b93e96627af2c03cd814228b0fe01..944abfad39c1f67447ca720d5e47c4b086336a82 100644 (file)
@@ -292,7 +292,10 @@ static int create_signal_event(struct file *devkfd,
                                struct kfd_event *ev)
 {
        if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
-               pr_warn("Signal event wasn't created because limit was reached\n");
+               if (!p->signal_event_limit_reached) {
+                       pr_warn("Signal event wasn't created because limit was reached\n");
+                       p->signal_event_limit_reached = true;
+               }
                return -ENOMEM;
        }
 
index 681b639f51330b78af84854880cae988fd90b252..ed71ad40e8f797ca3c7b7d4f129f5e9fda382d27 100644 (file)
@@ -183,8 +183,8 @@ static void uninitialize(struct kernel_queue *kq)
 {
        if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
                kq->mqd->destroy_mqd(kq->mqd,
-                                       NULL,
-                                       false,
+                                       kq->queue->mqd,
+                                       KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
                                        QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
                                        kq->queue->pipe,
                                        kq->queue->queue);
@@ -210,6 +210,11 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
        uint32_t wptr, rptr;
        unsigned int *queue_address;
 
+       /* When rptr == wptr, the buffer is empty.
+        * When rptr == wptr + 1, the buffer is full.
+        * It is always rptr that advances to the position of wptr, rather than
+        * the opposite. So we can only use up to queue_size_dwords - 1 dwords.
+        */
        rptr = *kq->rptr_kernel;
        wptr = *kq->wptr_kernel;
        queue_address = (unsigned int *)kq->pq_kernel_addr;
@@ -219,11 +224,10 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
        pr_debug("wptr: %d\n", wptr);
        pr_debug("queue_address 0x%p\n", queue_address);
 
-       available_size = (rptr - 1 - wptr + queue_size_dwords) %
+       available_size = (rptr + queue_size_dwords - 1 - wptr) %
                                                        queue_size_dwords;
 
-       if (packet_size_in_dwords >= queue_size_dwords ||
-                       packet_size_in_dwords >= available_size) {
+       if (packet_size_in_dwords > available_size) {
                /*
                 * make sure calling functions know
                 * acquire_packet_buffer() failed
@@ -233,6 +237,14 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
        }
 
        if (wptr + packet_size_in_dwords >= queue_size_dwords) {
+               /* make sure after rolling back to position 0, there is
+                * still enough space.
+                */
+               if (packet_size_in_dwords >= rptr) {
+                       *buffer_ptr = NULL;
+                       return -ENOMEM;
+               }
+               /* fill nops, roll back and start at position 0 */
                while (wptr > 0) {
                        queue_address[wptr] = kq->nop_packet;
                        wptr = (wptr + 1) % queue_size_dwords;
index b397ec726400c2a52533356363b36bfd275dfb39..b87e96cee5facfea112a874f4e69bfad52f3b70b 100644 (file)
@@ -521,6 +521,7 @@ struct kfd_process {
        struct list_head signal_event_pages;
        u32 next_nonsignal_event_id;
        size_t signal_event_count;
+       bool signal_event_limit_reached;
 };
 
 /**
index 1cae95e2b13adedfb19760279861fae827d057a7..03bec765b03d949de8bdac3cd8db62f8c684c92a 100644 (file)
@@ -143,7 +143,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
        int num_queues = 0;
        struct queue *cur;
 
-       memset(&q_properties, 0, sizeof(struct queue_properties));
        memcpy(&q_properties, properties, sizeof(struct queue_properties));
        q = NULL;
        kq = NULL;
index c2743233ba10ed8dd9b55338730fbff9722ae680..b526f49be65d066d5eb49d7339ebe3e089c08877 100644 (file)
@@ -830,7 +830,7 @@ uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
 {
        uint32_t reference_clock, tmp;
        struct cgs_display_info info = {0};
-       struct cgs_mode_info mode_info;
+       struct cgs_mode_info mode_info = {0};
 
        info.mode_info = &mode_info;
 
@@ -3948,10 +3948,9 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
        uint32_t ref_clock;
        uint32_t refresh_rate = 0;
        struct cgs_display_info info = {0};
-       struct cgs_mode_info mode_info;
+       struct cgs_mode_info mode_info = {0};
 
        info.mode_info = &mode_info;
-
        cgs_get_active_displays_info(hwmgr->device, &info);
        num_active_displays = info.display_count;
 
@@ -3967,6 +3966,7 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
        frame_time_in_us = 1000000 / refresh_rate;
 
        pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+
        data->frame_time_x2 = frame_time_in_us * 2 / 100;
 
        display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
index 97c94f9683fa047392ba62f128586e0b7e4492bc..38cea6fb25a8b9221d64b43da04c4268a2c986b8 100644 (file)
@@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
 {
        struct amd_sched_rq *rq = entity->rq;
-       int r;
 
        if (!amd_sched_entity_is_initialized(sched, entity))
                return;
+
        /**
         * The client will not queue more IBs during this fini, consume existing
-        * queued IBs or discard them on SIGKILL
+        * queued IBs
        */
-       if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
-               r = -ERESTARTSYS;
-       else
-               r = wait_event_killable(sched->job_scheduled,
-                                       amd_sched_entity_is_idle(entity));
-       amd_sched_rq_remove_entity(rq, entity);
-       if (r) {
-               struct amd_sched_job *job;
+       wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
 
-               /* Park the kernel for a moment to make sure it isn't processing
-                * our enity.
-                */
-               kthread_park(sched->thread);
-               kthread_unpark(sched->thread);
-               while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
-                       sched->ops->free_job(job);
-
-       }
+       amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
 }
 
index 4e53aae9a1fb19fc6710202188a53119c566aef7..0028591f3f959ced1ad520ee280fb481d7a52898 100644 (file)
@@ -2960,6 +2960,7 @@ out:
                drm_modeset_backoff(&ctx);
        }
 
+       drm_atomic_state_put(state);
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
 
index 5a634594a6cea2d2be20e19a0e359229a860775c..57881167ccd22c9c16df18e1d93401edef2e1f82 100644 (file)
@@ -551,12 +551,15 @@ static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
 void etnaviv_gem_free_object(struct drm_gem_object *obj)
 {
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+       struct etnaviv_drm_private *priv = obj->dev->dev_private;
        struct etnaviv_vram_mapping *mapping, *tmp;
 
        /* object should not be active */
        WARN_ON(is_active(etnaviv_obj));
 
+       mutex_lock(&priv->gem_lock);
        list_del(&etnaviv_obj->gem_node);
+       mutex_unlock(&priv->gem_lock);
 
        list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
                                 obj_node) {
index 026ef4e02f85cab130586c5c0b8c9b97bd0101bb..46dfe0737f438d37e4e65dec320e9a7e773e3856 100644 (file)
@@ -445,8 +445,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
        cmdbuf->user_size = ALIGN(args->stream_size, 8);
 
        ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
-       if (ret == 0)
-               cmdbuf = NULL;
+       if (ret)
+               goto out;
+
+       cmdbuf = NULL;
 
        if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
                /*
index 730b8d9db18704c42346b9553c8d7ab737493392..6be5b53c3b279f42ee6dbcc445173fa539721ce5 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/iopoll.h>
+#include <linux/irq.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
index b1f7299600f040947240618cf2cfc262d1847b70..82b72425a42f7977c993134a2142434d8689227f 100644 (file)
@@ -168,23 +168,21 @@ static struct drm_driver exynos_drm_driver = {
 static int exynos_drm_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
-       struct drm_connector *connector;
-       struct drm_connector_list_iter conn_iter;
+       struct exynos_drm_private *private;
 
        if (pm_runtime_suspended(dev) || !drm_dev)
                return 0;
 
-       drm_connector_list_iter_begin(drm_dev, &conn_iter);
-       drm_for_each_connector_iter(connector, &conn_iter) {
-               int old_dpms = connector->dpms;
-
-               if (connector->funcs->dpms)
-                       connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+       private = drm_dev->dev_private;
 
-               /* Set the old mode back to the connector for resume */
-               connector->dpms = old_dpms;
+       drm_kms_helper_poll_disable(drm_dev);
+       exynos_drm_fbdev_suspend(drm_dev);
+       private->suspend_state = drm_atomic_helper_suspend(drm_dev);
+       if (IS_ERR(private->suspend_state)) {
+               exynos_drm_fbdev_resume(drm_dev);
+               drm_kms_helper_poll_enable(drm_dev);
+               return PTR_ERR(private->suspend_state);
        }
-       drm_connector_list_iter_end(&conn_iter);
 
        return 0;
 }
@@ -192,22 +190,15 @@ static int exynos_drm_suspend(struct device *dev)
 static int exynos_drm_resume(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
-       struct drm_connector *connector;
-       struct drm_connector_list_iter conn_iter;
+       struct exynos_drm_private *private;
 
        if (pm_runtime_suspended(dev) || !drm_dev)
                return 0;
 
-       drm_connector_list_iter_begin(drm_dev, &conn_iter);
-       drm_for_each_connector_iter(connector, &conn_iter) {
-               if (connector->funcs->dpms) {
-                       int dpms = connector->dpms;
-
-                       connector->dpms = DRM_MODE_DPMS_OFF;
-                       connector->funcs->dpms(connector, dpms);
-               }
-       }
-       drm_connector_list_iter_end(&conn_iter);
+       private = drm_dev->dev_private;
+       drm_atomic_helper_resume(drm_dev, private->suspend_state);
+       exynos_drm_fbdev_resume(drm_dev);
+       drm_kms_helper_poll_enable(drm_dev);
 
        return 0;
 }
@@ -439,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev)
 
        kfree(drm->dev_private);
        drm->dev_private = NULL;
+       dev_set_drvdata(dev, NULL);
 
        drm_dev_unref(drm);
 }
index cf131c2aa23e431f13a44fe1985bad58c32da250..f8bae4cb4823653562de92a20bec51f87747d07f 100644 (file)
@@ -202,6 +202,7 @@ struct drm_exynos_file_private {
  */
 struct exynos_drm_private {
        struct drm_fb_helper *fb_helper;
+       struct drm_atomic_state *suspend_state;
 
        struct device *dma_dev;
        void *mapping;
index c3a068409b484fda04bbf837626f011beb875c1f..dfb66ecf417b5c8ba9bd26b8966a52a5f14b4aaa 100644 (file)
@@ -18,6 +18,8 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/exynos_drm.h>
 
+#include <linux/console.h>
+
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_fbdev.h"
@@ -285,3 +287,21 @@ void exynos_drm_output_poll_changed(struct drm_device *dev)
 
        drm_fb_helper_hotplug_event(fb_helper);
 }
+
+void exynos_drm_fbdev_suspend(struct drm_device *dev)
+{
+       struct exynos_drm_private *private = dev->dev_private;
+
+       console_lock();
+       drm_fb_helper_set_suspend(private->fb_helper, 1);
+       console_unlock();
+}
+
+void exynos_drm_fbdev_resume(struct drm_device *dev)
+{
+       struct exynos_drm_private *private = dev->dev_private;
+
+       console_lock();
+       drm_fb_helper_set_suspend(private->fb_helper, 0);
+       console_unlock();
+}
index 330eef87f7180b1b726ff41933874a6568b0474d..645d1bb7f665faed0dfa30bf3ab36c090a2c1c1f 100644 (file)
@@ -21,6 +21,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev);
 void exynos_drm_fbdev_fini(struct drm_device *dev);
 void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
 void exynos_drm_output_poll_changed(struct drm_device *dev);
+void exynos_drm_fbdev_suspend(struct drm_device *drm);
+void exynos_drm_fbdev_resume(struct drm_device *drm);
 
 #else
 
@@ -39,6 +41,14 @@ static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
 
 #define exynos_drm_output_poll_changed (NULL)
 
+static inline void exynos_drm_fbdev_suspend(struct drm_device *drm)
+{
+}
+
+static inline void exynos_drm_fbdev_resume(struct drm_device *drm)
+{
+}
+
 #endif
 
 #endif
index 214fa5e51963898ee5288f98d536681cdbebdcd6..0109ff40b1db2a95da6997e982fe0adaf4496196 100644 (file)
@@ -944,22 +944,27 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
        struct drm_device *dev = encoder->dev;
        struct drm_connector *connector;
        struct drm_display_mode *m;
+       struct drm_connector_list_iter conn_iter;
        int mode_ok;
 
        drm_mode_set_crtcinfo(adjusted_mode, 0);
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
                if (connector->encoder == encoder)
                        break;
        }
+       if (connector)
+               drm_connector_get(connector);
+       drm_connector_list_iter_end(&conn_iter);
 
-       if (connector->encoder != encoder)
+       if (!connector)
                return true;
 
        mode_ok = hdmi_mode_valid(connector, adjusted_mode);
 
        if (mode_ok == MODE_OK)
-               return true;
+               goto cleanup;
 
        /*
         * Find the most suitable mode and copy it to adjusted_mode.
@@ -979,6 +984,9 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
                }
        }
 
+cleanup:
+       drm_connector_put(connector);
+
        return true;
 }
 
index 40af17ec6312533d4080cc1581faa4683a0405b9..ff3154fe6588b6aa48d4c2618d65f1f2d912b137 100644 (file)
@@ -197,78 +197,65 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
        void *p_data, unsigned int bytes)
 {
-       unsigned int bar_index =
-               (rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
        u32 new = *(u32 *)(p_data);
        bool lo = IS_ALIGNED(offset, 8);
        u64 size;
        int ret = 0;
        bool mmio_enabled =
                vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
+       struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;
 
-       if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
-               return -EINVAL;
-
+       /*
+        * Power-up software can determine how much address
+        * space the device requires by writing a value of
+        * all 1's to the register and then reading the value
+        * back. The device will return 0's in all don't-care
+        * address bits.
+        */
        if (new == 0xffffffff) {
-               /*
-                * Power-up software can determine how much address
-                * space the device requires by writing a value of
-                * all 1's to the register and then reading the value
-                * back. The device will return 0's in all don't-care
-                * address bits.
-                */
-               size = vgpu->cfg_space.bar[bar_index].size;
-               if (lo) {
-                       new = rounddown(new, size);
-               } else {
-                       u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
-                       /* for 32bit mode bar it returns all-0 in upper 32
-                        * bit, for 64bit mode bar it will calculate the
-                        * size with lower 32bit and return the corresponding
-                        * value
+               switch (offset) {
+               case PCI_BASE_ADDRESS_0:
+               case PCI_BASE_ADDRESS_1:
+                       size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size -1);
+                       intel_vgpu_write_pci_bar(vgpu, offset,
+                                               size >> (lo ? 0 : 32), lo);
+                       /*
+                        * Untrap the BAR, since guest hasn't configured a
+                        * valid GPA
                         */
-                       if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
-                               new &= (~(size-1)) >> 32;
-                       else
-                               new = 0;
-               }
-               /*
-                * Unmapp & untrap the BAR, since guest hasn't configured a
-                * valid GPA
-                */
-               switch (bar_index) {
-               case INTEL_GVT_PCI_BAR_GTTMMIO:
                        ret = trap_gttmmio(vgpu, false);
                        break;
-               case INTEL_GVT_PCI_BAR_APERTURE:
+               case PCI_BASE_ADDRESS_2:
+               case PCI_BASE_ADDRESS_3:
+                       size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1);
+                       intel_vgpu_write_pci_bar(vgpu, offset,
+                                               size >> (lo ? 0 : 32), lo);
                        ret = map_aperture(vgpu, false);
                        break;
+               default:
+                       /* Unimplemented BARs */
+                       intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false);
                }
-               intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
        } else {
-               /*
-                * Unmapp & untrap the old BAR first, since guest has
-                * re-configured the BAR
-                */
-               switch (bar_index) {
-               case INTEL_GVT_PCI_BAR_GTTMMIO:
-                       ret = trap_gttmmio(vgpu, false);
+               switch (offset) {
+               case PCI_BASE_ADDRESS_0:
+               case PCI_BASE_ADDRESS_1:
+                       /*
+                        * Untrap the old BAR first, since guest has
+                        * re-configured the BAR
+                        */
+                       trap_gttmmio(vgpu, false);
+                       intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+                       ret = trap_gttmmio(vgpu, mmio_enabled);
                        break;
-               case INTEL_GVT_PCI_BAR_APERTURE:
-                       ret = map_aperture(vgpu, false);
+               case PCI_BASE_ADDRESS_2:
+               case PCI_BASE_ADDRESS_3:
+                       map_aperture(vgpu, false);
+                       intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+                       ret = map_aperture(vgpu, mmio_enabled);
                        break;
-               }
-               intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
-               /* Track the new BAR */
-               if (mmio_enabled) {
-                       switch (bar_index) {
-                       case INTEL_GVT_PCI_BAR_GTTMMIO:
-                               ret = trap_gttmmio(vgpu, true);
-                               break;
-                       case INTEL_GVT_PCI_BAR_APERTURE:
-                               ret = map_aperture(vgpu, true);
-                               break;
-                       }
+               default:
+                       intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
                }
        }
        return ret;
@@ -299,10 +286,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
        }
 
        switch (rounddown(offset, 4)) {
-       case PCI_BASE_ADDRESS_0:
-       case PCI_BASE_ADDRESS_1:
-       case PCI_BASE_ADDRESS_2:
-       case PCI_BASE_ADDRESS_3:
+       case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
                if (WARN_ON(!IS_ALIGNED(offset, 4)))
                        return -EINVAL;
                return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
@@ -344,7 +328,6 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
        struct intel_gvt *gvt = vgpu->gvt;
        const struct intel_gvt_device_info *info = &gvt->device_info;
        u16 *gmch_ctl;
-       int i;
 
        memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
               info->cfg_space_size);
@@ -371,13 +354,13 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
         */
        memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
        memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8);
        memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
 
-       for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
-               vgpu->cfg_space.bar[i].size = pci_resource_len(
-                                             gvt->dev_priv->drm.pdev, i * 2);
-               vgpu->cfg_space.bar[i].tracked = false;
-       }
+       vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
+                               pci_resource_len(gvt->dev_priv->drm.pdev, 0);
+       vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
+                               pci_resource_len(gvt->dev_priv->drm.pdev, 2);
 }
 
 /**
index 21c36e256884fb8414352d5ffdd1f3baaa6606a3..d4726a3358a4a5b241ff50af1efb9ddcd64f4fbf 100644 (file)
@@ -2723,6 +2723,9 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
        uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
        unsigned char *bb_start_sva;
 
+       if (!wa_ctx->per_ctx.valid)
+               return 0;
+
        per_ctx_start[0] = 0x18800001;
        per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
 
index 91b4300f3b394a59e6ad2f19ea5fae9834d82806..e5320b4eb698e9793ab1d6d8284935422b706a89 100644 (file)
@@ -701,8 +701,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
                        CACHELINE_BYTES;
                workload->wa_ctx.per_ctx.guest_gma =
                        per_ctx & PER_CTX_ADDR_MASK;
-
-               WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
+               workload->wa_ctx.per_ctx.valid = per_ctx & 1;
        }
 
        if (emulate_schedule_in)
index 2294466dd4150b943ad80c53a8ca3b611c9324c8..a5bed2e71b9260afbe5ee3db7bb6649f29b826a2 100644 (file)
@@ -1429,18 +1429,7 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
        return 0;
 }
 
-static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
-               unsigned int offset, void *p_data, unsigned int bytes)
-{
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-
-       mmio_hw_access_pre(dev_priv);
-       vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
-       mmio_hw_access_post(dev_priv);
-       return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
-}
-
-static int instdone_mmio_read(struct intel_vgpu *vgpu,
+static int mmio_read_from_hw(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -1589,6 +1578,8 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
        MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
        MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
        MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
+       if (HAS_BSD2(dev_priv)) \
+               MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
 } while (0)
 
 #define MMIO_RING_D(prefix, d) \
@@ -1635,10 +1626,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x6c)
-       MMIO_RING_DFH(RING_REG, D_ALL, 0, instdone_mmio_read, NULL);
-       MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_ALL, instdone_mmio_read, NULL);
+       MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
 #undef RING_REG
-       MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, instdone_mmio_read, NULL);
+       MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
 
        MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
        MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
@@ -1648,7 +1638,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
-       MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL);
        MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
 
        /* RING MODE */
@@ -1662,9 +1652,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
                        NULL, NULL);
        MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
-                       ring_timestamp_mmio_read, NULL);
+                       mmio_read_from_hw, NULL);
        MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
-                       ring_timestamp_mmio_read, NULL);
+                       mmio_read_from_hw, NULL);
 
        MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
@@ -2411,9 +2401,6 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        struct drm_i915_private *dev_priv = gvt->dev_priv;
        int ret;
 
-       MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
-                       intel_vgpu_reg_imr_handler);
-
        MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
        MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
        MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
@@ -2476,68 +2463,34 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
                intel_vgpu_reg_master_irq_handler);
 
-       MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
-               F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-
-       MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
-               NULL, NULL);
-       MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE),  D_BDW_PLUS,
-               F_CMD_ACCESS, NULL, NULL);
-       MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
-       MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
-               NULL, NULL);
-       MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
-               F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
-               F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
-               ring_mode_mmio_write);
-       MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
-               F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
-               F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
-                       ring_timestamp_mmio_read, NULL);
-
-       MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
+               mmio_read_from_hw, NULL);
 
 #define RING_REG(base) (base + 0xd0)
        MMIO_RING_F(RING_REG, 4, F_RO, 0,
                ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
                ring_reset_ctl_write);
-       MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
-               ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
-               ring_reset_ctl_write);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x230)
        MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
-       MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x234)
        MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
                NULL, NULL);
-       MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
-               ~0LL, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x244)
        MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
-               NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x370)
        MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
-       MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 48, F_RO, 0, ~0, D_BDW_PLUS,
-                       NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x3a0)
        MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
-       MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
 #undef RING_REG
 
        MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
@@ -2557,11 +2510,9 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 
 #define RING_REG(base) (base + 0x270)
        MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
-       MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
        MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
-       MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
 
        MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
@@ -2849,7 +2800,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(0x65f08, D_SKL | D_KBL);
        MMIO_D(0x320f0, D_SKL | D_KBL);
 
-       MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
        MMIO_D(0x70034, D_SKL_PLUS);
        MMIO_D(0x71034, D_SKL_PLUS);
        MMIO_D(0x72034, D_SKL_PLUS);
index fbd023a16f18163d6dcb52bcf795675e3c16a4f7..7d01c77a0f7ac61212d17c901860b2ef5e6fab63 100644 (file)
@@ -54,9 +54,6 @@
 
 #define VGT_SPRSTRIDE(pipe)    _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
 
-#define _REG_VECS_EXCC         0x1A028
-#define _REG_VCS2_EXCC         0x1c028
-
 #define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
 #define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
 
index 436377da41baced8e81352587cc3e035d0dc1ba3..03532dfc0cd51b8342e50da61524024dafc8ac34 100644 (file)
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
-       struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
-       int ring_id;
-
        kfree(vgpu->sched_data);
        vgpu->sched_data = NULL;
-
-       spin_lock_bh(&scheduler->mmio_context_lock);
-       for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
-               if (scheduler->engine_owner[ring_id] == vgpu) {
-                       intel_gvt_switch_mmio(vgpu, NULL, ring_id);
-                       scheduler->engine_owner[ring_id] = NULL;
-               }
-       }
-       spin_unlock_bh(&scheduler->mmio_context_lock);
 }
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 {
        struct intel_gvt_workload_scheduler *scheduler =
                &vgpu->gvt->scheduler;
+       int ring_id;
 
        gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                scheduler->need_reschedule = true;
                scheduler->current_vgpu = NULL;
        }
+
+       spin_lock_bh(&scheduler->mmio_context_lock);
+       for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+               if (scheduler->engine_owner[ring_id] == vgpu) {
+                       intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+                       scheduler->engine_owner[ring_id] = NULL;
+               }
+       }
+       spin_unlock_bh(&scheduler->mmio_context_lock);
 }
index 0d431a968a32970f07cda55bb37f55a0634afc98..93a49eb0209ee645818a673a07d8a31291840ce7 100644 (file)
@@ -68,6 +68,7 @@ struct shadow_indirect_ctx {
 struct shadow_per_ctx {
        unsigned long guest_gma;
        unsigned long shadow_gma;
+       unsigned valid;
 };
 
 struct intel_shadow_wa_ctx {
index 19404c96eeb10670da75ce5426c96c10f6a5d7e1..32e857dc507cf9b1a9247f3bab497616d82e60a9 100644 (file)
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
        if (READ_ONCE(obj->mm.pages))
                return -ENODEV;
 
+       if (obj->mm.madv != I915_MADV_WILLNEED)
+               return -EFAULT;
+
        /* Before the pages are instantiated the object is treated as being
         * in the CPU domain. The pages will be clflushed as required before
         * use, and we can freely write into the pages directly. If userspace
@@ -3013,10 +3016,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct drm_i915_gem_request *request)
 {
+       unsigned long flags;
+
        GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
        dma_fence_set_error(&request->fence, -EIO);
-       i915_gem_request_submit(request);
+
+       spin_lock_irqsave(&request->engine->timeline->lock, flags);
+       __i915_gem_request_submit(request);
        intel_engine_init_global_seqno(request->engine, request->global_seqno);
+       spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
 }
 
 static void engine_set_wedged(struct intel_engine_cs *engine)
index 4df039ef2ce316509ecc6faa04e707d135acf507..e161d383b526757a79097eadb9e65260392befe1 100644 (file)
 #include "intel_drv.h"
 #include "i915_trace.h"
 
-static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
 
-       for_each_engine(engine, dev_priv, id) {
-               struct intel_timeline *tl;
+       if (i915->gt.active_requests)
+              return false;
 
-               tl = &ggtt->base.timeline.engine[engine->id];
-               if (i915_gem_active_isset(&tl->last_request))
-                       return false;
-       }
+       for_each_engine(engine, i915, id) {
+              if (engine->last_retired_context != i915->kernel_context)
+                      return false;
+       }
 
-       return true;
+       return true;
 }
 
 static int ggtt_flush(struct drm_i915_private *i915)
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
                                    min_size, alignment, cache_level,
                                    start, end, mode);
 
-       /* Retire before we search the active list. Although we have
+       /*
+        * Retire before we search the active list. Although we have
         * reasonable accuracy in our retirement lists, we may have
         * a stray pin (preventing eviction) that can only be resolved by
         * retiring.
@@ -182,7 +182,8 @@ search_again:
                BUG_ON(ret);
        }
 
-       /* Can we unpin some objects such as idle hw contents,
+       /*
+        * Can we unpin some objects such as idle hw contents,
         * or pending flips? But since only the GGTT has global entries
         * such as scanouts, rinbuffers and contexts, we can skip the
         * purge when inspecting per-process local address spaces.
@@ -190,19 +191,33 @@ search_again:
        if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
                return -ENOSPC;
 
-       if (ggtt_is_idle(dev_priv)) {
-               /* If we still have pending pageflip completions, drop
-                * back to userspace to give our workqueues time to
-                * acquire our locks and unpin the old scanouts.
-                */
-               return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
-       }
+       /*
+        * Not everything in the GGTT is tracked via VMA using
+        * i915_vma_move_to_active(), otherwise we could evict as required
+        * with minimal stalling. Instead we are forced to idle the GPU and
+        * explicitly retire outstanding requests which will then remove
+        * the pinning for active objects such as contexts and ring,
+        * enabling us to evict them on the next iteration.
+        *
+        * To ensure that all user contexts are evictable, we perform
+        * a switch to the perma-pinned kernel context. This all also gives
+        * us a termination condition, when the last retired context is
+        * the kernel's there is no more we can evict.
+        */
+       if (!ggtt_is_idle(dev_priv)) {
+               ret = ggtt_flush(dev_priv);
+               if (ret)
+                       return ret;
 
-       ret = ggtt_flush(dev_priv);
-       if (ret)
-               return ret;
+               goto search_again;
+       }
 
-       goto search_again;
+       /*
+        * If we still have pending pageflip completions, drop
+        * back to userspace to give our workqueues time to
+        * acquire our locks and unpin the old scanouts.
+        */
+       return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
 
 found:
        /* drm_mm doesn't allow any other other operations while
index e21ce9c18b6eee10ad8556d5fb310cd544987c88..b63893eeca73ddf78070bbecf055f6560f8636b7 100644 (file)
@@ -839,7 +839,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                                                pipe);
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
-       bool in_vbl = true;
        unsigned long irqflags;
 
        if (WARN_ON(!mode->crtc_clock)) {
@@ -922,8 +921,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
-       in_vbl = position >= vbl_start && position < vbl_end;
-
        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
index 94185d610673a77d17bf0bfb5796fbac4df2a05d..370b9d248fed89abe2b3601063ddffd27661c76e 100644 (file)
@@ -2537,6 +2537,10 @@ static const struct file_operations fops = {
        .poll           = i915_perf_poll,
        .read           = i915_perf_read,
        .unlocked_ioctl = i915_perf_ioctl,
+       /* Our ioctl have no arguments, so it's safe to use the same function
+        * to handle 32bits compatibility.
+        */
+       .compat_ioctl   = i915_perf_ioctl,
 };
 
 
index ed7cd9ee2c2af89737b2cc4618317c4d2fc9bedd..c9bcc6c450126e7cf638ba1c872a55938660add8 100644 (file)
@@ -6998,6 +6998,7 @@ enum {
  */
 #define  L3_GENERAL_PRIO_CREDITS(x)            (((x) >> 1) << 19)
 #define  L3_HIGH_PRIO_CREDITS(x)               (((x) >> 1) << 14)
+#define  L3_PRIO_CREDITS_MASK                  ((0x1f << 19) | (0x1f << 14))
 
 #define GEN7_L3CNTLREG1                                _MMIO(0xB01C)
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL                   0x3C47FF8C
index d805b6e6fe71467ed4f9a85a74fdba1b5ed81998..27743be5b768e13c4be9749537b1af76dfb3f478 100644 (file)
@@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
                         connector->encoder->base.id,
                         connector->encoder->name);
 
-       /* ELD Conn_Type */
-       connector->eld[5] &= ~(3 << 2);
-       if (intel_crtc_has_dp_encoder(crtc_state))
-               connector->eld[5] |= (1 << 2);
-
        connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
 
        if (dev_priv->display.audio_codec_enable)
index 183e87e8ea319ee7b4c8cb67dadbe6aaf159f106..5d4cd3d00564ce7a95bd02a7a5f56fbcb191337d 100644 (file)
@@ -1163,6 +1163,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
        is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
        is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
 
+       if (port == PORT_A && is_dvi) {
+               DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
+                             is_hdmi ? "/HDMI" : "");
+               is_dvi = false;
+               is_hdmi = false;
+       }
+
        info->supports_dvi = is_dvi;
        info->supports_hdmi = is_hdmi;
        info->supports_dp = is_dp;
@@ -1233,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
 {
        enum port port;
 
-       if (!HAS_DDI(dev_priv))
+       if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                return;
 
        if (!dev_priv->vbt.child_dev_num)
index ff9ecd211abbb07fd8bae3fb6d55a8c33b23770d..b8315bca852b56061ff4cf598245f7902722c09f 100644 (file)
@@ -74,7 +74,7 @@
 #define I9XX_CSC_COEFF_1_0             \
        ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
 
-static bool crtc_state_is_legacy(struct drm_crtc_state *state)
+static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
 {
        return !state->degamma_lut &&
                !state->ctm &&
@@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
        }
 
        mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
-       if (!crtc_state_is_legacy(state)) {
+       if (!crtc_state_is_legacy_gamma(state)) {
                mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
                        (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
        }
@@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
        struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
        enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
 
-       if (crtc_state_is_legacy(state)) {
+       if (crtc_state_is_legacy_gamma(state)) {
                haswell_load_luts(state);
                return;
        }
@@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state)
 
        glk_load_degamma_lut(state);
 
-       if (crtc_state_is_legacy(state)) {
+       if (crtc_state_is_legacy_gamma(state)) {
                haswell_load_luts(state);
                return;
        }
@@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
        uint32_t i, lut_size;
        uint32_t word0, word1;
 
-       if (crtc_state_is_legacy(state)) {
+       if (crtc_state_is_legacy_gamma(state)) {
                /* Turn off degamma/gamma on CGM block. */
                I915_WRITE(CGM_PIPE_MODE(pipe),
                           (state->ctm ? CGM_PIPE_MODE_CSC : 0));
@@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc,
                return 0;
 
        /*
-        * We also allow no degamma lut and a gamma lut at the legacy
+        * We also allow no degamma lut/ctm and a gamma lut at the legacy
         * size (256 entries).
         */
-       if (!crtc_state->degamma_lut &&
-           crtc_state->gamma_lut &&
-           crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
+       if (crtc_state_is_legacy_gamma(crtc_state))
                return 0;
 
        return -EINVAL;
index 965988f79a558aaa933a546fb69b72f0ef81cc29..92c1f8e166dc55381ab77bb92b680909131ffc4f 100644 (file)
@@ -216,7 +216,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
 
        mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
 
-       if (IS_BROXTON(dev_priv))
+       if (IS_GEN9_LP(dev_priv))
                mask |= DC_STATE_DEBUG_MASK_CORES;
 
        /* The below bit doesn't need to be cleared ever afterwards */
index 4b4fd1f8110b2f886d5c1299367dbea74eef77cb..5e5fe03b638cbf2ee17206ccd4c6ee985134645e 100644 (file)
@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
                            int *n_entries)
 {
        if (IS_BROADWELL(dev_priv)) {
-               *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
-               return hsw_ddi_translations_fdi;
+               *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
+               return bdw_ddi_translations_fdi;
        } else if (IS_HASWELL(dev_priv)) {
                *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
                return hsw_ddi_translations_fdi;
@@ -1655,7 +1655,8 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 out:
        if (ret && IS_GEN9_LP(dev_priv)) {
                tmp = I915_READ(BXT_PHY_CTL(port));
-               if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
+               if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
+                           BXT_PHY_LANE_POWERDOWN_ACK |
                            BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
                        DRM_ERROR("Port %c enabled but PHY powered down? "
                                  "(PHY_CTL %08x)\n", port_name(port), tmp);
@@ -2101,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
                 * register writes.
                 */
                val = I915_READ(DPCLKA_CFGCR0);
-               val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) |
-                        DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port));
+               val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
                I915_WRITE(DPCLKA_CFGCR0, val);
        } else if (IS_GEN9_BC(dev_priv)) {
                /* DDI -> PLL mapping  */
index f17275519484bd8f598401269959c038fe139171..5c7828c52d12562e8e95872112aef34d367bffc6 100644 (file)
@@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+       enum transcoder cpu_transcoder;
        struct drm_display_mode *mode;
        struct intel_crtc_state *pipe_config;
-       int htot = I915_READ(HTOTAL(cpu_transcoder));
-       int hsync = I915_READ(HSYNC(cpu_transcoder));
-       int vtot = I915_READ(VTOTAL(cpu_transcoder));
-       int vsync = I915_READ(VSYNC(cpu_transcoder));
+       u32 htot, hsync, vtot, vsync;
        enum pipe pipe = intel_crtc->pipe;
 
        mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        i9xx_crtc_clock_get(intel_crtc, pipe_config);
 
        mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+       cpu_transcoder = pipe_config->cpu_transcoder;
+       htot = I915_READ(HTOTAL(cpu_transcoder));
+       hsync = I915_READ(HSYNC(cpu_transcoder));
+       vtot = I915_READ(VTOTAL(cpu_transcoder));
+       vsync = I915_READ(VSYNC(cpu_transcoder));
+
        mode->hdisplay = (htot & 0xffff) + 1;
        mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
        mode->hsync_start = (hsync & 0xffff) + 1;
@@ -12359,7 +12363,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct drm_crtc *crtc;
        struct intel_crtc_state *intel_cstate;
-       bool hw_check = intel_state->modeset;
        u64 put_domains[I915_MAX_PIPES] = {};
        unsigned crtc_vblank_mask = 0;
        int i;
@@ -12376,7 +12379,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
                if (needs_modeset(new_crtc_state) ||
                    to_intel_crtc_state(new_crtc_state)->update_pipe) {
-                       hw_check = true;
 
                        put_domains[to_intel_crtc(crtc)->pipe] =
                                modeset_get_crtc_power_domains(crtc,
@@ -14030,7 +14032,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 
                if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
                        DRM_DEBUG_KMS("bad plane %d handle\n", i);
-                       return -EINVAL;
+                       goto err;
                }
 
                stride_alignment = intel_fb_stride_alignment(fb, i);
index 64134947c0aae4e478767d3347f5ac73d8d87aa5..203198659ab2dbfe788531dd60878efbd61c303e 100644 (file)
@@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
 
-       intel_dp->panel_power_off_time = ktime_get_boottime();
        wait_panel_off(intel_dp);
+       intel_dp->panel_power_off_time = ktime_get_boottime();
 
        /* We got a reference when we enabled the VDD. */
        intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
@@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
         * seems sufficient to avoid this problem.
         */
        if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
-               vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10);
+               vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
                DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
                              vbt.t11_t12);
        }
index 09b6709297867d6751957dc12541812fe3f30344..de38d014ed39b937abc348988830a2f188815901 100644 (file)
@@ -208,12 +208,6 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
        },
 };
 
-static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
-{
-       return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
-               BIT(phy_info->channel[DPIO_CH0].port);
-}
-
 static const struct bxt_ddi_phy_info *
 bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
 {
@@ -313,7 +307,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
                            enum dpio_phy phy)
 {
        const struct bxt_ddi_phy_info *phy_info;
-       enum port port;
 
        phy_info = bxt_get_phy_info(dev_priv, phy);
 
@@ -335,19 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
                return false;
        }
 
-       for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
-               u32 tmp = I915_READ(BXT_PHY_CTL(port));
-
-               if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
-                       DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
-                                        "for port %c powered down "
-                                        "(PHY_CTL %08x)\n",
-                                        phy, port_name(port), tmp);
-
-                       return false;
-               }
-       }
-
        return true;
 }
 
index a2a3d93d67bd252a3c9d137bedb66be26816bd23..df808a94c51194a886d8664ff8b8118ad05870ef 100644 (file)
@@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 
        /* 3. Configure DPLL_CFGCR0 */
        /* Avoid touch CFGCR1 if HDMI mode is not enabled */
-       if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) {
+       if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
                val = pll->state.hw_state.cfgcr1;
                I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
                /* 4. Reab back to ensure writes completed */
index f0c11aec5ea5c99933f4243d9ce408692cd66cda..7442891762be4e486f8e1bd4c4f9a365819d7c88 100644 (file)
@@ -892,8 +892,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
                              struct intel_crtc_state *old_crtc_state,
                              struct drm_connector_state *old_conn_state)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
 
@@ -902,15 +900,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
        intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
        intel_panel_disable_backlight(old_conn_state);
 
-       /*
-        * Disable Device ready before the port shutdown in order
-        * to avoid split screen
-        */
-       if (IS_BROXTON(dev_priv)) {
-               for_each_dsi_port(port, intel_dsi->ports)
-                       I915_WRITE(MIPI_DEVICE_READY(port), 0);
-       }
-
        /*
         * According to the spec we should send SHUTDOWN before
         * MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing
index 9ab5969413722a5999a4266629ea2ba0fc2305f0..3c2d9cf22ed5a537253a14c2fe85ee200ce7b24c 100644 (file)
@@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
        }
 
        /* WaProgramL3SqcReg1DefaultForPerf:bxt */
-       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
-               I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
-                                          L3_HIGH_PRIO_CREDITS(2));
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+               u32 val = I915_READ(GEN8_L3SQCREG1);
+               val &= ~L3_PRIO_CREDITS_MASK;
+               val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+               I915_WRITE(GEN8_L3SQCREG1, val);
+       }
 
        /* WaToEnableHwFixForPushConstHWBug:bxt */
        if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
index 951e834dd2744f7f2fe84a54723136f743d185cf..28a778b785ac9e02ea7de55878e5fec98f019b98 100644 (file)
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+static void intel_connector_update_eld_conn_type(struct drm_connector *connector)
+{
+       u8 conn_type;
+
+       if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+           connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+               conn_type = DRM_ELD_CONN_TYPE_DP;
+       } else {
+               conn_type = DRM_ELD_CONN_TYPE_HDMI;
+       }
+
+       connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK;
+       connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type;
+}
+
 /**
  * intel_connector_update_modes - update connector from edid
  * @connector: DRM connector device to use
@@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector,
        ret = drm_add_edid_modes(connector, edid);
        drm_edid_to_eld(connector, edid);
 
+       intel_connector_update_eld_conn_type(connector);
+
        return ret;
 }
 
index a17b1de7d7e0edf9ec2f605a2d257227587599ba..3b1c5d783ee7ccf12fc9d0cda009ab695fef4e57 100644 (file)
@@ -1699,6 +1699,8 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
        if (!panel->backlight.max)
                return -ENODEV;
 
+       panel->backlight.min = get_backlight_min_vbt(connector);
+
        val = bxt_get_backlight(connector);
        val = intel_panel_compute_brightness(connector, val);
        panel->backlight.level = clamp(val, panel->backlight.min,
@@ -1735,6 +1737,8 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
        if (!panel->backlight.max)
                return -ENODEV;
 
+       panel->backlight.min = get_backlight_min_vbt(connector);
+
        val = bxt_get_backlight(connector);
        val = intel_panel_compute_brightness(connector, val);
        panel->backlight.level = clamp(val, panel->backlight.min,
index ed662937ec3c85685b7fed049f381c6d1cb5654c..0a09f8ff6aff6710ea3580329d83646a67593b8c 100644 (file)
@@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
                                   int high_prio_credits)
 {
        u32 misccpctl;
+       u32 val;
 
        /* WaTempDisableDOPClkGating:bdw */
        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 
-       I915_WRITE(GEN8_L3SQCREG1,
-                  L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
-                  L3_HIGH_PRIO_CREDITS(high_prio_credits));
+       val = I915_READ(GEN8_L3SQCREG1);
+       val &= ~L3_PRIO_CREDITS_MASK;
+       val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
+       val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
+       I915_WRITE(GEN8_L3SQCREG1, val);
 
        /*
         * Wait at least 100 clocks before re-enabling clock gating.
index b66d8e136aa37c3b72d072312d0f06d141223237..49577eba8e7efc30e68e2b8ea8a88cda3232dae7 100644 (file)
@@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
 {
        enum i915_power_well_id id = power_well->id;
        bool wait_fuses = power_well->hsw.has_fuses;
-       enum skl_power_gate pg;
+       enum skl_power_gate uninitialized_var(pg);
        u32 val;
 
        if (wait_fuses) {
@@ -2782,6 +2782,9 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
 
        /* 6. Enable DBUF */
        gen9_dbuf_enable(dev_priv);
+
+       if (resume && dev_priv->csr.dmc_payload)
+               intel_csr_load_program(dev_priv);
 }
 
 #undef CNL_PROCMON_IDX
index dbb31a0144194662a85f20ae0b77117ce071b901..deaf869374ea7016bbaa33aa45fb013aa00ec6ad 100644 (file)
@@ -248,7 +248,7 @@ disable_clks:
        clk_disable_unprepare(ahb_clk);
 disable_gdsc:
        regulator_disable(gdsc_reg);
-       pm_runtime_put_autosuspend(dev);
+       pm_runtime_put_sync(dev);
 put_clk:
        clk_put(ahb_clk);
 put_gdsc:
index c2bdad88447eb59de05e1476d7607cacfb968376..824067d2d4277d36699b1f15c6d58f74a97fe23f 100644 (file)
@@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
                                  .caps = MDP_LM_CAP_WB },
                             },
                .nb_stages = 5,
+               .max_width = 2048,
+               .max_height = 0xFFFF,
        },
        .dspp = {
                .count = 3,
index 6fcb58ab718cd9c986155f50eb55cccf07c22c32..44097767700124df63f8a325969ea44b494256d7 100644 (file)
@@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 
        spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
 
-       pm_runtime_put_autosuspend(&pdev->dev);
-
 set_cursor:
        ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
        if (ret) {
index f15821a0d90089d1045e810e4b28af552db9eec0..ea5bb0e1632c69e45e746d8abfb820ba9d540e0b 100644 (file)
@@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
        struct dma_fence *fence;
        int i, ret;
 
-       if (!exclusive) {
-               /* NOTE: _reserve_shared() must happen before _add_shared_fence(),
-                * which makes this a slightly strange place to call it.  OTOH this
-                * is a convenient can-fail point to hook it in.  (And similar to
-                * how etnaviv and nouveau handle this.)
-                */
-               ret = reservation_object_reserve_shared(msm_obj->resv);
-               if (ret)
-                       return ret;
-       }
-
        fobj = reservation_object_get_list(msm_obj->resv);
        if (!fobj || (fobj->shared_count == 0)) {
                fence = reservation_object_get_excl(msm_obj->resv);
@@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
        }
 
        vaddr = msm_gem_get_vaddr(obj);
-       if (!vaddr) {
+       if (IS_ERR(vaddr)) {
                msm_gem_put_iova(obj, aspace);
                drm_gem_object_unreference(obj);
-               return ERR_PTR(-ENOMEM);
+               return ERR_CAST(vaddr);
        }
 
        if (bo)
index 5d0a75d4b249c439ff9ff072f89c87c5e95bd776..93535cac0676355d935e1cf032d4601ba5000ac3 100644 (file)
@@ -221,7 +221,7 @@ fail:
        return ret;
 }
 
-static int submit_fence_sync(struct msm_gem_submit *submit)
+static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 {
        int i, ret = 0;
 
@@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit)
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
                bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
 
+               if (!write) {
+                       /* NOTE: _reserve_shared() must happen before
+                        * _add_shared_fence(), which makes this a slightly
+                        * strange place to call it.  OTOH this is a
+                        * convenient can-fail point to hook it in.
+                        */
+                       ret = reservation_object_reserve_shared(msm_obj->resv);
+                       if (ret)
+                               return ret;
+               }
+
+               if (no_implicit)
+                       continue;
+
                ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
                if (ret)
                        break;
@@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        if (ret)
                goto out;
 
-       if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
-               ret = submit_fence_sync(submit);
-               if (ret)
-                       goto out;
-       }
+       ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
+       if (ret)
+               goto out;
 
        ret = submit_pin_objects(submit);
        if (ret)
index ffbff27600e0ff620fe623172e30783f53ce5fe7..6a887032c66ae08ea5354599b997640c7b005ac5 100644 (file)
@@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
                        msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
                msm_ringbuffer_destroy(gpu->rb);
        }
-       if (gpu->aspace) {
+
+       if (!IS_ERR_OR_NULL(gpu->aspace)) {
                gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
                        NULL, 0);
                msm_gem_address_space_put(gpu->aspace);
index 0366b8092f9772178334449ce121255087206ede..ec56794ad0399277693b9185c75b6abcf9241e4a 100644 (file)
@@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
 
                wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
 
+               /* Note that smp_load_acquire() is not strictly required
+                * as CIRC_SPACE_TO_END() does not access the tail more
+                * than once.
+                */
                n = min(sz, circ_space_to_end(&rd->fifo));
                memcpy(fptr, ptr, n);
 
-               fifo->head = (fifo->head + n) & (BUF_SZ - 1);
+               smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
                sz  -= n;
                ptr += n;
 
@@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf,
        if (ret)
                goto out;
 
+       /* Note that smp_load_acquire() is not strictly required
+        * as CIRC_CNT_TO_END() does not access the head more than
+        * once.
+        */
        n = min_t(int, sz, circ_count_to_end(&rd->fifo));
        if (copy_to_user(buf, fptr, n)) {
                ret = -EFAULT;
                goto out;
        }
 
-       fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
+       smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
        *ppos += n;
 
        wake_up_all(&rd->fifo_event);
index f7707849bb538697009ca3b68c8296100a67b2a4..2b12d82aac1509f7023b24e15d5f04fe7ecc8290 100644 (file)
@@ -223,7 +223,7 @@ void
 nouveau_fbcon_accel_save_disable(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon) {
+       if (drm->fbcon && drm->fbcon->helper.fbdev) {
                drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
                drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
        }
@@ -233,7 +233,7 @@ void
 nouveau_fbcon_accel_restore(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon) {
+       if (drm->fbcon && drm->fbcon->helper.fbdev) {
                drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
        }
 }
@@ -245,7 +245,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
        struct nouveau_fbdev *fbcon = drm->fbcon;
        if (fbcon && drm->channel) {
                console_lock();
-               fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+               if (fbcon->helper.fbdev)
+                       fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
                console_unlock();
                nouveau_channel_idle(drm->channel);
                nvif_object_fini(&fbcon->twod);
index 2dbf62a2ac413081f7a15cb7c8779667447015d4..e4751f92b342d60f44c7d1a73a981940f15cb400 100644 (file)
@@ -3265,11 +3265,14 @@ nv50_mstm = {
 void
 nv50_mstm_service(struct nv50_mstm *mstm)
 {
-       struct drm_dp_aux *aux = mstm->mgr.aux;
+       struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
        bool handled = true;
        int ret;
        u8 esi[8] = {};
 
+       if (!aux)
+               return;
+
        while (handled) {
                ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
                if (ret != 8) {
index 8e2e24a7477458d0ad2361885e79fca4f936068a..44e116f7880dd02e6754d3d328f1d909a0a7041a 100644 (file)
@@ -39,5 +39,5 @@ int
 g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
 {
        return nvkm_xtensa_new_(&g84_bsp, device, index,
-                               true, 0x103000, pengine);
+                               device->chipset != 0x92, 0x103000, pengine);
 }
index d06ad2c372bf30efb6b8ecc5978776def5721222..455da298227f65c2b4c2cfc6a2cedebe12661877 100644 (file)
@@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
                        mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
                }
 
+               mmu->func->flush(vm);
+
                nvkm_memory_del(&pgt);
        }
 }
index 14c5613b4388a839461915fc448d4772422861a3..afbf50d0c08fa1c89c49120717b7a51cb05d5b98 100644 (file)
@@ -509,23 +509,25 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
            .y2 = qfb->base.height
        };
 
-       if (!old_state->fb) {
-               qxl_io_log(qdev,
-                          "create primary fb: %dx%d,%d,%d\n",
-                          bo->surf.width, bo->surf.height,
-                          bo->surf.stride, bo->surf.format);
+       if (old_state->fb) {
+               qfb_old = to_qxl_framebuffer(old_state->fb);
+               bo_old = gem_to_qxl_bo(qfb_old->obj);
+       } else {
+               bo_old = NULL;
+       }
 
-               qxl_io_create_primary(qdev, 0, bo);
-               bo->is_primary = true;
+       if (bo == bo_old)
                return;
 
-       } else {
-               qfb_old = to_qxl_framebuffer(old_state->fb);
-               bo_old = gem_to_qxl_bo(qfb_old->obj);
+       if (bo_old && bo_old->is_primary) {
+               qxl_io_destroy_primary(qdev);
                bo_old->is_primary = false;
        }
 
-       bo->is_primary = true;
+       if (!bo->is_primary) {
+               qxl_io_create_primary(qdev, 0, bo);
+               bo->is_primary = true;
+       }
        qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1);
 }
 
@@ -534,13 +536,15 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane,
 {
        struct qxl_device *qdev = plane->dev->dev_private;
 
-       if (old_state->fb)
-       {       struct qxl_framebuffer *qfb =
+       if (old_state->fb) {
+               struct qxl_framebuffer *qfb =
                        to_qxl_framebuffer(old_state->fb);
                struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
 
-               qxl_io_destroy_primary(qdev);
-               bo->is_primary = false;
+               if (bo->is_primary) {
+                       qxl_io_destroy_primary(qdev);
+                       bo->is_primary = false;
+               }
        }
 }
 
@@ -698,14 +702,15 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
        struct drm_gem_object *obj;
        struct qxl_bo *user_bo;
 
-       if (!plane->state->fb) {
-               /* we never executed prepare_fb, so there's nothing to
+       if (!old_state->fb) {
+               /*
+                * we never executed prepare_fb, so there's nothing to
                 * unpin.
                 */
                return;
        }
 
-       obj = to_qxl_framebuffer(plane->state->fb)->obj;
+       obj = to_qxl_framebuffer(old_state->fb)->obj;
        user_bo = gem_to_qxl_bo(obj);
        qxl_bo_unpin(user_bo);
 }
index 997131d58c7f639f8dbb765e51433a35a7da27ba..ffc10cadcf34ccb79a7d09ae253b4a2dffda7cfe 100644 (file)
@@ -1663,7 +1663,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
        radeon_agp_suspend(rdev);
 
        pci_save_state(dev->pdev);
-       if (freeze && rdev->family >= CHIP_CEDAR) {
+       if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
                rdev->asic->asic_reset(rdev, true);
                pci_restore_state(dev->pdev);
        } else if (suspend) {
index 06f05302ee75e33b2a05f9c468b1a3bbd60619be..882d85db90539ae1b00e194a0cae43bf8fb4918d 100644 (file)
@@ -26,7 +26,7 @@ config DRM_SUN4I_HDMI_CEC
        bool "Allwinner A10 HDMI CEC Support"
        depends on DRM_SUN4I_HDMI
        select CEC_CORE
-       depends on CEC_PIN
+       select CEC_PIN
        help
          Choose this option if you have an Allwinner SoC with an HDMI
          controller and want to use CEC.
index 1457750988da4930792eceb3e543f211d66f84f7..a1f8cba251a245af7227d853da784518d19d405d 100644 (file)
@@ -15,7 +15,7 @@
 #include <drm/drm_connector.h>
 #include <drm/drm_encoder.h>
 
-#include <media/cec.h>
+#include <media/cec-pin.h>
 
 #define SUN4I_HDMI_CTRL_REG            0x004
 #define SUN4I_HDMI_CTRL_ENABLE                 BIT(31)
index 9ea6cd5a1370d92e6eb78c864641986bffbc0086..3cf1a6932facf0b33d25cebbd288477ff6152af7 100644 (file)
@@ -302,26 +302,29 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
        hdmi->mod_clk = devm_clk_get(dev, "mod");
        if (IS_ERR(hdmi->mod_clk)) {
                dev_err(dev, "Couldn't get the HDMI mod clock\n");
-               return PTR_ERR(hdmi->mod_clk);
+               ret = PTR_ERR(hdmi->mod_clk);
+               goto err_disable_bus_clk;
        }
        clk_prepare_enable(hdmi->mod_clk);
 
        hdmi->pll0_clk = devm_clk_get(dev, "pll-0");
        if (IS_ERR(hdmi->pll0_clk)) {
                dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n");
-               return PTR_ERR(hdmi->pll0_clk);
+               ret = PTR_ERR(hdmi->pll0_clk);
+               goto err_disable_mod_clk;
        }
 
        hdmi->pll1_clk = devm_clk_get(dev, "pll-1");
        if (IS_ERR(hdmi->pll1_clk)) {
                dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n");
-               return PTR_ERR(hdmi->pll1_clk);
+               ret = PTR_ERR(hdmi->pll1_clk);
+               goto err_disable_mod_clk;
        }
 
        ret = sun4i_tmds_create(hdmi);
        if (ret) {
                dev_err(dev, "Couldn't create the TMDS clock\n");
-               return ret;
+               goto err_disable_mod_clk;
        }
 
        writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG);
@@ -362,7 +365,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
        ret = sun4i_hdmi_i2c_create(dev, hdmi);
        if (ret) {
                dev_err(dev, "Couldn't create the HDMI I2C adapter\n");
-               return ret;
+               goto err_disable_mod_clk;
        }
 
        drm_encoder_helper_add(&hdmi->encoder,
@@ -422,6 +425,10 @@ err_cleanup_connector:
        drm_encoder_cleanup(&hdmi->encoder);
 err_del_i2c_adapter:
        i2c_del_adapter(hdmi->i2c);
+err_disable_mod_clk:
+       clk_disable_unprepare(hdmi->mod_clk);
+err_disable_bus_clk:
+       clk_disable_unprepare(hdmi->bus_clk);
        return ret;
 }
 
@@ -434,6 +441,8 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
        drm_connector_cleanup(&hdmi->connector);
        drm_encoder_cleanup(&hdmi->encoder);
        i2c_del_adapter(hdmi->i2c);
+       clk_disable_unprepare(hdmi->mod_clk);
+       clk_disable_unprepare(hdmi->bus_clk);
 }
 
 static const struct component_ops sun4i_hdmi_ops = {
index e9b7cdad5c4c1beac73bca274321cc57280006bd..5a1ab4046e926fefe4cb6d73d2c3ebe031eb45bb 100644 (file)
@@ -63,6 +63,6 @@ DEFINE_EVENT(register_access, sor_readl,
 
 /* This part must be outside protection */
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/tegra
 #define TRACE_INCLUDE_FILE trace
 #include <trace/define_trace.h>
index 6a573d21d3cc2ec91ea3d0d0e0ebecda20e4c534..658fa2d3e40c260d051d4299bda4eddb0af5abeb 100644 (file)
@@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
                return -EINVAL;
        }
 
+       /*
+        * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
+        * i.MX53 channel arbitration locking doesn't seem to work properly.
+        * Allow enabling the lock feature on IPUv3H / i.MX6 only.
+        */
+       if (bursts && ipu->ipu_type != IPUV3H)
+               return -EINVAL;
+
        for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
                if (channel->num == idmac_lock_en_info[i].chnum)
                        break;
index c35f74c830657f26a3e29c34f7cef7e9f864f71a..c860a7997cb59c981557e01021d6a4278b2470ea 100644 (file)
 #define  IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v)                ((v & 0x7) << 1)
 #define  IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v)   ((v & 0x3) << 4)
 
+#define IPU_PRE_STORE_ENG_STATUS                       0x120
+#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK   0xffff
+#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT  0
+#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK   0x3fff
+#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT  16
+#define  IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL      (1 << 30)
+#define  IPU_PRE_STORE_ENG_STATUS_STORE_FIELD          (1 << 31)
+
 #define IPU_PRE_STORE_ENG_SIZE                         0x130
 #define  IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v)         ((v & 0xffff) << 0)
 #define  IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v)                ((v & 0xffff) << 16)
@@ -93,6 +101,7 @@ struct ipu_pre {
        dma_addr_t              buffer_paddr;
        void                    *buffer_virt;
        bool                    in_use;
+       unsigned int            safe_window_end;
 };
 
 static DEFINE_MUTEX(ipu_pre_list_mutex);
@@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
        u32 active_bpp = info->cpp[0] >> 1;
        u32 val;
 
+       /* calculate safe window for ctrl register updates */
+       pre->safe_window_end = height - 2;
+
        writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
        writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
 
@@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
 
 void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
 {
+       unsigned long timeout = jiffies + msecs_to_jiffies(5);
+       unsigned short current_yblock;
+       u32 val;
+
        writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
+
+       do {
+               if (time_after(jiffies, timeout)) {
+                       dev_warn(pre->dev, "timeout waiting for PRE safe window\n");
+                       return;
+               }
+
+               val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS);
+               current_yblock =
+                       (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) &
+                       IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK;
+       } while (current_yblock == 0 || current_yblock >= pre->safe_window_end);
+
        writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
 }
 
index ecc9ea44dc50fc4f5b815564ae54f46884f93a2e..0013ca9f72c83e8f85b3c91aa62aaf619690f234 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_fourcc.h>
 #include <linux/clk.h>
 #include <linux/err.h>
+#include <linux/iopoll.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 #include <linux/module.h>
@@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
        val = IPU_PRG_REG_UPDATE_REG_UPDATE;
        writel(val, prg->regs + IPU_PRG_REG_UPDATE);
 
+       /* wait for both double buffers to be filled */
+       readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val,
+                          (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) &&
+                          (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)),
+                          5, 1000);
+
        clk_disable_unprepare(prg->clk_ipg);
 
        chan->enabled = true;
index 0a3117cc29e70c54b3f9b269889d9b4ccf2fb56c..374301fcbc86791e5005ba3e1b45a35c496174ad 100644 (file)
@@ -281,6 +281,7 @@ config HID_ELECOM
        Support for ELECOM devices:
          - BM084 Bluetooth Mouse
          - DEFT Trackball (Wired and wireless)
+         - HUGE Trackball (Wired and wireless)
 
 config HID_ELO
        tristate "ELO USB 4000/4500 touchscreen"
index 9bc91160819b6eaeac1b0368bdbdfe15a1328422..330ca983828ba6d11feaf5538841a6810d84f5c6 100644 (file)
@@ -2032,6 +2032,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
 #endif
 #if IS_ENABLED(CONFIG_HID_ELO)
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
index e2c7465df69f3ae74c2cb1979c531b02e2934089..54aeea57d2099bd8c0a1acf41b9d2f0c21c58ee6 100644 (file)
@@ -3,6 +3,7 @@
  *  Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
  *  Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
  *  Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
+ *  Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
  */
 
 /*
@@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                break;
        case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
        case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
-               /* The DEFT trackball has eight buttons, but its descriptor only
-                * reports five, disabling the three Fn buttons on the top of
-                * the mouse.
+       case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
+       case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
+               /* The DEFT/HUGE trackball has eight buttons, but its descriptor
+                * only reports five, disabling the three Fn buttons on the top
+                * of the mouse.
                 *
                 * Apply the following diff to the descriptor:
                 *
@@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                 * End Collection,                     End Collection,
                 */
                if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
-                       hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
+                       hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
                        rdesc[13] = 8; /* Button/Variable Report Count */
                        rdesc[21] = 8; /* Button/Variable Usage Maximum */
                        rdesc[29] = 0; /* Button/Constant Report Count */
@@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, elecom_devices);
index b397a14ab9703b259d7b258ebcfdc9d582b31b07..be2e005c3c516c6cb522a68a891d75e48f8f800f 100644 (file)
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
 #define USB_DEVICE_ID_ELECOM_DEFT_WIRED        0x00fe
 #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS     0x00ff
+#define USB_DEVICE_ID_ELECOM_HUGE_WIRED        0x010c
+#define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS     0x010d
 
 #define USB_VENDOR_ID_DREAM_CHEEKY     0x1d34
 #define USB_DEVICE_ID_DREAM_CHEEKY_WN  0x0004
 #define USB_VENDOR_ID_IDEACOM          0x1cb6
 #define USB_DEVICE_ID_IDEACOM_IDC6650  0x6650
 #define USB_DEVICE_ID_IDEACOM_IDC6651  0x6651
+#define USB_DEVICE_ID_IDEACOM_IDC6680  0x6680
 
 #define USB_VENDOR_ID_ILITEK           0x222a
 #define USB_DEVICE_ID_ILITEK_MULTITOUCH        0x0001
 #define USB_DEVICE_ID_LENOVO_CBTKBD    0x6048
 #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
 #define USB_DEVICE_ID_LENOVO_X1_COVER  0x6085
+#define USB_DEVICE_ID_LENOVO_X1_TAB    0x60a3
 
 #define USB_VENDOR_ID_LG               0x1fd2
 #define USB_DEVICE_ID_LG_MULTITOUCH    0x0064
index 440b999304a554309c208999ae2be053ec218d32..9e8c4d2ba11d2cc7c2559f34dc46fe646640e131 100644 (file)
@@ -930,6 +930,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
            field->application != HID_DG_PEN &&
            field->application != HID_DG_TOUCHPAD &&
            field->application != HID_GD_KEYBOARD &&
+           field->application != HID_GD_SYSTEM_CONTROL &&
            field->application != HID_CP_CONSUMER_CONTROL &&
            field->application != HID_GD_WIRELESS_RADIO_CTLS &&
            !(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS &&
@@ -1419,6 +1420,12 @@ static const struct hid_device_id mt_devices[] = {
                        USB_VENDOR_ID_ALPS_JP,
                        HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) },
 
+       /* Lenovo X1 TAB Gen 2 */
+       { .driver_data = MT_CLS_WIN_8_DUAL,
+               HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+                          USB_VENDOR_ID_LENOVO,
+                          USB_DEVICE_ID_LENOVO_X1_TAB) },
+
        /* Anton devices */
        { .driver_data = MT_CLS_EXPORT_ALL_INPUTS,
                MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
index 5b40c26145993fafa51530dcb573bad0e971fb89..ef241d66562e00950e8979ef4fd0633fd4efdca3 100644 (file)
@@ -436,17 +436,24 @@ static int rmi_post_resume(struct hid_device *hdev)
        if (!(data->device_flags & RMI_DEVICE))
                return 0;
 
-       ret = rmi_reset_attn_mode(hdev);
+       /* Make sure the HID device is ready to receive events */
+       ret = hid_hw_open(hdev);
        if (ret)
                return ret;
 
+       ret = rmi_reset_attn_mode(hdev);
+       if (ret)
+               goto out;
+
        ret = rmi_driver_resume(rmi_dev, false);
        if (ret) {
                hid_warn(hdev, "Failed to resume device: %d\n", ret);
-               return ret;
+               goto out;
        }
 
-       return 0;
+out:
+       hid_hw_close(hdev);
+       return ret;
 }
 #endif /* CONFIG_PM */
 
index ec530454e6f68789fe57444fba14eb1daae94e2e..5fbe0f81ab2ebd2c972237fa1be2bc3b544afee8 100644 (file)
@@ -337,8 +337,8 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
                        kfree(hidraw);
                } else {
                        /* close device for last reader */
-                       hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
                        hid_hw_close(hidraw->hid);
+                       hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
                }
        }
 }
index 77396145d2d093af60e44c825ffaabd25f9b68a1..9145c2129a967464e1e7ed8978dbe53c6db9e86e 100644 (file)
@@ -543,7 +543,8 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
 {
        /* the worst case is computed from the set_report command with a
         * reportID > 15 and the maximum report length */
-       int args_len = sizeof(__u8) + /* optional ReportID byte */
+       int args_len = sizeof(__u8) + /* ReportID */
+                      sizeof(__u8) + /* optional ReportID byte */
                       sizeof(__u16) + /* data register */
                       sizeof(__u16) + /* size of the report */
                       report_size; /* report */
index 089bad8a9a21d6b35742df8819fabb4da5036730..045b5da9b992873ce74f5d401de34855b93a6f8e 100644 (file)
@@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid)
        unsigned int rsize = 0;
        char *rdesc;
        int ret, n;
+       int num_descriptors;
+       size_t offset = offsetof(struct hid_descriptor, desc);
 
        quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
                        le16_to_cpu(dev->descriptor.idProduct));
@@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid)
                return -ENODEV;
        }
 
+       if (hdesc->bLength < sizeof(struct hid_descriptor)) {
+               dbg_hid("hid descriptor is too short\n");
+               return -EINVAL;
+       }
+
        hid->version = le16_to_cpu(hdesc->bcdHID);
        hid->country = hdesc->bCountryCode;
 
-       for (n = 0; n < hdesc->bNumDescriptors; n++)
+       num_descriptors = min_t(int, hdesc->bNumDescriptors,
+              (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
+
+       for (n = 0; n < num_descriptors; n++)
                if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
                        rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
 
index a83fa76655b94f6ab8461f167748f785d1a1cebc..f489a5cfcb48cf5216a532091482ac669786e61d 100644 (file)
@@ -99,6 +99,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
index e82a696a1d07ca97df04371c00dc5dc95fbafec9..906e654fb0ba42fe85b49e3813beebabe86d7b3e 100644 (file)
@@ -668,8 +668,10 @@ static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev)
 
        /* Try to find an already-probed interface from the same device */
        list_for_each_entry(data, &wacom_udev_list, list) {
-               if (compare_device_paths(hdev, data->dev, '/'))
+               if (compare_device_paths(hdev, data->dev, '/')) {
+                       kref_get(&data->kref);
                        return data;
+               }
        }
 
        /* Fallback to finding devices that appear to be "siblings" */
@@ -766,6 +768,9 @@ static int wacom_led_control(struct wacom *wacom)
        if (!wacom->led.groups)
                return -ENOTSUPP;
 
+       if (wacom->wacom_wac.features.type == REMOTE)
+               return -ENOTSUPP;
+
        if (wacom->wacom_wac.pid) { /* wireless connected */
                report_id = WAC_CMD_WL_LED_CONTROL;
                buf_size = 13;
index bb17d7bbefd3e7bf033344e7e1bc6b5d5228fd27..aa692e28b2cd60e52bbae7dcb93454fa145ee69a 100644 (file)
@@ -567,8 +567,8 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
                                keys = data[9] & 0x07;
                        }
                } else {
-                       buttons = ((data[6] & 0x10) << 10) |
-                                 ((data[5] & 0x10) << 9)  |
+                       buttons = ((data[6] & 0x10) << 5)  |
+                                 ((data[5] & 0x10) << 4)  |
                                  ((data[6] & 0x0F) << 4)  |
                                  (data[5] & 0x0F);
                }
@@ -1227,11 +1227,17 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
                        continue;
 
                if (range) {
+                       /* Fix rotation alignment: userspace expects zero at left */
+                       int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]);
+                       rotation += 1800/4;
+                       if (rotation > 899)
+                               rotation -= 1800;
+
                        input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
                        input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
-                       input_report_abs(pen_input, ABS_TILT_X, frame[7]);
-                       input_report_abs(pen_input, ABS_TILT_Y, frame[8]);
-                       input_report_abs(pen_input, ABS_Z, get_unaligned_le16(&frame[9]));
+                       input_report_abs(pen_input, ABS_TILT_X, (char)frame[7]);
+                       input_report_abs(pen_input, ABS_TILT_Y, (char)frame[8]);
+                       input_report_abs(pen_input, ABS_Z, rotation);
                        input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11]));
                }
                input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
@@ -1319,12 +1325,19 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
        unsigned char *data = wacom->data;
 
        int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
-       int ring = data[285];
-       int prox = buttons | (ring & 0x80);
+       int ring = data[285] & 0x7F;
+       bool ringstatus = data[285] & 0x80;
+       bool prox = buttons || ringstatus;
+
+       /* Fix touchring data: userspace expects 0 at left and increasing clockwise */
+       ring = 71 - ring;
+       ring += 3*72/16;
+       if (ring > 71)
+               ring -= 72;
 
        wacom_report_numbered_buttons(pad_input, 9, buttons);
 
-       input_report_abs(pad_input, ABS_WHEEL, (ring & 0x80) ? (ring & 0x7f) : 0);
+       input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);
 
        input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0);
        input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0);
@@ -1616,6 +1629,20 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
        return 0;
 }
 
+static int wacom_offset_rotation(struct input_dev *input, struct hid_usage *usage,
+                                int value, int num, int denom)
+{
+       struct input_absinfo *abs = &input->absinfo[usage->code];
+       int range = (abs->maximum - abs->minimum + 1);
+
+       value += num*range/denom;
+       if (value > abs->maximum)
+               value -= range;
+       else if (value < abs->minimum)
+               value += range;
+       return value;
+}
+
 int wacom_equivalent_usage(int usage)
 {
        if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
@@ -1898,6 +1925,7 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
        unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
        int i;
        bool is_touch_on = value;
+       bool do_report = false;
 
        /*
         * Avoid reporting this event and setting inrange_state if this usage
@@ -1912,6 +1940,29 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
        }
 
        switch (equivalent_usage) {
+       case WACOM_HID_WD_TOUCHRING:
+               /*
+                * Userspace expects touchrings to increase in value with
+                * clockwise gestures and have their zero point at the
+                * tablet's left. HID events "should" be clockwise-
+                * increasing and zero at top, though the MobileStudio
+                * Pro and 2nd-gen Intuos Pro don't do this...
+                */
+               if (hdev->vendor == 0x56a &&
+                   (hdev->product == 0x34d || hdev->product == 0x34e ||  /* MobileStudio Pro */
+                    hdev->product == 0x357 || hdev->product == 0x358)) { /* Intuos Pro 2 */
+                       value = (field->logical_maximum - value);
+
+                       if (hdev->product == 0x357 || hdev->product == 0x358)
+                               value = wacom_offset_rotation(input, usage, value, 3, 16);
+                       else if (hdev->product == 0x34d || hdev->product == 0x34e)
+                               value = wacom_offset_rotation(input, usage, value, 1, 2);
+               }
+               else {
+                       value = wacom_offset_rotation(input, usage, value, 1, 4);
+               }
+               do_report = true;
+               break;
        case WACOM_HID_WD_TOUCHRINGSTATUS:
                if (!value)
                        input_event(input, usage->type, usage->code, 0);
@@ -1945,10 +1996,14 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
                                         value, i);
                 /* fall through*/
        default:
+               do_report = true;
+               break;
+       }
+
+       if (do_report) {
                input_event(input, usage->type, usage->code, value);
                if (value)
                        wacom_wac->hid_data.pad_input_event_flag = true;
-               break;
        }
 }
 
@@ -2086,22 +2141,34 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
                wacom_wac->hid_data.tipswitch |= value;
                return;
        case HID_DG_TOOLSERIALNUMBER:
-               wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
-               wacom_wac->serial[0] |= (__u32)value;
+               if (value) {
+                       wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
+                       wacom_wac->serial[0] |= (__u32)value;
+               }
                return;
+       case HID_DG_TWIST:
+               /*
+                * Userspace expects pen twist to have its zero point when
+                * the buttons/finger is on the tablet's left. HID values
+                * are zero when buttons are toward the top.
+                */
+               value = wacom_offset_rotation(input, usage, value, 1, 4);
+               break;
        case WACOM_HID_WD_SENSE:
                wacom_wac->hid_data.sense_state = value;
                return;
        case WACOM_HID_WD_SERIALHI:
-               wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
-               wacom_wac->serial[0] |= ((__u64)value) << 32;
-               /*
-                * Non-USI EMR devices may contain additional tool type
-                * information here. See WACOM_HID_WD_TOOLTYPE case for
-                * more details.
-                */
-               if (value >> 20 == 1) {
-                       wacom_wac->id[0] |= value & 0xFFFFF;
+               if (value) {
+                       wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
+                       wacom_wac->serial[0] |= ((__u64)value) << 32;
+                       /*
+                        * Non-USI EMR devices may contain additional tool type
+                        * information here. See WACOM_HID_WD_TOOLTYPE case for
+                        * more details.
+                        */
+                       if (value >> 20 == 1) {
+                               wacom_wac->id[0] |= value & 0xFFFFF;
+                       }
                }
                return;
        case WACOM_HID_WD_TOOLTYPE:
@@ -2205,7 +2272,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
                input_report_key(input, wacom_wac->tool[0], prox);
                if (wacom_wac->serial[0]) {
                        input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
-                       input_report_abs(input, ABS_MISC, id);
+                       input_report_abs(input, ABS_MISC, prox ? id : 0);
                }
 
                wacom_wac->hid_data.tipswitch = false;
@@ -2216,6 +2283,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
        if (!prox) {
                wacom_wac->tool[0] = 0;
                wacom_wac->id[0] = 0;
+               wacom_wac->serial[0] = 0;
        }
 }
 
index efd5db743319282922a3fa9994bcee6c618781bb..894b67ac2cae509296cc6c421565df9b7ec48f7f 100644 (file)
@@ -640,6 +640,7 @@ void vmbus_close(struct vmbus_channel *channel)
                 */
                return;
        }
+       mutex_lock(&vmbus_connection.channel_mutex);
        /*
         * Close all the sub-channels first and then close the
         * primary channel.
@@ -648,16 +649,15 @@ void vmbus_close(struct vmbus_channel *channel)
                cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
                vmbus_close_internal(cur_channel);
                if (cur_channel->rescind) {
-                       mutex_lock(&vmbus_connection.channel_mutex);
-                       hv_process_channel_removal(cur_channel,
+                       hv_process_channel_removal(
                                           cur_channel->offermsg.child_relid);
-                       mutex_unlock(&vmbus_connection.channel_mutex);
                }
        }
        /*
         * Now close the primary.
         */
        vmbus_close_internal(channel);
+       mutex_unlock(&vmbus_connection.channel_mutex);
 }
 EXPORT_SYMBOL_GPL(vmbus_close);
 
index 060df71c2e8b72f42020168fb574ff42f6827729..379b0df123bee913a8151640e727c38cce11e9eb 100644 (file)
@@ -159,7 +159,7 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
 
 
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-
+       channel->rescind = true;
        list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
                                msglistentry) {
 
@@ -381,14 +381,21 @@ static void vmbus_release_relid(u32 relid)
                       true);
 }
 
-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
+void hv_process_channel_removal(u32 relid)
 {
        unsigned long flags;
-       struct vmbus_channel *primary_channel;
+       struct vmbus_channel *primary_channel, *channel;
 
-       BUG_ON(!channel->rescind);
        BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
 
+       /*
+        * Make sure channel is valid as we may have raced.
+        */
+       channel = relid2channel(relid);
+       if (!channel)
+               return;
+
+       BUG_ON(!channel->rescind);
        if (channel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(channel->target_cpu,
@@ -515,6 +522,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
        if (!fnew) {
                if (channel->sc_creation_callback != NULL)
                        channel->sc_creation_callback(newchannel);
+               newchannel->probe_done = true;
                return;
        }
 
@@ -834,7 +842,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 {
        struct vmbus_channel_rescind_offer *rescind;
        struct vmbus_channel *channel;
-       unsigned long flags;
        struct device *dev;
 
        rescind = (struct vmbus_channel_rescind_offer *)hdr;
@@ -873,16 +880,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                return;
        }
 
-       spin_lock_irqsave(&channel->lock, flags);
-       channel->rescind = true;
-       spin_unlock_irqrestore(&channel->lock, flags);
-
-       /*
-        * Now that we have posted the rescind state, perform
-        * rescind related cleanup.
-        */
-       vmbus_rescind_cleanup(channel);
-
        /*
         * Now wait for offer handling to complete.
         */
@@ -901,6 +898,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
        if (channel->device_obj) {
                if (channel->chn_rescind_callback) {
                        channel->chn_rescind_callback(channel);
+                       vmbus_rescind_cleanup(channel);
                        return;
                }
                /*
@@ -909,6 +907,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                 */
                dev = get_device(&channel->device_obj->device);
                if (dev) {
+                       vmbus_rescind_cleanup(channel);
                        vmbus_device_unregister(channel->device_obj);
                        put_device(dev);
                }
@@ -921,29 +920,28 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                 * 1. Close all sub-channels first
                 * 2. Then close the primary channel.
                 */
+               mutex_lock(&vmbus_connection.channel_mutex);
+               vmbus_rescind_cleanup(channel);
                if (channel->state == CHANNEL_OPEN_STATE) {
                        /*
                         * The channel is currently not open;
                         * it is safe for us to cleanup the channel.
                         */
-                       mutex_lock(&vmbus_connection.channel_mutex);
-                       hv_process_channel_removal(channel,
-                                               channel->offermsg.child_relid);
-                       mutex_unlock(&vmbus_connection.channel_mutex);
+                       hv_process_channel_removal(rescind->child_relid);
                }
+               mutex_unlock(&vmbus_connection.channel_mutex);
        }
 }
 
 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
 {
-       mutex_lock(&vmbus_connection.channel_mutex);
-
        BUG_ON(!is_hvsock_channel(channel));
 
-       channel->rescind = true;
-       vmbus_device_unregister(channel->device_obj);
+       /* We always get a rescind msg when a connection is closed. */
+       while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
+               msleep(1);
 
-       mutex_unlock(&vmbus_connection.channel_mutex);
+       vmbus_device_unregister(channel->device_obj);
 }
 EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
 
index daa75bd41f8672ad181a7f1ec2d7b9ccc3ab99b8..2364281d8593ece99ce983f86beff657461c2c72 100644 (file)
@@ -170,6 +170,10 @@ static void fcopy_send_data(struct work_struct *dummy)
                out_src = smsg_out;
                break;
 
+       case WRITE_TO_FILE:
+               out_src = fcopy_transaction.fcopy_msg;
+               out_len = sizeof(struct hv_do_fcopy);
+               break;
        default:
                out_src = fcopy_transaction.fcopy_msg;
                out_len = fcopy_transaction.recv_len;
index a9d49f6f6501ccb1965217abb429d13571f26f12..937801ac2fe0eafb3d148072a604c4e50a159572 100644 (file)
@@ -768,8 +768,7 @@ static void vmbus_device_release(struct device *device)
        struct vmbus_channel *channel = hv_dev->channel;
 
        mutex_lock(&vmbus_connection.channel_mutex);
-       hv_process_channel_removal(channel,
-                                  channel->offermsg.child_relid);
+       hv_process_channel_removal(channel->offermsg.child_relid);
        mutex_unlock(&vmbus_connection.channel_mutex);
        kfree(hv_dev);
 
index 97a62f5b9ea466cc4277a14b1a8bc59a6bd360b5..a973eb6a28908be2c17092ad4a1169e288a6f8e8 100644 (file)
@@ -477,6 +477,11 @@ static int da9052_hwmon_probe(struct platform_device *pdev)
                /* disable touchscreen features */
                da9052_reg_write(hwmon->da9052, DA9052_TSI_CONT_A_REG, 0x00);
 
+               /* Sample every 1ms */
+               da9052_reg_update(hwmon->da9052, DA9052_ADC_CONT_REG,
+                                         DA9052_ADCCONT_ADCMODE,
+                                         DA9052_ADCCONT_ADCMODE);
+
                err = da9052_request_irq(hwmon->da9052, DA9052_IRQ_TSIREADY,
                                         "tsiready-irq", da9052_tsi_datardy_irq,
                                         hwmon);
index 5eafbaada7958d81b530637b5ab5c2a3a29066b0..dfc40c740d07b7a2029777b5f01a8bbd145439b0 100644 (file)
@@ -268,14 +268,11 @@ static int tmp102_probe(struct i2c_client *client,
                return err;
        }
 
-       tmp102->ready_time = jiffies;
-       if (tmp102->config_orig & TMP102_CONF_SD) {
-               /*
-                * Mark that we are not ready with data until the first
-                * conversion is complete
-                */
-               tmp102->ready_time += msecs_to_jiffies(CONVERSION_TIME_MS);
-       }
+       /*
+        * Mark that we are not ready with data until the first
+        * conversion is complete
+        */
+       tmp102->ready_time = jiffies + msecs_to_jiffies(CONVERSION_TIME_MS);
 
        hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
                                                         tmp102,
index 9c0dbb8191ad4da9e1c5111da2ab88790ae30966..e1be61095532f03dda79effe36fbde2147a31d66 100644 (file)
@@ -630,7 +630,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
                         sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE,
                         GFP_KERNEL);
        if (rc)
-               goto out_mbox_free;
+               return -ENOMEM;
 
        INIT_WORK(&ctx->workq, xgene_hwmon_evt_work);
 
@@ -646,7 +646,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
                if (IS_ERR(ctx->mbox_chan)) {
                        dev_err(&pdev->dev,
                                "SLIMpro mailbox channel request failed\n");
-                       return -ENODEV;
+                       rc = -ENODEV;
+                       goto out_mbox_free;
                }
        } else {
                struct acpi_pcct_hw_reduced *cppc_ss;
@@ -654,7 +655,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
                if (device_property_read_u32(&pdev->dev, "pcc-channel",
                                             &ctx->mbox_idx)) {
                        dev_err(&pdev->dev, "no pcc-channel property\n");
-                       return -ENODEV;
+                       rc = -ENODEV;
+                       goto out_mbox_free;
                }
 
                cl->rx_callback = xgene_hwmon_pcc_rx_cb;
@@ -662,7 +664,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
                if (IS_ERR(ctx->mbox_chan)) {
                        dev_err(&pdev->dev,
                                "PPC channel request failed\n");
-                       return -ENODEV;
+                       rc = -ENODEV;
+                       goto out_mbox_free;
                }
 
                /*
@@ -675,13 +678,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
                if (!cppc_ss) {
                        dev_err(&pdev->dev, "PPC subspace not found\n");
                        rc = -ENODEV;
-                       goto out_mbox_free;
+                       goto out;
                }
 
                if (!ctx->mbox_chan->mbox->txdone_irq) {
                        dev_err(&pdev->dev, "PCC IRQ not supported\n");
                        rc = -ENODEV;
-                       goto out_mbox_free;
+                       goto out;
                }
 
                /*
@@ -696,14 +699,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
                } else {
                        dev_err(&pdev->dev, "Failed to get PCC comm region\n");
                        rc = -ENODEV;
-                       goto out_mbox_free;
+                       goto out;
                }
 
                if (!ctx->pcc_comm_addr) {
                        dev_err(&pdev->dev,
                                "Failed to ioremap PCC comm region\n");
                        rc = -ENOMEM;
-                       goto out_mbox_free;
+                       goto out;
                }
 
                /*
index bc9cebc305261bbd937f499472a66e64fcc03c05..c2a2ce8ee5410743c750bdbcad8c0170992af1e0 100644 (file)
@@ -143,6 +143,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1),
                .driver_data = (kernel_ulong_t)0,
        },
+       {
+               /* Lewisburg PCH */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa1a6),
+               .driver_data = (kernel_ulong_t)0,
+       },
        {
                /* Gemini Lake */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
@@ -158,6 +163,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
+       {
+               /* Cedar Fork PCH */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
        { 0 },
 };
 
index 9414900575d86b9956fd0ac4b4275b39d63ea867..f129869e05a9b0dc6430b63ce8ff1693c25c4726 100644 (file)
@@ -1119,7 +1119,7 @@ void stm_source_unregister_device(struct stm_source_data *data)
 
        stm_source_link_drop(src);
 
-       device_destroy(&stm_source_class, src->dev.devt);
+       device_unregister(&src->dev);
 }
 EXPORT_SYMBOL_GPL(stm_source_unregister_device);
 
index c06dce2c1da745fab9727715f5ffaa01a40d4d04..45a3f3ca29b383ce7edfbb34e6d3fd506c5173b6 100644 (file)
@@ -131,6 +131,7 @@ config I2C_I801
            Gemini Lake (SOC)
            Cannon Lake-H (PCH)
            Cannon Lake-LP (PCH)
+           Cedar Fork (PCH)
 
          This driver can also be built as a module.  If so, the module
          will be called i2c-i801.
index e114e4e00d2972e2bb55765bfbf7c431d38ccb44..9e12a53ef7b8cf2cdccf9de473af8e2cec9c5f36 100644 (file)
@@ -68,6 +68,7 @@
  * Gemini Lake (SOC)           0x31d4  32      hard    yes     yes     yes
  * Cannon Lake-H (PCH)         0xa323  32      hard    yes     yes     yes
  * Cannon Lake-LP (PCH)                0x9da3  32      hard    yes     yes     yes
+ * Cedar Fork (PCH)            0x18df  32      hard    yes     yes     yes
  *
  * Features supported by this driver:
  * Software PEC                                no
 
 /* Older devices have their ID defined in <linux/pci_ids.h> */
 #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS             0x0f12
+#define PCI_DEVICE_ID_INTEL_CDF_SMBUS                  0x18df
 #define PCI_DEVICE_ID_INTEL_DNV_SMBUS                  0x19df
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS          0x1c22
 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS             0x1d22
@@ -1025,6 +1027,7 @@ static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
@@ -1513,6 +1516,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS:
        case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
        case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
+       case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
        case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
        case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
                priv->features |= FEATURE_I2C_BLOCK_READ;
index 84fb35f6837f0861e628ecab0527dbc6561db008..eb1d91b986fdeaba683a19104465b947b894f10f 100644 (file)
@@ -1459,6 +1459,6 @@ static struct platform_driver img_scb_i2c_driver = {
 };
 module_platform_driver(img_scb_i2c_driver);
 
-MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>");
+MODULE_AUTHOR("James Hogan <jhogan@kernel.org>");
 MODULE_DESCRIPTION("IMG host I2C driver");
 MODULE_LICENSE("GPL v2");
index 54a47b40546f69c7ea0d3dbf033c22c95f106516..f96830ffd9f1c1456965810fad723ab365a7f263 100644 (file)
@@ -1021,7 +1021,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
        }
 
        dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n",
-                       rinfo->sda_gpio, rinfo->scl_gpio);
+                       rinfo->scl_gpio, rinfo->sda_gpio);
 
        rinfo->prepare_recovery = i2c_imx_prepare_recovery;
        rinfo->unprepare_recovery = i2c_imx_unprepare_recovery;
@@ -1100,7 +1100,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
        }
 
        /* Request IRQ */
-       ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0,
+       ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED,
                                pdev->name, i2c_imx);
        if (ret) {
                dev_err(&pdev->dev, "can't claim irq %d\n", irq);
index 22ffcb73c185f592d8b4e6bdbb1ede45cbeb4951..b51adffa484109efb842bbe75afec593ac8a1731 100644 (file)
@@ -340,12 +340,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
                        data->word = dma_buffer[0] | (dma_buffer[1] << 8);
                        break;
                case I2C_SMBUS_BLOCK_DATA:
-               case I2C_SMBUS_I2C_BLOCK_DATA:
                        if (desc->rxbytes != dma_buffer[0] + 1)
                                return -EMSGSIZE;
 
                        memcpy(data->block, dma_buffer, desc->rxbytes);
                        break;
+               case I2C_SMBUS_I2C_BLOCK_DATA:
+                       memcpy(&data->block[1], dma_buffer, desc->rxbytes);
+                       data->block[0] = desc->rxbytes;
+                       break;
                }
                return 0;
        }
index 1ebb5e947e0b6625fcf0cda7a71f51e79ac29178..23c2ea2baedc07ee15dfab3e9ea0ce0629587374 100644 (file)
@@ -360,6 +360,7 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
        unsigned long fclk_rate = 12000000;
        unsigned long internal_clk = 0;
        struct clk *fclk;
+       int error;
 
        if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) {
                /*
@@ -378,6 +379,13 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
                 * do this bit unconditionally.
                 */
                fclk = clk_get(omap->dev, "fck");
+               if (IS_ERR(fclk)) {
+                       error = PTR_ERR(fclk);
+                       dev_err(omap->dev, "could not get fck: %i\n", error);
+
+                       return error;
+               }
+
                fclk_rate = clk_get_rate(fclk);
                clk_put(fclk);
 
@@ -410,6 +418,12 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
                else
                        internal_clk = 4000;
                fclk = clk_get(omap->dev, "fck");
+               if (IS_ERR(fclk)) {
+                       error = PTR_ERR(fclk);
+                       dev_err(omap->dev, "could not get fck: %i\n", error);
+
+                       return error;
+               }
                fclk_rate = clk_get_rate(fclk) / 1000;
                clk_put(fclk);
 
index 0ecdb47a23abcbf9691bf809b126d72d6c3a46f8..174579d32e5f39ecdc44d2c230b55fbfb5d073e2 100644 (file)
@@ -85,6 +85,9 @@
 /* SB800 constants */
 #define SB800_PIIX4_SMB_IDX            0xcd6
 
+#define KERNCZ_IMC_IDX                 0x3e
+#define KERNCZ_IMC_DATA                        0x3f
+
 /*
  * SB800 port is selected by bits 2:1 of the smb_en register (0x2c)
  * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f.
 #define SB800_PIIX4_PORT_IDX_ALT       0x2e
 #define SB800_PIIX4_PORT_IDX_SEL       0x2f
 #define SB800_PIIX4_PORT_IDX_MASK      0x06
+#define SB800_PIIX4_PORT_IDX_SHIFT     1
+
+/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
+#define SB800_PIIX4_PORT_IDX_KERNCZ            0x02
+#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ       0x18
+#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ      3
 
 /* insmod parameters */
 
@@ -149,6 +158,8 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
  */
 static DEFINE_MUTEX(piix4_mutex_sb800);
 static u8 piix4_port_sel_sb800;
+static u8 piix4_port_mask_sb800;
+static u8 piix4_port_shift_sb800;
 static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
        " port 0", " port 2", " port 3", " port 4"
 };
@@ -159,6 +170,7 @@ struct i2c_piix4_adapdata {
 
        /* SB800 */
        bool sb800_main;
+       bool notify_imc;
        u8 port;                /* Port number, shifted */
 };
 
@@ -347,7 +359,19 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
 
        /* Find which register is used for port selection */
        if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
-               piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+               switch (PIIX4_dev->device) {
+               case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+                       piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
+                       piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
+                       piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
+                       break;
+               case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
+               default:
+                       piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+                       piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+                       piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
+                       break;
+               }
        } else {
                mutex_lock(&piix4_mutex_sb800);
                outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
@@ -355,6 +379,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
                piix4_port_sel_sb800 = (port_sel & 0x01) ?
                                       SB800_PIIX4_PORT_IDX_ALT :
                                       SB800_PIIX4_PORT_IDX;
+               piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+               piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
                mutex_unlock(&piix4_mutex_sb800);
        }
 
@@ -572,6 +598,67 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
        return 0;
 }
 
+static uint8_t piix4_imc_read(uint8_t idx)
+{
+       outb_p(idx, KERNCZ_IMC_IDX);
+       return inb_p(KERNCZ_IMC_DATA);
+}
+
+static void piix4_imc_write(uint8_t idx, uint8_t value)
+{
+       outb_p(idx, KERNCZ_IMC_IDX);
+       outb_p(value, KERNCZ_IMC_DATA);
+}
+
+static int piix4_imc_sleep(void)
+{
+       int timeout = MAX_TIMEOUT;
+
+       if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
+               return -EBUSY;
+
+       /* clear response register */
+       piix4_imc_write(0x82, 0x00);
+       /* request ownership flag */
+       piix4_imc_write(0x83, 0xB4);
+       /* kick off IMC Mailbox command 96 */
+       piix4_imc_write(0x80, 0x96);
+
+       while (timeout--) {
+               if (piix4_imc_read(0x82) == 0xfa) {
+                       release_region(KERNCZ_IMC_IDX, 2);
+                       return 0;
+               }
+               usleep_range(1000, 2000);
+       }
+
+       release_region(KERNCZ_IMC_IDX, 2);
+       return -ETIMEDOUT;
+}
+
+static void piix4_imc_wakeup(void)
+{
+       int timeout = MAX_TIMEOUT;
+
+       if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
+               return;
+
+       /* clear response register */
+       piix4_imc_write(0x82, 0x00);
+       /* release ownership flag */
+       piix4_imc_write(0x83, 0xB5);
+       /* kick off IMC Mailbox command 96 */
+       piix4_imc_write(0x80, 0x96);
+
+       while (timeout--) {
+               if (piix4_imc_read(0x82) == 0xfa)
+                       break;
+               usleep_range(1000, 2000);
+       }
+
+       release_region(KERNCZ_IMC_IDX, 2);
+}
+
 /*
  * Handles access to multiple SMBus ports on the SB800.
  * The port is selected by bits 2:1 of the smb_en register (0x2c).
@@ -612,12 +699,47 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
                return -EBUSY;
        }
 
+       /*
+        * Notify the IMC (Integrated Micro Controller) if required.
+        * Among other responsibilities, the IMC is in charge of monitoring
+        * the System fans and temperature sensors, and act accordingly.
+        * All this is done through SMBus and can/will collide
+        * with our transactions if they are long (BLOCK_DATA).
+        * Therefore we need to request the ownership flag during those
+        * transactions.
+        */
+       if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) {
+               int ret;
+
+               ret = piix4_imc_sleep();
+               switch (ret) {
+               case -EBUSY:
+                       dev_warn(&adap->dev,
+                                "IMC base address index region 0x%x already in use.\n",
+                                KERNCZ_IMC_IDX);
+                       break;
+               case -ETIMEDOUT:
+                       dev_warn(&adap->dev,
+                                "Failed to communicate with the IMC.\n");
+                       break;
+               default:
+                       break;
+               }
+
+               /* If IMC communication fails do not retry */
+               if (ret) {
+                       dev_warn(&adap->dev,
+                                "Continuing without IMC notification.\n");
+                       adapdata->notify_imc = false;
+               }
+       }
+
        outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
        smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
 
        port = adapdata->port;
-       if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port)
-               outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port,
+       if ((smba_en_lo & piix4_port_mask_sb800) != port)
+               outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
                       SB800_PIIX4_SMB_IDX + 1);
 
        retval = piix4_access(adap, addr, flags, read_write,
@@ -628,6 +750,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
        /* Release the semaphore */
        outb_p(smbslvcnt | 0x20, SMBSLVCNT);
 
+       if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc)
+               piix4_imc_wakeup();
+
        mutex_unlock(&piix4_mutex_sb800);
 
        return retval;
@@ -679,7 +804,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
 static struct i2c_adapter *piix4_aux_adapter;
 
 static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
-                            bool sb800_main, u8 port,
+                            bool sb800_main, u8 port, bool notify_imc,
                             const char *name, struct i2c_adapter **padap)
 {
        struct i2c_adapter *adap;
@@ -706,7 +831,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
 
        adapdata->smba = smba;
        adapdata->sb800_main = sb800_main;
-       adapdata->port = port << 1;
+       adapdata->port = port << piix4_port_shift_sb800;
+       adapdata->notify_imc = notify_imc;
 
        /* set up the sysfs linkage to our parent device */
        adap->dev.parent = &dev->dev;
@@ -728,14 +854,15 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
        return 0;
 }
 
-static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba)
+static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba,
+                                   bool notify_imc)
 {
        struct i2c_piix4_adapdata *adapdata;
        int port;
        int retval;
 
        for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) {
-               retval = piix4_add_adapter(dev, smba, true, port,
+               retval = piix4_add_adapter(dev, smba, true, port, notify_imc,
                                           piix4_main_port_names_sb800[port],
                                           &piix4_main_adapters[port]);
                if (retval < 0)
@@ -769,6 +896,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
             dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
             dev->revision >= 0x40) ||
            dev->vendor == PCI_VENDOR_ID_AMD) {
+               bool notify_imc = false;
                is_sb800 = true;
 
                if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) {
@@ -778,6 +906,20 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        return -EBUSY;
                }
 
+               if (dev->vendor == PCI_VENDOR_ID_AMD &&
+                   dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) {
+                       u8 imc;
+
+                       /*
+                        * Detect if IMC is active or not, this method is
+                        * described on coreboot's AMD IMC notes
+                        */
+                       pci_bus_read_config_byte(dev->bus, PCI_DEVFN(0x14, 3),
+                                                0x40, &imc);
+                       if (imc & 0x80)
+                               notify_imc = true;
+               }
+
                /* base address location etc changed in SB800 */
                retval = piix4_setup_sb800(dev, id, 0);
                if (retval < 0) {
@@ -789,7 +931,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
                 * Try to register multiplexed main SMBus adapter,
                 * give up if we can't
                 */
-               retval = piix4_add_adapters_sb800(dev, retval);
+               retval = piix4_add_adapters_sb800(dev, retval, notify_imc);
                if (retval < 0) {
                        release_region(SB800_PIIX4_SMB_IDX, 2);
                        return retval;
@@ -800,7 +942,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        return retval;
 
                /* Try to register main SMBus adapter, give up if we can't */
-               retval = piix4_add_adapter(dev, retval, false, 0, "",
+               retval = piix4_add_adapter(dev, retval, false, 0, false, "",
                                           &piix4_main_adapters[0]);
                if (retval < 0)
                        return retval;
@@ -827,7 +969,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (retval > 0) {
                /* Try to add the aux adapter if it exists,
                 * piix4_add_adapter will clean up if this fails */
-               piix4_add_adapter(dev, retval, false, 0,
+               piix4_add_adapter(dev, retval, false, 0, false,
                                  is_sb800 ? piix4_aux_port_name_sb800 : "",
                                  &piix4_aux_adapter);
        }
index 22e08ae1704f4a6730928ee1e1392f9e08a08778..25fcc3c1e32bf3d9a41fa345982039fb234dbcbd 100644 (file)
@@ -627,6 +627,7 @@ static const struct dev_pm_ops sprd_i2c_pm_ops = {
 
 static const struct of_device_id sprd_i2c_of_match[] = {
        { .compatible = "sprd,sc9860-i2c", },
+       {},
 };
 
 static struct platform_driver sprd_i2c_driver = {
index 47c67b0ca8960a2436ef899f9e1cecd5d35ab19c..d4a6e9c2e9aaeaa679bb159ade420bcdb37e6988 100644 (file)
@@ -215,7 +215,7 @@ struct stm32f7_i2c_dev {
        unsigned int msg_num;
        unsigned int msg_id;
        struct stm32f7_i2c_msg f7_msg;
-       struct stm32f7_i2c_setup *setup;
+       struct stm32f7_i2c_setup setup;
        struct stm32f7_i2c_timings timing;
 };
 
@@ -265,7 +265,7 @@ static struct stm32f7_i2c_spec i2c_specs[] = {
        },
 };
 
-struct stm32f7_i2c_setup stm32f7_setup = {
+static const struct stm32f7_i2c_setup stm32f7_setup = {
        .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
        .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
        .dnf = STM32F7_I2C_DNF_DEFAULT,
@@ -537,7 +537,7 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
        writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR);
 
        /* Enable I2C */
-       if (i2c_dev->setup->analog_filter)
+       if (i2c_dev->setup.analog_filter)
                stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
                                     STM32F7_I2C_CR1_ANFOFF);
        else
@@ -887,22 +887,19 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
        }
 
        setup = of_device_get_match_data(&pdev->dev);
-       i2c_dev->setup->rise_time = setup->rise_time;
-       i2c_dev->setup->fall_time = setup->fall_time;
-       i2c_dev->setup->dnf = setup->dnf;
-       i2c_dev->setup->analog_filter = setup->analog_filter;
+       i2c_dev->setup = *setup;
 
        ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns",
                                       &rise_time);
        if (!ret)
-               i2c_dev->setup->rise_time = rise_time;
+               i2c_dev->setup.rise_time = rise_time;
 
        ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns",
                                       &fall_time);
        if (!ret)
-               i2c_dev->setup->fall_time = fall_time;
+               i2c_dev->setup.fall_time = fall_time;
 
-       ret = stm32f7_i2c_setup_timing(i2c_dev, i2c_dev->setup);
+       ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
        if (ret)
                goto clk_free;
 
index 01b2adfd8226bf1bcd048100f4e4bcb0f6afb180..eaf39e5db08ba98ad2bfcc32e9892e2f2c8be972 100644 (file)
@@ -1451,6 +1451,7 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
                if (hwif_init(hwif) == 0) {
                        printk(KERN_INFO "%s: failed to initialize IDE "
                                         "interface\n", hwif->name);
+                       device_unregister(hwif->portdev);
                        device_unregister(&hwif->gendev);
                        ide_disable_port(hwif);
                        continue;
index 86aa88aeb3a6b529315277f636fc07b196030d0a..acf874800ca4086803d6740d3df32ca38b732017 100644 (file)
@@ -56,6 +56,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
 {
        struct list_head *l;
        struct pci_driver *d;
+       int ret;
 
        list_for_each(l, &ide_pci_drivers) {
                d = list_entry(l, struct pci_driver, node);
@@ -63,10 +64,14 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
                        const struct pci_device_id *id =
                                pci_match_id(d->id_table, dev);
 
-                       if (id != NULL && d->probe(dev, id) >= 0) {
-                               dev->driver = d;
-                               pci_dev_get(dev);
-                               return 1;
+                       if (id != NULL) {
+                               pci_assign_irq(dev);
+                               ret = d->probe(dev, id);
+                               if (ret >= 0) {
+                                       dev->driver = d;
+                                       pci_dev_get(dev);
+                                       return 1;
+                               }
                        }
                }
        }
index 112d2fe1bcdbc8b3acb45d613901afa06798d556..fdc8e813170c32b4df99417c37d4940e0abbbc78 100644 (file)
@@ -179,6 +179,7 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
 /**
  *     ide_pci_enable  -       do PCI enables
  *     @dev: PCI device
+ *     @bars: PCI BARs mask
  *     @d: IDE port info
  *
  *     Enable the IDE PCI device. We attempt to enable the device in full
@@ -189,9 +190,10 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
  *     Returns zero on success or an error code
  */
 
-static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
+static int ide_pci_enable(struct pci_dev *dev, int bars,
+                         const struct ide_port_info *d)
 {
-       int ret, bars;
+       int ret;
 
        if (pci_enable_device(dev)) {
                ret = pci_enable_device_io(dev);
@@ -216,18 +218,6 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
                goto out;
        }
 
-       if (d->host_flags & IDE_HFLAG_SINGLE)
-               bars = (1 << 2) - 1;
-       else
-               bars = (1 << 4) - 1;
-
-       if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
-               if (d->host_flags & IDE_HFLAG_CS5520)
-                       bars |= (1 << 2);
-               else
-                       bars |= (1 << 4);
-       }
-
        ret = pci_request_selected_regions(dev, bars, d->name);
        if (ret < 0)
                printk(KERN_ERR "%s %s: can't reserve resources\n",
@@ -403,6 +393,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
 /**
  *     ide_setup_pci_controller        -       set up IDE PCI
  *     @dev: PCI device
+ *     @bars: PCI BARs mask
  *     @d: IDE port info
  *     @noisy: verbose flag
  *
@@ -411,7 +402,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
  *     and enables it if need be
  */
 
-static int ide_setup_pci_controller(struct pci_dev *dev,
+static int ide_setup_pci_controller(struct pci_dev *dev, int bars,
                                    const struct ide_port_info *d, int noisy)
 {
        int ret;
@@ -420,7 +411,7 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
        if (noisy)
                ide_setup_pci_noise(dev, d);
 
-       ret = ide_pci_enable(dev, d);
+       ret = ide_pci_enable(dev, bars, d);
        if (ret < 0)
                goto out;
 
@@ -428,16 +419,20 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
        if (ret < 0) {
                printk(KERN_ERR "%s %s: error accessing PCI regs\n",
                        d->name, pci_name(dev));
-               goto out;
+               goto out_free_bars;
        }
        if (!(pcicmd & PCI_COMMAND_IO)) {       /* is device disabled? */
                ret = ide_pci_configure(dev, d);
                if (ret < 0)
-                       goto out;
+                       goto out_free_bars;
                printk(KERN_INFO "%s %s: device enabled (Linux)\n",
                        d->name, pci_name(dev));
        }
 
+       goto out;
+
+out_free_bars:
+       pci_release_selected_regions(dev, bars);
 out:
        return ret;
 }
@@ -540,13 +535,28 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
 {
        struct pci_dev *pdev[] = { dev1, dev2 };
        struct ide_host *host;
-       int ret, i, n_ports = dev2 ? 4 : 2;
+       int ret, i, n_ports = dev2 ? 4 : 2, bars;
        struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
 
+       if (d->host_flags & IDE_HFLAG_SINGLE)
+               bars = (1 << 2) - 1;
+       else
+               bars = (1 << 4) - 1;
+
+       if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+               if (d->host_flags & IDE_HFLAG_CS5520)
+                       bars |= (1 << 2);
+               else
+                       bars |= (1 << 4);
+       }
+
        for (i = 0; i < n_ports / 2; i++) {
-               ret = ide_setup_pci_controller(pdev[i], d, !i);
-               if (ret < 0)
+               ret = ide_setup_pci_controller(pdev[i], bars, d, !i);
+               if (ret < 0) {
+                       if (i == 1)
+                               pci_release_selected_regions(pdev[0], bars);
                        goto out;
+               }
 
                ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
        }
@@ -554,7 +564,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
        host = ide_host_alloc(d, hws, n_ports);
        if (host == NULL) {
                ret = -ENOMEM;
-               goto out;
+               goto out_free_bars;
        }
 
        host->dev[0] = &dev1->dev;
@@ -576,7 +586,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
                 * do_ide_setup_pci_device() on the first device!
                 */
                if (ret < 0)
-                       goto out;
+                       goto out_free_bars;
 
                /* fixup IRQ */
                if (ide_pci_is_in_compatibility_mode(pdev[i])) {
@@ -589,6 +599,13 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
        ret = ide_host_register(host, d, hws);
        if (ret)
                ide_host_free(host);
+       else
+               goto out;
+
+out_free_bars:
+       i = n_ports / 2;
+       while (i--)
+               pci_release_selected_regions(pdev[i], bars);
 out:
        return ret;
 }
index 57625653fcb6da69c67fbe381f7ac3e805adcf4f..1d13bf03c75863e745623260cae2cef71f012130 100644 (file)
@@ -243,6 +243,8 @@ config DA9150_GPADC
 config DLN2_ADC
        tristate "Diolan DLN-2 ADC driver support"
        depends on MFD_DLN2
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
        help
          Say yes here to build support for Diolan DLN-2 ADC.
 
index e6706a09e100d271ea5528e3c8b8c289d6fca9a3..47c3d7f329004d577f865d261702622149d2cc46 100644 (file)
@@ -257,7 +257,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
        unsigned int vref_mv)
 {
        struct ad7793_state *st = iio_priv(indio_dev);
-       int i, ret = -1;
+       int i, ret;
        unsigned long long scale_uv;
        u32 id;
 
@@ -266,7 +266,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
                return ret;
 
        /* reset the serial interface */
-       ret = spi_write(st->sd.spi, (u8 *)&ret, sizeof(ret));
+       ret = ad_sd_reset(&st->sd, 32);
        if (ret < 0)
                goto out;
        usleep_range(500, 2000); /* Wait for at least 500us */
index d10bd0c97233fa2351954e8bc6fdcd0b299e61dd..22c4c17cd9969486fe1ef80b68eaa99e51c43032 100644 (file)
@@ -177,6 +177,34 @@ out:
 }
 EXPORT_SYMBOL_GPL(ad_sd_read_reg);
 
+/**
+ * ad_sd_reset() - Reset the serial interface
+ *
+ * @sigma_delta: The sigma delta device
+ * @reset_length: Number of SCLKs with DIN = 1
+ *
+ * Returns 0 on success, an error code otherwise.
+ **/
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
+       unsigned int reset_length)
+{
+       uint8_t *buf;
+       unsigned int size;
+       int ret;
+
+       size = DIV_ROUND_UP(reset_length, 8);
+       buf = kmalloc(size, GFP_KERNEL); /* no kcalloc: memset(0xff) below */
+       if (!buf)
+               return -ENOMEM;
+
+       memset(buf, 0xff, size);
+       ret = spi_write(sigma_delta->spi, buf, size);
+       kfree(buf);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ad_sd_reset);
+
 static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
        unsigned int mode, unsigned int channel)
 {
index bc5b38e3a147e6559e4863bfc39e21115648d7b9..a70ef7fec95f05065295a41280d7b17632aac7c9 100644 (file)
@@ -225,6 +225,7 @@ struct at91_adc_trigger {
        char                            *name;
        unsigned int                    trgmod_value;
        unsigned int                    edge_type;
+       bool                            hw_trig;
 };
 
 struct at91_adc_state {
@@ -254,16 +255,25 @@ static const struct at91_adc_trigger at91_adc_trigger_list[] = {
                .name = "external_rising",
                .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_RISE,
                .edge_type = IRQ_TYPE_EDGE_RISING,
+               .hw_trig = true,
        },
        {
                .name = "external_falling",
                .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL,
                .edge_type = IRQ_TYPE_EDGE_FALLING,
+               .hw_trig = true,
        },
        {
                .name = "external_any",
                .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY,
                .edge_type = IRQ_TYPE_EDGE_BOTH,
+               .hw_trig = true,
+       },
+       {
+               .name = "software",
+               .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_NO_TRIGGER,
+               .edge_type = IRQ_TYPE_NONE,
+               .hw_trig = false,
        },
 };
 
@@ -597,7 +607,7 @@ static int at91_adc_probe(struct platform_device *pdev)
        struct at91_adc_state *st;
        struct resource *res;
        int ret, i;
-       u32 edge_type;
+       u32 edge_type = IRQ_TYPE_NONE;
 
        indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
        if (!indio_dev)
@@ -641,14 +651,14 @@ static int at91_adc_probe(struct platform_device *pdev)
        ret = of_property_read_u32(pdev->dev.of_node,
                                   "atmel,trigger-edge-type", &edge_type);
        if (ret) {
-               dev_err(&pdev->dev,
-                       "invalid or missing value for atmel,trigger-edge-type\n");
-               return ret;
+               dev_dbg(&pdev->dev,
+                       "atmel,trigger-edge-type not specified, only software trigger available\n");
        }
 
        st->selected_trig = NULL;
 
-       for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT; i++)
+       /* find the right trigger, or no trigger at all */
+       for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT + 1; i++)
                if (at91_adc_trigger_list[i].edge_type == edge_type) {
                        st->selected_trig = &at91_adc_trigger_list[i];
                        break;
@@ -717,24 +727,27 @@ static int at91_adc_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, indio_dev);
 
-       ret = at91_adc_buffer_init(indio_dev);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
-               goto per_clk_disable_unprepare;
-       }
+       if (st->selected_trig->hw_trig) {
+               ret = at91_adc_buffer_init(indio_dev);
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
+                       goto per_clk_disable_unprepare;
+               }
 
-       ret = at91_adc_trigger_init(indio_dev);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "couldn't setup the triggers.\n");
-               goto per_clk_disable_unprepare;
+               ret = at91_adc_trigger_init(indio_dev);
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "couldn't setup the triggers.\n");
+                       goto per_clk_disable_unprepare;
+               }
        }
 
        ret = iio_device_register(indio_dev);
        if (ret < 0)
                goto per_clk_disable_unprepare;
 
-       dev_info(&pdev->dev, "setting up trigger as %s\n",
-                st->selected_trig->name);
+       if (st->selected_trig->hw_trig)
+               dev_info(&pdev->dev, "setting up trigger as %s\n",
+                        st->selected_trig->name);
 
        dev_info(&pdev->dev, "version: %x\n",
                 readl_relaxed(st->base + AT91_SAMA5D2_VERSION));
index 634717ae12f354a79c77cea7fbd84633c1bb9f79..071dd23a33d9f0a8bbe51752f3f85cebf8362a42 100644 (file)
@@ -17,6 +17,8 @@
  * MCP3204
  * MCP3208
  * ------------
+ * 13 bit converter
+ * MCP3301
  *
  * Datasheet can be found here:
  * http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf  mcp3001
@@ -96,7 +98,7 @@ static int mcp320x_channel_to_tx_data(int device_index,
 }
 
 static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
-                                 bool differential, int device_index)
+                                 bool differential, int device_index, int *val)
 {
        int ret;
 
@@ -117,19 +119,25 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
 
        switch (device_index) {
        case mcp3001:
-               return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+               *val = (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+               return 0;
        case mcp3002:
        case mcp3004:
        case mcp3008:
-               return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+               *val = (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+               return 0;
        case mcp3201:
-               return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+               *val = (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+               return 0;
        case mcp3202:
        case mcp3204:
        case mcp3208:
-               return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+               *val = (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+               return 0;
        case mcp3301:
-               return sign_extend32((adc->rx_buf[0] & 0x1f) << 8 | adc->rx_buf[1], 12);
+               *val = sign_extend32((adc->rx_buf[0] & 0x1f) << 8
+                                   | adc->rx_buf[1], 12);
+               return 0;
        default:
                return -EINVAL;
        }
@@ -150,12 +158,10 @@ static int mcp320x_read_raw(struct iio_dev *indio_dev,
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
                ret = mcp320x_adc_conversion(adc, channel->address,
-                       channel->differential, device_index);
-
+                       channel->differential, device_index, val);
                if (ret < 0)
                        goto out;
 
-               *val = ret;
                ret = IIO_VAL_INT;
                break;
 
@@ -312,6 +318,7 @@ static int mcp320x_probe(struct spi_device *spi)
        indio_dev->name = spi_get_device_id(spi)->name;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->info = &mcp320x_info;
+       spi_set_drvdata(spi, indio_dev);
 
        chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
        indio_dev->channels = chip_info->channels;
index e3c15f88075f7beb37b290e0a7d89bcc5bcd35d1..4df32cf1650e7c7f58f680026610478857269a35 100644 (file)
@@ -1666,7 +1666,7 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
 
        num_channels = of_property_count_u32_elems(node, "st,adc-channels");
        if (num_channels < 0 ||
-           num_channels >= adc_info->max_channels) {
+           num_channels > adc_info->max_channels) {
                dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
                return num_channels < 0 ? num_channels : -EINVAL;
        }
index d1210024f6bccec6c321df49d537df5e7b1d7661..e0dc204883357270dfe7b568c3c588089cf96777 100644 (file)
@@ -52,7 +52,7 @@
 
 #define ADS1015_CFG_COMP_QUE_MASK      GENMASK(1, 0)
 #define ADS1015_CFG_COMP_LAT_MASK      BIT(2)
-#define ADS1015_CFG_COMP_POL_MASK      BIT(2)
+#define ADS1015_CFG_COMP_POL_MASK      BIT(3)
 #define ADS1015_CFG_COMP_MODE_MASK     BIT(4)
 #define ADS1015_CFG_DR_MASK    GENMASK(7, 5)
 #define ADS1015_CFG_MOD_MASK   BIT(8)
@@ -1017,10 +1017,12 @@ static int ads1015_probe(struct i2c_client *client,
 
                switch (irq_trig) {
                case IRQF_TRIGGER_LOW:
-                       cfg_comp |= ADS1015_CFG_COMP_POL_LOW;
+                       cfg_comp |= ADS1015_CFG_COMP_POL_LOW <<
+                                       ADS1015_CFG_COMP_POL_SHIFT;
                        break;
                case IRQF_TRIGGER_HIGH:
-                       cfg_comp |= ADS1015_CFG_COMP_POL_HIGH;
+                       cfg_comp |= ADS1015_CFG_COMP_POL_HIGH <<
+                                       ADS1015_CFG_COMP_POL_SHIFT;
                        break;
                default:
                        return -EINVAL;
index 1edd99f0c5e55a367772ab85b01e8a64cf399b54..e3cfb91bffc61122d56dc44b2ccd793afcd73800 100644 (file)
@@ -887,21 +887,27 @@ static int twl4030_madc_probe(struct platform_device *pdev)
 
        /* Enable 3v1 bias regulator for MADC[3:6] */
        madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1");
-       if (IS_ERR(madc->usb3v1))
-               return -ENODEV;
+       if (IS_ERR(madc->usb3v1)) {
+               ret = -ENODEV;
+               goto err_i2c;
+       }
 
        ret = regulator_enable(madc->usb3v1);
-       if (ret)
+       if (ret) {
                dev_err(madc->dev, "could not enable 3v1 bias regulator\n");
+               goto err_i2c;
+       }
 
        ret = iio_device_register(iio_dev);
        if (ret) {
                dev_err(&pdev->dev, "could not register iio device\n");
-               goto err_i2c;
+               goto err_usb3v1;
        }
 
        return 0;
 
+err_usb3v1:
+       regulator_disable(madc->usb3v1);
 err_i2c:
        twl4030_madc_set_current_generator(madc, 0, 0);
 err_current_generator:
index d99bb1460fe240b1721cdeda742c092739e13e9f..02e833b14db08db9460b073f10a74f1942d3c752 100644 (file)
@@ -463,8 +463,17 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
        u8 drdy_mask;
        struct st_sensor_data *sdata = iio_priv(indio_dev);
 
-       if (!sdata->sensor_settings->drdy_irq.addr)
+       if (!sdata->sensor_settings->drdy_irq.addr) {
+               /*
+                * there are some devices (e.g. LIS3MDL) where drdy line is
+                * routed to a given pin and it is not possible to select a
+                * different one. Take into account irq status register
+                * to understand if irq trigger can be properly supported
+                */
+               if (sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+                       sdata->hw_irq_trigger = enable;
                return 0;
+       }
 
        /* Enable/Disable the interrupt generator 1. */
        if (sdata->sensor_settings->drdy_irq.ig1.en_addr > 0) {
index ed63ffd849f8201505499d2be4594572b09b535d..7ec2a0bb08076ffa4ffefaf50cc0336471553756 100644 (file)
@@ -72,6 +72,7 @@ int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
                                st->event_en = state;
                        else
                                return -EINVAL;
+                       break;
                default:
                        return -EINVAL;
                }
index 17ec4cee51dc005a9f7cc0efc92a9326a3906a61..a47428b4d31be9b07adc5cfb96dd9c0472397006 100644 (file)
@@ -310,8 +310,10 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
        ret = indio_dev->info->debugfs_reg_access(indio_dev,
                                                  indio_dev->cached_reg_addr,
                                                  0, &val);
-       if (ret)
+       if (ret) {
                dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
+               return ret;
+       }
 
        len = snprintf(buf, sizeof(buf), "0x%X\n", val);
 
index e68368b5b2a38eed3ec722cc4a6e342401767ea0..08aafba4481c66345b7ef770b3e44ce8b464f843 100644 (file)
@@ -315,6 +315,10 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
                                },
                        },
                },
+               .drdy_irq = {
+                       /* drdy line is routed drdy pin */
+                       .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+               },
                .multi_read_bit = true,
                .bootime = 2,
        },
index 0d2ea3ee371b9fc8c0fff8c213f97ee3ab31b5a1..8f26428804a236fa1a38e612c0cd6aa3245b7eff 100644 (file)
@@ -573,7 +573,7 @@ static int bmp280_chip_config(struct bmp280_data *data)
        u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) |
                  BMP280_OSRS_PRESS_X(data->oversampling_press + 1);
 
-       ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS,
+       ret = regmap_write_bits(data->regmap, BMP280_REG_CTRL_MEAS,
                                 BMP280_OSRS_TEMP_MASK |
                                 BMP280_OSRS_PRESS_MASK |
                                 BMP280_MODE_MASK,
index ebfb1de7377ffb5b0c30cf3c23c61b0f7afcdddd..91431454eb85de1c51cf88e477ab3d8378e4d007 100644 (file)
@@ -865,7 +865,6 @@ complete:
 static int zpa2326_wait_oneshot_completion(const struct iio_dev   *indio_dev,
                                           struct zpa2326_private *private)
 {
-       int          ret;
        unsigned int val;
        long     timeout;
 
@@ -887,14 +886,11 @@ static int zpa2326_wait_oneshot_completion(const struct iio_dev   *indio_dev,
                /* Timed out. */
                zpa2326_warn(indio_dev, "no one shot interrupt occurred (%ld)",
                             timeout);
-               ret = -ETIME;
-       } else if (timeout < 0) {
-               zpa2326_warn(indio_dev,
-                            "wait for one shot interrupt cancelled");
-               ret = -ERESTARTSYS;
+               return -ETIME;
        }
 
-       return ret;
+       zpa2326_warn(indio_dev, "wait for one shot interrupt cancelled");
+       return -ERESTARTSYS;
 }
 
 static int zpa2326_init_managed_irq(struct device          *parent,
index 0eeff29b61bed8cc2f678c05a22f1fa51562794b..4a48b7ba3a1c1753c6c3f7d847131ca0ba61b4ac 100644 (file)
 #define AS3935_AFE_GAIN_MAX    0x1F
 #define AS3935_AFE_PWR_BIT     BIT(0)
 
+#define AS3935_NFLWDTH         0x01
+#define AS3935_NFLWDTH_MASK    0x7f
+
 #define AS3935_INT             0x03
 #define AS3935_INT_MASK                0x0f
+#define AS3935_DISTURB_INT     BIT(2)
 #define AS3935_EVENT_INT       BIT(3)
 #define AS3935_NOISE_INT       BIT(0)
 
@@ -48,6 +52,7 @@
 #define AS3935_DATA_MASK       0x3F
 
 #define AS3935_TUNE_CAP                0x08
+#define AS3935_DEFAULTS                0x3C
 #define AS3935_CALIBRATE       0x3D
 
 #define AS3935_READ_DATA       BIT(14)
@@ -62,7 +67,9 @@ struct as3935_state {
        struct mutex lock;
        struct delayed_work work;
 
+       unsigned long noise_tripped;
        u32 tune_cap;
+       u32 nflwdth_reg;
        u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
        u8 buf[2] ____cacheline_aligned;
 };
@@ -145,12 +152,29 @@ static ssize_t as3935_sensor_sensitivity_store(struct device *dev,
        return len;
 }
 
+static ssize_t as3935_noise_level_tripped_show(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct as3935_state *st = iio_priv(dev_to_iio_dev(dev));
+       int ret;
+
+       mutex_lock(&st->lock);
+       ret = sprintf(buf, "%d\n", !time_after(jiffies, st->noise_tripped + HZ));
+       mutex_unlock(&st->lock);
+
+       return ret;
+}
+
 static IIO_DEVICE_ATTR(sensor_sensitivity, S_IRUGO | S_IWUSR,
        as3935_sensor_sensitivity_show, as3935_sensor_sensitivity_store, 0);
 
+static IIO_DEVICE_ATTR(noise_level_tripped, S_IRUGO,
+       as3935_noise_level_tripped_show, NULL, 0);
 
 static struct attribute *as3935_attributes[] = {
        &iio_dev_attr_sensor_sensitivity.dev_attr.attr,
+       &iio_dev_attr_noise_level_tripped.dev_attr.attr,
        NULL,
 };
 
@@ -246,7 +270,11 @@ static void as3935_event_work(struct work_struct *work)
        case AS3935_EVENT_INT:
                iio_trigger_poll_chained(st->trig);
                break;
+       case AS3935_DISTURB_INT:
        case AS3935_NOISE_INT:
+               mutex_lock(&st->lock);
+               st->noise_tripped = jiffies;
+               mutex_unlock(&st->lock);
                dev_warn(&st->spi->dev, "noise level is too high\n");
                break;
        }
@@ -269,15 +297,14 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private)
 
 static void calibrate_as3935(struct as3935_state *st)
 {
-       /* mask disturber interrupt bit */
-       as3935_write(st, AS3935_INT, BIT(5));
-
+       as3935_write(st, AS3935_DEFAULTS, 0x96);
        as3935_write(st, AS3935_CALIBRATE, 0x96);
        as3935_write(st, AS3935_TUNE_CAP,
                BIT(5) | (st->tune_cap / TUNE_CAP_DIV));
 
        mdelay(2);
        as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
+       as3935_write(st, AS3935_NFLWDTH, st->nflwdth_reg);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -370,6 +397,15 @@ static int as3935_probe(struct spi_device *spi)
                return -EINVAL;
        }
 
+       ret = of_property_read_u32(np,
+                       "ams,nflwdth", &st->nflwdth_reg);
+       if (!ret && st->nflwdth_reg > AS3935_NFLWDTH_MASK) {
+               dev_err(&spi->dev,
+                       "invalid nflwdth setting of %d\n",
+                       st->nflwdth_reg);
+               return -EINVAL;
+       }
+
        indio_dev->dev.parent = &spi->dev;
        indio_dev->name = spi_get_device_id(spi)->name;
        indio_dev->channels = as3935_channels;
@@ -384,6 +420,7 @@ static int as3935_probe(struct spi_device *spi)
                return -ENOMEM;
 
        st->trig = trig;
+       st->noise_tripped = jiffies - HZ;
        trig->dev.parent = indio_dev->dev.parent;
        iio_trigger_set_drvdata(trig, indio_dev);
        trig->ops = &iio_interrupt_trigger_ops;
index 9b9053494daf5ef2dd601db958a3c7177dcf3a27..eb212f8c88793b2c1029f087b40733039b5d9f96 100644 (file)
@@ -174,6 +174,7 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv)
                clk_disable(priv->clk);
 
        /* Stop timer */
+       regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
        regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
        regmap_write(priv->regmap, TIM_PSC, 0);
        regmap_write(priv->regmap, TIM_ARR, 0);
@@ -715,8 +716,9 @@ static ssize_t stm32_count_set_preset(struct iio_dev *indio_dev,
        if (ret)
                return ret;
 
+       /* TIMx_ARR register shouldn't be buffered (ARPE=0) */
+       regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
        regmap_write(priv->regmap, TIM_ARR, preset);
-       regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE);
 
        return len;
 }
index 30825bb9b8e9247800b3aaa6084c603383d45bbf..8861c052155ab72e0062be5e1c03d1723705f0c2 100644 (file)
@@ -100,6 +100,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
        if (ret)
                goto pid_query_error;
 
+       nlmsg_end(skb, nlh);
+
        pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
                __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
 
@@ -170,6 +172,8 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
                                &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR);
        if (ret)
                goto add_mapping_error;
+
+       nlmsg_end(skb, nlh);
        nlmsg_request->req_buffer = pm_msg;
 
        ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -246,6 +250,8 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
                                &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR);
        if (ret)
                goto query_mapping_error;
+
+       nlmsg_end(skb, nlh);
        nlmsg_request->req_buffer = pm_msg;
 
        ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -308,6 +314,8 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
        if (ret)
                goto remove_mapping_error;
 
+       nlmsg_end(skb, nlh);
+
        ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
        if (ret) {
                skb = NULL; /* skb is freed in the netlink send-op handling */
index c81c5594262621770b7f55f433cdd0340f153396..3c4faadb8cddd7fdf6611df3674811664b1eb890 100644 (file)
@@ -597,6 +597,9 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
                                &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
        if (ret)
                goto mapinfo_num_error;
+
+       nlmsg_end(skb, nlh);
+
        ret = rdma_nl_unicast(skb, iwpm_pid);
        if (ret) {
                skb = NULL;
@@ -678,6 +681,8 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
                        if (ret)
                                goto send_mapping_info_unlock;
 
+                       nlmsg_end(skb, nlh);
+
                        iwpm_print_sockaddr(&map_info->local_sockaddr,
                                "send_mapping_info: Local sockaddr:");
                        iwpm_print_sockaddr(&map_info->mapped_sockaddr,
index b12e58787c3ddc9f87d80e500392740336bfa362..1fb72c356e36ccc77fc3d97e384a238d9fa6d2f9 100644 (file)
@@ -175,13 +175,24 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
            !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
+       /*
+        * LS responses overload the 0x100 (NLM_F_ROOT) flag.  Don't
+        * mistakenly call the .dump() function.
+        */
+       if (index == RDMA_NL_LS) {
+               if (cb_table[op].doit)
+                       return cb_table[op].doit(skb, nlh, extack);
+               return -EINVAL;
+       }
        /* FIXME: Convert IWCM to properly handle doit callbacks */
        if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM ||
            index == RDMA_NL_IWCM) {
                struct netlink_dump_control c = {
                        .dump = cb_table[op].dump,
                };
-               return netlink_dump_start(nls, skb, nlh, &c);
+               if (c.dump)
+                       return netlink_dump_start(nls, skb, nlh, &c);
+               return -EINVAL;
        }
 
        if (cb_table[op].doit)
index 70ad19c4c73e77da961ffccd9958887dcead8027..88bdafb297f5fe9f722c7a8edee27554ea3b6862 100644 (file)
@@ -432,8 +432,10 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
        atomic_set(&qp->qp_sec->error_list_count, 0);
        init_completion(&qp->qp_sec->error_complete);
        ret = security_ib_alloc_security(&qp->qp_sec->security);
-       if (ret)
+       if (ret) {
                kfree(qp->qp_sec);
+               qp->qp_sec = NULL;
+       }
 
        return ret;
 }
index 4ab30d832ac5b8a5ac4994b9c4f0b3f3802e019c..52a2cf2d83aaf483944ad9720e55121f2adb6484 100644 (file)
@@ -3869,15 +3869,15 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
        resp.raw_packet_caps = attr.raw_packet_caps;
        resp.response_length += sizeof(resp.raw_packet_caps);
 
-       if (ucore->outlen < resp.response_length + sizeof(resp.xrq_caps))
+       if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
                goto end;
 
-       resp.xrq_caps.max_rndv_hdr_size = attr.xrq_caps.max_rndv_hdr_size;
-       resp.xrq_caps.max_num_tags      = attr.xrq_caps.max_num_tags;
-       resp.xrq_caps.max_ops           = attr.xrq_caps.max_ops;
-       resp.xrq_caps.max_sge           = attr.xrq_caps.max_sge;
-       resp.xrq_caps.flags             = attr.xrq_caps.flags;
-       resp.response_length += sizeof(resp.xrq_caps);
+       resp.tm_caps.max_rndv_hdr_size  = attr.tm_caps.max_rndv_hdr_size;
+       resp.tm_caps.max_num_tags       = attr.tm_caps.max_num_tags;
+       resp.tm_caps.max_ops            = attr.tm_caps.max_ops;
+       resp.tm_caps.max_sge            = attr.tm_caps.max_sge;
+       resp.tm_caps.flags              = attr.tm_caps.flags;
+       resp.response_length += sizeof(resp.tm_caps);
 end:
        err = ib_copy_to_udata(ucore, &resp, resp.response_length);
        return err;
index ee9e27dc799b50ead4663cd5fd13c94a3ac020d4..de57d6c11a25428d96e8bcb4868fc19794cc374d 100644 (file)
@@ -1646,7 +1646,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
         */
        if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
                if (attr.qp_state >= IB_QPS_INIT) {
-                       if (qp->device->get_link_layer(qp->device, attr.port_num) !=
+                       if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
                            IB_LINK_LAYER_INFINIBAND)
                                return true;
                        goto lid_check;
@@ -1655,7 +1655,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
 
        /* Can't get a quick answer, iterate over all ports */
        for (port = 0; port < qp->device->phys_port_cnt; port++)
-               if (qp->device->get_link_layer(qp->device, port) !=
+               if (rdma_port_get_link_layer(qp->device, port) !=
                    IB_LINK_LAYER_INFINIBAND)
                        num_eth_ports++;
 
index b3ad37fec578be497cc55e3dc48552e62d71c595..ecbac91b2e1441acf4529d6d40fa4256892349c0 100644 (file)
@@ -93,11 +93,13 @@ struct bnxt_re_dev {
        struct ib_device                ibdev;
        struct list_head                list;
        unsigned long                   flags;
-#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
-#define BNXT_RE_FLAG_IBDEV_REGISTERED  1
-#define BNXT_RE_FLAG_GOT_MSIX          2
-#define BNXT_RE_FLAG_RCFW_CHANNEL_EN   8
-#define BNXT_RE_FLAG_QOS_WORK_REG      16
+#define BNXT_RE_FLAG_NETDEV_REGISTERED         0
+#define BNXT_RE_FLAG_IBDEV_REGISTERED          1
+#define BNXT_RE_FLAG_GOT_MSIX                  2
+#define BNXT_RE_FLAG_HAVE_L2_REF               3
+#define BNXT_RE_FLAG_RCFW_CHANNEL_EN           4
+#define BNXT_RE_FLAG_QOS_WORK_REG              5
+#define BNXT_RE_FLAG_TASK_IN_PROG              6
        struct net_device               *netdev;
        unsigned int                    version, major, minor;
        struct bnxt_en_dev              *en_dev;
@@ -108,6 +110,8 @@ struct bnxt_re_dev {
 
        struct delayed_work             worker;
        u8                              cur_prio_map;
+       u8                              active_speed;
+       u8                              active_width;
 
        /* FP Notification Queue (CQ & SRQ) */
        struct tasklet_struct           nq_task;
index 01eee15bbd6598fa6ae103aa1b86a81023e67f1d..0d89621d9fe8ed81b0d28c031d48ffcacb31fdfd 100644 (file)
@@ -259,14 +259,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
        port_attr->sm_sl = 0;
        port_attr->subnet_timeout = 0;
        port_attr->init_type_reply = 0;
-       /* call the underlying netdev's ethtool hooks to query speed settings
-        * for which we acquire rtnl_lock _only_ if it's registered with
-        * IB stack to avoid race in the NETDEV_UNREG path
-        */
-       if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
-               if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed,
-                                    &port_attr->active_width))
-                       return -EINVAL;
+       port_attr->active_speed = rdev->active_speed;
+       port_attr->active_width = rdev->active_width;
+
        return 0;
 }
 
@@ -319,6 +314,7 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
        struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+       struct bnxt_qplib_gid *gid_to_del;
 
        /* Delete the entry from the hardware */
        ctx = *context;
@@ -328,11 +324,25 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
        if (sgid_tbl && sgid_tbl->active) {
                if (ctx->idx >= sgid_tbl->max)
                        return -EINVAL;
+               gid_to_del = &sgid_tbl->tbl[ctx->idx];
+               /* DEL_GID is called in WQ context(netdevice_event_work_handler)
+                * or via the ib_unregister_device path. In the former case QP1
+                * may not be destroyed yet, in which case just return as FW
+                * needs that entry to be present and will fail it's deletion.
+                * We could get invoked again after QP1 is destroyed OR get an
+                * ADD_GID call with a different GID value for the same index
+                * where we issue MODIFY_GID cmd to update the GID entry -- TBD
+                */
+               if (ctx->idx == 0 &&
+                   rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
+                   ctx->refcnt == 1 && rdev->qp1_sqp) {
+                       dev_dbg(rdev_to_dev(rdev),
+                               "Trying to delete GID0 while QP1 is alive\n");
+                       return -EFAULT;
+               }
                ctx->refcnt--;
                if (!ctx->refcnt) {
-                       rc = bnxt_qplib_del_sgid(sgid_tbl,
-                                                &sgid_tbl->tbl[ctx->idx],
-                                                true);
+                       rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
                        if (rc) {
                                dev_err(rdev_to_dev(rdev),
                                        "Failed to remove GID: %#x", rc);
@@ -816,6 +826,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
 
                kfree(rdev->sqp_ah);
                kfree(rdev->qp1_sqp);
+               rdev->qp1_sqp = NULL;
+               rdev->sqp_ah = NULL;
        }
 
        if (!IS_ERR_OR_NULL(qp->rumem))
@@ -1436,11 +1448,14 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
                qp->qplib_qp.modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
                qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
+               qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
        } else if (qp_attr->qp_state == IB_QPS_RTR) {
                qp->qplib_qp.modify_flags |=
                        CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
                qp->qplib_qp.path_mtu =
                        __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
+               qp->qplib_qp.mtu =
+                       ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
        }
 
        if (qp_attr_mask & IB_QP_TIMEOUT) {
@@ -1551,43 +1566,46 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 {
        struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
        struct bnxt_re_dev *rdev = qp->rdev;
-       struct bnxt_qplib_qp qplib_qp;
+       struct bnxt_qplib_qp *qplib_qp;
        int rc;
 
-       memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp));
-       qplib_qp.id = qp->qplib_qp.id;
-       qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
+       qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
+       if (!qplib_qp)
+               return -ENOMEM;
+
+       qplib_qp->id = qp->qplib_qp.id;
+       qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
 
-       rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp);
+       rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
-               return rc;
+               goto out;
        }
-       qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state);
-       qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0;
-       qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access);
-       qp_attr->pkey_index = qplib_qp.pkey_index;
-       qp_attr->qkey = qplib_qp.qkey;
+       qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
+       qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
+       qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
+       qp_attr->pkey_index = qplib_qp->pkey_index;
+       qp_attr->qkey = qplib_qp->qkey;
        qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
-       rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label,
-                       qplib_qp.ah.host_sgid_index,
-                       qplib_qp.ah.hop_limit,
-                       qplib_qp.ah.traffic_class);
-       rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data);
-       rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl);
-       ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac);
-       qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu);
-       qp_attr->timeout = qplib_qp.timeout;
-       qp_attr->retry_cnt = qplib_qp.retry_cnt;
-       qp_attr->rnr_retry = qplib_qp.rnr_retry;
-       qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer;
-       qp_attr->rq_psn = qplib_qp.rq.psn;
-       qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic;
-       qp_attr->sq_psn = qplib_qp.sq.psn;
-       qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic;
-       qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR :
-                                                       IB_SIGNAL_REQ_WR;
-       qp_attr->dest_qp_num = qplib_qp.dest_qpn;
+       rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
+                       qplib_qp->ah.host_sgid_index,
+                       qplib_qp->ah.hop_limit,
+                       qplib_qp->ah.traffic_class);
+       rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
+       rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
+       ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
+       qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
+       qp_attr->timeout = qplib_qp->timeout;
+       qp_attr->retry_cnt = qplib_qp->retry_cnt;
+       qp_attr->rnr_retry = qplib_qp->rnr_retry;
+       qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
+       qp_attr->rq_psn = qplib_qp->rq.psn;
+       qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
+       qp_attr->sq_psn = qplib_qp->sq.psn;
+       qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
+       qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
+                                                        IB_SIGNAL_REQ_WR;
+       qp_attr->dest_qp_num = qplib_qp->dest_qpn;
 
        qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
        qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
@@ -1596,7 +1614,9 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
        qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
        qp_init_attr->cap = qp_attr->cap;
 
-       return 0;
+out:
+       kfree(qplib_qp);
+       return rc;
 }
 
 /* Routine for sending QP1 packets for RoCE V1 an V2
@@ -1908,6 +1928,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
        switch (wr->opcode) {
        case IB_WR_ATOMIC_CMP_AND_SWP:
                wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
+               wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
                wqe->atomic.swap_data = atomic_wr(wr)->swap;
                break;
        case IB_WR_ATOMIC_FETCH_AND_ADD:
@@ -3062,7 +3083,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
                return rc;
        }
 
-       if (mr->npages && mr->pages) {
+       if (mr->pages) {
                rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
                                                        &mr->qplib_frpl);
                kfree(mr->pages);
index 82d1cbc27aeec80af4ac927227af347eba6f9714..e7450ea92aa9e11ba0d28792f54fcf373e470a66 100644 (file)
@@ -1161,6 +1161,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
                }
        }
        set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
+       ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
+                        &rdev->active_width);
        bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
        bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
 
@@ -1255,10 +1257,14 @@ static void bnxt_re_task(struct work_struct *work)
                else if (netif_carrier_ok(rdev->netdev))
                        bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
                                               IB_EVENT_PORT_ACTIVE);
+               ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
+                                &rdev->active_width);
                break;
        default:
                break;
        }
+       smp_mb__before_atomic();
+       clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
        kfree(re_work);
 }
 
@@ -1317,6 +1323,11 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
                break;
 
        case NETDEV_UNREGISTER:
+               /* netdev notifier will call NETDEV_UNREGISTER again later since
+                * we are still holding the reference to the netdev
+                */
+               if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags))
+                       goto exit;
                bnxt_re_ib_unreg(rdev, false);
                bnxt_re_remove_one(rdev);
                bnxt_re_dev_unreg(rdev);
@@ -1335,6 +1346,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
                        re_work->vlan_dev = (real_dev == netdev ?
                                             NULL : netdev);
                        INIT_WORK(&re_work->work, bnxt_re_task);
+                       set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
                        queue_work(bnxt_re_wq, &re_work->work);
                }
        }
@@ -1375,6 +1387,22 @@ err_netdev:
 
 static void __exit bnxt_re_mod_exit(void)
 {
+       struct bnxt_re_dev *rdev;
+       LIST_HEAD(to_be_deleted);
+
+       mutex_lock(&bnxt_re_dev_lock);
+       /* Free all adapter allocated resources */
+       if (!list_empty(&bnxt_re_dev_list))
+               list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
+       mutex_unlock(&bnxt_re_dev_lock);
+
+       list_for_each_entry(rdev, &to_be_deleted, list) {
+               dev_info(rdev_to_dev(rdev), "Unregistering Device");
+               bnxt_re_dev_stop(rdev);
+               bnxt_re_ib_unreg(rdev, true);
+               bnxt_re_remove_one(rdev);
+               bnxt_re_dev_unreg(rdev);
+       }
        unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
        if (bnxt_re_wq)
                destroy_workqueue(bnxt_re_wq);
index 391bb7006e8ffb5fe42175e662c3b80c87d74f35..2bdb1562bd2197e850f14bcc353d6ee12c3271c4 100644 (file)
@@ -107,6 +107,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
                return -EINVAL;
        }
 
+       if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
+               return -ETIMEDOUT;
+
        /* Cmdq are in 16-byte units, each request can consume 1 or more
         * cmdqe
         */
@@ -226,6 +229,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
                /* timed out */
                dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
                        cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
+               set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
                return rc;
        }
 
index 0ed312f17c8de8025feea00fdd47feb063d65aa5..85b16da287f99edfee396fcdaa11ccef1c7c8d3b 100644 (file)
@@ -162,8 +162,9 @@ struct bnxt_qplib_rcfw {
        unsigned long           *cmdq_bitmap;
        u32                     bmap_size;
        unsigned long           flags;
-#define FIRMWARE_INITIALIZED_FLAG      1
+#define FIRMWARE_INITIALIZED_FLAG      BIT(0)
 #define FIRMWARE_FIRST_FLAG            BIT(31)
+#define FIRMWARE_TIMED_OUT             BIT(3)
        wait_queue_head_t       waitq;
        int                     (*aeq_handler)(struct bnxt_qplib_rcfw *,
                                               struct creq_func_event *);
index ceaa2fa54d322d2c9368576602a8d47e448527d0..daf7a56e5d7ebe4a7b6dffd93245d5a1655b3805 100644 (file)
@@ -2333,9 +2333,14 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
        unsigned int stid = GET_TID(rpl);
        struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
 
+       if (!ep) {
+               pr_debug("%s stid %d lookup failure!\n", __func__, stid);
+               goto out;
+       }
        pr_debug("%s ep %p\n", __func__, ep);
        c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
        c4iw_put_ep(&ep->com);
+out:
        return 0;
 }
 
@@ -2594,9 +2599,9 @@ fail:
        c4iw_put_ep(&child_ep->com);
 reject:
        reject_cr(dev, hwtid, skb);
+out:
        if (parent_ep)
                c4iw_put_ep(&parent_ep->com);
-out:
        return 0;
 }
 
@@ -3457,7 +3462,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
                cm_id->provider_data = ep;
                goto out;
        }
-
+       remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
        cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
                        ep->com.local_addr.ss_family);
 fail2:
index b2ed4b9cda6eef62f81b5ac22b71b4cc3a5a2d10..0be42787759fa78c73d0e0d9776209c2362a3ddc 100644 (file)
@@ -1066,6 +1066,8 @@ static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
 static int thermal_init(struct hfi1_devdata *dd);
 
 static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
+static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
+                                           int msecs);
 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
                                  int msecs);
 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
@@ -8238,6 +8240,7 @@ static irqreturn_t general_interrupt(int irq, void *data)
        u64 regs[CCE_NUM_INT_CSRS];
        u32 bit;
        int i;
+       irqreturn_t handled = IRQ_NONE;
 
        this_cpu_inc(*dd->int_counter);
 
@@ -8258,9 +8261,10 @@ static irqreturn_t general_interrupt(int irq, void *data)
        for_each_set_bit(bit, (unsigned long *)&regs[0],
                         CCE_NUM_INT_CSRS * 64) {
                is_interrupt(dd, bit);
+               handled = IRQ_HANDLED;
        }
 
-       return IRQ_HANDLED;
+       return handled;
 }
 
 static irqreturn_t sdma_interrupt(int irq, void *data)
@@ -9413,7 +9417,7 @@ static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
 }
 
-void reset_qsfp(struct hfi1_pportdata *ppd)
+int reset_qsfp(struct hfi1_pportdata *ppd)
 {
        struct hfi1_devdata *dd = ppd->dd;
        u64 mask, qsfp_mask;
@@ -9443,6 +9447,13 @@ void reset_qsfp(struct hfi1_pportdata *ppd)
         * for alarms and warnings
         */
        set_qsfp_int_n(ppd, 1);
+
+       /*
+        * After the reset, AOC transmitters are enabled by default. They need
+        * to be turned off to complete the QSFP setup before they can be
+        * enabled again.
+        */
+       return set_qsfp_tx(ppd, 0);
 }
 
 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
@@ -10305,6 +10316,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
 {
        struct hfi1_devdata *dd = ppd->dd;
        u32 previous_state;
+       int offline_state_ret;
        int ret;
 
        update_lcb_cache(dd);
@@ -10326,28 +10338,11 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
                ppd->offline_disabled_reason =
                HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
 
-       /*
-        * Wait for offline transition. It can take a while for
-        * the link to go down.
-        */
-       ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
-       if (ret < 0)
-               return ret;
-
-       /*
-        * Now in charge of LCB - must be after the physical state is
-        * offline.quiet and before host_link_state is changed.
-        */
-       set_host_lcb_access(dd);
-       write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
-
-       /* make sure the logical state is also down */
-       ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
-       if (ret)
-               force_logical_link_state_down(ppd);
-
-       ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+       offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
+       if (offline_state_ret < 0)
+               return offline_state_ret;
 
+       /* Disabling AOC transmitters */
        if (ppd->port_type == PORT_TYPE_QSFP &&
            ppd->qsfp_info.limiting_active &&
            qsfp_mod_present(ppd)) {
@@ -10364,6 +10359,30 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
                }
        }
 
+       /*
+        * Wait for the offline.Quiet transition if it hasn't happened yet. It
+        * can take a while for the link to go down.
+        */
+       if (offline_state_ret != PLS_OFFLINE_QUIET) {
+               ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
+               if (ret < 0)
+                       return ret;
+       }
+
+       /*
+        * Now in charge of LCB - must be after the physical state is
+        * offline.quiet and before host_link_state is changed.
+        */
+       set_host_lcb_access(dd);
+       write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
+
+       /* make sure the logical state is also down */
+       ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
+       if (ret)
+               force_logical_link_state_down(ppd);
+
+       ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+
        /*
         * The LNI has a mandatory wait time after the physical state
         * moves to Offline.Quiet.  The wait time may be different
@@ -10396,6 +10415,9 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
                        & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
                /* went down while attempting link up */
                check_lni_states(ppd);
+
+               /* The QSFP doesn't need to be reset on LNI failure */
+               ppd->qsfp_info.reset_needed = 0;
        }
 
        /* the active link width (downgrade) is 0 on link down */
@@ -12804,6 +12826,39 @@ static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
        return 0;
 }
 
+/*
+ * wait_phys_link_offline_quiet_substates - wait for any offline substate
+ * @ppd: port device
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for any offline physical link
+ * state change to occur.
+ * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
+ */
+static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
+                                           int msecs)
+{
+       u32 read_state;
+       unsigned long timeout;
+
+       timeout = jiffies + msecs_to_jiffies(msecs);
+       while (1) {
+               read_state = read_physical_state(ppd->dd);
+               if ((read_state & 0xF0) == PLS_OFFLINE)
+                       break;
+               if (time_after(jiffies, timeout)) {
+                       dd_dev_err(ppd->dd,
+                                  "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
+                                  read_state, msecs);
+                       return -ETIMEDOUT;
+               }
+               usleep_range(1950, 2050); /* sleep 2ms-ish */
+       }
+
+       log_state_transition(ppd, read_state);
+       return read_state;
+}
+
 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
 
index b8345a60a0fbc452215dba3c679daa6785fb4e4a..50b8645d0b876dbf6d58f56d677e3cfcc7984e55 100644 (file)
 #define PLS_OFFLINE_READY_TO_QUIET_LT     0x92
 #define PLS_OFFLINE_REPORT_FAILURE                0x93
 #define PLS_OFFLINE_READY_TO_QUIET_BCC    0x94
+#define PLS_OFFLINE_QUIET_DURATION        0x95
 #define PLS_POLLING                               0x20
 #define PLS_POLLING_QUIET                         0x20
 #define PLS_POLLING_ACTIVE                        0x21
@@ -722,7 +723,7 @@ void handle_link_downgrade(struct work_struct *work);
 void handle_link_bounce(struct work_struct *work);
 void handle_start_link(struct work_struct *work);
 void handle_sma_message(struct work_struct *work);
-void reset_qsfp(struct hfi1_pportdata *ppd);
+int reset_qsfp(struct hfi1_pportdata *ppd);
 void qsfp_event(struct work_struct *work);
 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags);
 int send_idle_sma(struct hfi1_devdata *dd, u64 message);
index d46b171079010d2cf7e42dd7557f0abc2bef1348..1613af1c58d9ddaafca831b1282a6462fdc10b66 100644 (file)
@@ -204,7 +204,10 @@ done_asic:
        return ret;
 }
 
-/* magic character sequence that trails an image */
+/* magic character sequence that begins an image */
+#define IMAGE_START_MAGIC "APO="
+
+/* magic character sequence that might trail an image */
 #define IMAGE_TRAIL_MAGIC "egamiAPO"
 
 /* EPROM file types */
@@ -250,6 +253,7 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
 {
        void *buffer;
        void *p;
+       u32 length;
        int ret;
 
        buffer = kmalloc(P1_SIZE, GFP_KERNEL);
@@ -262,15 +266,21 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
                return ret;
        }
 
-       /* scan for image magic that may trail the actual data */
-       p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
-       if (!p) {
+       /* config partition is valid only if it starts with IMAGE_START_MAGIC */
+       if (memcmp(buffer, IMAGE_START_MAGIC, strlen(IMAGE_START_MAGIC))) {
                kfree(buffer);
                return -ENOENT;
        }
 
+       /* scan for image magic that may trail the actual data */
+       p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
+       if (p)
+               length = p - buffer;
+       else
+               length = P1_SIZE;
+
        *data = buffer;
-       *size = p - buffer;
+       *size = length;
        return 0;
 }
 
index 2bc89260235a1db1c744931cde7ec07fd3202495..d9a1e989313641b06f32ffdd4677ce8aa7e32802 100644 (file)
@@ -930,15 +930,8 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
        switch (ret) {
        case 0:
                ret = setup_base_ctxt(fd, uctxt);
-               if (uctxt->subctxt_cnt) {
-                       /*
-                        * Base context is done (successfully or not), notify
-                        * anybody using a sub-context that is waiting for
-                        * this completion.
-                        */
-                       clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
-                       wake_up(&uctxt->wait);
-               }
+               if (ret)
+                       deallocate_ctxt(uctxt);
                break;
        case 1:
                ret = complete_subctxt(fd);
@@ -1305,25 +1298,25 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
        /* Now allocate the RcvHdr queue and eager buffers. */
        ret = hfi1_create_rcvhdrq(dd, uctxt);
        if (ret)
-               return ret;
+               goto done;
 
        ret = hfi1_setup_eagerbufs(uctxt);
        if (ret)
-               goto setup_failed;
+               goto done;
 
        /* If sub-contexts are enabled, do the appropriate setup */
        if (uctxt->subctxt_cnt)
                ret = setup_subctxt(uctxt);
        if (ret)
-               goto setup_failed;
+               goto done;
 
        ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
        if (ret)
-               goto setup_failed;
+               goto done;
 
        ret = init_user_ctxt(fd, uctxt);
        if (ret)
-               goto setup_failed;
+               goto done;
 
        user_init(uctxt);
 
@@ -1331,12 +1324,22 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
        fd->uctxt = uctxt;
        hfi1_rcd_get(uctxt);
 
-       return 0;
+done:
+       if (uctxt->subctxt_cnt) {
+               /*
+                * On error, set the failed bit so sub-contexts will clean up
+                * correctly.
+                */
+               if (ret)
+                       set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
 
-setup_failed:
-       /* Set the failed bit so sub-context init can do the right thing */
-       set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
-       deallocate_ctxt(uctxt);
+               /*
+                * Base context is done (successfully or not), notify anybody
+                * using a sub-context that is waiting for this completion.
+                */
+               clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
+               wake_up(&uctxt->wait);
+       }
 
        return ret;
 }
index 82447b7cdda1e958d7e7c42c3447ec4a72c1fb4a..09e50fd2a08f07bf7b2d42d3d4b4a1a00644b2ce 100644 (file)
@@ -68,7 +68,7 @@
 /*
  * Code to adjust PCIe capabilities.
  */
-static int tune_pcie_caps(struct hfi1_devdata *);
+static void tune_pcie_caps(struct hfi1_devdata *);
 
 /*
  * Do all the common PCIe setup and initialization.
@@ -351,7 +351,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
  */
 int request_msix(struct hfi1_devdata *dd, u32 msireq)
 {
-       int nvec, ret;
+       int nvec;
 
        nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq,
                                     PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
@@ -360,12 +360,7 @@ int request_msix(struct hfi1_devdata *dd, u32 msireq)
                return nvec;
        }
 
-       ret = tune_pcie_caps(dd);
-       if (ret) {
-               dd_dev_err(dd, "tune_pcie_caps() failed: %d\n", ret);
-               pci_free_irq_vectors(dd->pcidev);
-               return ret;
-       }
+       tune_pcie_caps(dd);
 
        /* check for legacy IRQ */
        if (nvec == 1 && !dd->pcidev->msix_enabled)
@@ -502,7 +497,7 @@ uint aspm_mode = ASPM_MODE_DISABLED;
 module_param_named(aspm, aspm_mode, uint, S_IRUGO);
 MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
 
-static int tune_pcie_caps(struct hfi1_devdata *dd)
+static void tune_pcie_caps(struct hfi1_devdata *dd)
 {
        struct pci_dev *parent;
        u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
@@ -513,22 +508,14 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
         * Turn on extended tags in DevCtl in case the BIOS has turned it off
         * to improve WFR SDMA bandwidth
         */
-       ret = pcie_capability_read_word(dd->pcidev,
-                                       PCI_EXP_DEVCTL, &ectl);
-       if (ret) {
-               dd_dev_err(dd, "Unable to read from PCI config\n");
-               return ret;
-       }
-
-       if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
+       ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
+       if (!ret && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
                dd_dev_info(dd, "Enabling PCIe extended tags\n");
                ectl |= PCI_EXP_DEVCTL_EXT_TAG;
                ret = pcie_capability_write_word(dd->pcidev,
                                                 PCI_EXP_DEVCTL, ectl);
-               if (ret) {
-                       dd_dev_err(dd, "Unable to write to PCI config\n");
-                       return ret;
-               }
+               if (ret)
+                       dd_dev_info(dd, "Unable to write to PCI config\n");
        }
        /* Find out supported and configured values for parent (root) */
        parent = dd->pcidev->bus->self;
@@ -536,15 +523,22 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
         * The driver cannot perform the tuning if it does not have
         * access to the upstream component.
         */
-       if (!parent)
-               return -EINVAL;
+       if (!parent) {
+               dd_dev_info(dd, "Parent not found\n");
+               return;
+       }
        if (!pci_is_root_bus(parent->bus)) {
                dd_dev_info(dd, "Parent not root\n");
-               return -EINVAL;
+               return;
+       }
+       if (!pci_is_pcie(parent)) {
+               dd_dev_info(dd, "Parent is not PCI Express capable\n");
+               return;
+       }
+       if (!pci_is_pcie(dd->pcidev)) {
+               dd_dev_info(dd, "PCI device is not PCI Express capable\n");
+               return;
        }
-
-       if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
-               return -EINVAL;
        rc_mpss = parent->pcie_mpss;
        rc_mps = ffs(pcie_get_mps(parent)) - 8;
        /* Find out supported and configured values for endpoint (us) */
@@ -590,8 +584,6 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
                ep_mrrs = max_mrrs;
                pcie_set_readrq(dd->pcidev, ep_mrrs);
        }
-
-       return 0;
 }
 
 /* End of PCIe capability tuning */
index a8af96d2b1b0ae0dc5e98ae08e3b5c9b5faa50e9..d486355880cb0da37e23755e8ed49197fed90fc8 100644 (file)
@@ -790,7 +790,9 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
         * reuse of stale settings established in our previous pass through.
         */
        if (ppd->qsfp_info.reset_needed) {
-               reset_qsfp(ppd);
+               ret = reset_qsfp(ppd);
+               if (ret)
+                       return ret;
                refresh_qsfp_cache(ppd, &ppd->qsfp_info);
        } else {
                ppd->qsfp_info.reset_needed = 1;
index 9b1566468744ed81a922188f220e7ef64629273b..a65e4cbdce2f6ad336d58a226e84ac35fc7f7eda 100644 (file)
@@ -201,7 +201,6 @@ enum init_completion_state {
        CEQ_CREATED,
        ILQ_CREATED,
        IEQ_CREATED,
-       INET_NOTIFIER,
        IP_ADDR_REGISTERED,
        RDMA_DEV_REGISTERED
 };
index 14f36ba4e5bebf8e35970c9cc5b6030c95ffa213..5230dd3c938c2c506bc590afd8ef054d300ac480 100644 (file)
@@ -1504,23 +1504,40 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
 }
 
 /**
- * listen_port_in_use - determine if port is in use
- * @port: Listen port number
+ * i40iw_port_in_use - determine if port is in use
+ * @port: port number
+ * @active_side: flag for listener side vs active side
  */
-static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
+static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool active_side)
 {
        struct i40iw_cm_listener *listen_node;
+       struct i40iw_cm_node *cm_node;
        unsigned long flags;
        bool ret = false;
 
-       spin_lock_irqsave(&cm_core->listen_list_lock, flags);
-       list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
-               if (listen_node->loc_port == port) {
-                       ret = true;
-                       break;
+       if (active_side) {
+               /* search connected node list */
+               spin_lock_irqsave(&cm_core->ht_lock, flags);
+               list_for_each_entry(cm_node, &cm_core->connected_nodes, list) {
+                       if (cm_node->loc_port == port) {
+                               ret = true;
+                               break;
+                       }
+               }
+               if (!ret)
+                       clear_bit(port, cm_core->active_side_ports);
+               spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+       } else {
+               spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+               list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+                       if (listen_node->loc_port == port) {
+                               ret = true;
+                               break;
+                       }
                }
+               spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
        }
-       spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
        return ret;
 }
 
@@ -1868,7 +1885,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
                spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
 
                if (listener->iwdev) {
-                       if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port))
+                       if (apbvt_del && !i40iw_port_in_use(cm_core, listener->loc_port, false))
                                i40iw_manage_apbvt(listener->iwdev,
                                                   listener->loc_port,
                                                   I40IW_MANAGE_APBVT_DEL);
@@ -2247,21 +2264,21 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
        if (cm_node->listener) {
                i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
        } else {
-               if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
-                   cm_node->apbvt_set) {
+               if (!i40iw_port_in_use(cm_core, cm_node->loc_port, true) && cm_node->apbvt_set) {
                        i40iw_manage_apbvt(cm_node->iwdev,
                                           cm_node->loc_port,
                                           I40IW_MANAGE_APBVT_DEL);
-                       i40iw_get_addr_info(cm_node, &nfo);
-                       if (cm_node->qhash_set) {
-                               i40iw_manage_qhash(cm_node->iwdev,
-                                                  &nfo,
-                                                  I40IW_QHASH_TYPE_TCP_ESTABLISHED,
-                                                  I40IW_QHASH_MANAGE_TYPE_DELETE,
-                                                  NULL,
-                                                  false);
-                               cm_node->qhash_set = 0;
-                       }
+                       cm_node->apbvt_set = 0;
+               }
+               i40iw_get_addr_info(cm_node, &nfo);
+               if (cm_node->qhash_set) {
+                       i40iw_manage_qhash(cm_node->iwdev,
+                                          &nfo,
+                                          I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+                                          I40IW_QHASH_MANAGE_TYPE_DELETE,
+                                          NULL,
+                                          false);
+                       cm_node->qhash_set = 0;
                }
        }
 
@@ -3255,7 +3272,8 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
        tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
        if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
                tcp_info->insert_vlan_tag = true;
-               tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id);
+               tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) |
+                                                 cm_node->vlan_id);
        }
        if (cm_node->ipv4) {
                tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
@@ -3737,10 +3755,8 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        struct sockaddr_in *raddr;
        struct sockaddr_in6 *laddr6;
        struct sockaddr_in6 *raddr6;
-       bool qhash_set = false;
-       int apbvt_set = 0;
-       int err = 0;
-       enum i40iw_status_code status;
+       int ret = 0;
+       unsigned long flags;
 
        ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
        if (!ibqp)
@@ -3789,32 +3805,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        cm_info.user_pri = rt_tos2priority(cm_id->tos);
        i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
                    __func__, cm_id->tos, cm_info.user_pri);
-       if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
-           (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
-                                    raddr6->sin6_addr.in6_u.u6_addr32,
-                                    sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
-               status = i40iw_manage_qhash(iwdev,
-                                           &cm_info,
-                                           I40IW_QHASH_TYPE_TCP_ESTABLISHED,
-                                           I40IW_QHASH_MANAGE_TYPE_ADD,
-                                           NULL,
-                                           true);
-               if (status)
-                       return -EINVAL;
-               qhash_set = true;
-       }
-       status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD);
-       if (status) {
-               i40iw_manage_qhash(iwdev,
-                                  &cm_info,
-                                  I40IW_QHASH_TYPE_TCP_ESTABLISHED,
-                                  I40IW_QHASH_MANAGE_TYPE_DELETE,
-                                  NULL,
-                                  false);
-               return -EINVAL;
-       }
-
-       apbvt_set = 1;
        cm_id->add_ref(cm_id);
        cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
                                       conn_param->private_data_len,
@@ -3822,17 +3812,40 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                                       &cm_info);
 
        if (IS_ERR(cm_node)) {
-               err = PTR_ERR(cm_node);
-               goto err_out;
+               ret = PTR_ERR(cm_node);
+               cm_id->rem_ref(cm_id);
+               return ret;
+       }
+
+       if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
+           (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
+                                    raddr6->sin6_addr.in6_u.u6_addr32,
+                                    sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
+               if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+                                      I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+               cm_node->qhash_set = true;
        }
 
+       spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
+       if (!test_and_set_bit(cm_info.loc_port, iwdev->cm_core.active_side_ports)) {
+               spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+               if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD)) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+       } else {
+               spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+       }
+
+       cm_node->apbvt_set = true;
        i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
        if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
            !cm_node->ord_size)
                cm_node->ord_size = 1;
 
-       cm_node->apbvt_set = apbvt_set;
-       cm_node->qhash_set = qhash_set;
        iwqp->cm_node = cm_node;
        cm_node->iwqp = iwqp;
        iwqp->cm_id = cm_id;
@@ -3840,11 +3853,9 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
        if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
                cm_node->state = I40IW_CM_STATE_SYN_SENT;
-               err = i40iw_send_syn(cm_node, 0);
-               if (err) {
-                       i40iw_rem_ref_cm_node(cm_node);
-                       goto err_out;
-               }
+               ret = i40iw_send_syn(cm_node, 0);
+               if (ret)
+                       goto err;
        }
 
        i40iw_debug(cm_node->dev,
@@ -3853,9 +3864,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                    cm_node->rem_port,
                    cm_node,
                    cm_node->cm_id);
+
        return 0;
 
-err_out:
+err:
        if (cm_info.ipv4)
                i40iw_debug(&iwdev->sc_dev,
                            I40IW_DEBUG_CM,
@@ -3867,22 +3879,10 @@ err_out:
                            "Api - connect() FAILED: dest addr=%pI6",
                            cm_info.rem_addr);
 
-       if (qhash_set)
-               i40iw_manage_qhash(iwdev,
-                                  &cm_info,
-                                  I40IW_QHASH_TYPE_TCP_ESTABLISHED,
-                                  I40IW_QHASH_MANAGE_TYPE_DELETE,
-                                  NULL,
-                                  false);
-
-       if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
-                                                  cm_info.loc_port))
-               i40iw_manage_apbvt(iwdev,
-                                  cm_info.loc_port,
-                                  I40IW_MANAGE_APBVT_DEL);
+       i40iw_rem_ref_cm_node(cm_node);
        cm_id->rem_ref(cm_id);
        iwdev->cm_core.stats_connect_errs++;
-       return err;
+       return ret;
 }
 
 /**
index 2e52e38ffcf37cf5673868f873536907dddc2cf1..45abef76295b24429accdce4e7a49d91dbc33cce 100644 (file)
@@ -71,6 +71,9 @@
 #define        I40IW_HW_IRD_SETTING_32 32
 #define        I40IW_HW_IRD_SETTING_64 64
 
+#define MAX_PORTS              65536
+#define I40IW_VLAN_PRIO_SHIFT   13
+
 enum ietf_mpa_flags {
        IETF_MPA_FLAGS_MARKERS = 0x80,  /* receive Markers */
        IETF_MPA_FLAGS_CRC = 0x40,      /* receive Markers */
@@ -411,6 +414,8 @@ struct i40iw_cm_core {
        spinlock_t ht_lock; /* manage hash table */
        spinlock_t listen_list_lock; /* listen list */
 
+       unsigned long active_side_ports[BITS_TO_LONGS(MAX_PORTS)];
+
        u64     stats_nodes_created;
        u64     stats_nodes_destroyed;
        u64     stats_listen_created;
index d1f5345f04f08eb916539162b024f7b59e9b9a94..42ca5346777ddfc4a5d433daf61146ed428fc424 100644 (file)
@@ -48,7 +48,7 @@
  * @wqe: cqp wqe for header
  * @header: header for the cqp wqe
  */
-static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
+void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
 {
        wmb();            /* make sure WQE is populated before polarity is set */
        set_64bit_val(wqe, 24, header);
index cc742c3132c6f49afa38282a9f694e2fa68385f7..27590ae21881e91508bc0eb88308e97a7693f094 100644 (file)
@@ -99,8 +99,6 @@ static struct notifier_block i40iw_net_notifier = {
        .notifier_call = i40iw_net_event
 };
 
-static atomic_t i40iw_notifiers_registered;
-
 /**
  * i40iw_find_i40e_handler - find a handler given a client info
  * @ldev: pointer to a client info
@@ -1376,11 +1374,20 @@ error:
  */
 static void i40iw_register_notifiers(void)
 {
-       if (atomic_inc_return(&i40iw_notifiers_registered) == 1) {
-               register_inetaddr_notifier(&i40iw_inetaddr_notifier);
-               register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
-               register_netevent_notifier(&i40iw_net_notifier);
-       }
+       register_inetaddr_notifier(&i40iw_inetaddr_notifier);
+       register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
+       register_netevent_notifier(&i40iw_net_notifier);
+}
+
+/**
+ * i40iw_unregister_notifiers - unregister tcp ip notifiers
+ */
+
+static void i40iw_unregister_notifiers(void)
+{
+       unregister_netevent_notifier(&i40iw_net_notifier);
+       unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
+       unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
 }
 
 /**
@@ -1400,6 +1407,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
        u32 i;
        u32 size;
 
+       if (!ldev->msix_count) {
+               i40iw_pr_err("No MSI-X vectors\n");
+               return I40IW_ERR_CONFIG;
+       }
+
        iwdev->msix_count = ldev->msix_count;
 
        size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
@@ -1462,12 +1474,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev)
                if (!iwdev->reset)
                        i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
                /* fallthrough */
-       case INET_NOTIFIER:
-               if (!atomic_dec_return(&i40iw_notifiers_registered)) {
-                       unregister_netevent_notifier(&i40iw_net_notifier);
-                       unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
-                       unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
-               }
                /* fallthrough */
        case PBLE_CHUNK_MEM:
                i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
@@ -1550,7 +1556,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
 
        status = i40iw_save_msix_info(iwdev, ldev);
        if (status)
-               goto exit;
+               return status;
        iwdev->hw.dev_context = (void *)ldev->pcidev;
        iwdev->hw.hw_addr = ldev->hw_addr;
        status = i40iw_allocate_dma_mem(&iwdev->hw,
@@ -1667,8 +1673,6 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
                        break;
                iwdev->init_state = PBLE_CHUNK_MEM;
                iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
-               i40iw_register_notifiers();
-               iwdev->init_state = INET_NOTIFIER;
                status = i40iw_add_mac_ip(iwdev);
                if (status)
                        break;
@@ -2018,6 +2022,8 @@ static int __init i40iw_init_module(void)
        i40iw_client.type = I40E_CLIENT_IWARP;
        spin_lock_init(&i40iw_handler_lock);
        ret = i40e_register_client(&i40iw_client);
+       i40iw_register_notifiers();
+
        return ret;
 }
 
@@ -2029,6 +2035,7 @@ static int __init i40iw_init_module(void)
  */
 static void __exit i40iw_exit_module(void)
 {
+       i40iw_unregister_notifiers();
        i40e_unregister_client(&i40iw_client);
 }
 
index e217a1259f5703ac1ef1ac4d3ed70acd986e2336..5498ad01c280fde7f476778afba3669c72fcff8c 100644 (file)
@@ -59,6 +59,8 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
                                                 struct i40iw_fast_reg_stag_info *info,
                                                 bool post_sq);
 
+void i40iw_insert_wqe_hdr(u64 *wqe, u64 header);
+
 /* HMC/FPM functions */
 enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
                                            u8 hmc_fn_id);
index c2cab20c4bc5d24e258a95bcf7d4f94f8985ced3..59f70676f0e0305ad03567192a340ecb0ef0c7a5 100644 (file)
@@ -123,12 +123,11 @@ static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
                get_64bit_val(wqe, 24, &offset24);
 
        offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
-       set_64bit_val(wqe, 24, offset24);
 
        set_64bit_val(wqe, 0, buf->mem.pa);
        set_64bit_val(wqe, 8,
                      LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
-       set_64bit_val(wqe, 24, offset24);
+       i40iw_insert_wqe_hdr(wqe, offset24);
 }
 
 /**
@@ -409,9 +408,7 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
        set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
        set_64bit_val(wqe, 16, header[0]);
 
-       /* Ensure all data is written before writing valid bit */
-       wmb();
-       set_64bit_val(wqe, 24, header[1]);
+       i40iw_insert_wqe_hdr(wqe, header[1]);
 
        i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
        i40iw_qp_post_wr(&qp->qp_uk);
@@ -539,7 +536,7 @@ static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct
                 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
 
-       set_64bit_val(wqe, 24, header);
+       i40iw_insert_wqe_hdr(wqe, header);
 
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
        i40iw_sc_cqp_post_sq(cqp);
@@ -655,7 +652,7 @@ static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct
            LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
            LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
            LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-       set_64bit_val(wqe, 24, header);
+       i40iw_insert_wqe_hdr(wqe, header);
 
        i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);
index 62f1f45b8737a4716d04d8a340ab5612ef4f1047..e52dbbb4165ec555d94eb5f55f5310c71562b387 100644 (file)
@@ -160,7 +160,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
                return NOTIFY_DONE;
 
        iwdev = &hdl->device;
-       if (iwdev->init_state < INET_NOTIFIER)
+       if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
                return NOTIFY_DONE;
 
        netdev = iwdev->ldev->netdev;
@@ -217,7 +217,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
                return NOTIFY_DONE;
 
        iwdev = &hdl->device;
-       if (iwdev->init_state < INET_NOTIFIER)
+       if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
                return NOTIFY_DONE;
 
        netdev = iwdev->ldev->netdev;
@@ -266,7 +266,7 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
                if (!iwhdl)
                        return NOTIFY_DONE;
                iwdev = &iwhdl->device;
-               if (iwdev->init_state < INET_NOTIFIER)
+               if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
                        return NOTIFY_DONE;
                p = (__be32 *)neigh->primary_key;
                i40iw_copy_ip_ntohl(local_ipaddr, p);
index 1aa411034a272457ba9a0fda16761d808b38f5f8..62be0a41ad0b2ffeccb0b6e73d9a29621e4944f1 100644 (file)
@@ -826,12 +826,14 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
        attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
        attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
        attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+       attr->port_num = 1;
        init_attr->event_handler = iwqp->ibqp.event_handler;
        init_attr->qp_context = iwqp->ibqp.qp_context;
        init_attr->send_cq = iwqp->ibqp.send_cq;
        init_attr->recv_cq = iwqp->ibqp.recv_cq;
        init_attr->srq = iwqp->ibqp.srq;
        init_attr->cap = attr->cap;
+       init_attr->port_num = 1;
        return 0;
 }
 
@@ -1027,7 +1029,19 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                                iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
                                iwqp->last_aeq = I40IW_AE_RESET_SENT;
                                spin_unlock_irqrestore(&iwqp->lock, flags);
+                               i40iw_cm_disconn(iwqp);
                        }
+               } else {
+                       spin_lock_irqsave(&iwqp->lock, flags);
+                       if (iwqp->cm_id) {
+                               if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
+                                       iwqp->cm_id->add_ref(iwqp->cm_id);
+                                       i40iw_schedule_cm_timer(iwqp->cm_node,
+                                                               (struct i40iw_puda_buf *)iwqp,
+                                                                I40IW_TIMER_TYPE_CLOSE, 1, 0);
+                               }
+                       }
+                       spin_unlock_irqrestore(&iwqp->lock, flags);
                }
        }
        return 0;
index ab3c562d5ba7f7fbfa95cb730576a64a2d038e49..552f7bd4ecc38bf17aff093fa037b423c4d0026b 100644 (file)
@@ -778,13 +778,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
        }
 
        if (MLX5_CAP_GEN(mdev, tag_matching)) {
-               props->xrq_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
-               props->xrq_caps.max_num_tags =
+               props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
+               props->tm_caps.max_num_tags =
                        (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
-               props->xrq_caps.flags = IB_TM_CAP_RC;
-               props->xrq_caps.max_ops =
+               props->tm_caps.flags = IB_TM_CAP_RC;
+               props->tm_caps.max_ops =
                        1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
-               props->xrq_caps.max_sge = MLX5_TM_MAX_SGE;
+               props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
        }
 
        if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
@@ -3837,11 +3837,13 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
        if (!dbg)
                return -ENOMEM;
 
+       dev->delay_drop.dbg = dbg;
+
        dbg->dir_debugfs =
                debugfs_create_dir("delay_drop",
                                   dev->mdev->priv.dbg_root);
        if (!dbg->dir_debugfs)
-               return -ENOMEM;
+               goto out_debugfs;
 
        dbg->events_cnt_debugfs =
                debugfs_create_atomic_t("num_timeout_events", 0400,
@@ -3865,8 +3867,6 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
        if (!dbg->timeout_debugfs)
                goto out_debugfs;
 
-       dev->delay_drop.dbg = dbg;
-
        return 0;
 
 out_debugfs:
@@ -4174,9 +4174,9 @@ err_bfreg:
 err_uar_page:
        mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
 
-err_cnt:
-       mlx5_ib_cleanup_cong_debugfs(dev);
 err_cong:
+       mlx5_ib_cleanup_cong_debugfs(dev);
+err_cnt:
        if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
                mlx5_ib_dealloc_counters(dev);
 
index 914f212e7ef60652a655682a2d668c9cec19b64a..f3dbd75a0a968eade1c316e0fec9179e0012730a 100644 (file)
@@ -50,13 +50,9 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 {
        unsigned long tmp;
        unsigned long m;
-       int i, k;
-       u64 base = 0;
-       int p = 0;
-       int skip;
-       int mask;
-       u64 len;
-       u64 pfn;
+       u64 base = ~0, p = 0;
+       u64 len, pfn;
+       int i = 0;
        struct scatterlist *sg;
        int entry;
        unsigned long page_shift = umem->page_shift;
@@ -76,33 +72,24 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
        m = find_first_bit(&tmp, BITS_PER_LONG);
        if (max_page_shift)
                m = min_t(unsigned long, max_page_shift - page_shift, m);
-       skip = 1 << m;
-       mask = skip - 1;
-       i = 0;
+
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                len = sg_dma_len(sg) >> page_shift;
                pfn = sg_dma_address(sg) >> page_shift;
-               for (k = 0; k < len; k++) {
-                       if (!(i & mask)) {
-                               tmp = (unsigned long)pfn;
-                               m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
-                               skip = 1 << m;
-                               mask = skip - 1;
-                               base = pfn;
-                               p = 0;
-                       } else {
-                               if (base + p != pfn) {
-                                       tmp = (unsigned long)p;
-                                       m = find_first_bit(&tmp, BITS_PER_LONG);
-                                       skip = 1 << m;
-                                       mask = skip - 1;
-                                       base = pfn;
-                                       p = 0;
-                               }
-                       }
-                       p++;
-                       i++;
+               if (base + p != pfn) {
+                       /* If either the offset or the new
+                        * base is unaligned, update m
+                        */
+                       tmp = (unsigned long)(pfn | p);
+                       if (!IS_ALIGNED(tmp, 1 << m))
+                               m = find_first_bit(&tmp, BITS_PER_LONG);
+
+                       base = pfn;
+                       p = 0;
                }
+
+               p += len;
+               i += len;
        }
 
        if (i) {
index 0e2789d9bb4d0575561aab569e227125cbc38882..37bbc543847a528f73f613df066fdbb4e36484c1 100644 (file)
@@ -47,7 +47,8 @@ enum {
 
 #define MLX5_UMR_ALIGN 2048
 
-static int clean_mr(struct mlx5_ib_mr *mr);
+static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 
@@ -1270,8 +1271,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
                err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
                                         update_xlt_flags);
+
                if (err) {
-                       mlx5_ib_dereg_mr(&mr->ibmr);
+                       dereg_mr(dev, mr);
                        return ERR_PTR(err);
                }
        }
@@ -1356,7 +1358,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
                                  &npages, &page_shift, &ncont, &order);
                if (err < 0) {
-                       clean_mr(mr);
+                       clean_mr(dev, mr);
                        return err;
                }
        }
@@ -1410,7 +1412,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                if (err) {
                        mlx5_ib_warn(dev, "Failed to rereg UMR\n");
                        ib_umem_release(mr->umem);
-                       clean_mr(mr);
+                       clean_mr(dev, mr);
                        return err;
                }
        }
@@ -1469,9 +1471,8 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
        }
 }
 
-static int clean_mr(struct mlx5_ib_mr *mr)
+static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
-       struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
        int allocated_from_cache = mr->allocated_from_cache;
        int err;
 
@@ -1507,10 +1508,8 @@ static int clean_mr(struct mlx5_ib_mr *mr)
        return 0;
 }
 
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
-       struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
-       struct mlx5_ib_mr *mr = to_mmr(ibmr);
        int npages = mr->npages;
        struct ib_umem *umem = mr->umem;
 
@@ -1539,7 +1538,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
        }
 #endif
 
-       clean_mr(mr);
+       clean_mr(dev, mr);
 
        if (umem) {
                ib_umem_release(umem);
@@ -1549,6 +1548,14 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
        return 0;
 }
 
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+{
+       struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+       struct mlx5_ib_mr *mr = to_mmr(ibmr);
+
+       return dereg_mr(dev, mr);
+}
+
 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
                               enum ib_mr_type mr_type,
                               u32 max_num_sg)
index f0dc5f4aa177e26d622574ec638368fcb7d988cf..442b9bdc0f03bc70d955700e51703b14676b1661 100644 (file)
@@ -3232,7 +3232,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
                                            mr->ibmr.iova);
                        set_wqe_32bit_value(wqe->wqe_words,
                                            NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
-                                           mr->ibmr.length);
+                                           lower_32_bits(mr->ibmr.length));
                        set_wqe_32bit_value(wqe->wqe_words,
                                            NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0);
                        set_wqe_32bit_value(wqe->wqe_words,
@@ -3274,7 +3274,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
                                            mr->npages * 8);
 
                        nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, "
-                                 "length: %d, rkey: %0x, pgl_paddr: %llx, "
+                                 "length: %lld, rkey: %0x, pgl_paddr: %llx, "
                                  "page_list_len: %u, wqe_misc: %x\n",
                                  (unsigned long long) mr->ibmr.iova,
                                  mr->ibmr.length,
index dcb5942f9fb5a830f37e1757dd64cc0d836523b9..65b166cc743748fde5f89153dd96ed1950abb762 100644 (file)
@@ -252,7 +252,10 @@ static int ocrdma_get_mbx_errno(u32 status)
                case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
                        err_num = -EAGAIN;
                        break;
+               default:
+                       err_num = -EFAULT;
                }
+               break;
        default:
                err_num = -EFAULT;
        }
index b2bb42e2805ddfb220b7033620d988e0877f3d6a..254083b524bd90e1557f69b91949b9dca6806223 100644 (file)
@@ -387,7 +387,7 @@ struct qedr_qp {
                u8 wqe_size;
 
                u8 smac[ETH_ALEN];
-               u16 vlan_id;
+               u16 vlan;
                int rc;
        } *rqe_wr_id;
 
index 4689e802b33264bb8b66bacf89cd77ac294d4372..ad8965397cf76a3ae8a1e5d1d36df2f2a33a6676 100644 (file)
@@ -105,7 +105,7 @@ void qedr_ll2_complete_rx_packet(void *cxt,
 
        qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
                -EINVAL : 0;
-       qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
+       qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
        /* note: length stands for data length i.e. GRH is excluded */
        qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
                data->length.data_length;
@@ -694,6 +694,7 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        struct qedr_cq *cq = get_qedr_cq(ibcq);
        struct qedr_qp *qp = dev->gsi_qp;
        unsigned long flags;
+       u16 vlan_id;
        int i = 0;
 
        spin_lock_irqsave(&cq->cq_lock, flags);
@@ -712,9 +713,14 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
                wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
                ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
                wc[i].wc_flags |= IB_WC_WITH_SMAC;
-               if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
+
+               vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
+                         VLAN_VID_MASK;
+               if (vlan_id) {
                        wc[i].wc_flags |= IB_WC_WITH_VLAN;
-                       wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
+                       wc[i].vlan_id = vlan_id;
+                       wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
+                                   VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                }
 
                qedr_inc_sw_cons(&qp->rq);
index 663a0c301c4382d942fc594d1529b53a0936811b..984aa3484928d691db083c2c694aa90632d803d7 100644 (file)
@@ -416,9 +416,34 @@ static inline enum ib_wc_status pvrdma_wc_status_to_ib(
        return (enum ib_wc_status)status;
 }
 
-static inline int pvrdma_wc_opcode_to_ib(int opcode)
-{
-       return opcode;
+static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode)
+{
+       switch (opcode) {
+       case PVRDMA_WC_SEND:
+               return IB_WC_SEND;
+       case PVRDMA_WC_RDMA_WRITE:
+               return IB_WC_RDMA_WRITE;
+       case PVRDMA_WC_RDMA_READ:
+               return IB_WC_RDMA_READ;
+       case PVRDMA_WC_COMP_SWAP:
+               return IB_WC_COMP_SWAP;
+       case PVRDMA_WC_FETCH_ADD:
+               return IB_WC_FETCH_ADD;
+       case PVRDMA_WC_LOCAL_INV:
+               return IB_WC_LOCAL_INV;
+       case PVRDMA_WC_FAST_REG_MR:
+               return IB_WC_REG_MR;
+       case PVRDMA_WC_MASKED_COMP_SWAP:
+               return IB_WC_MASKED_COMP_SWAP;
+       case PVRDMA_WC_MASKED_FETCH_ADD:
+               return IB_WC_MASKED_FETCH_ADD;
+       case PVRDMA_WC_RECV:
+               return IB_WC_RECV;
+       case PVRDMA_WC_RECV_RDMA_WITH_IMM:
+               return IB_WC_RECV_RDMA_WITH_IMM;
+       default:
+               return IB_WC_SEND;
+       }
 }
 
 static inline int pvrdma_wc_flags_to_ib(int flags)
index 14b62f7472b4f6632f344d7d4ac2eb81518cf3c8..7774654c2ccbce600f99e751c7b37e15e7f1dd8a 100644 (file)
@@ -823,12 +823,18 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
            wc->status != IB_WC_WR_FLUSH_ERR) {
                struct ipoib_neigh *neigh;
 
-               if (wc->status != IB_WC_RNR_RETRY_EXC_ERR)
-                       ipoib_warn(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n",
-                                  wc->status, wr_id, wc->vendor_err);
+               /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle,
+                * so don't make waves.
+                */
+               if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
+                   wc->status == IB_WC_RETRY_EXC_ERR)
+                       ipoib_dbg(priv,
+                                 "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
+                                  __func__, wc->status, wr_id, wc->vendor_err);
                else
-                       ipoib_dbg(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n",
-                                 wc->status, wr_id, wc->vendor_err);
+                       ipoib_warn(priv,
+                                   "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
+                                  __func__, wc->status, wr_id, wc->vendor_err);
 
                spin_lock_irqsave(&priv->lock, flags);
                neigh = tx->neigh;
index 2e075377242e2baccc54cda5859d5b3ba7e768d0..6cd61638b44142029b85d0f057b8d04c6b2138c6 100644 (file)
@@ -1000,19 +1000,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
                 */
                priv->dev->broadcast[8] = priv->pkey >> 8;
                priv->dev->broadcast[9] = priv->pkey & 0xff;
-
-               /*
-                * Update the broadcast address in the priv->broadcast object,
-                * in case it already exists, otherwise no one will do that.
-                */
-               if (priv->broadcast) {
-                       spin_lock_irq(&priv->lock);
-                       memcpy(priv->broadcast->mcmember.mgid.raw,
-                              priv->dev->broadcast + 4,
-                       sizeof(union ib_gid));
-                       spin_unlock_irq(&priv->lock);
-               }
-
                return 0;
        }
 
index bac95b509a9b2edc23af7b5e8505fc03acbb80c0..dcc77014018db037cd24540dfe8ddf06d9b70fef 100644 (file)
@@ -2180,6 +2180,7 @@ static struct net_device *ipoib_add_port(const char *format,
 {
        struct ipoib_dev_priv *priv;
        struct ib_port_attr attr;
+       struct rdma_netdev *rn;
        int result = -ENOMEM;
 
        priv = ipoib_intf_alloc(hca, port, format);
@@ -2279,7 +2280,8 @@ register_failed:
        ipoib_dev_cleanup(priv->dev);
 
 device_init_failed:
-       free_netdev(priv->dev);
+       rn = netdev_priv(priv->dev);
+       rn->free_rdma_netdev(priv->dev);
        kfree(priv);
 
 alloc_mem_failed:
@@ -2328,7 +2330,7 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
                return;
 
        list_for_each_entry_safe(priv, tmp, dev_list, list) {
-               struct rdma_netdev *rn = netdev_priv(priv->dev);
+               struct rdma_netdev *parent_rn = netdev_priv(priv->dev);
 
                ib_unregister_event_handler(&priv->event_handler);
                flush_workqueue(ipoib_workqueue);
@@ -2350,10 +2352,15 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
                unregister_netdev(priv->dev);
                mutex_unlock(&priv->sysfs_mutex);
 
-               rn->free_rdma_netdev(priv->dev);
+               parent_rn->free_rdma_netdev(priv->dev);
+
+               list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
+                       struct rdma_netdev *child_rn;
 
-               list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
+                       child_rn = netdev_priv(cpriv->dev);
+                       child_rn->free_rdma_netdev(cpriv->dev);
                        kfree(cpriv);
+               }
 
                kfree(priv);
        }
index 9927cd6b7082b1dc24cbfa38431e6156646bf15f..55a9b71ed05a7ff8ff0ac9f7eb84af1d4f6476a0 100644 (file)
@@ -141,14 +141,17 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
                return restart_syscall();
        }
 
-       priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
-       if (!priv) {
+       if (!down_write_trylock(&ppriv->vlan_rwsem)) {
                rtnl_unlock();
                mutex_unlock(&ppriv->sysfs_mutex);
-               return -ENOMEM;
+               return restart_syscall();
        }
 
-       down_write(&ppriv->vlan_rwsem);
+       priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
+       if (!priv) {
+               result = -ENOMEM;
+               goto out;
+       }
 
        /*
         * First ensure this isn't a duplicate. We check the parent device and
@@ -175,8 +178,11 @@ out:
        rtnl_unlock();
        mutex_unlock(&ppriv->sysfs_mutex);
 
-       if (result) {
-               free_netdev(priv->dev);
+       if (result && priv) {
+               struct rdma_netdev *rn;
+
+               rn = netdev_priv(priv->dev);
+               rn->free_rdma_netdev(priv->dev);
                kfree(priv);
        }
 
@@ -204,7 +210,12 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
                return restart_syscall();
        }
 
-       down_write(&ppriv->vlan_rwsem);
+       if (!down_write_trylock(&ppriv->vlan_rwsem)) {
+               rtnl_unlock();
+               mutex_unlock(&ppriv->sysfs_mutex);
+               return restart_syscall();
+       }
+
        list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
                if (priv->pkey == pkey &&
                    priv->child_type == IPOIB_LEGACY_CHILD) {
@@ -224,7 +235,10 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
        mutex_unlock(&ppriv->sysfs_mutex);
 
        if (dev) {
-               free_netdev(dev);
+               struct rdma_netdev *rn;
+
+               rn = netdev_priv(dev);
+               rn->free_rdma_netdev(dev);
                kfree(priv);
                return 0;
        }
index 9c3e9ab53a415710b0e8bef65647efc4716dc239..322209d5ff5829b6dfd5f4de54f4d11fe141c63d 100644 (file)
@@ -154,7 +154,7 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
 {
        int i;
 
-       iser_err("page vec npages %d data length %d\n",
+       iser_err("page vec npages %d data length %lld\n",
                 page_vec->npages, page_vec->fake_mr.length);
        for (i = 0; i < page_vec->npages; i++)
                iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);
index 8f2042432c85151b25fab1e885fe321e1563d641..66a46c84e28f543bc27f91479d5da669984e0d89 100644 (file)
@@ -237,9 +237,15 @@ int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file)
 EXPORT_SYMBOL_GPL(input_ff_erase);
 
 /*
- * flush_effects - erase all effects owned by a file handle
+ * input_ff_flush - erase all effects owned by a file handle
+ * @dev: input device to erase effect from
+ * @file: purported owner of the effects
+ *
+ * This function erases all force-feedback effects associated with
+ * the given owner from specified device. Note that @file may be %NULL,
+ * in which case all effects will be erased.
  */
-static int flush_effects(struct input_dev *dev, struct file *file)
+int input_ff_flush(struct input_dev *dev, struct file *file)
 {
        struct ff_device *ff = dev->ff;
        int i;
@@ -255,6 +261,7 @@ static int flush_effects(struct input_dev *dev, struct file *file)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(input_ff_flush);
 
 /**
  * input_ff_event() - generic handler for force-feedback events
@@ -343,7 +350,7 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
        mutex_init(&ff->mutex);
 
        dev->ff = ff;
-       dev->flush = flush_effects;
+       dev->flush = input_ff_flush;
        dev->event = input_ff_event;
        __set_bit(EV_FF, dev->evbit);
 
index d268fdc23c6419e2540cc941edb66231be3a900d..762bfb9487dc961cf1c7d12a18a0d10dd3386b4c 100644 (file)
@@ -933,58 +933,52 @@ int input_set_keycode(struct input_dev *dev,
 }
 EXPORT_SYMBOL(input_set_keycode);
 
+bool input_match_device_id(const struct input_dev *dev,
+                          const struct input_device_id *id)
+{
+       if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
+               if (id->bustype != dev->id.bustype)
+                       return false;
+
+       if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
+               if (id->vendor != dev->id.vendor)
+                       return false;
+
+       if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
+               if (id->product != dev->id.product)
+                       return false;
+
+       if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
+               if (id->version != dev->id.version)
+                       return false;
+
+       if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
+           !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
+           !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
+           !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
+           !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
+           !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
+           !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
+           !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
+           !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
+           !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
+               return false;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL(input_match_device_id);
+
 static const struct input_device_id *input_match_device(struct input_handler *handler,
                                                        struct input_dev *dev)
 {
        const struct input_device_id *id;
 
        for (id = handler->id_table; id->flags || id->driver_info; id++) {
-
-               if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
-                       if (id->bustype != dev->id.bustype)
-                               continue;
-
-               if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
-                       if (id->vendor != dev->id.vendor)
-                               continue;
-
-               if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
-                       if (id->product != dev->id.product)
-                               continue;
-
-               if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
-                       if (id->version != dev->id.version)
-                               continue;
-
-               if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->keybit, dev->keybit, KEY_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->relbit, dev->relbit, REL_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->absbit, dev->absbit, ABS_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->ledbit, dev->ledbit, LED_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->sndbit, dev->sndbit, SND_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->ffbit, dev->ffbit, FF_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->swbit, dev->swbit, SW_MAX))
-                       continue;
-
-               if (!handler->match || handler->match(handler, dev))
+               if (input_match_device_id(dev, id) &&
+                   (!handler->match || handler->match(handler, dev))) {
                        return id;
+               }
        }
 
        return NULL;
index 29d677c714d25fa2fca5d41713f32a5a9fe3e64f..7b29a894403981c233120fc282f62d93f3d5f71f 100644 (file)
@@ -747,6 +747,68 @@ static void joydev_cleanup(struct joydev *joydev)
                input_close_device(handle);
 }
 
+/*
+ * These codes are copied from hid-ids.h, unfortunately there is no common
+ * usb_ids/bt_ids.h header.
+ */
+#define USB_VENDOR_ID_SONY                     0x054c
+#define USB_DEVICE_ID_SONY_PS3_CONTROLLER              0x0268
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER              0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2            0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE       0x0ba0
+
+#define USB_VENDOR_ID_THQ                      0x20d6
+#define USB_DEVICE_ID_THQ_PS3_UDRAW                    0xcb17
+
+#define ACCEL_DEV(vnd, prd)                                            \
+       {                                                               \
+               .flags = INPUT_DEVICE_ID_MATCH_VENDOR |                 \
+                               INPUT_DEVICE_ID_MATCH_PRODUCT |         \
+                               INPUT_DEVICE_ID_MATCH_PROPBIT,          \
+               .vendor = (vnd),                                        \
+               .product = (prd),                                       \
+               .propbit = { BIT_MASK(INPUT_PROP_ACCELEROMETER) },      \
+       }
+
+static const struct input_device_id joydev_blacklist[] = {
+       /* Avoid touchpads and touchscreens */
+       {
+               .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+                               INPUT_DEVICE_ID_MATCH_KEYBIT,
+               .evbit = { BIT_MASK(EV_KEY) },
+               .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+       },
+       /* Avoid tablets, digitisers and similar devices */
+       {
+               .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+                               INPUT_DEVICE_ID_MATCH_KEYBIT,
+               .evbit = { BIT_MASK(EV_KEY) },
+               .keybit = { [BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_DIGI) },
+       },
+       /* Disable accelerometers on composite devices */
+       ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER),
+       ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
+       ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+       ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+       ACCEL_DEV(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW),
+       { /* sentinel */ }
+};
+
+static bool joydev_dev_is_blacklisted(struct input_dev *dev)
+{
+       const struct input_device_id *id;
+
+       for (id = joydev_blacklist; id->flags; id++) {
+               if (input_match_device_id(dev, id)) {
+                       dev_dbg(&dev->dev,
+                               "joydev: blacklisting '%s'\n", dev->name);
+                       return true;
+               }
+       }
+
+       return false;
+}
+
 static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
 {
        DECLARE_BITMAP(jd_scratch, KEY_CNT);
@@ -807,12 +869,8 @@ static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
 
 static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
 {
-       /* Avoid touchpads and touchscreens */
-       if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_TOUCH, dev->keybit))
-               return false;
-
-       /* Avoid tablets, digitisers and similar devices */
-       if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit))
+       /* Disable blacklisted devices */
+       if (joydev_dev_is_blacklisted(dev))
                return false;
 
        /* Avoid absolute mice */
index e37e335e406ffbb13d4fa323705373fa2fa58d16..6da607d3b81172a38f95c672e37976bedc8d097d 100644 (file)
@@ -234,14 +234,7 @@ static irqreturn_t tca8418_irq_handler(int irq, void *dev_id)
 static int tca8418_configure(struct tca8418_keypad *keypad_data,
                             u32 rows, u32 cols)
 {
-       int reg, error;
-
-       /* Write config register, if this fails assume device not present */
-       error = tca8418_write_byte(keypad_data, REG_CFG,
-                               CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
-       if (error < 0)
-               return -ENODEV;
-
+       int reg, error = 0;
 
        /* Assemble a mask for row and column registers */
        reg  =  ~(~0 << rows);
@@ -257,6 +250,12 @@ static int tca8418_configure(struct tca8418_keypad *keypad_data,
        error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8);
        error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16);
 
+       if (error)
+               return error;
+
+       error = tca8418_write_byte(keypad_data, REG_CFG,
+                               CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
+
        return error;
 }
 
@@ -268,6 +267,7 @@ static int tca8418_keypad_probe(struct i2c_client *client,
        struct input_dev *input;
        u32 rows = 0, cols = 0;
        int error, row_shift, max_keys;
+       u8 reg;
 
        /* Check i2c driver capabilities */
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
@@ -301,10 +301,10 @@ static int tca8418_keypad_probe(struct i2c_client *client,
        keypad_data->client = client;
        keypad_data->row_shift = row_shift;
 
-       /* Initialize the chip or fail if chip isn't present */
-       error = tca8418_configure(keypad_data, rows, cols);
-       if (error < 0)
-               return error;
+       /* Read key lock register, if this fails assume device not present */
+       error = tca8418_read_byte(keypad_data, REG_KEY_LCK_EC, &reg);
+       if (error)
+               return -ENODEV;
 
        /* Configure input device */
        input = devm_input_allocate_device(dev);
@@ -340,6 +340,11 @@ static int tca8418_keypad_probe(struct i2c_client *client,
                return error;
        }
 
+       /* Initialize the chip */
+       error = tca8418_configure(keypad_data, rows, cols);
+       if (error < 0)
+               return error;
+
        error = input_register_device(input);
        if (error) {
                dev_err(dev, "Unable to register input device, error: %d\n",
index 6cee5adc3b5cfa1ec322db10c49d84f1bcbc2ae3..debeeaeb88127baae0a65dba44875084af034945 100644 (file)
@@ -403,6 +403,7 @@ static const struct platform_device_id axp_pek_id_match[] = {
        },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(platform, axp_pek_id_match);
 
 static struct platform_driver axp20x_pek_driver = {
        .probe          = axp20x_pek_probe,
@@ -417,4 +418,3 @@ module_platform_driver(axp20x_pek_driver);
 MODULE_DESCRIPTION("axp20x Power Button");
 MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:axp20x-pek");
index 6bf82ea8c918adcbbba700100ead7d270a60695c..ae473123583bb22bedbb33426f11eb8f802ea368 100644 (file)
@@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
                return NULL;
        }
 
-       while (buflen > 0) {
+       while (buflen >= sizeof(*union_desc)) {
                union_desc = (struct usb_cdc_union_desc *)buf;
 
+               if (union_desc->bLength > buflen) {
+                       dev_err(&intf->dev, "Too large descriptor\n");
+                       return NULL;
+               }
+
                if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
                    union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
                        dev_dbg(&intf->dev, "Found union header\n");
-                       return union_desc;
+
+                       if (union_desc->bLength >= sizeof(*union_desc))
+                               return union_desc;
+
+                       dev_err(&intf->dev,
+                               "Union descriptor too short (%d vs %zd)\n",
+                               union_desc->bLength, sizeof(*union_desc));
+                       return NULL;
                }
 
                buflen -= union_desc->bLength;
index 022be0e22eba97b10b95e653f48cee44f1d4ba36..443151de90c6b506f6847b656a68d84577576ad1 100644 (file)
@@ -98,14 +98,15 @@ static int uinput_request_reserve_slot(struct uinput_device *udev,
                                        uinput_request_alloc_id(udev, request));
 }
 
-static void uinput_request_done(struct uinput_device *udev,
-                               struct uinput_request *request)
+static void uinput_request_release_slot(struct uinput_device *udev,
+                                       unsigned int id)
 {
        /* Mark slot as available */
-       udev->requests[request->id] = NULL;
-       wake_up(&udev->requests_waitq);
+       spin_lock(&udev->requests_lock);
+       udev->requests[id] = NULL;
+       spin_unlock(&udev->requests_lock);
 
-       complete(&request->done);
+       wake_up(&udev->requests_waitq);
 }
 
 static int uinput_request_send(struct uinput_device *udev,
@@ -138,20 +139,22 @@ static int uinput_request_send(struct uinput_device *udev,
 static int uinput_request_submit(struct uinput_device *udev,
                                 struct uinput_request *request)
 {
-       int error;
+       int retval;
 
-       error = uinput_request_reserve_slot(udev, request);
-       if (error)
-               return error;
+       retval = uinput_request_reserve_slot(udev, request);
+       if (retval)
+               return retval;
 
-       error = uinput_request_send(udev, request);
-       if (error) {
-               uinput_request_done(udev, request);
-               return error;
-       }
+       retval = uinput_request_send(udev, request);
+       if (retval)
+               goto out;
 
        wait_for_completion(&request->done);
-       return request->retval;
+       retval = request->retval;
+
+ out:
+       uinput_request_release_slot(udev, request->id);
+       return retval;
 }
 
 /*
@@ -169,7 +172,7 @@ static void uinput_flush_requests(struct uinput_device *udev)
                request = udev->requests[i];
                if (request) {
                        request->retval = -ENODEV;
-                       uinput_request_done(udev, request);
+                       complete(&request->done);
                }
        }
 
@@ -230,6 +233,18 @@ static int uinput_dev_erase_effect(struct input_dev *dev, int effect_id)
        return uinput_request_submit(udev, &request);
 }
 
+static int uinput_dev_flush(struct input_dev *dev, struct file *file)
+{
+       /*
+        * If we are called with file == NULL that means we are tearing
+        * down the device, and therefore we can not handle FF erase
+        * requests: either we are handling UI_DEV_DESTROY (and holding
+        * the udev->mutex), or the file descriptor is closed and there is
+        * nobody on the other side anymore.
+        */
+       return file ? input_ff_flush(dev, file) : 0;
+}
+
 static void uinput_destroy_device(struct uinput_device *udev)
 {
        const char *name, *phys;
@@ -297,6 +312,12 @@ static int uinput_create_device(struct uinput_device *udev)
                dev->ff->playback = uinput_dev_playback;
                dev->ff->set_gain = uinput_dev_set_gain;
                dev->ff->set_autocenter = uinput_dev_set_autocenter;
+               /*
+                * The standard input_ff_flush() implementation does
+                * not quite work for uinput as we can't reasonably
+                * handle FF requests during device teardown.
+                */
+               dev->flush = uinput_dev_flush;
        }
 
        error = input_register_device(udev->dev);
@@ -939,7 +960,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
                        }
 
                        req->retval = ff_up.retval;
-                       uinput_request_done(udev, req);
+                       complete(&req->done);
                        goto out;
 
                case UI_END_FF_ERASE:
@@ -955,7 +976,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
                        }
 
                        req->retval = ff_erase.retval;
-                       uinput_request_done(udev, req);
+                       complete(&req->done);
                        goto out;
        }
 
index 0e761d079dc4669720d2d4899f13ecbbfdb22b52..6d6b092e2da901969b8e3a2cfe65f2d0f2651098 100644 (file)
@@ -1258,6 +1258,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN0605", 0 },
        { "ELAN0609", 0 },
        { "ELAN060B", 0 },
+       { "ELAN0611", 0 },
        { "ELAN1000", 0 },
        { }
 };
index 15b1330606c1c3fcfc28cf109f89987d0425f75a..e19eb60b3d2f5adcdf0182673680de25ddd15483 100644 (file)
@@ -598,7 +598,7 @@ static int elan_i2c_write_fw_block(struct i2c_client *client,
        }
 
        /* Wait for F/W to update one page ROM data. */
-       msleep(20);
+       msleep(35);
 
        error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val);
        if (error) {
index 5af0b7d200bc23ff0f2287d6e0b69a631fb8bb22..ee5466a374bf0f370000cf002d94bf2514d02318 100644 (file)
@@ -1709,8 +1709,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
                .sensor_pdata = {
                        .sensor_type = rmi_sensor_touchpad,
                        .axis_align.flip_y = true,
-                       /* to prevent cursors jumps: */
-                       .kernel_tracking = true,
+                       .kernel_tracking = false,
                        .topbuttonpad = topbuttonpad,
                },
                .f30_data = {
index 34dfee555b201b0577e6e5fc0c58c63e7cc8cac7..82e0f0d43d55271c92c774ba325b1bc40099f83e 100644 (file)
@@ -232,9 +232,10 @@ static int rmi_f30_map_gpios(struct rmi_function *fn,
        unsigned int trackstick_button = BTN_LEFT;
        bool button_mapped = false;
        int i;
+       int button_count = min_t(u8, f30->gpioled_count, TRACKSTICK_RANGE_END);
 
        f30->gpioled_key_map = devm_kcalloc(&fn->dev,
-                                           f30->gpioled_count,
+                                           button_count,
                                            sizeof(f30->gpioled_key_map[0]),
                                            GFP_KERNEL);
        if (!f30->gpioled_key_map) {
@@ -242,7 +243,7 @@ static int rmi_f30_map_gpios(struct rmi_function *fn,
                return -ENOMEM;
        }
 
-       for (i = 0; i < f30->gpioled_count; i++) {
+       for (i = 0; i < button_count; i++) {
                if (!rmi_f30_is_valid_button(i, f30->ctrl))
                        continue;
 
index b796e891e2eed22e1ffdac3f3098fdf7afbe4ac6..4b8b9d7aa75e2785991fc5838bf55728c715e6fe 100644 (file)
@@ -230,13 +230,17 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
 
        /* Walk  this report and pull out the info we need */
        while (i < length) {
-               prefix = report[i];
-
-               /* Skip over prefix */
-               i++;
+               prefix = report[i++];
 
                /* Determine data size and save the data in the proper variable */
-               size = PREF_SIZE(prefix);
+               size = (1U << PREF_SIZE(prefix)) >> 1;
+               if (i + size > length) {
+                       dev_err(ddev,
+                               "Not enough data (need %d, have %d)\n",
+                               i + size, length);
+                       break;
+               }
+
                switch (size) {
                case 1:
                        data = report[i];
@@ -244,8 +248,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
                case 2:
                        data16 = get_unaligned_le16(&report[i]);
                        break;
-               case 3:
-                       size = 4;
+               case 4:
                        data32 = get_unaligned_le32(&report[i]);
                        break;
                }
index 32d2762448aa24168c9dd0ad38c8c9eb1ec13ff3..b3bbad7d228296118f35a2d4bff7c295b5e9839c 100644 (file)
@@ -72,6 +72,9 @@ struct goodix_ts_data {
 #define GOODIX_REG_CONFIG_DATA         0x8047
 #define GOODIX_REG_ID                  0x8140
 
+#define GOODIX_BUFFER_STATUS_READY     BIT(7)
+#define GOODIX_BUFFER_STATUS_TIMEOUT   20
+
 #define RESOLUTION_LOC         1
 #define MAX_CONTACTS_LOC       5
 #define TRIGGER_LOC            6
@@ -195,35 +198,53 @@ static int goodix_get_cfg_len(u16 id)
 
 static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
 {
+       unsigned long max_timeout;
        int touch_num;
        int error;
 
-       error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, data,
-                               GOODIX_CONTACT_SIZE + 1);
-       if (error) {
-               dev_err(&ts->client->dev, "I2C transfer error: %d\n", error);
-               return error;
-       }
+       /*
+        * The 'buffer status' bit, which indicates that the data is valid, is
+        * not set as soon as the interrupt is raised, but slightly after.
+        * This takes around 10 ms to happen, so we poll for 20 ms.
+        */
+       max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT);
+       do {
+               error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR,
+                                       data, GOODIX_CONTACT_SIZE + 1);
+               if (error) {
+                       dev_err(&ts->client->dev, "I2C transfer error: %d\n",
+                                       error);
+                       return error;
+               }
 
-       if (!(data[0] & 0x80))
-               return -EAGAIN;
+               if (data[0] & GOODIX_BUFFER_STATUS_READY) {
+                       touch_num = data[0] & 0x0f;
+                       if (touch_num > ts->max_touch_num)
+                               return -EPROTO;
+
+                       if (touch_num > 1) {
+                               data += 1 + GOODIX_CONTACT_SIZE;
+                               error = goodix_i2c_read(ts->client,
+                                               GOODIX_READ_COOR_ADDR +
+                                                       1 + GOODIX_CONTACT_SIZE,
+                                               data,
+                                               GOODIX_CONTACT_SIZE *
+                                                       (touch_num - 1));
+                               if (error)
+                                       return error;
+                       }
+
+                       return touch_num;
+               }
 
-       touch_num = data[0] & 0x0f;
-       if (touch_num > ts->max_touch_num)
-               return -EPROTO;
-
-       if (touch_num > 1) {
-               data += 1 + GOODIX_CONTACT_SIZE;
-               error = goodix_i2c_read(ts->client,
-                                       GOODIX_READ_COOR_ADDR +
-                                               1 + GOODIX_CONTACT_SIZE,
-                                       data,
-                                       GOODIX_CONTACT_SIZE * (touch_num - 1));
-               if (error)
-                       return error;
-       }
+               usleep_range(1000, 2000); /* Poll every 1 - 2 ms */
+       } while (time_before(jiffies, max_timeout));
 
-       return touch_num;
+       /*
+        * The Goodix panel will send spurious interrupts after a
+        * 'finger up' event, which will always cause a timeout.
+        */
+       return 0;
 }
 
 static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
index 157fdb4bb2e8ba931697d18281afb3887beceaca..8c6c6178ec12fd26ed9832311419da243ed4c154 100644 (file)
@@ -663,12 +663,10 @@ static int stmfts_probe(struct i2c_client *client,
        sdata->input->open = stmfts_input_open;
        sdata->input->close = stmfts_input_close;
 
+       input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_X);
+       input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_Y);
        touchscreen_parse_properties(sdata->input, true, &sdata->prop);
 
-       input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0,
-                                               sdata->prop.max_x, 0, 0);
-       input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0,
-                                               sdata->prop.max_y, 0, 0);
        input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
        input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0);
        input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0);
index 7953381d939ab49f26f460bf7bac808600cb2549..f1043ae71dccb5c4a10126279ff0b1a3c6142089 100644 (file)
@@ -161,7 +161,7 @@ static void titsc_step_config(struct titsc *ts_dev)
                break;
        case 5:
                config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 |
-                               ts_dev->bit_xn | ts_dev->bit_yp;
+                               STEPCONFIG_XNP | STEPCONFIG_YPN;
                break;
        case 8:
                config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp);
index 49bd2ab8c5075859bca8e2e90519e7da6cd6f48f..f3a21343e636a8f26066f206129d45924cdc9588 100644 (file)
@@ -278,7 +278,7 @@ config EXYNOS_IOMMU_DEBUG
 config IPMMU_VMSA
        bool "Renesas VMSA-compatible IPMMU"
        depends on ARM || IOMMU_DMA
-       depends on ARCH_RENESAS || COMPILE_TEST
+       depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64)
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
        select ARM_DMA_USE_IOMMU
@@ -373,7 +373,8 @@ config MTK_IOMMU_V1
 config QCOM_IOMMU
        # Note: iommu drivers cannot (yet?) be built as modules
        bool "Qualcomm IOMMU Support"
-       depends on ARCH_QCOM || COMPILE_TEST
+       depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
+       depends on HAS_DMA
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
        select ARM_DMA_USE_IOMMU
index 51f8215877f552ed168424107078ac786febaf3b..8e8874d23717ab5120c87f180146562be3e03ef6 100644 (file)
@@ -2773,14 +2773,16 @@ int __init amd_iommu_init_api(void)
 
 int __init amd_iommu_init_dma_ops(void)
 {
-       swiotlb        = iommu_pass_through ? 1 : 0;
+       swiotlb        = (iommu_pass_through || sme_me_mask) ? 1 : 0;
        iommu_detected = 1;
 
        /*
         * In case we don't initialize SWIOTLB (actually the common case
-        * when AMD IOMMU is enabled), make sure there are global
-        * dma_ops set as a fall-back for devices not handled by this
-        * driver (for example non-PCI devices).
+        * when AMD IOMMU is enabled and SME is not active), make sure there
+        * are global dma_ops set as a fall-back for devices not handled by
+        * this driver (for example non-PCI devices). When SME is active,
+        * make sure that swiotlb variable remains set so the global dma_ops
+        * continue to be SWIOTLB.
         */
        if (!swiotlb)
                dma_ops = &nommu_dma_ops;
@@ -3046,6 +3048,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
        mutex_unlock(&domain->api_lock);
 
        domain_flush_tlb_pde(domain);
+       domain_flush_complete(domain);
 
        return unmap_size;
 }
index 382de42b835939e167604053060e9944e19131ba..6fe2d03460730cabe0aa78060114844ad1a2a3cf 100644 (file)
@@ -874,7 +874,7 @@ static bool copy_device_table(void)
                hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
                entry = (((u64) hi) << 32) + lo;
                if (last_entry && last_entry != entry) {
-                       pr_err("IOMMU:%d should use the same dev table as others!/n",
+                       pr_err("IOMMU:%d should use the same dev table as others!\n",
                                iommu->index);
                        return false;
                }
@@ -882,7 +882,7 @@ static bool copy_device_table(void)
 
                old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
                if (old_devtb_size != dev_table_size) {
-                       pr_err("The device table size of IOMMU:%d is not expected!/n",
+                       pr_err("The device table size of IOMMU:%d is not expected!\n",
                                iommu->index);
                        return false;
                }
@@ -890,7 +890,7 @@ static bool copy_device_table(void)
 
        old_devtb_phys = entry & PAGE_MASK;
        if (old_devtb_phys >= 0x100000000ULL) {
-               pr_err("The address of old device table is above 4G, not trustworthy!/n");
+               pr_err("The address of old device table is above 4G, not trustworthy!\n");
                return false;
        }
        old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
@@ -901,7 +901,7 @@ static bool copy_device_table(void)
        old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
                                get_order(dev_table_size));
        if (old_dev_tbl_cpy == NULL) {
-               pr_err("Failed to allocate memory for copying old device table!/n");
+               pr_err("Failed to allocate memory for copying old device table!\n");
                return false;
        }
 
index ca5ebaeafd6acee8d11033b34ba912eeebc04689..57c920c1372d09f927a7dcdeadc25375bf4164a8 100644 (file)
@@ -497,7 +497,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
 #define        dmar_parse_one_rhsa             dmar_res_noop
 #endif
 
-static void __init
+static void
 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
 {
        struct acpi_dmar_hardware_unit *drhd;
index f596fcc3289830f5a74ec93d438aa626bebf42fe..25c2c75f5332efe3e98d7e1c290db71d515364f5 100644 (file)
@@ -709,7 +709,7 @@ static const struct dev_pm_ops sysmmu_pm_ops = {
                                pm_runtime_force_resume)
 };
 
-static const struct of_device_id sysmmu_of_match[] __initconst = {
+static const struct of_device_id sysmmu_of_match[] = {
        { .compatible   = "samsung,exynos-sysmmu", },
        { },
 };
index d665d0dc16e8f787813a6106d15bd83afacc4f34..6961fc393f0b25828f5981fad35ea744c7768b41 100644 (file)
@@ -245,7 +245,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
 static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
                               struct io_pgtable_cfg *cfg)
 {
-       if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
+       if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)
                return;
 
        dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
index bd515be5b380e32ac224b55d5b9a2dfff26a15ef..16d33ac19db0f77837c30f44de044a3a46b9c558 100644 (file)
@@ -371,7 +371,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
        int ret;
 
        spin_lock_irqsave(&dom->pgtlock, flags);
-       ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
+       ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32),
+                           size, prot);
        spin_unlock_irqrestore(&dom->pgtlock, flags);
 
        return ret;
index e60e3dba85a0d7c0457d9bdf650de2c350a5d11b..50947ebb6d1700e3f81acb5806c1b1a8b183c145 100644 (file)
@@ -157,10 +157,7 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
 
        err = of_iommu_xlate(info->dev, &iommu_spec);
        of_node_put(iommu_spec.np);
-       if (err)
-               return err;
-
-       return info->np == pdev->bus->dev.of_node;
+       return err;
 }
 
 const struct iommu_ops *of_iommu_configure(struct device *dev,
index e8d89343d6139424e2caf0baa112955b7005b2e4..e88395605e32dc32960974a4e8707a30037b107b 100644 (file)
@@ -107,6 +107,10 @@ struct its_node {
 
 #define ITS_ITT_ALIGN          SZ_256
 
+/* The maximum number of VPEID bits supported by VLPI commands */
+#define ITS_MAX_VPEID_BITS     (16)
+#define ITS_MAX_VPEID          (1 << (ITS_MAX_VPEID_BITS))
+
 /* Convert page order to size in bytes */
 #define PAGE_ORDER_TO_SIZE(o)  (PAGE_SIZE << (o))
 
@@ -308,7 +312,7 @@ static void its_encode_size(struct its_cmd_block *cmd, u8 size)
 
 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
 {
-       its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
+       its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
 }
 
 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
@@ -318,7 +322,7 @@ static void its_encode_valid(struct its_cmd_block *cmd, int valid)
 
 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
 {
-       its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
+       its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
 }
 
 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
@@ -358,7 +362,7 @@ static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
 
 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
 {
-       its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
+       its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
 }
 
 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
@@ -1478,9 +1482,9 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
        u64 val = its_read_baser(its, baser);
        u64 esz = GITS_BASER_ENTRY_SIZE(val);
        u64 type = GITS_BASER_TYPE(val);
+       u64 baser_phys, tmp;
        u32 alloc_pages;
        void *base;
-       u64 tmp;
 
 retry_alloc_baser:
        alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
@@ -1496,8 +1500,24 @@ retry_alloc_baser:
        if (!base)
                return -ENOMEM;
 
+       baser_phys = virt_to_phys(base);
+
+       /* Check if the physical address of the memory is above 48bits */
+       if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
+
+               /* 52bit PA is supported only when PageSize=64K */
+               if (psz != SZ_64K) {
+                       pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
+                       free_pages((unsigned long)base, order);
+                       return -ENXIO;
+               }
+
+               /* Convert 52bit PA to 48bit field */
+               baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
+       }
+
 retry_baser:
-       val = (virt_to_phys(base)                                |
+       val = (baser_phys                                        |
                (type << GITS_BASER_TYPE_SHIFT)                  |
                ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)       |
                ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)    |
@@ -1582,13 +1602,12 @@ retry_baser:
 
 static bool its_parse_indirect_baser(struct its_node *its,
                                     struct its_baser *baser,
-                                    u32 psz, u32 *order)
+                                    u32 psz, u32 *order, u32 ids)
 {
        u64 tmp = its_read_baser(its, baser);
        u64 type = GITS_BASER_TYPE(tmp);
        u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
        u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
-       u32 ids = its->device_ids;
        u32 new_order = *order;
        bool indirect = false;
 
@@ -1680,9 +1699,13 @@ static int its_alloc_tables(struct its_node *its)
                        continue;
 
                case GITS_BASER_TYPE_DEVICE:
+                       indirect = its_parse_indirect_baser(its, baser,
+                                                           psz, &order,
+                                                           its->device_ids);
                case GITS_BASER_TYPE_VCPU:
                        indirect = its_parse_indirect_baser(its, baser,
-                                                           psz, &order);
+                                                           psz, &order,
+                                                           ITS_MAX_VPEID_BITS);
                        break;
                }
 
@@ -2551,7 +2574,7 @@ static struct irq_chip its_vpe_irq_chip = {
 
 static int its_vpe_id_alloc(void)
 {
-       return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL);
+       return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
 }
 
 static void its_vpe_id_free(u16 id)
@@ -2851,7 +2874,7 @@ static int its_init_vpe_domain(void)
                return -ENOMEM;
        }
 
-       BUG_ON(entries != vpe_proxy.dev->nr_ites);
+       BUG_ON(entries > vpe_proxy.dev->nr_ites);
 
        raw_spin_lock_init(&vpe_proxy.lock);
        vpe_proxy.next_victim = 0;
index 519149ec905378d59e9abb931515542b91b133c6..b5df99c6f680f940a48223455a377a1ef8e07cb2 100644 (file)
@@ -1042,7 +1042,7 @@ static int get_cpu_number(struct device_node *dn)
 {
        const __be32 *cell;
        u64 hwid;
-       int i;
+       int cpu;
 
        cell = of_get_property(dn, "reg", NULL);
        if (!cell)
@@ -1056,9 +1056,9 @@ static int get_cpu_number(struct device_node *dn)
        if (hwid & ~MPIDR_HWID_BITMASK)
                return -1;
 
-       for (i = 0; i < num_possible_cpus(); i++)
-               if (cpu_logical_map(i) == hwid)
-                       return i;
+       for_each_possible_cpu(cpu)
+               if (cpu_logical_map(cpu) == hwid)
+                       return cpu;
 
        return -1;
 }
index 2370e6d9e603ad17a57016974608b8aade9f85a2..cd0bcc3b7e33709a472c1952c5ee7ccdf1ea0382 100644 (file)
@@ -173,7 +173,9 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)
 {
        struct its_cmd_info info = {
                .cmd_type = MAP_VLPI,
-               .map      = map,
+               {
+                       .map      = map,
+               },
        };
 
        /*
@@ -189,7 +191,9 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map)
 {
        struct its_cmd_info info = {
                .cmd_type = GET_VLPI,
-               .map      = map,
+               {
+                       .map      = map,
+               },
        };
 
        return irq_set_vcpu_affinity(irq, &info);
@@ -205,7 +209,9 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)
 {
        struct its_cmd_info info = {
                .cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI,
-               .config   = config,
+               {
+                       .config   = config,
+               },
        };
 
        return irq_set_vcpu_affinity(irq, &info);
index 6e52a88bbd9e9cc7a832d67b13c717efe5cb039a..c90976d7e53ccc596b65a0864ef169f1aa1fafd8 100644 (file)
@@ -169,20 +169,19 @@ static void gic_mask_irq(struct irq_data *d)
 {
        unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
 
-       write_gic_rmask(BIT(intr));
+       write_gic_rmask(intr);
        gic_clear_pcpu_masks(intr);
 }
 
 static void gic_unmask_irq(struct irq_data *d)
 {
-       struct cpumask *affinity = irq_data_get_affinity_mask(d);
        unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
        unsigned int cpu;
 
-       write_gic_smask(BIT(intr));
+       write_gic_smask(intr);
 
        gic_clear_pcpu_masks(intr);
-       cpu = cpumask_first_and(affinity, cpu_online_mask);
+       cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
        set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
 }
 
@@ -420,13 +419,17 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
                                     irq_hw_number_t hw, unsigned int cpu)
 {
        int intr = GIC_HWIRQ_TO_SHARED(hw);
+       struct irq_data *data;
        unsigned long flags;
 
+       data = irq_get_irq_data(virq);
+
        spin_lock_irqsave(&gic_lock, flags);
        write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
        write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
        gic_clear_pcpu_masks(intr);
        set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
+       irq_data_update_effective_affinity(data, cpumask_of(cpu));
        spin_unlock_irqrestore(&gic_lock, flags);
 
        return 0;
@@ -645,7 +648,7 @@ static int __init gic_of_init(struct device_node *node,
 
        /* Find the first available CPU vector. */
        i = 0;
-       reserved = (C_SW0 | C_SW1) >> __fls(C_SW0);
+       reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
        while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
                                           i++, &cpu_vec))
                reserved |= BIT(cpu_vec);
@@ -684,11 +687,11 @@ static int __init gic_of_init(struct device_node *node,
 
        gicconfig = read_gic_config();
        gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
-       gic_shared_intrs >>= __fls(GIC_CONFIG_NUMINTERRUPTS);
+       gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
        gic_shared_intrs = (gic_shared_intrs + 1) * 8;
 
        gic_vpes = gicconfig & GIC_CONFIG_PVPS;
-       gic_vpes >>= __fls(GIC_CONFIG_PVPS);
+       gic_vpes >>= __ffs(GIC_CONFIG_PVPS);
        gic_vpes = gic_vpes + 1;
 
        if (cpu_has_veic) {
@@ -767,7 +770,7 @@ static int __init gic_of_init(struct device_node *node,
        for (i = 0; i < gic_shared_intrs; i++) {
                change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
                change_gic_trig(i, GIC_TRIG_LEVEL);
-               write_gic_rmask(BIT(i));
+               write_gic_rmask(i);
        }
 
        for (i = 0; i < gic_vpes; i++) {
index bdbb5c0ff7fe3bd956c1c9d12364aee4cd8c5eba..0c085303a58302cc982f69e52b0a5ea8e5c60c25 100644 (file)
@@ -141,7 +141,7 @@ static void __init tangox_irq_init_chip(struct irq_chip_generic *gc,
        for (i = 0; i < 2; i++) {
                ct[i].chip.irq_ack = irq_gc_ack_set_bit;
                ct[i].chip.irq_mask = irq_gc_mask_disable_reg;
-               ct[i].chip.irq_mask_ack = irq_gc_mask_disable_reg_and_ack;
+               ct[i].chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
                ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg;
                ct[i].chip.irq_set_type = tangox_irq_set_type;
                ct[i].chip.name = gc->domain->name;
index 6c44609fd83a0f266df74b80bbe91f84b3bd4a67..cd2b3c69771a24b62a7952a8e7c626fde5459c6b 100644 (file)
@@ -825,7 +825,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
        isdn_net_local *lp;
        struct ippp_struct *is;
        int proto;
-       unsigned char protobuf[4];
 
        is = file->private_data;
 
@@ -839,24 +838,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
        if (!lp)
                printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
        else {
-               /*
-                * Don't reset huptimer for
-                * LCP packets. (Echo requests).
-                */
-               if (copy_from_user(protobuf, buf, 4))
-                       return -EFAULT;
-               proto = PPP_PROTOCOL(protobuf);
-               if (proto != PPP_LCP)
-                       lp->huptimer = 0;
+               if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
+                       unsigned char protobuf[4];
+                       /*
+                        * Don't reset huptimer for
+                        * LCP packets. (Echo requests).
+                        */
+                       if (copy_from_user(protobuf, buf, 4))
+                               return -EFAULT;
+
+                       proto = PPP_PROTOCOL(protobuf);
+                       if (proto != PPP_LCP)
+                               lp->huptimer = 0;
 
-               if (lp->isdn_device < 0 || lp->isdn_channel < 0)
                        return 0;
+               }
 
                if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
                    lp->dialstate == 0 &&
                    (lp->flags & ISDN_NET_CONNECTED)) {
                        unsigned short hl;
                        struct sk_buff *skb;
+                       unsigned char *cpy_buf;
                        /*
                         * we need to reserve enough space in front of
                         * sk_buff. old call to dev_alloc_skb only reserved
@@ -869,11 +872,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
                                return count;
                        }
                        skb_reserve(skb, hl);
-                       if (copy_from_user(skb_put(skb, count), buf, count))
+                       cpy_buf = skb_put(skb, count);
+                       if (copy_from_user(cpy_buf, buf, count))
                        {
                                kfree_skb(skb);
                                return -EFAULT;
                        }
+
+                       /*
+                        * Don't reset huptimer for
+                        * LCP packets. (Echo requests).
+                        */
+                       proto = PPP_PROTOCOL(cpy_buf);
+                       if (proto != PPP_LCP)
+                               lp->huptimer = 0;
+
                        if (is->debug & 0x40) {
                                printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
                                isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
index bbbbe08982332a53fdad499cd5643f85b97e5898..9a257f9693009c7ec3a163dc83e7c0b2e5839dde 100644 (file)
 #define AS_PEAK_mA_TO_REG(a) \
        ((min_t(u32, AS_PEAK_mA_MAX, a) - 1250) / 250)
 
+/* LED numbers for Devicetree */
+#define AS_LED_FLASH                           0
+#define AS_LED_INDICATOR                       1
+
 enum as_mode {
        AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT,
        AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT,
@@ -491,10 +495,29 @@ static int as3645a_parse_node(struct as3645a *flash,
                              struct device_node *node)
 {
        struct as3645a_config *cfg = &flash->cfg;
+       struct device_node *child;
        const char *name;
        int rval;
 
-       flash->flash_node = of_get_child_by_name(node, "flash");
+       for_each_child_of_node(node, child) {
+               u32 id = 0;
+
+               of_property_read_u32(child, "reg", &id);
+
+               switch (id) {
+               case AS_LED_FLASH:
+                       flash->flash_node = of_node_get(child);
+                       break;
+               case AS_LED_INDICATOR:
+                       flash->indicator_node = of_node_get(child);
+                       break;
+               default:
+                       dev_warn(&flash->client->dev,
+                                "unknown LED %u encountered, ignoring\n", id);
+                       break;
+               }
+       }
+
        if (!flash->flash_node) {
                dev_err(&flash->client->dev, "can't find flash node\n");
                return -ENODEV;
@@ -534,11 +557,10 @@ static int as3645a_parse_node(struct as3645a *flash,
        of_property_read_u32(flash->flash_node, "voltage-reference",
                             &cfg->voltage_reference);
 
-       of_property_read_u32(flash->flash_node, "peak-current-limit",
+       of_property_read_u32(flash->flash_node, "ams,input-max-microamp",
                             &cfg->peak);
        cfg->peak = AS_PEAK_mA_TO_REG(cfg->peak);
 
-       flash->indicator_node = of_get_child_by_name(node, "indicator");
        if (!flash->indicator_node) {
                dev_warn(&flash->client->dev,
                         "can't find indicator node\n");
@@ -721,6 +743,7 @@ static int as3645a_remove(struct i2c_client *client)
        as3645a_set_control(flash, AS_MODE_EXT_TORCH, false);
 
        v4l2_flash_release(flash->vf);
+       v4l2_flash_release(flash->vfind);
 
        led_classdev_flash_unregister(&flash->fled);
        led_classdev_unregister(&flash->iled_cdev);
index 7d5286b05036976059c3af6c7277a987b457c59d..1841d0359bace765cc52acd6340bb96efb5aa2ee 100644 (file)
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(closure_put);
 void __closure_wake_up(struct closure_waitlist *wait_list)
 {
        struct llist_node *list;
-       struct closure *cl;
+       struct closure *cl, *t;
        struct llist_node *reverse = NULL;
 
        list = llist_del_all(&wait_list->list);
@@ -73,7 +73,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
        reverse = llist_reverse_order(list);
 
        /* Then do the wakeups */
-       llist_for_each_entry(cl, reverse, list) {
+       llist_for_each_entry_safe(cl, t, reverse, list) {
                closure_set_waiting(cl, 0);
                closure_sub(cl, CLOSURE_WAITING + 1);
        }
index 24eddbdf2ab4cb9437674a3783ffbbfaca8a21fd..203144762f3688ab118ccd2e865df3c427b4dca5 100644 (file)
@@ -149,5 +149,6 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
 
 extern atomic_t dm_global_event_nr;
 extern wait_queue_head_t dm_global_eventq;
+void dm_issue_global_event(void);
 
 #endif
index a55ffd4f5933fc1247b6729dcc0344964c9f01b8..96ab46512e1fdcf27e4669f22d97b553b491c196 100644 (file)
@@ -2466,6 +2466,7 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
                kfree(cipher_api);
                return ret;
        }
+       kfree(cipher_api);
 
        return 0;
 bad_mem:
@@ -2584,6 +2585,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
                                ti->error = "Invalid feature value for sector_size";
                                return -EINVAL;
                        }
+                       if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
+                               ti->error = "Device size is not multiple of sector_size feature";
+                               return -EINVAL;
+                       }
                        cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
                } else if (!strcasecmp(opt_string, "iv_large_sectors"))
                        set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
index 8756a6850431d007f756079a595f5ae30d51a8df..e52676fa9832c53dcd57dc37eb654e3e7eee0d88 100644 (file)
@@ -477,9 +477,13 @@ static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_si
  * Round up the ptr to an 8-byte boundary.
  */
 #define ALIGN_MASK 7
+static inline size_t align_val(size_t val)
+{
+       return (val + ALIGN_MASK) & ~ALIGN_MASK;
+}
 static inline void *align_ptr(void *ptr)
 {
-       return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
+       return (void *)align_val((size_t)ptr);
 }
 
 /*
@@ -505,7 +509,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
        struct hash_cell *hc;
        size_t len, needed = 0;
        struct gendisk *disk;
-       struct dm_name_list *nl, *old_nl = NULL;
+       struct dm_name_list *orig_nl, *nl, *old_nl = NULL;
        uint32_t *event_nr;
 
        down_write(&_hash_lock);
@@ -516,17 +520,15 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
         */
        for (i = 0; i < NUM_BUCKETS; i++) {
                list_for_each_entry (hc, _name_buckets + i, name_list) {
-                       needed += sizeof(struct dm_name_list);
-                       needed += strlen(hc->name) + 1;
-                       needed += ALIGN_MASK;
-                       needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK;
+                       needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
+                       needed += align_val(sizeof(uint32_t));
                }
        }
 
        /*
         * Grab our output buffer.
         */
-       nl = get_result_buffer(param, param_size, &len);
+       nl = orig_nl = get_result_buffer(param, param_size, &len);
        if (len < needed) {
                param->flags |= DM_BUFFER_FULL_FLAG;
                goto out;
@@ -549,11 +551,16 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
                        strcpy(nl->name, hc->name);
 
                        old_nl = nl;
-                       event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1);
+                       event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
                        *event_nr = dm_get_event_nr(hc->md);
                        nl = align_ptr(event_nr + 1);
                }
        }
+       /*
+        * If mismatch happens, security may be compromised due to buffer
+        * overflow, so it's better to crash.
+        */
+       BUG_ON((char *)nl - (char *)orig_nl != needed);
 
  out:
        up_write(&_hash_lock);
@@ -1621,7 +1628,8 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
  * which has a variable size, is not used by the function processing
  * the ioctl.
  */
-#define IOCTL_FLAGS_NO_PARAMS  1
+#define IOCTL_FLAGS_NO_PARAMS          1
+#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2
 
 /*-----------------------------------------------------------------
  * Implementation of open/close/ioctl on the special char
@@ -1635,12 +1643,12 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
                ioctl_fn fn;
        } _ioctls[] = {
                {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
-               {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
+               {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all},
                {DM_LIST_DEVICES_CMD, 0, list_devices},
 
-               {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
-               {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
-               {DM_DEV_RENAME_CMD, 0, dev_rename},
+               {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create},
+               {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove},
+               {DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename},
                {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
                {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
                {DM_DEV_WAIT_CMD, 0, dev_wait},
@@ -1869,6 +1877,9 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
            unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
                DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
 
+       if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT)
+               dm_issue_global_event();
+
        /*
         * Copy the results back to userland.
         */
index 5bfe285ea9d1c815ae8014064a29e4a08566d374..2245d06d204597b537c22caf48c4f88626051f57 100644 (file)
@@ -3238,7 +3238,7 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
        if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
                return DM_MAPIO_REQUEUE;
 
-       mddev->pers->make_request(mddev, bio);
+       md_handle_request(mddev, bio);
 
        return DM_MAPIO_SUBMITTED;
 }
@@ -3297,11 +3297,10 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev,
 static sector_t rs_get_progress(struct raid_set *rs,
                                sector_t resync_max_sectors, bool *array_in_sync)
 {
-       sector_t r, recovery_cp, curr_resync_completed;
+       sector_t r, curr_resync_completed;
        struct mddev *mddev = &rs->md;
 
        curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
-       recovery_cp = mddev->recovery_cp;
        *array_in_sync = false;
 
        if (rs_is_raid0(rs)) {
@@ -3330,9 +3329,11 @@ static sector_t rs_get_progress(struct raid_set *rs,
                } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                        r = curr_resync_completed;
                else
-                       r = recovery_cp;
+                       r = mddev->recovery_cp;
 
-               if (r == MaxSector) {
+               if ((r == MaxSector) ||
+                   (test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
+                    (mddev->curr_resync_completed == resync_max_sectors))) {
                        /*
                         * Sync complete.
                         */
@@ -3892,7 +3893,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
        .name = "raid",
-       .version = {1, 12, 1},
+       .version = {1, 13, 0},
        .module = THIS_MODULE,
        .ctr = raid_ctr,
        .dtr = raid_dtr,
index 6e54145969c5ce30184cf162283db0f01796f1ca..4be85324f44dc26177c17bf9ab6acf451dca3fcf 100644 (file)
@@ -52,6 +52,12 @@ static struct workqueue_struct *deferred_remove_workqueue;
 atomic_t dm_global_event_nr = ATOMIC_INIT(0);
 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
 
+void dm_issue_global_event(void)
+{
+       atomic_inc(&dm_global_event_nr);
+       wake_up(&dm_global_eventq);
+}
+
 /*
  * One of these is allocated per bio.
  */
@@ -1865,9 +1871,8 @@ static void event_callback(void *context)
        dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 
        atomic_inc(&md->event_nr);
-       atomic_inc(&dm_global_event_nr);
        wake_up(&md->eventq);
-       wake_up(&dm_global_eventq);
+       dm_issue_global_event();
 }
 
 /*
@@ -2283,6 +2288,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
        }
 
        map = __bind(md, table, &limits);
+       dm_issue_global_event();
 
 out:
        mutex_unlock(&md->suspend_lock);
index 08fcaebc61bdb52b92f8ba37ccd4d2b3dac41779..0ff1bbf6c90e5cebc782268313ce39f8c66f7270 100644 (file)
@@ -266,6 +266,37 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
  * call has finished, the bio has been linked into some internal structure
  * and so is visible to ->quiesce(), so we don't need the refcount any more.
  */
+void md_handle_request(struct mddev *mddev, struct bio *bio)
+{
+check_suspended:
+       rcu_read_lock();
+       if (mddev->suspended) {
+               DEFINE_WAIT(__wait);
+               for (;;) {
+                       prepare_to_wait(&mddev->sb_wait, &__wait,
+                                       TASK_UNINTERRUPTIBLE);
+                       if (!mddev->suspended)
+                               break;
+                       rcu_read_unlock();
+                       schedule();
+                       rcu_read_lock();
+               }
+               finish_wait(&mddev->sb_wait, &__wait);
+       }
+       atomic_inc(&mddev->active_io);
+       rcu_read_unlock();
+
+       if (!mddev->pers->make_request(mddev, bio)) {
+               atomic_dec(&mddev->active_io);
+               wake_up(&mddev->sb_wait);
+               goto check_suspended;
+       }
+
+       if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
+               wake_up(&mddev->sb_wait);
+}
+EXPORT_SYMBOL(md_handle_request);
+
 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 {
        const int rw = bio_data_dir(bio);
@@ -285,23 +316,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
                bio_endio(bio);
                return BLK_QC_T_NONE;
        }
-check_suspended:
-       rcu_read_lock();
-       if (mddev->suspended) {
-               DEFINE_WAIT(__wait);
-               for (;;) {
-                       prepare_to_wait(&mddev->sb_wait, &__wait,
-                                       TASK_UNINTERRUPTIBLE);
-                       if (!mddev->suspended)
-                               break;
-                       rcu_read_unlock();
-                       schedule();
-                       rcu_read_lock();
-               }
-               finish_wait(&mddev->sb_wait, &__wait);
-       }
-       atomic_inc(&mddev->active_io);
-       rcu_read_unlock();
 
        /*
         * save the sectors now since our bio can
@@ -310,20 +324,14 @@ check_suspended:
        sectors = bio_sectors(bio);
        /* bio could be mergeable after passing to underlayer */
        bio->bi_opf &= ~REQ_NOMERGE;
-       if (!mddev->pers->make_request(mddev, bio)) {
-               atomic_dec(&mddev->active_io);
-               wake_up(&mddev->sb_wait);
-               goto check_suspended;
-       }
+
+       md_handle_request(mddev, bio);
 
        cpu = part_stat_lock();
        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
        part_stat_unlock();
 
-       if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
-               wake_up(&mddev->sb_wait);
-
        return BLK_QC_T_NONE;
 }
 
@@ -439,16 +447,22 @@ static void md_submit_flush_data(struct work_struct *ws)
        struct mddev *mddev = container_of(ws, struct mddev, flush_work);
        struct bio *bio = mddev->flush_bio;
 
+       /*
+        * must reset flush_bio before calling into md_handle_request to avoid a
+        * deadlock, because other bios passed md_handle_request suspend check
+        * could wait for this and below md_handle_request could wait for those
+        * bios because of suspend check
+        */
+       mddev->flush_bio = NULL;
+       wake_up(&mddev->sb_wait);
+
        if (bio->bi_iter.bi_size == 0)
                /* an empty barrier - all done */
                bio_endio(bio);
        else {
                bio->bi_opf &= ~REQ_PREFLUSH;
-               mddev->pers->make_request(mddev, bio);
+               md_handle_request(mddev, bio);
        }
-
-       mddev->flush_bio = NULL;
-       wake_up(&mddev->sb_wait);
 }
 
 void md_flush_request(struct mddev *mddev, struct bio *bio)
index 561d22b9a9a8acc9479cabc389948753ffe703eb..d8287d3cd1bf81b048e90166d91afe76566202b3 100644 (file)
@@ -692,6 +692,7 @@ extern void md_stop_writes(struct mddev *mddev);
 extern int md_rdev_init(struct md_rdev *rdev);
 extern void md_rdev_clear(struct md_rdev *rdev);
 
+extern void md_handle_request(struct mddev *mddev, struct bio *bio);
 extern void mddev_suspend(struct mddev *mddev);
 extern void mddev_resume(struct mddev *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
index 4188a488114814aadf7df7b4c2ba07240b8e0408..928e24a071338ab6e1fe7668c8caa42b2803ccfe 100644 (file)
@@ -811,6 +811,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
                        spin_unlock(&head->batch_head->batch_lock);
                        goto unlock_out;
                }
+               /*
+                * We must assign batch_head of this stripe within the
+                * batch_lock, otherwise clear_batch_ready of batch head
+                * stripe could clear BATCH_READY bit of this stripe and
+                * this stripe->batch_head doesn't get assigned, which
+                * could confuse clear_batch_ready for this stripe
+                */
+               sh->batch_head = head->batch_head;
 
                /*
                 * at this point, head's BATCH_READY could be cleared, but we
@@ -818,8 +826,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
                 */
                list_add(&sh->batch_list, &head->batch_list);
                spin_unlock(&head->batch_head->batch_lock);
-
-               sh->batch_head = head->batch_head;
        } else {
                head->batch_head = head;
                sh->batch_head = head->batch_head;
@@ -4599,7 +4605,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
 
                set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
                                            (1 << STRIPE_PREREAD_ACTIVE) |
-                                           (1 << STRIPE_DEGRADED)),
+                                           (1 << STRIPE_DEGRADED) |
+                                           (1 << STRIPE_ON_UNPLUG_LIST)),
                              head_sh->state & (1 << STRIPE_INSYNC));
 
                sh->check_state = head_sh->check_state;
@@ -6568,14 +6575,17 @@ static ssize_t
 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
 {
        struct r5conf *conf;
-       unsigned long new;
+       unsigned int new;
        int err;
        struct r5worker_group *new_groups, *old_groups;
        int group_cnt, worker_cnt_per_group;
 
        if (len >= PAGE_SIZE)
                return -EINVAL;
-       if (kstrtoul(page, 10, &new))
+       if (kstrtouint(page, 10, &new))
+               return -EINVAL;
+       /* 8192 should be big enough */
+       if (new > 8192)
                return -EINVAL;
 
        err = mddev_lock(mddev);
index eed6c397d8400b0a25c57feb6ef23dc49eac71df..f8a808d45034e048f9b0bc2c764c51d95e22bdc9 100644 (file)
@@ -1797,12 +1797,19 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
         */
        switch (msg->msg[1]) {
        case CEC_MSG_GET_CEC_VERSION:
-       case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
        case CEC_MSG_ABORT:
        case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
-       case CEC_MSG_GIVE_PHYSICAL_ADDR:
        case CEC_MSG_GIVE_OSD_NAME:
+               /*
+                * These messages reply with a directed message, so ignore if
+                * the initiator is Unregistered.
+                */
+               if (!adap->passthrough && from_unregistered)
+                       return 0;
+               /* Fall through */
+       case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
        case CEC_MSG_GIVE_FEATURES:
+       case CEC_MSG_GIVE_PHYSICAL_ADDR:
                /*
                 * Skip processing these messages if the passthrough mode
                 * is on.
@@ -1810,7 +1817,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
                if (adap->passthrough)
                        goto skip_processing;
                /* Ignore if addressing is wrong */
-               if (is_broadcast || from_unregistered)
+               if (is_broadcast)
                        return 0;
                break;
 
index 2fcba16161685888164565bb24e0e917024092eb..9139d01ba7ed6c9470896dea8500a433d9c05240 100644 (file)
@@ -141,22 +141,39 @@ struct dvb_frontend_private {
 static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
                                        void (*release)(struct dvb_frontend *fe));
 
-static void dvb_frontend_free(struct kref *ref)
+static void __dvb_frontend_free(struct dvb_frontend *fe)
 {
-       struct dvb_frontend *fe =
-               container_of(ref, struct dvb_frontend, refcount);
        struct dvb_frontend_private *fepriv = fe->frontend_priv;
 
+       if (!fepriv)
+               return;
+
        dvb_free_device(fepriv->dvbdev);
 
        dvb_frontend_invoke_release(fe, fe->ops.release);
 
        kfree(fepriv);
+       fe->frontend_priv = NULL;
+}
+
+static void dvb_frontend_free(struct kref *ref)
+{
+       struct dvb_frontend *fe =
+               container_of(ref, struct dvb_frontend, refcount);
+
+       __dvb_frontend_free(fe);
 }
 
 static void dvb_frontend_put(struct dvb_frontend *fe)
 {
-       kref_put(&fe->refcount, dvb_frontend_free);
+       /*
+        * Check if the frontend was registered, as otherwise
+        * kref was not initialized yet.
+        */
+       if (fe->frontend_priv)
+               kref_put(&fe->refcount, dvb_frontend_free);
+       else
+               __dvb_frontend_free(fe);
 }
 
 static void dvb_frontend_get(struct dvb_frontend *fe)
index 224283fe100a8fe6f6a3f17f52cfbff2a723b15e..4d086a7248e9b2508905cd026038793dc7882241 100644 (file)
@@ -55,29 +55,57 @@ struct dib3000mc_state {
 
 static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg)
 {
-       u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff };
-       u8 rb[2];
        struct i2c_msg msg[2] = {
-               { .addr = state->i2c_addr >> 1, .flags = 0,        .buf = wb, .len = 2 },
-               { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 },
+               { .addr = state->i2c_addr >> 1, .flags = 0,        .len = 2 },
+               { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .len = 2 },
        };
+       u16 word;
+       u8 *b;
+
+       b = kmalloc(4, GFP_KERNEL);
+       if (!b)
+               return 0;
+
+       b[0] = (reg >> 8) | 0x80;
+       b[1] = reg;
+       b[2] = 0;
+       b[3] = 0;
+
+       msg[0].buf = b;
+       msg[1].buf = b + 2;
 
        if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
                dprintk("i2c read error on %d\n",reg);
 
-       return (rb[0] << 8) | rb[1];
+       word = (b[2] << 8) | b[3];
+       kfree(b);
+
+       return word;
 }
 
 static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val)
 {
-       u8 b[4] = {
-               (reg >> 8) & 0xff, reg & 0xff,
-               (val >> 8) & 0xff, val & 0xff,
-       };
        struct i2c_msg msg = {
-               .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
+               .addr = state->i2c_addr >> 1, .flags = 0, .len = 4
        };
-       return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+       int rc;
+       u8 *b;
+
+       b = kmalloc(4, GFP_KERNEL);
+       if (!b)
+               return -ENOMEM;
+
+       b[0] = reg >> 8;
+       b[1] = reg;
+       b[2] = val >> 8;
+       b[3] = val;
+
+       msg.buf = b;
+
+       rc = i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+       kfree(b);
+
+       return rc;
 }
 
 static int dib3000mc_identify(struct dib3000mc_state *state)
index 7bec3e028beec10e188fea4d9f53cc40556f8ddf..5553b89b804e7d4219d6c767b96fbdb5006cce1b 100644 (file)
@@ -753,13 +753,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
                                    struct i2c_adapter *i2c,
                                    unsigned int pll_desc_id)
 {
-       u8 b1 [] = { 0 };
-       struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD,
-                              .buf = b1, .len = 1 };
+       u8 *b1;
+       struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .len = 1 };
        struct dvb_pll_priv *priv = NULL;
        int ret;
        const struct dvb_pll_desc *desc;
 
+       b1 = kmalloc(1, GFP_KERNEL);
+       if (!b1)
+               return NULL;
+
+       b1[0] = 0;
+       msg.buf = b1;
+
        if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
            (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
                pll_desc_id = id[dvb_pll_devcount];
@@ -773,15 +779,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
                        fe->ops.i2c_gate_ctrl(fe, 1);
 
                ret = i2c_transfer (i2c, &msg, 1);
-               if (ret != 1)
+               if (ret != 1) {
+                       kfree(b1);
                        return NULL;
+               }
                if (fe->ops.i2c_gate_ctrl)
                             fe->ops.i2c_gate_ctrl(fe, 0);
        }
 
        priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
-       if (priv == NULL)
+       if (!priv) {
+               kfree(b1);
                return NULL;
+       }
 
        priv->pll_i2c_address = pll_addr;
        priv->i2c = i2c;
@@ -811,6 +821,8 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
                                "insmod option" : "autodetected");
        }
 
+       kfree(b1);
+
        return fe;
 }
 EXPORT_SYMBOL(dvb_pll_attach);
index 7e7cc49b867400093ba038baf9e14773970f15f4..3c4f7fa7b9d8ea06e7b1455ce3e0172d17322483 100644 (file)
@@ -112,7 +112,7 @@ config VIDEO_PXA27x
 
 config VIDEO_QCOM_CAMSS
        tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver"
-       depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
        depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
        select VIDEOBUF2_DMA_SG
        select V4L2_FWNODE
index b21b3c2dc77f2bb12f617f198a928a58cc17b592..b22d2dfcd3c29ec85c474948f67d0805571db3ab 100644 (file)
@@ -2660,7 +2660,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
  *
  * Return -EINVAL or zero on success
  */
-int vfe_set_selection(struct v4l2_subdev *sd,
+static int vfe_set_selection(struct v4l2_subdev *sd,
                             struct v4l2_subdev_pad_config *cfg,
                             struct v4l2_subdev_selection *sel)
 {
index 68933d20806338629a89bdde9a5b05219ef2f5ac..9b2a401a4891c49e1388783cbf4111bf6df26af6 100644 (file)
@@ -682,6 +682,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
                        hfi_session_abort(inst);
 
                load_scale_clocks(core);
+               INIT_LIST_HEAD(&inst->registeredbufs);
        }
 
        venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
index 1edf667d562a4df64a4806e947f192aa2a24357f..146ae6f25cdbb1eee53837aa1ea28c503163ed28 100644 (file)
@@ -172,7 +172,8 @@ u32 s5p_cec_get_status(struct s5p_cec_dev *cec)
 {
        u32 status = 0;
 
-       status = readb(cec->reg + S5P_CEC_STATUS_0);
+       status = readb(cec->reg + S5P_CEC_STATUS_0) & 0xf;
+       status |= (readb(cec->reg + S5P_CEC_TX_STAT1) & 0xf) << 4;
        status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8;
        status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16;
        status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24;
index 58d200e7c8382de8edc841b17b1022a2c15ddb85..8837e2678bdeb3507e4ebce501e5ee58216334ad 100644 (file)
@@ -92,7 +92,10 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
        dev_dbg(cec->dev, "irq received\n");
 
        if (status & CEC_STATUS_TX_DONE) {
-               if (status & CEC_STATUS_TX_ERROR) {
+               if (status & CEC_STATUS_TX_NACK) {
+                       dev_dbg(cec->dev, "CEC_STATUS_TX_NACK set\n");
+                       cec->tx = STATE_NACK;
+               } else if (status & CEC_STATUS_TX_ERROR) {
                        dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n");
                        cec->tx = STATE_ERROR;
                } else {
@@ -135,6 +138,12 @@ static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv)
                cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
                cec->tx = STATE_IDLE;
                break;
+       case STATE_NACK:
+               cec_transmit_done(cec->adap,
+                       CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_NACK,
+                       0, 1, 0, 0);
+               cec->tx = STATE_IDLE;
+               break;
        case STATE_ERROR:
                cec_transmit_done(cec->adap,
                        CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR,
index 8bcd8dc1aeb9fb299d5b821511c9b1eba02d3c6a..86ded522ef27319a27fb72bd9f5285be6ac1ad8f 100644 (file)
@@ -35,6 +35,7 @@
 #define CEC_STATUS_TX_TRANSFERRING     (1 << 1)
 #define CEC_STATUS_TX_DONE             (1 << 2)
 #define CEC_STATUS_TX_ERROR            (1 << 3)
+#define CEC_STATUS_TX_NACK             (1 << 4)
 #define CEC_STATUS_TX_BYTES            (0xFF << 8)
 #define CEC_STATUS_RX_RUNNING          (1 << 16)
 #define CEC_STATUS_RX_RECEIVING                (1 << 17)
@@ -55,6 +56,7 @@ enum cec_state {
        STATE_IDLE,
        STATE_BUSY,
        STATE_DONE,
+       STATE_NACK,
        STATE_ERROR
 };
 
index ed43a4212479ff22c7e3347f95e48a811e287925..129b558acc9214abda6ce249eb3d8e2095591196 100644 (file)
@@ -245,5 +245,5 @@ module_init(ir_sharp_decode_init);
 module_exit(ir_sharp_decode_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>");
+MODULE_AUTHOR("James Hogan <jhogan@kernel.org>");
 MODULE_DESCRIPTION("Sharp IR protocol decoder");
index 2e487f9a2cc3fb678aa173b93a9bb141cc23ba69..4983eeb39f369c961cfdf86b69a870d98727e736 100644 (file)
@@ -38,41 +38,74 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
 static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val)
 {
        struct i2c_msg msg[2] = {
-               { .addr = priv->cfg->i2c_address, .flags = 0,        .buf = &reg, .len = 1 },
-               { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val,  .len = 1 },
+               { .addr = priv->cfg->i2c_address, .flags = 0, .len = 1 },
+               { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .len = 1 },
        };
+       int rc = 0;
+       u8 *b;
+
+       b = kmalloc(2, GFP_KERNEL);
+       if (!b)
+               return -ENOMEM;
+
+       b[0] = reg;
+       b[1] = 0;
+
+       msg[0].buf = b;
+       msg[1].buf = b + 1;
 
        if (i2c_transfer(priv->i2c, msg, 2) != 2) {
                printk(KERN_WARNING "mt2060 I2C read failed\n");
-               return -EREMOTEIO;
+               rc = -EREMOTEIO;
        }
-       return 0;
+       *val = b[1];
+       kfree(b);
+
+       return rc;
 }
 
 // Writes a single register
 static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val)
 {
-       u8 buf[2] = { reg, val };
        struct i2c_msg msg = {
-               .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2
+               .addr = priv->cfg->i2c_address, .flags = 0, .len = 2
        };
+       u8 *buf;
+       int rc = 0;
+
+       buf = kmalloc(2, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       buf[0] = reg;
+       buf[1] = val;
+
+       msg.buf = buf;
 
        if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
                printk(KERN_WARNING "mt2060 I2C write failed\n");
-               return -EREMOTEIO;
+               rc = -EREMOTEIO;
        }
-       return 0;
+       kfree(buf);
+       return rc;
 }
 
 // Writes a set of consecutive registers
 static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
 {
        int rem, val_len;
-       u8 xfer_buf[16];
+       u8 *xfer_buf;
+       int rc = 0;
        struct i2c_msg msg = {
-               .addr = priv->cfg->i2c_address, .flags = 0, .buf = xfer_buf
+               .addr = priv->cfg->i2c_address, .flags = 0
        };
 
+       xfer_buf = kmalloc(16, GFP_KERNEL);
+       if (!xfer_buf)
+               return -ENOMEM;
+
+       msg.buf = xfer_buf;
+
        for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) {
                val_len = min_t(int, rem, priv->i2c_max_regs);
                msg.len = 1 + val_len;
@@ -81,11 +114,13 @@ static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
 
                if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
                        printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len);
-                       return -EREMOTEIO;
+                       rc = -EREMOTEIO;
+                       break;
                }
        }
 
-       return 0;
+       kfree(xfer_buf);
+       return rc;
 }
 
 // Initialisation sequences
index 5dba23ca2e5fe955cd8101909b2b2e20bbdf2e68..dc9bc1807fdfa52aede0748308d16f5c36c6c95f 100644 (file)
@@ -219,8 +219,17 @@ int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
 
        down_read(&mm->mmap_sem);
 
-       for (dar = addr; dar < addr + size; dar += page_size) {
-               if (!vma || dar < vma->vm_start || dar > vma->vm_end) {
+       vma = find_vma(mm, addr);
+       if (!vma) {
+               pr_err("Can't find vma for addr %016llx\n", addr);
+               rc = -EFAULT;
+               goto out;
+       }
+       /* get the size of the pages allocated */
+       page_size = vma_kernel_pagesize(vma);
+
+       for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) {
+               if (dar < vma->vm_start || dar >= vma->vm_end) {
                        vma = find_vma(mm, addr);
                        if (!vma) {
                                pr_err("Can't find vma for addr %016llx\n", addr);
index c8307e8b4c163977f7b4ebf8ed2e8991fc2393d7..0ccccbaf530d258a08acf633d34d9861c630ba64 100644 (file)
 #define MEI_DEV_ID_BXT_M      0x1A9A  /* Broxton M */
 #define MEI_DEV_ID_APL_I      0x5A9A  /* Apollo Lake I */
 
+#define MEI_DEV_ID_GLK        0x319A  /* Gemini Lake */
+
 #define MEI_DEV_ID_KBP        0xA2BA  /* Kaby Point */
 #define MEI_DEV_ID_KBP_2      0xA2BB  /* Kaby Point 2 */
 
index 4ff40d3196765a444ead85dabf8730fa2777a622..78b3172c8e6e298fa8f74c93db2850c4f96b95a8 100644 (file)
@@ -93,6 +93,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
 
+       {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
+
        {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
 
@@ -226,12 +228,15 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
 
        /*
-       * For not wake-able HW runtime pm framework
-       * can't be used on pci device level.
-       * Use domain runtime pm callbacks instead.
-       */
-       if (!pci_dev_run_wake(pdev))
-               mei_me_set_pm_domain(dev);
+        * ME maps runtime suspend/resume to D0i states,
+        * hence we need to go around native PCI runtime service which
+        * eventually brings the device into D3cold/hot state,
+        * but the mei device cannot wake up from D3 unlike from D0i3.
+        * To get around the PCI device native runtime pm,
+        * ME uses runtime pm domain handlers which take precedence
+        * over the driver's pm handlers.
+        */
+       mei_me_set_pm_domain(dev);
 
        if (mei_pg_is_enabled(dev))
                pm_runtime_put_noidle(&pdev->dev);
@@ -271,8 +276,7 @@ static void mei_me_shutdown(struct pci_dev *pdev)
        dev_dbg(&pdev->dev, "shutdown\n");
        mei_stop(dev);
 
-       if (!pci_dev_run_wake(pdev))
-               mei_me_unset_pm_domain(dev);
+       mei_me_unset_pm_domain(dev);
 
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
@@ -300,8 +304,7 @@ static void mei_me_remove(struct pci_dev *pdev)
        dev_dbg(&pdev->dev, "stop\n");
        mei_stop(dev);
 
-       if (!pci_dev_run_wake(pdev))
-               mei_me_unset_pm_domain(dev);
+       mei_me_unset_pm_domain(dev);
 
        mei_disable_interrupts(dev);
 
index e38a5f144373451fc87007ffc1cf4292059c5408..0566f9bfa7de6c68c44d245e7e1deec84c345f71 100644 (file)
@@ -144,12 +144,14 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
 
        /*
-       * For not wake-able HW runtime pm framework
-       * can't be used on pci device level.
-       * Use domain runtime pm callbacks instead.
-       */
-       if (!pci_dev_run_wake(pdev))
-               mei_txe_set_pm_domain(dev);
+        * TXE maps runtime suspend/resume to own power gating states,
+        * hence we need to go around native PCI runtime service which
+        * eventually brings the device into D3cold/hot state.
+        * But the TXE device cannot wake up from D3 unlike from own
+        * power gating. To get around PCI device native runtime pm,
+        * TXE uses runtime pm domain handlers which take precedence.
+        */
+       mei_txe_set_pm_domain(dev);
 
        pm_runtime_put_noidle(&pdev->dev);
 
@@ -186,8 +188,7 @@ static void mei_txe_shutdown(struct pci_dev *pdev)
        dev_dbg(&pdev->dev, "shutdown\n");
        mei_stop(dev);
 
-       if (!pci_dev_run_wake(pdev))
-               mei_txe_unset_pm_domain(dev);
+       mei_txe_unset_pm_domain(dev);
 
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
@@ -215,8 +216,7 @@ static void mei_txe_remove(struct pci_dev *pdev)
 
        mei_stop(dev);
 
-       if (!pci_dev_run_wake(pdev))
-               mei_txe_unset_pm_domain(dev);
+       mei_txe_unset_pm_domain(dev);
 
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
@@ -318,15 +318,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
        else
                ret = -EAGAIN;
 
-       /*
-        * If everything is okay we're about to enter PCI low
-        * power state (D3) therefor we need to disable the
-        * interrupts towards host.
-        * However if device is not wakeable we do not enter
-        * D-low state and we need to keep the interrupt kicking
-        */
-       if (!ret && pci_dev_run_wake(pdev))
-               mei_disable_interrupts(dev);
+       /* keep irq on we are staying in D0 */
 
        dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
 
index 29fc1e662891d7bc2157ff55740bd2f110f4e14d..2ad7b5c691569e37cd366425c62c6ae0514f3857 100644 (file)
@@ -1634,8 +1634,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
        }
 
        mqrq->areq.mrq = &brq->mrq;
-
-       mmc_queue_bounce_pre(mqrq);
 }
 
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1829,7 +1827,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
                brq = &mq_rq->brq;
                old_req = mmc_queue_req_to_req(mq_rq);
                type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
-               mmc_queue_bounce_post(mq_rq);
 
                switch (status) {
                case MMC_BLK_SUCCESS:
index a7eb623f8daa3e610891ffb30157dd5e4dd4e103..36217ad5e9b1fbddb4c5dedf6dd711374fc27497 100644 (file)
@@ -1286,6 +1286,23 @@ out_err:
        return err;
 }
 
+static void mmc_select_driver_type(struct mmc_card *card)
+{
+       int card_drv_type, drive_strength, drv_type;
+
+       card_drv_type = card->ext_csd.raw_driver_strength |
+                       mmc_driver_type_mask(0);
+
+       drive_strength = mmc_select_drive_strength(card,
+                                                  card->ext_csd.hs200_max_dtr,
+                                                  card_drv_type, &drv_type);
+
+       card->drive_strength = drive_strength;
+
+       if (drv_type)
+               mmc_set_driver_type(card->host, drv_type);
+}
+
 static int mmc_select_hs400es(struct mmc_card *card)
 {
        struct mmc_host *host = card->host;
@@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
                goto out_err;
        }
 
+       mmc_select_driver_type(card);
+
        /* Switch card to HS400 */
        val = EXT_CSD_TIMING_HS400 |
              card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
@@ -1374,23 +1393,6 @@ out_err:
        return err;
 }
 
-static void mmc_select_driver_type(struct mmc_card *card)
-{
-       int card_drv_type, drive_strength, drv_type;
-
-       card_drv_type = card->ext_csd.raw_driver_strength |
-                       mmc_driver_type_mask(0);
-
-       drive_strength = mmc_select_drive_strength(card,
-                                                  card->ext_csd.hs200_max_dtr,
-                                                  card_drv_type, &drv_type);
-
-       card->drive_strength = drive_strength;
-
-       if (drv_type)
-               mmc_set_driver_type(card->host, drv_type);
-}
-
 /*
  * For device supporting HS200 mode, the following sequence
  * should be done before executing the tuning process.
index affa7370ba827917a78cff55c4e503220ab1ebaa..0a4e77a5ba33fe7e0009ab1f7f97078a38e1559c 100644 (file)
@@ -23,8 +23,6 @@
 #include "core.h"
 #include "card.h"
 
-#define MMC_QUEUE_BOUNCESZ     65536
-
 /*
  * Prepare a MMC request. This just filters out odd stuff.
  */
@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
                queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
-{
-       unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
-
-       if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
-               return 0;
-
-       if (bouncesz > host->max_req_size)
-               bouncesz = host->max_req_size;
-       if (bouncesz > host->max_seg_size)
-               bouncesz = host->max_seg_size;
-       if (bouncesz > host->max_blk_count * 512)
-               bouncesz = host->max_blk_count * 512;
-
-       if (bouncesz <= 512)
-               return 0;
-
-       return bouncesz;
-}
-
 /**
  * mmc_init_request() - initialize the MMC-specific per-request data
  * @q: the request queue
@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req,
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;
 
-       if (card->bouncesz) {
-               mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
-               if (!mq_rq->bounce_buf)
-                       return -ENOMEM;
-               if (card->bouncesz > 512) {
-                       mq_rq->sg = mmc_alloc_sg(1, gfp);
-                       if (!mq_rq->sg)
-                               return -ENOMEM;
-                       mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
-                                                       gfp);
-                       if (!mq_rq->bounce_sg)
-                               return -ENOMEM;
-               }
-       } else {
-               mq_rq->bounce_buf = NULL;
-               mq_rq->bounce_sg = NULL;
-               mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
-               if (!mq_rq->sg)
-                       return -ENOMEM;
-       }
+       mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+       if (!mq_rq->sg)
+               return -ENOMEM;
 
        return 0;
 }
@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
 {
        struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
 
-       /* It is OK to kfree(NULL) so this will be smooth */
-       kfree(mq_rq->bounce_sg);
-       mq_rq->bounce_sg = NULL;
-
-       kfree(mq_rq->bounce_buf);
-       mq_rq->bounce_buf = NULL;
-
        kfree(mq_rq->sg);
        mq_rq->sg = NULL;
 }
@@ -265,18 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);
 
-       card->bouncesz = mmc_queue_calc_bouncesz(host);
-       if (card->bouncesz) {
-               blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
-               blk_queue_max_segments(mq->queue, card->bouncesz / 512);
-               blk_queue_max_segment_size(mq->queue, card->bouncesz);
-       } else {
-               blk_queue_bounce_limit(mq->queue, limit);
-               blk_queue_max_hw_sectors(mq->queue,
-                       min(host->max_blk_count, host->max_req_size / 512));
-               blk_queue_max_segments(mq->queue, host->max_segs);
-               blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-       }
+       blk_queue_bounce_limit(mq->queue, limit);
+       blk_queue_max_hw_sectors(mq->queue,
+               min(host->max_blk_count, host->max_req_size / 512));
+       blk_queue_max_segments(mq->queue, host->max_segs);
+       blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
        sema_init(&mq->thread_sem, 1);
 
@@ -365,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq)
  */
 unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
-       unsigned int sg_len;
-       size_t buflen;
-       struct scatterlist *sg;
        struct request *req = mmc_queue_req_to_req(mqrq);
-       int i;
-
-       if (!mqrq->bounce_buf)
-               return blk_rq_map_sg(mq->queue, req, mqrq->sg);
-
-       sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
-
-       mqrq->bounce_sg_len = sg_len;
-
-       buflen = 0;
-       for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
-               buflen += sg->length;
-
-       sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
-
-       return 1;
-}
-
-/*
- * If writing, bounce the data to the buffer before the request
- * is sent to the host driver
- */
-void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
-{
-       if (!mqrq->bounce_buf)
-               return;
-
-       if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
-               return;
-
-       sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-               mqrq->bounce_buf, mqrq->sg[0].length);
-}
-
-/*
- * If reading, bounce the data from the buffer after the request
- * has been handled by the host driver
- */
-void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
-{
-       if (!mqrq->bounce_buf)
-               return;
-
-       if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
-               return;
 
-       sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-               mqrq->bounce_buf, mqrq->sg[0].length);
+       return blk_rq_map_sg(mq->queue, req, mqrq->sg);
 }
index 04fc89360a7a1a5669d7d3a027edc2fd6fdbc1f6..f18d3f656baa510b6022e9f51efd3990ec5f194d 100644 (file)
@@ -49,9 +49,6 @@ enum mmc_drv_op {
 struct mmc_queue_req {
        struct mmc_blk_request  brq;
        struct scatterlist      *sg;
-       char                    *bounce_buf;
-       struct scatterlist      *bounce_sg;
-       unsigned int            bounce_sg_len;
        struct mmc_async_req    areq;
        enum mmc_drv_op         drv_op;
        int                     drv_op_result;
@@ -81,11 +78,8 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
 extern void mmc_cleanup_queue(struct mmc_queue *);
 extern void mmc_queue_suspend(struct mmc_queue *);
 extern void mmc_queue_resume(struct mmc_queue *);
-
 extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
                                     struct mmc_queue_req *);
-extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
-extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
 extern int mmc_access_rpmb(struct mmc_queue *);
 
index 02179ed2a40d7a87d0d1ae7ed43e95388ef36a72..8c15637178ff3cec73b637633710f84e4dd93a00 100644 (file)
@@ -5,7 +5,7 @@
 comment "MMC/SD/SDIO Host Controller Drivers"
 
 config MMC_DEBUG
-       bool "MMC host drivers debugginG"
+       bool "MMC host drivers debugging"
        depends on MMC != n
        help
          This is an option for use by developers; most people should
index b9cc9599879978972b4c8c96f9dbddc26caebb2f..eee08d81b24214c3ea0c5555fa1f1f419249f9d6 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright (C) 2016 Cavium Inc.
  */
+#include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include <linux/mmc/mmc.h>
@@ -149,8 +150,11 @@ error:
        for (i = 0; i < CAVIUM_MAX_MMC; i++) {
                if (host->slot[i])
                        cvm_mmc_of_slot_remove(host->slot[i]);
-               if (host->slot_pdev[i])
+               if (host->slot_pdev[i]) {
+                       get_device(&host->slot_pdev[i]->dev);
                        of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
+                       put_device(&host->slot_pdev[i]->dev);
+               }
        }
        clk_disable_unprepare(host->clk);
        return ret;
index 27fb625cbcf3ff459a62668a93c80a30883fbe21..fbd29f00fca05e5dc46e09d1aa10c9f619283097 100644 (file)
@@ -1038,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
         */
        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
                     MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
-                    MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF;
+                    MMC_CAP_3_3V_DDR;
 
        if (host->use_sg)
                mmc->max_segs = 16;
index c885c2d4b90418a93519ab05a989a7d5c77efd76..85745ef179e22b2e49e4b23e15df311891f192dc 100644 (file)
@@ -531,8 +531,7 @@ static int meson_mmc_clk_init(struct meson_host *host)
        div->shift = __ffs(CLK_DIV_MASK);
        div->width = __builtin_popcountl(CLK_DIV_MASK);
        div->hw.init = &init;
-       div->flags = (CLK_DIVIDER_ONE_BASED |
-                     CLK_DIVIDER_ROUND_CLOSEST);
+       div->flags = CLK_DIVIDER_ONE_BASED;
 
        clk = devm_clk_register(host->dev, &div->hw);
        if (WARN_ON(IS_ERR(clk)))
@@ -717,6 +716,22 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
 static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
        struct meson_host *host = mmc_priv(mmc);
+       int ret;
+
+       /*
+        * If this is the initial tuning, try to get a sane Rx starting
+        * phase before doing the actual tuning.
+        */
+       if (!mmc->doing_retune) {
+               ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
+
+               if (ret)
+                       return ret;
+       }
+
+       ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
+       if (ret)
+               return ret;
 
        return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
 }
@@ -746,6 +761,11 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        case MMC_POWER_UP:
                if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
+               /* Reset phases */
+               clk_set_phase(host->rx_clk, 0);
+               clk_set_phase(host->tx_clk, 270);
+
                break;
 
        case MMC_POWER_ON:
@@ -759,8 +779,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                                host->vqmmc_enabled = true;
                }
 
-               /* Reset rx phase */
-               clk_set_phase(host->rx_clk, 0);
                break;
        }
 
index 59ab194cb0099b19dcaa247977bc9ccc7c8175e1..c763b404510f3a29d3864290297b22f596701cc3 100644 (file)
@@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev)
 
        pxamci_init_ocr(host);
 
-       /*
-        * This architecture used to disable bounce buffers through its
-        * defconfig, now it is done at runtime as a host property.
-        */
-       mmc->caps = MMC_CAP_NO_BOUNCE_BUFF;
+       mmc->caps = 0;
        host->cmdat = 0;
        if (!cpu_is_pxa25x()) {
                mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
index bbaddf18a1b3c4022592f631878441ad19a1fca2..67d787fa330651738ce8c2bf4e58fd8eebcf6e2a 100644 (file)
@@ -392,6 +392,7 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
 
 enum {
        INTEL_DSM_FNS           =  0,
+       INTEL_DSM_V18_SWITCH    =  3,
        INTEL_DSM_DRV_STRENGTH  =  9,
        INTEL_DSM_D3_RETUNE     = 10,
 };
@@ -447,6 +448,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
        int err;
        u32 val;
 
+       intel_host->d3_retune = true;
+
        err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
        if (err) {
                pr_debug("%s: DSM not supported, error %d\n",
@@ -557,6 +560,19 @@ static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
        sdhci_writel(host, val, INTEL_HS400_ES_REG);
 }
 
+static void sdhci_intel_voltage_switch(struct sdhci_host *host)
+{
+       struct sdhci_pci_slot *slot = sdhci_priv(host);
+       struct intel_host *intel_host = sdhci_pci_priv(slot);
+       struct device *dev = &slot->chip->pdev->dev;
+       u32 result = 0;
+       int err;
+
+       err = intel_dsm(intel_host, dev, INTEL_DSM_V18_SWITCH, &result);
+       pr_debug("%s: %s DSM error %d result %u\n",
+                mmc_hostname(host->mmc), __func__, err, result);
+}
+
 static const struct sdhci_ops sdhci_intel_byt_ops = {
        .set_clock              = sdhci_set_clock,
        .set_power              = sdhci_intel_set_power,
@@ -565,6 +581,7 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
        .reset                  = sdhci_reset,
        .set_uhs_signaling      = sdhci_set_uhs_signaling,
        .hw_reset               = sdhci_pci_hw_reset,
+       .voltage_switch         = sdhci_intel_voltage_switch,
 };
 
 static void byt_read_dsm(struct sdhci_pci_slot *slot)
index 2eec2e652c53da9c1f0b9c6156dcdc9778d82d68..0842bbc2d7ad340011a70f43409d2503cf662ff7 100644 (file)
@@ -466,6 +466,7 @@ static int xenon_probe(struct platform_device *pdev)
 {
        struct sdhci_pltfm_host *pltfm_host;
        struct sdhci_host *host;
+       struct xenon_priv *priv;
        int err;
 
        host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata,
@@ -474,6 +475,7 @@ static int xenon_probe(struct platform_device *pdev)
                return PTR_ERR(host);
 
        pltfm_host = sdhci_priv(host);
+       priv = sdhci_pltfm_priv(pltfm_host);
 
        /*
         * Link Xenon specific mmc_host_ops function,
@@ -491,9 +493,20 @@ static int xenon_probe(struct platform_device *pdev)
        if (err)
                goto free_pltfm;
 
+       priv->axi_clk = devm_clk_get(&pdev->dev, "axi");
+       if (IS_ERR(priv->axi_clk)) {
+               err = PTR_ERR(priv->axi_clk);
+               if (err == -EPROBE_DEFER)
+                       goto err_clk;
+       } else {
+               err = clk_prepare_enable(priv->axi_clk);
+               if (err)
+                       goto err_clk;
+       }
+
        err = mmc_of_parse(host->mmc);
        if (err)
-               goto err_clk;
+               goto err_clk_axi;
 
        sdhci_get_of_property(pdev);
 
@@ -502,11 +515,11 @@ static int xenon_probe(struct platform_device *pdev)
        /* Xenon specific dt parse */
        err = xenon_probe_dt(pdev);
        if (err)
-               goto err_clk;
+               goto err_clk_axi;
 
        err = xenon_sdhc_prepare(host);
        if (err)
-               goto err_clk;
+               goto err_clk_axi;
 
        pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
@@ -527,6 +540,8 @@ remove_sdhc:
        pm_runtime_disable(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
        xenon_sdhc_unprepare(host);
+err_clk_axi:
+       clk_disable_unprepare(priv->axi_clk);
 err_clk:
        clk_disable_unprepare(pltfm_host->clk);
 free_pltfm:
@@ -538,6 +553,7 @@ static int xenon_remove(struct platform_device *pdev)
 {
        struct sdhci_host *host = platform_get_drvdata(pdev);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
 
        pm_runtime_get_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
@@ -546,7 +562,7 @@ static int xenon_remove(struct platform_device *pdev)
        sdhci_remove_host(host, 0);
 
        xenon_sdhc_unprepare(host);
-
+       clk_disable_unprepare(priv->axi_clk);
        clk_disable_unprepare(pltfm_host->clk);
 
        sdhci_pltfm_free(pdev);
index 2bc0510c07696a11015c8e3bcee8c3497bd67384..9994995c7c56867bd24835d86bd1343e8fa3b770 100644 (file)
@@ -83,6 +83,7 @@ struct xenon_priv {
        unsigned char   bus_width;
        unsigned char   timing;
        unsigned int    clock;
+       struct clk      *axi_clk;
 
        int             phy_type;
        /*
index 12cf8288d6635eafef4677630d669c1a60238b45..a7293e186e03fc44ccb271405e99fbfaef06d770 100644 (file)
@@ -129,50 +129,6 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
 
 #define CMDREQ_TIMEOUT 5000
 
-#ifdef CONFIG_MMC_DEBUG
-
-#define STATUS_TO_TEXT(a, status, i) \
-       do { \
-               if ((status) & TMIO_STAT_##a) { \
-                       if ((i)++) \
-                               printk(KERN_DEBUG " | "); \
-                       printk(KERN_DEBUG #a); \
-               } \
-       } while (0)
-
-static void pr_debug_status(u32 status)
-{
-       int i = 0;
-
-       pr_debug("status: %08x = ", status);
-       STATUS_TO_TEXT(CARD_REMOVE, status, i);
-       STATUS_TO_TEXT(CARD_INSERT, status, i);
-       STATUS_TO_TEXT(SIGSTATE, status, i);
-       STATUS_TO_TEXT(WRPROTECT, status, i);
-       STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
-       STATUS_TO_TEXT(CARD_INSERT_A, status, i);
-       STATUS_TO_TEXT(SIGSTATE_A, status, i);
-       STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
-       STATUS_TO_TEXT(STOPBIT_ERR, status, i);
-       STATUS_TO_TEXT(ILL_FUNC, status, i);
-       STATUS_TO_TEXT(CMD_BUSY, status, i);
-       STATUS_TO_TEXT(CMDRESPEND, status, i);
-       STATUS_TO_TEXT(DATAEND, status, i);
-       STATUS_TO_TEXT(CRCFAIL, status, i);
-       STATUS_TO_TEXT(DATATIMEOUT, status, i);
-       STATUS_TO_TEXT(CMDTIMEOUT, status, i);
-       STATUS_TO_TEXT(RXOVERFLOW, status, i);
-       STATUS_TO_TEXT(TXUNDERRUN, status, i);
-       STATUS_TO_TEXT(RXRDY, status, i);
-       STATUS_TO_TEXT(TXRQ, status, i);
-       STATUS_TO_TEXT(ILL_ACCESS, status, i);
-       printk("\n");
-}
-
-#else
-#define pr_debug_status(s)  do { } while (0)
-#endif
-
 static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 {
        struct tmio_mmc_host *host = mmc_priv(mmc);
@@ -762,9 +718,6 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
        status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
        ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;
 
-       pr_debug_status(status);
-       pr_debug_status(ireg);
-
        /* Clear the status except the interrupt status */
        sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);
 
index 5736b0c90b339b6bc3e8e1ae3189341910b958f2..a308e707392d595902b77a03e2078ffe8da97d9e 100644 (file)
@@ -581,6 +581,14 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
                slave->mtd.erasesize = parent->erasesize;
        }
 
+       /*
+        * Slave erasesize might differ from the master one if the master
+        * exposes several regions with different erasesize. Adjust
+        * wr_alignment accordingly.
+        */
+       if (!(slave->mtd.flags & MTD_NO_ERASE))
+               wr_alignment = slave->mtd.erasesize;
+
        tmp = slave->offset;
        remainder = do_div(tmp, wr_alignment);
        if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
index 146af82183145d9d5164665aeaccdf789c4cca98..8268636675efc8b3d81959f0f1911a145c42fcc1 100644 (file)
@@ -363,7 +363,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
        size += (req->ecc.strength + 1) * sizeof(u16);
        /* Reserve space for mu, dmu and delta. */
        size = ALIGN(size, sizeof(s32));
-       size += (req->ecc.strength + 1) * sizeof(s32);
+       size += (req->ecc.strength + 1) * sizeof(s32) * 3;
 
        user = kzalloc(size, GFP_KERNEL);
        if (!user)
index c3bb358ef01eee5b7c7dc0b0ad72b9a3e47f5220..5796468db653450494876bcf6c5da772f7f6bd3e 100644 (file)
@@ -707,7 +707,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
        }
        res = clk_prepare_enable(host->clk);
        if (res)
-               goto err_exit1;
+               goto err_put_clk;
 
        nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
        nand_chip->dev_ready = lpc32xx_nand_device_ready;
@@ -814,6 +814,7 @@ err_exit3:
                dma_release_channel(host->dma_chan);
 err_exit2:
        clk_disable_unprepare(host->clk);
+err_put_clk:
        clk_put(host->clk);
 err_exit1:
        lpc32xx_wp_enable(host);
index bcc8cef1c615c5b0336f5310f56ee29a9c44e88a..12edaae17d81f2228eefcc50b9d5de7433119987 100644 (file)
@@ -2668,7 +2668,7 @@ static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
 static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
                             struct mtd_oob_ops *ops)
 {
-       int chipnr, realpage, page, blockmask, column;
+       int chipnr, realpage, page, column;
        struct nand_chip *chip = mtd_to_nand(mtd);
        uint32_t writelen = ops->len;
 
@@ -2704,7 +2704,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
 
        realpage = (int)(to >> chip->page_shift);
        page = realpage & chip->pagemask;
-       blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
 
        /* Invalidate the page cache, when we write to the cached page */
        if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
index cf1d4a15e10a63394b0410f4349e711f602c6c5d..19c000722cbc86f3246c0b84055eca92db331424 100644 (file)
@@ -1784,7 +1784,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
  * @nor:       pointer to a 'struct spi_nor'
  * @addr:      offset in the SFDP area to start reading data from
  * @len:       number of bytes to read
- * @buf:       buffer where the SFDP data are copied into
+ * @buf:       buffer where the SFDP data are copied into (dma-safe memory)
  *
  * Whatever the actual numbers of bytes for address and dummy cycles are
  * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
@@ -1829,6 +1829,36 @@ read_err:
        return ret;
 }
 
+/**
+ * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
+ * @nor:       pointer to a 'struct spi_nor'
+ * @addr:      offset in the SFDP area to start reading data from
+ * @len:       number of bytes to read
+ * @buf:       buffer where the SFDP data are copied into
+ *
+ * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not
+ * guaranteed to be dma-safe.
+ *
+ * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
+ *          otherwise.
+ */
+static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
+                                       size_t len, void *buf)
+{
+       void *dma_safe_buf;
+       int ret;
+
+       dma_safe_buf = kmalloc(len, GFP_KERNEL);
+       if (!dma_safe_buf)
+               return -ENOMEM;
+
+       ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
+       memcpy(buf, dma_safe_buf, len);
+       kfree(dma_safe_buf);
+
+       return ret;
+}
+
 struct sfdp_parameter_header {
        u8              id_lsb;
        u8              minor;
@@ -2101,7 +2131,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
                    bfpt_header->length * sizeof(u32));
        addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
        memset(&bfpt, 0, sizeof(bfpt));
-       err = spi_nor_read_sfdp(nor,  addr, len, &bfpt);
+       err = spi_nor_read_sfdp_dma_unsafe(nor,  addr, len, &bfpt);
        if (err < 0)
                return err;
 
@@ -2127,6 +2157,15 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
        params->size = bfpt.dwords[BFPT_DWORD(2)];
        if (params->size & BIT(31)) {
                params->size &= ~BIT(31);
+
+               /*
+                * Prevent overflows on params->size. Anyway, a NOR of 2^64
+                * bits is unlikely to exist so this error probably means
+                * the BFPT we are reading is corrupted/wrong.
+                */
+               if (params->size > 63)
+                       return -EINVAL;
+
                params->size = 1ULL << params->size;
        } else {
                params->size++;
@@ -2243,7 +2282,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
        int i, err;
 
        /* Get the SFDP header. */
-       err = spi_nor_read_sfdp(nor, 0, sizeof(header), &header);
+       err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
        if (err < 0)
                return err;
 
index 13f0f219d8aa83ab0ce52823f6ddb697d58c316b..a13a4896a8bddad19ae48f8c58bbaf2f3c8dce84 100644 (file)
 /* FLEXCAN hardware feature flags
  *
  * Below is some version info we got:
- *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT Memory err RTR re-
- *                                Filter? connected?  detection  ception in MB
- *   MX25  FlexCAN2  03.00.00.00     no        no         no        no
- *   MX28  FlexCAN2  03.00.04.00    yes       yes         no        no
- *   MX35  FlexCAN2  03.00.00.00     no        no         no        no
- *   MX53  FlexCAN2  03.00.00.00    yes        no         no        no
- *   MX6s  FlexCAN3  10.00.12.00    yes       yes         no       yes
- *   VF610 FlexCAN3  ?               no       yes        yes       yes?
+ *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
+ *                                Filter? connected?  Passive detection  ception in MB
+ *   MX25  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX28  FlexCAN2  03.00.04.00    yes       yes        no       no        no
+ *   MX35  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX53  FlexCAN2  03.00.00.00    yes        no        no       no        no
+ *   MX6s  FlexCAN3  10.00.12.00    yes       yes        no       no       yes
+ *   VF610 FlexCAN3  ?               no       yes         ?      yes       yes?
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
-#define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_BROKEN_WERR_STATE        BIT(1) /* [TR]WRN_INT not connected */
 #define FLEXCAN_QUIRK_DISABLE_RXFG     BIT(2) /* Disable RX FIFO Global mask */
 #define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
 #define FLEXCAN_QUIRK_DISABLE_MECR     BIT(4) /* Disable Memory error detection */
 #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP        BIT(5) /* Use timestamp based offloading */
+#define FLEXCAN_QUIRK_BROKEN_PERR_STATE        BIT(6) /* No interrupt for error passive */
 
 /* Structure of the message buffer */
 struct flexcan_mb {
@@ -281,14 +282,17 @@ struct flexcan_priv {
 };
 
 static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
-       .quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE,
+       .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+               FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
 
-static const struct flexcan_devtype_data fsl_imx28_devtype_data;
+static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
+       .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+};
 
 static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
        .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
-               FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+               FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
 
 static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
@@ -335,6 +339,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
 }
 #endif
 
+static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
+{
+       struct flexcan_regs __iomem *regs = priv->regs;
+       u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
+
+       flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
+static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
+{
+       struct flexcan_regs __iomem *regs = priv->regs;
+       u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
+
+       flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
 static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
 {
        if (!priv->reg_xceiver)
@@ -713,6 +733,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
        struct flexcan_regs __iomem *regs = priv->regs;
        irqreturn_t handled = IRQ_NONE;
        u32 reg_iflag1, reg_esr;
+       enum can_state last_state = priv->can.state;
 
        reg_iflag1 = flexcan_read(&regs->iflag1);
 
@@ -765,8 +786,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
                flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
        }
 
-       /* state change interrupt */
-       if (reg_esr & FLEXCAN_ESR_ERR_STATE)
+       /* state change interrupt or broken error state quirk fix is enabled */
+       if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
+           (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+                                          FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
                flexcan_irq_state(dev, reg_esr);
 
        /* bus error IRQ - handle if bus error reporting is activated */
@@ -774,6 +797,44 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
            (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
                flexcan_irq_bus_err(dev, reg_esr);
 
+       /* availability of error interrupt among state transitions in case
+        * bus error reporting is de-activated and
+        * FLEXCAN_QUIRK_BROKEN_PERR_STATE is enabled:
+        *  +--------------------------------------------------------------+
+        *  | +----------------------------------------------+ [stopped /  |
+        *  | |                                              |  sleeping] -+
+        *  +-+-> active <-> warning <-> passive -> bus off -+
+        *        ___________^^^^^^^^^^^^_______________________________
+        *        disabled(1)  enabled             disabled
+        *
+        * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled
+        */
+       if ((last_state != priv->can.state) &&
+           (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
+           !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
+               switch (priv->can.state) {
+               case CAN_STATE_ERROR_ACTIVE:
+                       if (priv->devtype_data->quirks &
+                           FLEXCAN_QUIRK_BROKEN_WERR_STATE)
+                               flexcan_error_irq_enable(priv);
+                       else
+                               flexcan_error_irq_disable(priv);
+                       break;
+
+               case CAN_STATE_ERROR_WARNING:
+                       flexcan_error_irq_enable(priv);
+                       break;
+
+               case CAN_STATE_ERROR_PASSIVE:
+               case CAN_STATE_BUS_OFF:
+                       flexcan_error_irq_disable(priv);
+                       break;
+
+               default:
+                       break;
+               }
+       }
+
        return handled;
 }
 
@@ -887,7 +948,7 @@ static int flexcan_chip_start(struct net_device *dev)
         * on most Flexcan cores, too. Otherwise we don't get
         * any error warning or passive interrupts.
         */
-       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_ERR_STATE ||
+       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
            priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
                reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
        else
index 68ef0a4cd82153cd65699ab0a4d80a5a201393e6..b0c80859f7467ef32a2683a506757c41ad13cb28 100644 (file)
@@ -342,7 +342,7 @@ static int sun4i_can_start(struct net_device *dev)
 
        /* enter the selected mode */
        mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
-       if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
                mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
        else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
                mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
@@ -811,7 +811,6 @@ static int sun4ican_probe(struct platform_device *pdev)
        priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
                                       CAN_CTRLMODE_LISTENONLY |
                                       CAN_CTRLMODE_LOOPBACK |
-                                      CAN_CTRLMODE_PRESUME_ACK |
                                       CAN_CTRLMODE_3_SAMPLES;
        priv->base = addr;
        priv->clk = clk;
index be928ce62d32e01c9d6ad9ac1fbcb46916f492ec..9fdb0f0bfa06a00a2ade5e74daef15798ab7d4a1 100644 (file)
@@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
                }
 
                cf->can_id = id & ESD_IDMASK;
-               cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
+               cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
 
                if (id & ESD_EXTID)
                        cf->can_id |= CAN_EFF_FLAG;
index afcc1312dbaf8f67bce640e17dbc20c06592dc45..68ac3e88a8cecbe5b4a58da8491756ad5c26039a 100644 (file)
@@ -375,6 +375,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
 
                gs_free_tx_context(txc);
 
+               atomic_dec(&dev->active_tx_urbs);
+
                netif_wake_queue(netdev);
        }
 
@@ -463,14 +465,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
                          urb->transfer_buffer_length,
                          urb->transfer_buffer,
                          urb->transfer_dma);
-
-       atomic_dec(&dev->active_tx_urbs);
-
-       if (!netif_device_present(netdev))
-               return;
-
-       if (netif_queue_stopped(netdev))
-               netif_wake_queue(netdev);
 }
 
 static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
index 18cc529fb807a4ad88fc06a201b18456040c5859..9b18d96ef52633ab34bb5ff39f4f643023dc308a 100644 (file)
@@ -137,6 +137,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
 #define CMD_RESET_ERROR_COUNTER                49
 #define CMD_TX_ACKNOWLEDGE             50
 #define CMD_CAN_ERROR_EVENT            51
+#define CMD_FLUSH_QUEUE_REPLY          68
 
 #define CMD_LEAF_USB_THROTTLE          77
 #define CMD_LEAF_LOG_MESSAGE           106
@@ -1301,6 +1302,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
                        goto warn;
                break;
 
+       case CMD_FLUSH_QUEUE_REPLY:
+               if (dev->family != KVASER_LEAF)
+                       goto warn;
+               break;
+
        default:
 warn:          dev_warn(dev->udev->dev.parent,
                         "Unhandled message (%d)\n", msg->id);
@@ -1609,7 +1615,8 @@ static int kvaser_usb_close(struct net_device *netdev)
        if (err)
                netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
 
-       if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
+       err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
+       if (err)
                netdev_warn(netdev, "Cannot reset card, error %d\n", err);
 
        err = kvaser_usb_stop_chip(priv);
index dce7fa57eb553a477faadd3f15d6f0e1dcf394af..f123ed57630d59815156c46b750d987bb0171966 100644 (file)
@@ -214,8 +214,14 @@ static int mv88e6060_setup(struct dsa_switch *ds)
 
 static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
 {
-       /* Use the same MAC Address as FD Pause frames for all ports */
-       REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]);
+       u16 val = addr[0] << 8 | addr[1];
+
+       /* The multicast bit is always transmitted as a zero, so the switch uses
+        * bit 8 for "DiffAddr", where 0 means all ports transmit the same SA.
+        */
+       val &= 0xfeff;
+
+       REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, val);
        REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
        REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
 
index c6678aa9b4ef0716890f8ace36cda4cf609de045..d74c7335c512df11d75dbfc8c89e030c51ad1ee8 100644 (file)
@@ -1100,6 +1100,10 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
        };
        int i, err;
 
+       /* DSA and CPU ports have to be members of multiple vlans */
+       if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+               return 0;
+
        if (!vid_begin)
                return -EOPNOTSUPP;
 
@@ -3947,7 +3951,9 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
        if (chip->irq > 0) {
                if (chip->info->g2_irqs > 0)
                        mv88e6xxx_g2_irq_free(chip);
+               mutex_lock(&chip->reg_lock);
                mv88e6xxx_g1_irq_free(chip);
+               mutex_unlock(&chip->reg_lock);
        }
 }
 
index b1212debc2e1c993f662ee35dcbbf6b5e5e3ac54..967020fb26ee17a6995a7ae11df668e8ab4fe9f0 100644 (file)
@@ -742,8 +742,8 @@ static void ena_get_channels(struct net_device *netdev,
 {
        struct ena_adapter *adapter = netdev_priv(netdev);
 
-       channels->max_rx = ENA_MAX_NUM_IO_QUEUES;
-       channels->max_tx = ENA_MAX_NUM_IO_QUEUES;
+       channels->max_rx = adapter->num_queues;
+       channels->max_tx = adapter->num_queues;
        channels->max_other = 0;
        channels->max_combined = 0;
        channels->rx_count = adapter->num_queues;
index f7dc22f65d9f64ac50cd641d30e4d83f88fe5bba..c6bd5e24005d02a6c7098cdaf492e5152eac2c08 100644 (file)
@@ -966,7 +966,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
                u64_stats_update_begin(&rx_ring->syncp);
                rx_ring->rx_stats.bad_csum++;
                u64_stats_update_end(&rx_ring->syncp);
-               netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+               netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
                          "RX IPv4 header checksum error\n");
                return;
        }
@@ -979,7 +979,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
                        u64_stats_update_begin(&rx_ring->syncp);
                        rx_ring->rx_stats.bad_csum++;
                        u64_stats_update_end(&rx_ring->syncp);
-                       netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+                       netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
                                  "RX L4 checksum error\n");
                        skb->ip_summed = CHECKSUM_NONE;
                        return;
@@ -3064,7 +3064,8 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
        if (ena_dev->mem_bar)
                devm_iounmap(&pdev->dev, ena_dev->mem_bar);
 
-       devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+       if (ena_dev->reg_bar)
+               devm_iounmap(&pdev->dev, ena_dev->reg_bar);
 
        release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
        pci_release_selected_regions(pdev, release_bars);
index 214986436ece5a103e6452218cf34719d223b820..57e796870595bb9a305a7579154b0dbd1cbeec60 100644 (file)
 
 #define AQ_CFG_FORCE_LEGACY_INT 0U
 
-#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF   1U
-#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU
+#define AQ_CFG_INTERRUPT_MODERATION_OFF                0
+#define AQ_CFG_INTERRUPT_MODERATION_ON         1
+#define AQ_CFG_INTERRUPT_MODERATION_AUTO       0xFFFFU
+
+#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2)
+
 #define AQ_CFG_IRQ_MASK                      0x1FFU
 
 #define AQ_CFG_VECS_MAX   8U
 
 #define AQ_CFG_SKB_FRAGS_MAX   32U
 
+/* Number of descriptors available in one ring to resume this ring queue
+ */
+#define AQ_CFG_RESTART_DESC_THRES   (AQ_CFG_SKB_FRAGS_MAX * 2)
+
 #define AQ_CFG_NAPI_WEIGHT     64U
 
 #define AQ_CFG_MULTICAST_ADDRESS_MAX     32U
index a761e91471dfd264f3aba944e5324f86d4cd9e12..d5e99b46887061a216652ca150897bba49f16223 100644 (file)
@@ -56,10 +56,6 @@ aq_ethtool_set_link_ksettings(struct net_device *ndev,
        return aq_nic_set_link_ksettings(aq_nic, cmd);
 }
 
-/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */
-static const unsigned int aq_ethtool_stat_queue_lines = 5U;
-static const unsigned int aq_ethtool_stat_queue_chars =
-       5U * ETH_GSTRING_LEN;
 static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
        "InPackets",
        "InUCast",
@@ -83,56 +79,26 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
        "InOctetsDma",
        "OutOctetsDma",
        "InDroppedDma",
-       "Queue[0] InPackets",
-       "Queue[0] OutPackets",
-       "Queue[0] InJumboPackets",
-       "Queue[0] InLroPackets",
-       "Queue[0] InErrors",
-       "Queue[1] InPackets",
-       "Queue[1] OutPackets",
-       "Queue[1] InJumboPackets",
-       "Queue[1] InLroPackets",
-       "Queue[1] InErrors",
-       "Queue[2] InPackets",
-       "Queue[2] OutPackets",
-       "Queue[2] InJumboPackets",
-       "Queue[2] InLroPackets",
-       "Queue[2] InErrors",
-       "Queue[3] InPackets",
-       "Queue[3] OutPackets",
-       "Queue[3] InJumboPackets",
-       "Queue[3] InLroPackets",
-       "Queue[3] InErrors",
-       "Queue[4] InPackets",
-       "Queue[4] OutPackets",
-       "Queue[4] InJumboPackets",
-       "Queue[4] InLroPackets",
-       "Queue[4] InErrors",
-       "Queue[5] InPackets",
-       "Queue[5] OutPackets",
-       "Queue[5] InJumboPackets",
-       "Queue[5] InLroPackets",
-       "Queue[5] InErrors",
-       "Queue[6] InPackets",
-       "Queue[6] OutPackets",
-       "Queue[6] InJumboPackets",
-       "Queue[6] InLroPackets",
-       "Queue[6] InErrors",
-       "Queue[7] InPackets",
-       "Queue[7] OutPackets",
-       "Queue[7] InJumboPackets",
-       "Queue[7] InLroPackets",
-       "Queue[7] InErrors",
+};
+
+static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
+       "Queue[%d] InPackets",
+       "Queue[%d] OutPackets",
+       "Queue[%d] Restarts",
+       "Queue[%d] InJumboPackets",
+       "Queue[%d] InLroPackets",
+       "Queue[%d] InErrors",
 };
 
 static void aq_ethtool_stats(struct net_device *ndev,
                             struct ethtool_stats *stats, u64 *data)
 {
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
 
-/* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */
-       BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8);
-       memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64));
+       memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
+                               ARRAY_SIZE(aq_ethtool_queue_stat_names) *
+                               cfg->vecs) * sizeof(u64));
        aq_nic_get_stats(aq_nic, data);
 }
 
@@ -154,8 +120,8 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
 
        strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
                sizeof(drvinfo->bus_info));
-       drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) -
-               (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines;
+       drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
+               cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
        drvinfo->testinfo_len = 0;
        drvinfo->regdump_len = regs_count;
        drvinfo->eedump_len = 0;
@@ -164,14 +130,25 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
 static void aq_ethtool_get_strings(struct net_device *ndev,
                                   u32 stringset, u8 *data)
 {
+       int i, si;
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
        struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
-
-       if (stringset == ETH_SS_STATS)
-               memcpy(data, *aq_ethtool_stat_names,
-                      sizeof(aq_ethtool_stat_names) -
-                      (AQ_CFG_VECS_MAX - cfg->vecs) *
-                      aq_ethtool_stat_queue_chars);
+       u8 *p = data;
+
+       if (stringset == ETH_SS_STATS) {
+               memcpy(p, *aq_ethtool_stat_names,
+                      sizeof(aq_ethtool_stat_names));
+               p = p + sizeof(aq_ethtool_stat_names);
+               for (i = 0; i < cfg->vecs; i++) {
+                       for (si = 0;
+                               si < ARRAY_SIZE(aq_ethtool_queue_stat_names);
+                               si++) {
+                               snprintf(p, ETH_GSTRING_LEN,
+                                        aq_ethtool_queue_stat_names[si], i);
+                               p += ETH_GSTRING_LEN;
+                       }
+               }
+       }
 }
 
 static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
@@ -182,9 +159,8 @@ static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
 
        switch (stringset) {
        case ETH_SS_STATS:
-               ret = ARRAY_SIZE(aq_ethtool_stat_names) -
-                       (AQ_CFG_VECS_MAX - cfg->vecs) *
-                       aq_ethtool_stat_queue_lines;
+               ret = ARRAY_SIZE(aq_ethtool_stat_names) +
+                       cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
                break;
        default:
                ret = -EOPNOTSUPP;
@@ -245,6 +221,69 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
        return err;
 }
 
+int aq_ethtool_get_coalesce(struct net_device *ndev,
+                           struct ethtool_coalesce *coal)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+       if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON ||
+           cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) {
+               coal->rx_coalesce_usecs = cfg->rx_itr;
+               coal->tx_coalesce_usecs = cfg->tx_itr;
+               coal->rx_max_coalesced_frames = 0;
+               coal->tx_max_coalesced_frames = 0;
+       } else {
+               coal->rx_coalesce_usecs = 0;
+               coal->tx_coalesce_usecs = 0;
+               coal->rx_max_coalesced_frames = 1;
+               coal->tx_max_coalesced_frames = 1;
+       }
+       return 0;
+}
+
+int aq_ethtool_set_coalesce(struct net_device *ndev,
+                           struct ethtool_coalesce *coal)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+       /* This is not yet supported
+        */
+       if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce)
+               return -EOPNOTSUPP;
+
+       /* Atlantic only supports timing based coalescing
+        */
+       if (coal->rx_max_coalesced_frames > 1 ||
+           coal->rx_coalesce_usecs_irq ||
+           coal->rx_max_coalesced_frames_irq)
+               return -EOPNOTSUPP;
+
+       if (coal->tx_max_coalesced_frames > 1 ||
+           coal->tx_coalesce_usecs_irq ||
+           coal->tx_max_coalesced_frames_irq)
+               return -EOPNOTSUPP;
+
+       /* We do not support frame counting. Check this
+        */
+       if (!(coal->rx_max_coalesced_frames == !coal->rx_coalesce_usecs))
+               return -EOPNOTSUPP;
+       if (!(coal->tx_max_coalesced_frames == !coal->tx_coalesce_usecs))
+               return -EOPNOTSUPP;
+
+       if (coal->rx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX ||
+           coal->tx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX)
+               return -EINVAL;
+
+       cfg->itr = AQ_CFG_INTERRUPT_MODERATION_ON;
+
+       cfg->rx_itr = coal->rx_coalesce_usecs;
+       cfg->tx_itr = coal->tx_coalesce_usecs;
+
+       return aq_nic_update_interrupt_moderation_settings(aq_nic);
+}
+
 const struct ethtool_ops aq_ethtool_ops = {
        .get_link            = aq_ethtool_get_link,
        .get_regs_len        = aq_ethtool_get_regs_len,
@@ -259,4 +298,6 @@ const struct ethtool_ops aq_ethtool_ops = {
        .get_ethtool_stats   = aq_ethtool_stats,
        .get_link_ksettings  = aq_ethtool_get_link_ksettings,
        .set_link_ksettings  = aq_ethtool_set_link_ksettings,
+       .get_coalesce        = aq_ethtool_get_coalesce,
+       .set_coalesce        = aq_ethtool_set_coalesce,
 };
index bf9b3f020e106cb07fd7630073f146d7f54ccfa9..0207927dc8a6ab4ac76c46fb17669b7e50e7ae1e 100644 (file)
@@ -151,8 +151,7 @@ struct aq_hw_ops {
                                     [ETH_ALEN],
                                     u32 count);
 
-       int (*hw_interrupt_moderation_set)(struct aq_hw_s *self,
-                                          bool itr_enabled);
+       int (*hw_interrupt_moderation_set)(struct aq_hw_s *self);
 
        int (*hw_rss_set)(struct aq_hw_s *self,
                          struct aq_rss_parameters *rss_params);
@@ -163,6 +162,8 @@ struct aq_hw_ops {
        int (*hw_get_regs)(struct aq_hw_s *self,
                           struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
 
+       int (*hw_update_stats)(struct aq_hw_s *self);
+
        int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
                               unsigned int *p_count);
 
index 6ac9e2602d6d8ea1fefd0de613ee633b905cbd8b..483e97691eeae2de4604e49cdb8fd8d60fb0dda4 100644 (file)
@@ -16,6 +16,7 @@
 #include "aq_pci_func.h"
 #include "aq_nic_internal.h"
 
+#include <linux/moduleparam.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/timer.h>
 #include <linux/tcp.h>
 #include <net/ip.h>
 
+static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
+module_param_named(aq_itr, aq_itr, uint, 0644);
+MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");
+
+static unsigned int aq_itr_tx;
+module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
+MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");
+
+static unsigned int aq_itr_rx;
+module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
+MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
+
 static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
 {
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -61,9 +74,9 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
 
        cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
 
-       cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF;
-       cfg->itr = cfg->is_interrupt_moderation ?
-               AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U;
+       cfg->itr = aq_itr;
+       cfg->tx_itr = aq_itr_tx;
+       cfg->rx_itr = aq_itr_rx;
 
        cfg->is_rss = AQ_CFG_IS_RSS_DEF;
        cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
@@ -119,6 +132,37 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
        return 0;
 }
 
+static int aq_nic_update_link_status(struct aq_nic_s *self)
+{
+       int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
+
+       if (err)
+               return err;
+
+       if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
+               pr_info("%s: link change old %d new %d\n",
+                       AQ_CFG_DRV_NAME, self->link_status.mbps,
+                       self->aq_hw->aq_link_status.mbps);
+               aq_nic_update_interrupt_moderation_settings(self);
+       }
+
+       self->link_status = self->aq_hw->aq_link_status;
+       if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
+               aq_utils_obj_set(&self->header.flags,
+                                AQ_NIC_FLAG_STARTED);
+               aq_utils_obj_clear(&self->header.flags,
+                                  AQ_NIC_LINK_DOWN);
+               netif_carrier_on(self->ndev);
+               netif_tx_wake_all_queues(self->ndev);
+       }
+       if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
+               netif_carrier_off(self->ndev);
+               netif_tx_disable(self->ndev);
+               aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+       }
+       return 0;
+}
+
 static void aq_nic_service_timer_cb(unsigned long param)
 {
        struct aq_nic_s *self = (struct aq_nic_s *)param;
@@ -131,25 +175,12 @@ static void aq_nic_service_timer_cb(unsigned long param)
        if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
                goto err_exit;
 
-       err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
-       if (err < 0)
+       err = aq_nic_update_link_status(self);
+       if (err)
                goto err_exit;
 
-       self->link_status = self->aq_hw->aq_link_status;
-
-       self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
-                   self->aq_nic_cfg.is_interrupt_moderation);
-
-       if (self->link_status.mbps) {
-               aq_utils_obj_set(&self->header.flags,
-                                AQ_NIC_FLAG_STARTED);
-               aq_utils_obj_clear(&self->header.flags,
-                                  AQ_NIC_LINK_DOWN);
-               netif_carrier_on(self->ndev);
-       } else {
-               netif_carrier_off(self->ndev);
-               aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
-       }
+       if (self->aq_hw_ops.hw_update_stats)
+               self->aq_hw_ops.hw_update_stats(self->aq_hw);
 
        memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
        memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
@@ -214,7 +245,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
        SET_NETDEV_DEV(ndev, dev);
 
        ndev->if_port = port;
-       ndev->min_mtu = ETH_MIN_MTU;
        self->ndev = ndev;
 
        self->aq_pci_func = aq_pci_func;
@@ -241,7 +271,6 @@ err_exit:
 int aq_nic_ndev_register(struct aq_nic_s *self)
 {
        int err = 0;
-       unsigned int i = 0U;
 
        if (!self->ndev) {
                err = -EINVAL;
@@ -263,8 +292,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
 
        netif_carrier_off(self->ndev);
 
-       for (i = AQ_CFG_VECS_MAX; i--;)
-               aq_nic_ndev_queue_stop(self, i);
+       netif_tx_disable(self->ndev);
 
        err = register_netdev(self->ndev);
        if (err < 0)
@@ -283,6 +311,7 @@ int aq_nic_ndev_init(struct aq_nic_s *self)
        self->ndev->features = aq_hw_caps->hw_features;
        self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
        self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
+       self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;
 
        return 0;
 }
@@ -318,12 +347,9 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
                err = -EINVAL;
                goto err_exit;
        }
-       if (netif_running(ndev)) {
-               unsigned int i;
-
-               for (i = AQ_CFG_VECS_MAX; i--;)
-                       netif_stop_subqueue(ndev, i);
-       }
+       if (netif_running(ndev))
+               netif_tx_disable(ndev);
+       netif_carrier_off(self->ndev);
 
        for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
                self->aq_vecs++) {
@@ -383,16 +409,6 @@ err_exit:
        return err;
 }
 
-void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
-{
-       netif_start_subqueue(self->ndev, idx);
-}
-
-void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
-{
-       netif_stop_subqueue(self->ndev, idx);
-}
-
 int aq_nic_start(struct aq_nic_s *self)
 {
        struct aq_vec_s *aq_vec = NULL;
@@ -421,9 +437,8 @@ int aq_nic_start(struct aq_nic_s *self)
        if (err < 0)
                goto err_exit;
 
-       err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
-                           self->aq_nic_cfg.is_interrupt_moderation);
-       if (err < 0)
+       err = aq_nic_update_interrupt_moderation_settings(self);
+       if (err)
                goto err_exit;
        setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
                    (unsigned long)self);
@@ -451,10 +466,6 @@ int aq_nic_start(struct aq_nic_s *self)
                        goto err_exit;
        }
 
-       for (i = 0U, aq_vec = self->aq_vec[0];
-               self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
-               aq_nic_ndev_queue_start(self, i);
-
        err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
        if (err < 0)
                goto err_exit;
@@ -463,6 +474,8 @@ int aq_nic_start(struct aq_nic_s *self)
        if (err < 0)
                goto err_exit;
 
+       netif_tx_start_all_queues(self->ndev);
+
 err_exit:
        return err;
 }
@@ -475,6 +488,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int frag_count = 0U;
        unsigned int dx = ring->sw_tail;
+       struct aq_ring_buff_s *first = NULL;
        struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
 
        if (unlikely(skb_is_gso(skb))) {
@@ -485,6 +499,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
                dx_buff->len_l4 = tcp_hdrlen(skb);
                dx_buff->mss = skb_shinfo(skb)->gso_size;
                dx_buff->is_txc = 1U;
+               dx_buff->eop_index = 0xffffU;
 
                dx_buff->is_ipv6 =
                        (ip_hdr(skb)->version == 6) ? 1U : 0U;
@@ -504,6 +519,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
        if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
                goto exit;
 
+       first = dx_buff;
        dx_buff->len_pkt = skb->len;
        dx_buff->is_sop = 1U;
        dx_buff->is_mapped = 1U;
@@ -532,40 +548,46 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
 
        for (; nr_frags--; ++frag_count) {
                unsigned int frag_len = 0U;
+               unsigned int buff_offset = 0U;
+               unsigned int buff_size = 0U;
                dma_addr_t frag_pa;
                skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
 
                frag_len = skb_frag_size(frag);
-               frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
-                                          frag_len, DMA_TO_DEVICE);
 
-               if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa)))
-                       goto mapping_error;
+               while (frag_len) {
+                       if (frag_len > AQ_CFG_TX_FRAME_MAX)
+                               buff_size = AQ_CFG_TX_FRAME_MAX;
+                       else
+                               buff_size = frag_len;
+
+                       frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
+                                                  frag,
+                                                  buff_offset,
+                                                  buff_size,
+                                                  DMA_TO_DEVICE);
+
+                       if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
+                                                      frag_pa)))
+                               goto mapping_error;
 
-               while (frag_len > AQ_CFG_TX_FRAME_MAX) {
                        dx = aq_ring_next_dx(ring, dx);
                        dx_buff = &ring->buff_ring[dx];
 
                        dx_buff->flags = 0U;
-                       dx_buff->len = AQ_CFG_TX_FRAME_MAX;
+                       dx_buff->len = buff_size;
                        dx_buff->pa = frag_pa;
                        dx_buff->is_mapped = 1U;
+                       dx_buff->eop_index = 0xffffU;
+
+                       frag_len -= buff_size;
+                       buff_offset += buff_size;
 
-                       frag_len -= AQ_CFG_TX_FRAME_MAX;
-                       frag_pa += AQ_CFG_TX_FRAME_MAX;
                        ++ret;
                }
-
-               dx = aq_ring_next_dx(ring, dx);
-               dx_buff = &ring->buff_ring[dx];
-
-               dx_buff->flags = 0U;
-               dx_buff->len = frag_len;
-               dx_buff->pa = frag_pa;
-               dx_buff->is_mapped = 1U;
-               ++ret;
        }
 
+       first->eop_index = dx;
        dx_buff->is_eop = 1U;
        dx_buff->skb = skb;
        goto exit;
@@ -602,7 +624,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
        unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
        unsigned int tc = 0U;
        int err = NETDEV_TX_OK;
-       bool is_nic_in_bad_state;
 
        frags = skb_shinfo(skb)->nr_frags + 1;
 
@@ -613,13 +634,10 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
                goto err_exit;
        }
 
-       is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
-                                               AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
-                                               (aq_ring_avail_dx(ring) <
-                                               AQ_CFG_SKB_FRAGS_MAX);
+       aq_ring_update_queue_state(ring);
 
-       if (is_nic_in_bad_state) {
-               aq_nic_ndev_queue_stop(self, ring->idx);
+       /* Above status update may stop the queue. Check this. */
+       if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
                err = NETDEV_TX_BUSY;
                goto err_exit;
        }
@@ -631,9 +649,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
                                                      ring,
                                                      frags);
                if (err >= 0) {
-                       if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
-                               aq_nic_ndev_queue_stop(self, ring->idx);
-
                        ++ring->stats.tx.packets;
                        ring->stats.tx.bytes += skb->len;
                }
@@ -645,6 +660,11 @@ err_exit:
        return err;
 }
 
+int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
+{
+       return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw);
+}
+
 int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
 {
        int err = 0;
@@ -693,16 +713,9 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
 
 int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
 {
-       int err = 0;
-
-       if (new_mtu > self->aq_hw_caps.mtu) {
-               err = -EINVAL;
-               goto err_exit;
-       }
        self->aq_nic_cfg.mtu = new_mtu;
 
-err_exit:
-       return err;
+       return 0;
 }
 
 int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
@@ -905,9 +918,8 @@ int aq_nic_stop(struct aq_nic_s *self)
        struct aq_vec_s *aq_vec = NULL;
        unsigned int i = 0U;
 
-       for (i = 0U, aq_vec = self->aq_vec[0];
-               self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
-               aq_nic_ndev_queue_stop(self, i);
+       netif_tx_disable(self->ndev);
+       netif_carrier_off(self->ndev);
 
        del_timer_sync(&self->service_timer);
 
index 7fc2a5ecb2b7a01f594388e929c7affcb299af6f..4309983acdd6f7502fa05869f79336fd459dc2fe 100644 (file)
@@ -40,6 +40,8 @@ struct aq_nic_cfg_s {
        u32 vecs;               /* vecs==allocated irqs */
        u32 irq_type;
        u32 itr;
+       u16 rx_itr;
+       u16 tx_itr;
        u32 num_rss_queues;
        u32 mtu;
        u32 ucp_0x364;
@@ -49,7 +51,6 @@ struct aq_nic_cfg_s {
        u16 is_mc_list_enabled;
        u16 mc_list_count;
        bool is_autoneg;
-       bool is_interrupt_moderation;
        bool is_polling;
        bool is_rss;
        bool is_lro;
@@ -83,8 +84,6 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
 int aq_nic_init(struct aq_nic_s *self);
 int aq_nic_cfg_start(struct aq_nic_s *self);
 int aq_nic_ndev_register(struct aq_nic_s *self);
-void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx);
-void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx);
 void aq_nic_ndev_free(struct aq_nic_s *self);
 int aq_nic_start(struct aq_nic_s *self);
 int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
@@ -106,5 +105,6 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
 struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
 u32 aq_nic_get_fw_version(struct aq_nic_s *self);
 int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
+int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
 
 #endif /* AQ_NIC_H */
index 4c6c882c6a1c424238473ea40ecf9f0ebf7cee28..cadaa646c89f4b741382b4beee72c6ec3e3bfc18 100644 (file)
@@ -85,6 +85,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
        int err = 0;
        unsigned int bar = 0U;
        unsigned int port = 0U;
+       unsigned int numvecs = 0U;
 
        err = pci_enable_device(self->pdev);
        if (err < 0)
@@ -142,10 +143,12 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
                }
        }
 
-       /*enable interrupts */
+       numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs);
+       numvecs = min(numvecs, num_online_cpus());
+
+       /* enable interrupts */
 #if !AQ_CFG_FORCE_LEGACY_INT
-       err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs,
-                             self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX);
+       err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX);
 
        if (err < 0) {
                err = pci_alloc_irq_vectors(self->pdev, 1, 1,
@@ -153,7 +156,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
                if (err < 0)
                        goto err_exit;
        }
-#endif
+#endif /* AQ_CFG_FORCE_LEGACY_INT */
 
        /* net device init */
        for (port = 0; port < self->ports; ++port) {
@@ -265,6 +268,9 @@ void aq_pci_func_free(struct aq_pci_func_s *self)
                aq_nic_ndev_free(self->port[port]);
        }
 
+       if (self->mmio)
+               iounmap(self->mmio);
+
        kfree(self);
 
 err_exit:;
index 4eee1996a8259e561c15a17b9a3792ef79f280e2..0654e0c76bc27cfb8d2e7d5fd809c0f427a4e4bf 100644 (file)
@@ -104,6 +104,38 @@ int aq_ring_init(struct aq_ring_s *self)
        return 0;
 }
 
+static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
+                                      unsigned int t)
+{
+       return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
+}
+
+void aq_ring_update_queue_state(struct aq_ring_s *ring)
+{
+       if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
+               aq_ring_queue_stop(ring);
+       else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
+               aq_ring_queue_wake(ring);
+}
+
+void aq_ring_queue_wake(struct aq_ring_s *ring)
+{
+       struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
+
+       if (__netif_subqueue_stopped(ndev, ring->idx)) {
+               netif_wake_subqueue(ndev, ring->idx);
+               ring->stats.tx.queue_restarts++;
+       }
+}
+
+void aq_ring_queue_stop(struct aq_ring_s *ring)
+{
+       struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
+
+       if (!__netif_subqueue_stopped(ndev, ring->idx))
+               netif_stop_subqueue(ndev, ring->idx);
+}
+
 void aq_ring_tx_clean(struct aq_ring_s *self)
 {
        struct device *dev = aq_nic_get_dev(self->aq_nic);
@@ -113,23 +145,28 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
                struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
 
                if (likely(buff->is_mapped)) {
-                       if (unlikely(buff->is_sop))
+                       if (unlikely(buff->is_sop)) {
+                               if (!buff->is_eop &&
+                                   buff->eop_index != 0xffffU &&
+                                   (!aq_ring_dx_in_range(self->sw_head,
+                                               buff->eop_index,
+                                               self->hw_head)))
+                                       break;
+
                                dma_unmap_single(dev, buff->pa, buff->len,
                                                 DMA_TO_DEVICE);
-                       else
+                       } else {
                                dma_unmap_page(dev, buff->pa, buff->len,
                                               DMA_TO_DEVICE);
+                       }
                }
 
                if (unlikely(buff->is_eop))
                        dev_kfree_skb_any(buff->skb);
-       }
-}
 
-static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i,
-                                              unsigned int t)
-{
-       return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
+               buff->pa = 0U;
+               buff->eop_index = 0xffffU;
+       }
 }
 
 #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
index 782176c5f4f800f090353ee3184a98918dd1d0a3..5844078764bd90463c632d94ba5d1c45e8a22c3d 100644 (file)
@@ -65,7 +65,7 @@ struct __packed aq_ring_buff_s {
        };
        union {
                struct {
-                       u32 len:16;
+                       u16 len;
                        u32 is_ip_cso:1;
                        u32 is_udp_cso:1;
                        u32 is_tcp_cso:1;
@@ -77,8 +77,10 @@ struct __packed aq_ring_buff_s {
                        u32 is_cleaned:1;
                        u32 is_error:1;
                        u32 rsvd3:6;
+                       u16 eop_index;
+                       u16 rsvd4;
                };
-               u32 flags;
+               u64 flags;
        };
 };
 
@@ -94,6 +96,7 @@ struct aq_ring_stats_tx_s {
        u64 errors;
        u64 packets;
        u64 bytes;
+       u64 queue_restarts;
 };
 
 union aq_ring_stats_s {
@@ -147,6 +150,9 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
 int aq_ring_init(struct aq_ring_s *self);
 void aq_ring_rx_deinit(struct aq_ring_s *self);
 void aq_ring_free(struct aq_ring_s *self);
+void aq_ring_update_queue_state(struct aq_ring_s *ring);
+void aq_ring_queue_wake(struct aq_ring_s *ring);
+void aq_ring_queue_stop(struct aq_ring_s *ring);
 void aq_ring_tx_clean(struct aq_ring_s *self);
 int aq_ring_rx_clean(struct aq_ring_s *self,
                     struct napi_struct *napi,
index ebf588004c4677140934b152eeab13d4d3aecbf7..5fecc9a099ef7fd34d3a36b9ee1eb9c01a3f9fcd 100644 (file)
@@ -59,12 +59,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
                        if (ring[AQ_VEC_TX_ID].sw_head !=
                            ring[AQ_VEC_TX_ID].hw_head) {
                                aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
-
-                               if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) >
-                                   AQ_CFG_SKB_FRAGS_MAX) {
-                                       aq_nic_ndev_queue_start(self->aq_nic,
-                                               ring[AQ_VEC_TX_ID].idx);
-                               }
+                               aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
                                was_tx_cleaned = true;
                        }
 
@@ -364,6 +359,7 @@ void aq_vec_add_stats(struct aq_vec_s *self,
                stats_tx->packets += tx->packets;
                stats_tx->bytes += tx->bytes;
                stats_tx->errors += tx->errors;
+               stats_tx->queue_restarts += tx->queue_restarts;
        }
 }
 
@@ -377,8 +373,11 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
        memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
        aq_vec_add_stats(self, &stats_rx, &stats_tx);
 
+       /* This data should mimic aq_ethtool_queue_stat_names structure
+        */
        data[count] += stats_rx.packets;
        data[++count] += stats_tx.packets;
+       data[++count] += stats_tx.queue_restarts;
        data[++count] += stats_rx.jumbo_packets;
        data[++count] += stats_rx.lro_packets;
        data[++count] += stats_rx.errors;
index c5a02df7a48b719a65b169938746d777f3f0b5a0..07b3c49a16a4266b4fb312bb79198f9ba0c60f04 100644 (file)
@@ -765,24 +765,23 @@ err_exit:
        return err;
 }
 
-static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
-                                                bool itr_enabled)
+static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 {
        unsigned int i = 0U;
+       u32 itr_rx;
 
-       if (itr_enabled && self->aq_nic_cfg->itr) {
-               if (self->aq_nic_cfg->itr != 0xFFFFU) {
+       if (self->aq_nic_cfg->itr) {
+               if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
                        u32 itr_ = (self->aq_nic_cfg->itr >> 1);
 
                        itr_ = min(AQ_CFG_IRQ_MASK, itr_);
 
-                       PHAL_ATLANTIC_A0->itr_rx = 0x80000000U |
-                                       (itr_ << 0x10);
+                       itr_rx = 0x80000000U | (itr_ << 0x10);
                } else  {
                        u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);
 
                        if (n < self->aq_link_status.mbps) {
-                               PHAL_ATLANTIC_A0->itr_rx = 0U;
+                               itr_rx = 0U;
                        } else {
                                static unsigned int hw_timers_tbl_[] = {
                                        0x01CU, /* 10Gbit */
@@ -797,8 +796,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
                                        hw_atl_utils_mbps_2_speed_index(
                                                self->aq_link_status.mbps);
 
-                               PHAL_ATLANTIC_A0->itr_rx =
-                                       0x80000000U |
+                               itr_rx = 0x80000000U |
                                        (hw_timers_tbl_[speed_index] << 0x10U);
                        }
 
@@ -806,11 +804,11 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
                        aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
                }
        } else {
-               PHAL_ATLANTIC_A0->itr_rx = 0U;
+               itr_rx = 0U;
        }
 
        for (i = HW_ATL_A0_RINGS_MAX; i--;)
-               reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i);
+               reg_irq_thr_set(self, itr_rx, i);
 
        return aq_hw_err_from_flags(self);
 }
@@ -885,6 +883,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
        .hw_rss_set                  = hw_atl_a0_hw_rss_set,
        .hw_rss_hash_set             = hw_atl_a0_hw_rss_hash_set,
        .hw_get_regs                 = hw_atl_utils_hw_get_regs,
+       .hw_update_stats             = hw_atl_utils_update_stats,
        .hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
        .hw_get_fw_version           = hw_atl_utils_get_fw_version,
 };
index 21784cc39dabdb9005a0c4bff26c64b0ac7a5286..ec68c20efcbdb6079b9dba4b8200ad8f1f450233 100644 (file)
@@ -788,39 +788,45 @@ err_exit:
        return err;
 }
 
-static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
-                                                bool itr_enabled)
+static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 {
        unsigned int i = 0U;
+       u32 itr_tx = 2U;
+       u32 itr_rx = 2U;
 
-       if (itr_enabled && self->aq_nic_cfg->itr) {
+       switch (self->aq_nic_cfg->itr) {
+       case  AQ_CFG_INTERRUPT_MODERATION_ON:
+       case  AQ_CFG_INTERRUPT_MODERATION_AUTO:
                tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
                tdm_tdm_intr_moder_en_set(self, 1U);
                rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
                rdm_rdm_intr_moder_en_set(self, 1U);
 
-               PHAL_ATLANTIC_B0->itr_tx = 2U;
-               PHAL_ATLANTIC_B0->itr_rx = 2U;
+               if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
+                       /* HW timers are in 2us units */
+                       int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
+                       int tx_min_timer = tx_max_timer / 2;
 
-               if (self->aq_nic_cfg->itr != 0xFFFFU) {
-                       unsigned int max_timer = self->aq_nic_cfg->itr / 2U;
-                       unsigned int min_timer = self->aq_nic_cfg->itr / 32U;
+                       int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
+                       int rx_min_timer = rx_max_timer / 2;
 
-                       max_timer = min(0x1FFU, max_timer);
-                       min_timer = min(0xFFU, min_timer);
+                       tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
+                       tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
+                       rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
+                       rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);
 
-                       PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U;
-                       PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U;
-                       PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U;
-                       PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U;
+                       itr_tx |= tx_min_timer << 0x8U;
+                       itr_tx |= tx_max_timer << 0x10U;
+                       itr_rx |= rx_min_timer << 0x8U;
+                       itr_rx |= rx_max_timer << 0x10U;
                } else {
                        static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
-                               {0xffU, 0xffU}, /* 10Gbit */
-                               {0xffU, 0x1ffU}, /* 5Gbit */
-                               {0xffU, 0x1ffU}, /* 5Gbit 5GS */
-                               {0xffU, 0x1ffU}, /* 2.5Gbit */
-                               {0xffU, 0x1ffU}, /* 1Gbit */
-                               {0xffU, 0x1ffU}, /* 100Mbit */
+                               {0xfU, 0xffU}, /* 10Gbit */
+                               {0xfU, 0x1ffU}, /* 5Gbit */
+                               {0xfU, 0x1ffU}, /* 5Gbit 5GS */
+                               {0xfU, 0x1ffU}, /* 2.5Gbit */
+                               {0xfU, 0x1ffU}, /* 1Gbit */
+                               {0xfU, 0x1ffU}, /* 100Mbit */
                        };
 
                        static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
@@ -836,34 +842,36 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
                                        hw_atl_utils_mbps_2_speed_index(
                                                self->aq_link_status.mbps);
 
-                       PHAL_ATLANTIC_B0->itr_tx |=
-                               hw_atl_b0_timers_table_tx_[speed_index]
-                               [0] << 0x8U; /* set min timer value */
-                       PHAL_ATLANTIC_B0->itr_tx |=
-                               hw_atl_b0_timers_table_tx_[speed_index]
-                               [1] << 0x10U; /* set max timer value */
-
-                       PHAL_ATLANTIC_B0->itr_rx |=
-                               hw_atl_b0_timers_table_rx_[speed_index]
-                               [0] << 0x8U; /* set min timer value */
-                       PHAL_ATLANTIC_B0->itr_rx |=
-                               hw_atl_b0_timers_table_rx_[speed_index]
-                               [1] << 0x10U; /* set max timer value */
+                       /* Update user visible ITR settings */
+                       self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
+                                                       [speed_index][1] * 2;
+                       self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
+                                                       [speed_index][1] * 2;
+
+                       itr_tx |= hw_atl_b0_timers_table_tx_
+                                               [speed_index][0] << 0x8U;
+                       itr_tx |= hw_atl_b0_timers_table_tx_
+                                               [speed_index][1] << 0x10U;
+
+                       itr_rx |= hw_atl_b0_timers_table_rx_
+                                               [speed_index][0] << 0x8U;
+                       itr_rx |= hw_atl_b0_timers_table_rx_
+                                               [speed_index][1] << 0x10U;
                }
-       } else {
+               break;
+       case AQ_CFG_INTERRUPT_MODERATION_OFF:
                tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
                tdm_tdm_intr_moder_en_set(self, 0U);
                rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
                rdm_rdm_intr_moder_en_set(self, 0U);
-               PHAL_ATLANTIC_B0->itr_tx = 0U;
-               PHAL_ATLANTIC_B0->itr_rx = 0U;
+               itr_tx = 0U;
+               itr_rx = 0U;
+               break;
        }
 
        for (i = HW_ATL_B0_RINGS_MAX; i--;) {
-               reg_tx_intr_moder_ctrl_set(self,
-                                          PHAL_ATLANTIC_B0->itr_tx, i);
-               reg_rx_intr_moder_ctrl_set(self,
-                                          PHAL_ATLANTIC_B0->itr_rx, i);
+               reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+               reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
        }
 
        return aq_hw_err_from_flags(self);
@@ -939,6 +947,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
        .hw_rss_set                  = hw_atl_b0_hw_rss_set,
        .hw_rss_hash_set             = hw_atl_b0_hw_rss_hash_set,
        .hw_get_regs                 = hw_atl_utils_hw_get_regs,
+       .hw_update_stats             = hw_atl_utils_update_stats,
        .hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
        .hw_get_fw_version           = hw_atl_utils_get_fw_version,
 };
index f3957e9303405c3f26c9f7f7d6507009d5804534..9aa2c6edfca23276335cd9c4bf23c8528f9305e1 100644 (file)
@@ -16,7 +16,7 @@
 
 #include "../aq_common.h"
 
-#define HW_ATL_B0_MTU_JUMBO (16000U)
+#define HW_ATL_B0_MTU_JUMBO  16352U
 #define HW_ATL_B0_MTU        1514U
 
 #define HW_ATL_B0_TX_RINGS 4U
 
 #define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U
 
+#define HW_ATL_INTR_MODER_MAX  0x1FF
+#define HW_ATL_INTR_MODER_MIN  0xFF
+
 /* Hardware tx descriptor */
 struct __packed hw_atl_txd_s {
        u64 buf_addr;
index 4f5ec9a0fbfb82b7bcf25fb234c5d6c3180c544a..1fe016fc4bc704361ca68ee39f3e443715505e8c 100644 (file)
@@ -255,6 +255,15 @@ err_exit:
        return err;
 }
 
+int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
+                              struct hw_aq_atl_utils_mbox_header *pmbox)
+{
+       return hw_atl_utils_fw_downld_dwords(self,
+                                     PHAL_ATLANTIC->mbox_addr,
+                                     (u32 *)(void *)pmbox,
+                                     sizeof(*pmbox) / sizeof(u32));
+}
+
 void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
                                 struct hw_aq_atl_utils_mbox *pmbox)
 {
@@ -267,9 +276,6 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
        if (err < 0)
                goto err_exit;
 
-       if (pmbox != &PHAL_ATLANTIC->mbox)
-               memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox));
-
        if (IS_CHIP_FEATURE(REVISION_A0)) {
                unsigned int mtu = self->aq_nic_cfg ?
                                        self->aq_nic_cfg->mtu : 1514U;
@@ -299,17 +305,17 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
 {
        int err = 0;
        u32 transaction_id = 0;
+       struct hw_aq_atl_utils_mbox_header mbox;
 
        if (state == MPI_RESET) {
-               hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
+               hw_atl_utils_mpi_read_mbox(self, &mbox);
 
-               transaction_id = PHAL_ATLANTIC->mbox.transaction_id;
+               transaction_id = mbox.transaction_id;
 
                AQ_HW_WAIT_FOR(transaction_id !=
-                               (hw_atl_utils_mpi_read_stats
-                                       (self, &PHAL_ATLANTIC->mbox),
-                                       PHAL_ATLANTIC->mbox.transaction_id),
-                                       1000U, 100U);
+                               (hw_atl_utils_mpi_read_mbox(self, &mbox),
+                                mbox.transaction_id),
+                              1000U, 100U);
                if (err < 0)
                        goto err_exit;
        }
@@ -351,8 +357,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
                        break;
 
                default:
-                       link_status->mbps = 0U;
-                       break;
+                       return -EBUSY;
                }
        }
 
@@ -493,16 +498,51 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
        return 0;
 }
 
+int hw_atl_utils_update_stats(struct aq_hw_s *self)
+{
+       struct hw_atl_s *hw_self = PHAL_ATLANTIC;
+       struct hw_aq_atl_utils_mbox mbox;
+
+       if (!self->aq_link_status.mbps)
+               return 0;
+
+       hw_atl_utils_mpi_read_stats(self, &mbox);
+
+#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
+                       mbox.stats._N_ - hw_self->last_stats._N_)
+
+       AQ_SDELTA(uprc);
+       AQ_SDELTA(mprc);
+       AQ_SDELTA(bprc);
+       AQ_SDELTA(erpt);
+
+       AQ_SDELTA(uptc);
+       AQ_SDELTA(mptc);
+       AQ_SDELTA(bptc);
+       AQ_SDELTA(erpr);
+
+       AQ_SDELTA(ubrc);
+       AQ_SDELTA(ubtc);
+       AQ_SDELTA(mbrc);
+       AQ_SDELTA(mbtc);
+       AQ_SDELTA(bbrc);
+       AQ_SDELTA(bbtc);
+       AQ_SDELTA(dpc);
+
+#undef AQ_SDELTA
+
+       memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
+
+       return 0;
+}
+
 int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
                              u64 *data, unsigned int *p_count)
 {
-       struct hw_atl_stats_s *stats = NULL;
+       struct hw_atl_s *hw_self = PHAL_ATLANTIC;
+       struct hw_atl_stats_s *stats = &hw_self->curr_stats;
        int i = 0;
 
-       hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
-
-       stats = &PHAL_ATLANTIC->mbox.stats;
-
        data[i] = stats->uprc + stats->mprc + stats->bprc;
        data[++i] = stats->uprc;
        data[++i] = stats->mprc;
index e0360a6b2202ef5b4ac683a44edcde9bf20ebedc..c99cc690e425bb72907df675e04a196819cfec02 100644 (file)
@@ -115,19 +115,22 @@ struct __packed hw_aq_atl_utils_fw_rpc {
        };
 };
 
-struct __packed hw_aq_atl_utils_mbox {
+struct __packed hw_aq_atl_utils_mbox_header {
        u32 version;
        u32 transaction_id;
-       int error;
+       u32 error;
+};
+
+struct __packed hw_aq_atl_utils_mbox {
+       struct hw_aq_atl_utils_mbox_header header;
        struct hw_atl_stats_s stats;
 };
 
 struct __packed hw_atl_s {
        struct aq_hw_s base;
-       struct hw_aq_atl_utils_mbox mbox;
+       struct hw_atl_stats_s last_stats;
+       struct hw_atl_stats_s curr_stats;
        u64 speed;
-       u32 itr_tx;
-       u32 itr_rx;
        unsigned int chip_features;
        u32 fw_ver_actual;
        atomic_t dpc;
@@ -170,6 +173,9 @@ enum hal_atl_utils_fw_state_e {
 
 void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
 
+int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
+                              struct hw_aq_atl_utils_mbox_header *pmbox);
+
 void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
                                 struct hw_aq_atl_utils_mbox *pmbox);
 
@@ -199,6 +205,8 @@ int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
 
 int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
 
+int hw_atl_utils_update_stats(struct aq_hw_s *self);
+
 int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
                              u64 *data,
                              unsigned int *p_count);
index c3c53f6cd9e62cb6551533153f455a2cd7285954..83eec9a8c27511b40e39d18b072dc0afc9aacff4 100644 (file)
@@ -432,6 +432,27 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
        netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
 }
 
+static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
+                                       u64 *tx_bytes, u64 *tx_packets)
+{
+       struct bcm_sysport_tx_ring *ring;
+       u64 bytes = 0, packets = 0;
+       unsigned int start;
+       unsigned int q;
+
+       for (q = 0; q < priv->netdev->num_tx_queues; q++) {
+               ring = &priv->tx_rings[q];
+               do {
+                       start = u64_stats_fetch_begin_irq(&priv->syncp);
+                       bytes = ring->bytes;
+                       packets = ring->packets;
+               } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
+
+               *tx_bytes += bytes;
+               *tx_packets += packets;
+       }
+}
+
 static void bcm_sysport_get_stats(struct net_device *dev,
                                  struct ethtool_stats *stats, u64 *data)
 {
@@ -439,11 +460,16 @@ static void bcm_sysport_get_stats(struct net_device *dev,
        struct bcm_sysport_stats64 *stats64 = &priv->stats64;
        struct u64_stats_sync *syncp = &priv->syncp;
        struct bcm_sysport_tx_ring *ring;
+       u64 tx_bytes = 0, tx_packets = 0;
        unsigned int start;
        int i, j;
 
-       if (netif_running(dev))
+       if (netif_running(dev)) {
                bcm_sysport_update_mib_counters(priv);
+               bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
+               stats64->tx_bytes = tx_bytes;
+               stats64->tx_packets = tx_packets;
+       }
 
        for (i =  0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
                const struct bcm_sysport_stats *s;
@@ -461,12 +487,13 @@ static void bcm_sysport_get_stats(struct net_device *dev,
                        continue;
                p += s->stat_offset;
 
-               if (s->stat_sizeof == sizeof(u64))
+               if (s->stat_sizeof == sizeof(u64) &&
+                   s->type == BCM_SYSPORT_STAT_NETDEV64) {
                        do {
                                start = u64_stats_fetch_begin_irq(syncp);
                                data[i] = *(u64 *)p;
                        } while (u64_stats_fetch_retry_irq(syncp, start));
-               else
+               else
                        data[i] = *(u32 *)p;
                j++;
        }
@@ -1716,27 +1743,12 @@ static void bcm_sysport_get_stats64(struct net_device *dev,
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct bcm_sysport_stats64 *stats64 = &priv->stats64;
-       struct bcm_sysport_tx_ring *ring;
-       u64 tx_packets = 0, tx_bytes = 0;
        unsigned int start;
-       unsigned int q;
 
        netdev_stats_to_stats64(stats, &dev->stats);
 
-       for (q = 0; q < dev->num_tx_queues; q++) {
-               ring = &priv->tx_rings[q];
-               do {
-                       start = u64_stats_fetch_begin_irq(&priv->syncp);
-                       tx_bytes = ring->bytes;
-                       tx_packets = ring->packets;
-               } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
-
-               stats->tx_bytes += tx_bytes;
-               stats->tx_packets += tx_packets;
-       }
-
-       stats64->tx_bytes = stats->tx_bytes;
-       stats64->tx_packets = stats->tx_packets;
+       bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
+                                   &stats->tx_packets);
 
        do {
                start = u64_stats_fetch_begin_irq(&priv->syncp);
index aacec8bc19d5fbf6fe0f007d8a6a59fe2df23c8d..dc5de275352a7f589b0d410b6127bee4ea4b815c 100644 (file)
@@ -214,6 +214,8 @@ static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
 };
 
+static struct workqueue_struct *bnxt_pf_wq;
+
 static bool bnxt_vf_pciid(enum board_idx idx)
 {
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
@@ -1024,12 +1026,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
        return 0;
 }
 
+static void bnxt_queue_sp_work(struct bnxt *bp)
+{
+       if (BNXT_PF(bp))
+               queue_work(bnxt_pf_wq, &bp->sp_task);
+       else
+               schedule_work(&bp->sp_task);
+}
+
+static void bnxt_cancel_sp_work(struct bnxt *bp)
+{
+       if (BNXT_PF(bp))
+               flush_workqueue(bnxt_pf_wq);
+       else
+               cancel_work_sync(&bp->sp_task);
+}
+
 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 {
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-               schedule_work(&bp->sp_task);
+               bnxt_queue_sp_work(bp);
        }
        rxr->rx_next_cons = 0xffff;
 }
@@ -1717,7 +1735,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
        default:
                goto async_event_process_exit;
        }
-       schedule_work(&bp->sp_task);
+       bnxt_queue_sp_work(bp);
 async_event_process_exit:
        bnxt_ulp_async_events(bp, cmpl);
        return 0;
@@ -1751,7 +1769,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
 
                set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
                set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
-               schedule_work(&bp->sp_task);
+               bnxt_queue_sp_work(bp);
                break;
 
        case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -3448,6 +3466,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
        return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
 }
 
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
+                             int timeout)
+{
+       return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
+}
+
 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
 {
        int rc;
@@ -6327,7 +6351,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
        }
 
        if (link_re_init) {
+               mutex_lock(&bp->link_lock);
                rc = bnxt_update_phy_setting(bp);
+               mutex_unlock(&bp->link_lock);
                if (rc)
                        netdev_warn(bp->dev, "failed to update phy settings\n");
        }
@@ -6647,7 +6673,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
                vnic->rx_mask = mask;
 
                set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
-               schedule_work(&bp->sp_task);
+               bnxt_queue_sp_work(bp);
        }
 }
 
@@ -6920,7 +6946,7 @@ static void bnxt_tx_timeout(struct net_device *dev)
 
        netdev_err(bp->dev,  "TX timeout detected, starting reset task!\n");
        set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-       schedule_work(&bp->sp_task);
+       bnxt_queue_sp_work(bp);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6952,7 +6978,7 @@ static void bnxt_timer(unsigned long data)
        if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
            bp->stats_coal_ticks) {
                set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
-               schedule_work(&bp->sp_task);
+               bnxt_queue_sp_work(bp);
        }
 bnxt_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
@@ -7025,30 +7051,28 @@ static void bnxt_sp_task(struct work_struct *work)
        if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
                bnxt_hwrm_port_qstats(bp);
 
-       /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
-        * must be the last functions to be called before exiting.
-        */
        if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
-               int rc = 0;
+               int rc;
 
+               mutex_lock(&bp->link_lock);
                if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
                                       &bp->sp_event))
                        bnxt_hwrm_phy_qcaps(bp);
 
-               bnxt_rtnl_lock_sp(bp);
-               if (test_bit(BNXT_STATE_OPEN, &bp->state))
-                       rc = bnxt_update_link(bp, true);
-               bnxt_rtnl_unlock_sp(bp);
+               rc = bnxt_update_link(bp, true);
+               mutex_unlock(&bp->link_lock);
                if (rc)
                        netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
                                   rc);
        }
        if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
-               bnxt_rtnl_lock_sp(bp);
-               if (test_bit(BNXT_STATE_OPEN, &bp->state))
-                       bnxt_get_port_module_status(bp);
-               bnxt_rtnl_unlock_sp(bp);
+               mutex_lock(&bp->link_lock);
+               bnxt_get_port_module_status(bp);
+               mutex_unlock(&bp->link_lock);
        }
+       /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
+        * must be the last functions to be called before exiting.
+        */
        if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
                bnxt_reset(bp, false);
 
@@ -7433,7 +7457,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
        spin_unlock_bh(&bp->ntp_fltr_lock);
 
        set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
-       schedule_work(&bp->sp_task);
+       bnxt_queue_sp_work(bp);
 
        return new_fltr->sw_id;
 
@@ -7516,7 +7540,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
                if (bp->vxlan_port_cnt == 1) {
                        bp->vxlan_port = ti->port;
                        set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
-                       schedule_work(&bp->sp_task);
+                       bnxt_queue_sp_work(bp);
                }
                break;
        case UDP_TUNNEL_TYPE_GENEVE:
@@ -7533,7 +7557,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
                return;
        }
 
-       schedule_work(&bp->sp_task);
+       bnxt_queue_sp_work(bp);
 }
 
 static void bnxt_udp_tunnel_del(struct net_device *dev,
@@ -7572,7 +7596,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
                return;
        }
 
-       schedule_work(&bp->sp_task);
+       bnxt_queue_sp_work(bp);
 }
 
 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -7720,7 +7744,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
        pci_disable_pcie_error_reporting(pdev);
        unregister_netdev(dev);
        bnxt_shutdown_tc(bp);
-       cancel_work_sync(&bp->sp_task);
+       bnxt_cancel_sp_work(bp);
        bp->sp_event = 0;
 
        bnxt_clear_int_mode(bp);
@@ -7748,6 +7772,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
                           rc);
                return rc;
        }
+       mutex_init(&bp->link_lock);
 
        rc = bnxt_update_link(bp, false);
        if (rc) {
@@ -7946,7 +7971,7 @@ static void bnxt_parse_log_pcie_link(struct bnxt *bp)
        enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
        enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
 
-       if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
+       if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
            speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
                netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
        else
@@ -8138,8 +8163,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        else
                device_set_wakeup_capable(&pdev->dev, false);
 
-       if (BNXT_PF(bp))
+       if (BNXT_PF(bp)) {
+               if (!bnxt_pf_wq) {
+                       bnxt_pf_wq =
+                               create_singlethread_workqueue("bnxt_pf_wq");
+                       if (!bnxt_pf_wq) {
+                               dev_err(&pdev->dev, "Unable to create workqueue.\n");
+                               goto init_err_pci_clean;
+                       }
+               }
                bnxt_init_tc(bp);
+       }
 
        rc = register_netdev(dev);
        if (rc)
@@ -8375,4 +8409,17 @@ static struct pci_driver bnxt_pci_driver = {
 #endif
 };
 
-module_pci_driver(bnxt_pci_driver);
+static int __init bnxt_init(void)
+{
+       return pci_register_driver(&bnxt_pci_driver);
+}
+
+static void __exit bnxt_exit(void)
+{
+       pci_unregister_driver(&bnxt_pci_driver);
+       if (bnxt_pf_wq)
+               destroy_workqueue(bnxt_pf_wq);
+}
+
+module_init(bnxt_init);
+module_exit(bnxt_exit);
index 7b888d4b2b552a406b8157b3f037a299dc7645a4..c911e69ff25f67d0402b1f06e48de7245cea8a88 100644 (file)
@@ -1290,6 +1290,10 @@ struct bnxt {
        unsigned long           *ntp_fltr_bmap;
        int                     ntp_fltr_count;
 
+       /* To protect link related settings during link changes and
+        * ethtool settings changes.
+        */
+       struct mutex            link_lock;
        struct bnxt_link_info   link_info;
        struct ethtool_eee      eee;
        u32                     lpi_tmr_lo;
@@ -1358,6 +1362,7 @@ void bnxt_set_ring_params(struct bnxt *);
 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
 void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
 int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
 int hwrm_send_message(struct bnxt *, void *, u32, int);
 int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
index aa1f3a2c7a7865cce7970e2e7b75f74b218d5df7..fed37cd9ae1d464af02335c072b4b3676b024e0b 100644 (file)
@@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
        req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
-       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                u8 *pri2cos = &resp->pri0_cos_queue_id;
                int i, j;
@@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
                        }
                }
        }
+       mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
 }
 
@@ -119,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
        int rc, i;
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
-       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-       if (rc)
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc) {
+               mutex_unlock(&bp->hwrm_cmd_lock);
                return rc;
+       }
 
        data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
        for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
@@ -143,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
                        }
                }
        }
+       mutex_unlock(&bp->hwrm_cmd_lock);
        return 0;
 }
 
@@ -240,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
        int rc;
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
-       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-       if (rc)
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc) {
+               mutex_unlock(&bp->hwrm_cmd_lock);
                return rc;
+       }
 
        pri_mask = le32_to_cpu(resp->flags);
        pfc->pfc_en = pri_mask;
+       mutex_unlock(&bp->hwrm_cmd_lock);
        return 0;
 }
 
index 8eff05a3e0e4b748c3a0d35661ce7b1901ddef61..3cbe771b335296ce526122e892349e741ddb43b9 100644 (file)
@@ -1052,6 +1052,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
        u32 ethtool_speed;
 
        ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
+       mutex_lock(&bp->link_lock);
        bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
 
        ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
@@ -1099,6 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
                        base->port = PORT_FIBRE;
        }
        base->phy_address = link_info->phy_addr;
+       mutex_unlock(&bp->link_lock);
 
        return 0;
 }
@@ -1190,6 +1192,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
        if (!BNXT_SINGLE_PF(bp))
                return -EOPNOTSUPP;
 
+       mutex_lock(&bp->link_lock);
        if (base->autoneg == AUTONEG_ENABLE) {
                BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
                                        advertising);
@@ -1234,6 +1237,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
                rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
 
 set_setting_exit:
+       mutex_unlock(&bp->link_lock);
        return rc;
 }
 
@@ -1805,7 +1809,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
        req.dir_ordinal = cpu_to_le16(ordinal);
        req.dir_ext = cpu_to_le16(ext);
        req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
-       rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc == 0) {
                if (index)
                        *index = le16_to_cpu(output->dir_idx);
@@ -1814,6 +1819,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
                if (data_length)
                        *data_length = le32_to_cpu(output->dir_data_length);
        }
+       mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
 }
 
index d37925a8a65b6d79d96063d0eabcac9bae8dadd9..5ee18660bc33a2572320ac3210029e883841b044 100644 (file)
@@ -502,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
        int rc = 0, vfs_supported;
        int min_rx_rings, min_tx_rings, min_rss_ctxs;
        int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+       int avail_cp, avail_stat;
 
        /* Check if we can enable requested num of vf's. At a mininum
         * we require 1 RX 1 TX rings for each VF. In this minimum conf
@@ -509,6 +510,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
         */
        vfs_supported = *num_vfs;
 
+       avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
+       avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
+       avail_cp = min_t(int, avail_cp, avail_stat);
+
        while (vfs_supported) {
                min_rx_rings = vfs_supported;
                min_tx_rings = vfs_supported;
@@ -523,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
                            min_rx_rings)
                                rx_ok = 1;
                }
-               if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings)
+               if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
+                   avail_cp < min_rx_rings)
                        rx_ok = 0;
 
-               if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+               if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
+                   avail_cp >= min_tx_rings)
                        tx_ok = 1;
 
                if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
index ccd699fb2d706ec51252998f7957ae875cd812ea..7dd3d131043a76ba4b83b43f755d2b782c07139e 100644 (file)
@@ -750,6 +750,10 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
 {
        int rc = 0;
 
+       if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
+           cls_flower->common.chain_index)
+               return -EOPNOTSUPP;
+
        switch (cls_flower->command) {
        case TC_CLSFLOWER_REPLACE:
                rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
index cec94bbb2ea5ad17a7bec44a76d34c0d8f9cd128..8bc126a156e80a1d18366a4b6cc3bd8cd2764954 100644 (file)
@@ -1278,7 +1278,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 
        ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
        if (ret)
-               return -ENOMEM;
+               goto error;
 
        n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
        for (i = 0, j = 0; i < cp->max_cid_space; i++) {
index e7f54948173f7e6764b57ba5b99636ba206ba323..5b19826a7e16c731799a4b1cfc4c66d78d91f72d 100644 (file)
@@ -1847,7 +1847,7 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
        struct lio *lio = container_of(ptp, struct lio, ptp_info);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
 
-       ns = timespec_to_ns(ts);
+       ns = timespec64_to_ns(ts);
 
        spin_lock_irqsave(&lio->ptp_lock, flags);
        lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
index 49b80da51ba7307eb0d7fff8116203723b30cc9b..805ab45e9b5ac85b9b09037d89364bb9ac789809 100644 (file)
@@ -565,8 +565,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
                return true;
        default:
                bpf_warn_invalid_xdp_action(action);
+               /* fall through */
        case XDP_ABORTED:
                trace_xdp_exception(nic->netdev, prog, action);
+               /* fall through */
        case XDP_DROP:
                /* Check if it's a recycled page, if not
                 * unmap the DMA mapping.
index 38c7b21e5d63faaeae25ad45715aecb5a3469578..ede1876a9a191c2c1a63ae5ff3f839deb826d54b 100644 (file)
@@ -374,8 +374,8 @@ struct bufdesc_ex {
 #define FEC_ENET_TS_AVAIL       ((uint)0x00010000)
 #define FEC_ENET_TS_TIMER       ((uint)0x00008000)
 
-#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
-#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER)
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
+#define FEC_NAPI_IMASK FEC_ENET_MII
 #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
 
 /* ENET interrupt coalescing macro define */
index 56f56d6ada9cfdfa6f719eb6d9793efb3a614ccd..3dc2d771a222147c89a3814a34d736e0bcf0d5c4 100644 (file)
@@ -1559,14 +1559,14 @@ fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
        if (int_events == 0)
                return false;
 
-       if (int_events & FEC_ENET_RXF)
+       if (int_events & FEC_ENET_RXF_0)
                fep->work_rx |= (1 << 2);
        if (int_events & FEC_ENET_RXF_1)
                fep->work_rx |= (1 << 0);
        if (int_events & FEC_ENET_RXF_2)
                fep->work_rx |= (1 << 1);
 
-       if (int_events & FEC_ENET_TXF)
+       if (int_events & FEC_ENET_TXF_0)
                fep->work_tx |= (1 << 2);
        if (int_events & FEC_ENET_TXF_1)
                fep->work_tx |= (1 << 0);
@@ -1604,8 +1604,8 @@ fec_enet_interrupt(int irq, void *dev_id)
        }
 
        if (fep->ptp_clock)
-               fec_ptp_check_pps_event(fep);
-
+               if (fec_ptp_check_pps_event(fep))
+                       ret = IRQ_HANDLED;
        return ret;
 }
 
index 59efbd605416fd5eed5f7987e163b8cf92e6889a..5bcb2238acb2b11368635ee70042130b02f0d7ae 100644 (file)
@@ -37,20 +37,15 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
 }
 
 static int hnae3_match_n_instantiate(struct hnae3_client *client,
-                                    struct hnae3_ae_dev *ae_dev,
-                                    bool is_reg, bool *matched)
+                                    struct hnae3_ae_dev *ae_dev, bool is_reg)
 {
        int ret;
 
-       *matched = false;
-
        /* check if this client matches the type of ae_dev */
        if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
              hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
                return 0;
        }
-       /* there is a match of client and dev */
-       *matched = true;
 
        /* now, (un-)instantiate client by calling lower layer */
        if (is_reg) {
@@ -69,7 +64,6 @@ int hnae3_register_client(struct hnae3_client *client)
 {
        struct hnae3_client *client_tmp;
        struct hnae3_ae_dev *ae_dev;
-       bool matched;
        int ret = 0;
 
        mutex_lock(&hnae3_common_lock);
@@ -86,7 +80,7 @@ int hnae3_register_client(struct hnae3_client *client)
                /* if the client could not be initialized on current port, for
                 * any error reasons, move on to next available port
                 */
-               ret = hnae3_match_n_instantiate(client, ae_dev, true, &matched);
+               ret = hnae3_match_n_instantiate(client, ae_dev, true);
                if (ret)
                        dev_err(&ae_dev->pdev->dev,
                                "match and instantiation failed for port\n");
@@ -102,12 +96,11 @@ EXPORT_SYMBOL(hnae3_register_client);
 void hnae3_unregister_client(struct hnae3_client *client)
 {
        struct hnae3_ae_dev *ae_dev;
-       bool matched;
 
        mutex_lock(&hnae3_common_lock);
        /* un-initialize the client on every matched port */
        list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
-               hnae3_match_n_instantiate(client, ae_dev, false, &matched);
+               hnae3_match_n_instantiate(client, ae_dev, false);
        }
 
        list_del(&client->node);
@@ -124,7 +117,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
        const struct pci_device_id *id;
        struct hnae3_ae_dev *ae_dev;
        struct hnae3_client *client;
-       bool matched;
        int ret = 0;
 
        mutex_lock(&hnae3_common_lock);
@@ -151,13 +143,10 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
                 * initialize the figure out client instance
                 */
                list_for_each_entry(client, &hnae3_client_list, node) {
-                       ret = hnae3_match_n_instantiate(client, ae_dev, true,
-                                                       &matched);
+                       ret = hnae3_match_n_instantiate(client, ae_dev, true);
                        if (ret)
                                dev_err(&ae_dev->pdev->dev,
                                        "match and instantiation failed\n");
-                       if (matched)
-                               break;
                }
        }
 
@@ -175,7 +164,6 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
        const struct pci_device_id *id;
        struct hnae3_ae_dev *ae_dev;
        struct hnae3_client *client;
-       bool matched;
 
        mutex_lock(&hnae3_common_lock);
        /* Check if there are matched ae_dev */
@@ -187,12 +175,8 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
                /* check the client list for the match with this ae_dev type and
                 * un-initialize the figure out client instance
                 */
-               list_for_each_entry(client, &hnae3_client_list, node) {
-                       hnae3_match_n_instantiate(client, ae_dev, false,
-                                                 &matched);
-                       if (matched)
-                               break;
-               }
+               list_for_each_entry(client, &hnae3_client_list, node)
+                       hnae3_match_n_instantiate(client, ae_dev, false);
 
                ae_algo->ops->uninit_ae_dev(ae_dev);
                hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
@@ -212,7 +196,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
        const struct pci_device_id *id;
        struct hnae3_ae_algo *ae_algo;
        struct hnae3_client *client;
-       bool matched;
        int ret = 0;
 
        mutex_lock(&hnae3_common_lock);
@@ -246,13 +229,10 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
         * initialize the figure out client instance
         */
        list_for_each_entry(client, &hnae3_client_list, node) {
-               ret = hnae3_match_n_instantiate(client, ae_dev, true,
-                                               &matched);
+               ret = hnae3_match_n_instantiate(client, ae_dev, true);
                if (ret)
                        dev_err(&ae_dev->pdev->dev,
                                "match and instantiation failed\n");
-               if (matched)
-                       break;
        }
 
 out_err:
@@ -270,7 +250,6 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
        const struct pci_device_id *id;
        struct hnae3_ae_algo *ae_algo;
        struct hnae3_client *client;
-       bool matched;
 
        mutex_lock(&hnae3_common_lock);
        /* Check if there are matched ae_algo */
@@ -279,12 +258,8 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
                if (!id)
                        continue;
 
-               list_for_each_entry(client, &hnae3_client_list, node) {
-                       hnae3_match_n_instantiate(client, ae_dev, false,
-                                                 &matched);
-                       if (matched)
-                               break;
-               }
+               list_for_each_entry(client, &hnae3_client_list, node)
+                       hnae3_match_n_instantiate(client, ae_dev, false);
 
                ae_algo->ops->uninit_ae_dev(ae_dev);
                hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
index b2f28ae81273d71a6e31e3a70575570c0aae9ea2..1a01cadfe5f3035d21b35602ccf8c1c016854574 100644 (file)
 #define HNAE3_CLASS_NAME_SIZE 16
 
 #define HNAE3_DEV_INITED_B                     0x0
-#define HNAE_DEV_SUPPORT_ROCE_B                        0x1
+#define HNAE3_DEV_SUPPORT_ROCE_B               0x1
+#define HNAE3_DEV_SUPPORT_DCB_B                        0x2
+
+#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
+               BIT(HNAE3_DEV_SUPPORT_ROCE_B))
+
+#define hnae3_dev_roce_supported(hdev) \
+       hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+
+#define hnae3_dev_dcb_supported(hdev) \
+       hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
 
 #define ring_ptr_move_fw(ring, p) \
        ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -366,12 +376,12 @@ struct hnae3_ae_algo {
 struct hnae3_tc_info {
        u16     tqp_offset;     /* TQP offset from base TQP */
        u16     tqp_count;      /* Total TQPs */
-       u8      up;             /* user priority */
        u8      tc;             /* TC index */
        bool    enable;         /* If this TC is enable or not */
 };
 
 #define HNAE3_MAX_TC           8
+#define HNAE3_MAX_USER_PRIO    8
 struct hnae3_knic_private_info {
        struct net_device *netdev; /* Set by KNIC client when init instance */
        u16 rss_size;              /* Allocated RSS queues */
@@ -379,6 +389,7 @@ struct hnae3_knic_private_info {
        u16 num_desc;
 
        u8 num_tc;                 /* Total number of enabled TCs */
+       u8 prio_tc[HNAE3_MAX_USER_PRIO];  /* TC indexed by prio */
        struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */
 
        u16 num_tqps;             /* total number of TQPs in this handle */
index 91ae0135ee503848ec1ed8d3d691632485ad2170..758cf394813126450ce190b24d16af5e41f7b92b 100644 (file)
@@ -238,7 +238,7 @@ struct hclge_tqp_map {
        u8 rsv[18];
 };
 
-#define HCLGE_VECTOR_ELEMENTS_PER_CMD  11
+#define HCLGE_VECTOR_ELEMENTS_PER_CMD  10
 
 enum hclge_int_type {
        HCLGE_INT_TX,
@@ -252,8 +252,12 @@ struct hclge_ctrl_vector_chain {
 #define HCLGE_INT_TYPE_S       0
 #define HCLGE_INT_TYPE_M       0x3
 #define HCLGE_TQP_ID_S         2
-#define HCLGE_TQP_ID_M         (0x3fff << HCLGE_TQP_ID_S)
+#define HCLGE_TQP_ID_M         (0x7ff << HCLGE_TQP_ID_S)
+#define HCLGE_INT_GL_IDX_S     13
+#define HCLGE_INT_GL_IDX_M     (0x3 << HCLGE_INT_GL_IDX_S)
        __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
+       u8 vfid;
+       u8 rsv;
 };
 
 #define HCLGE_TC_NUM           8
@@ -266,7 +270,8 @@ struct hclge_tx_buff_alloc {
 
 struct hclge_rx_priv_buff {
        __le16 buf_num[HCLGE_TC_NUM];
-       u8 rsv[8];
+       __le16 shared_buf;
+       u8 rsv[6];
 };
 
 struct hclge_query_version {
@@ -684,6 +689,7 @@ struct hclge_reset_tqp_queue {
 #define HCLGE_DEFAULT_TX_BUF           0x4000   /* 16k  bytes */
 #define HCLGE_TOTAL_PKT_BUF            0x108000 /* 1.03125M bytes */
 #define HCLGE_DEFAULT_DV               0xA000   /* 40k byte */
+#define HCLGE_DEFAULT_NON_DCB_DV       0x7800  /* 30K byte */
 
 #define HCLGE_TYPE_CRQ                 0
 #define HCLGE_TYPE_CSQ                 1
index bb45365fb817eb648dcaa160e2173257e027ad2a..c1cdbfd83bdba9bcef1ab3f46729f9a8bffb09c8 100644 (file)
@@ -46,17 +46,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
-       /* Required last entry */
-       {0, }
-};
-
-static const struct pci_device_id roce_pci_tbl[] = {
-       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
-       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
-       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
-       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
-       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
-       /* Required last entry */
+       /* required last entry */
        {0, }
 };
 
@@ -894,7 +884,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
        hdev->num_tqps = __le16_to_cpu(req->tqp_num);
        hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
 
-       if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) {
+       if (hnae3_dev_roce_supported(hdev)) {
                hdev->num_roce_msix =
                hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                               HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
@@ -1063,9 +1053,9 @@ static int hclge_configure(struct hclge_dev *hdev)
        hdev->base_tqp_pid = 0;
        hdev->rss_size_max = 1;
        hdev->rx_buf_len = cfg.rx_buf_len;
-       for (i = 0; i < ETH_ALEN; i++)
-               hdev->hw.mac.mac_addr[i] = cfg.mac_addr[i];
+       ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
        hdev->hw.mac.media_type = cfg.media_type;
+       hdev->hw.mac.phy_addr = cfg.phy_addr;
        hdev->num_desc = cfg.tqp_desc_num;
        hdev->tm_info.num_pg = 1;
        hdev->tm_info.num_tc = cfg.tc_num;
@@ -1454,7 +1444,11 @@ static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
        tc_num = hclge_get_tc_num(hdev);
        pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
 
-       shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
+       if (hnae3_dev_dcb_supported(hdev))
+               shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
+       else
+               shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
+
        shared_buf_tc = pfc_enable_num * hdev->mps +
                        (tc_num - pfc_enable_num) * hdev->mps / 2 +
                        hdev->mps;
@@ -1495,6 +1489,16 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
        struct hclge_priv_buf *priv;
        int i;
 
+       /* When DCB is not supported, rx private
+        * buffer is not allocated.
+        */
+       if (!hnae3_dev_dcb_supported(hdev)) {
+               if (!hclge_is_rx_buf_ok(hdev, rx_all))
+                       return -ENOMEM;
+
+               return 0;
+       }
+
        /* step 1, try to alloc private buffer for all enabled tc */
        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                priv = &hdev->priv_buf[i];
@@ -1510,6 +1514,11 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
                                priv->wl.high = 2 * hdev->mps;
                                priv->buf_size = priv->wl.high;
                        }
+               } else {
+                       priv->enable = 0;
+                       priv->wl.low = 0;
+                       priv->wl.high = 0;
+                       priv->buf_size = 0;
                }
        }
 
@@ -1522,8 +1531,15 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                priv = &hdev->priv_buf[i];
 
-               if (hdev->hw_tc_map & BIT(i))
-                       priv->enable = 1;
+               priv->enable = 0;
+               priv->wl.low = 0;
+               priv->wl.high = 0;
+               priv->buf_size = 0;
+
+               if (!(hdev->hw_tc_map & BIT(i)))
+                       continue;
+
+               priv->enable = 1;
 
                if (hdev->tm_info.hw_pfc_map & BIT(i)) {
                        priv->wl.low = 128;
@@ -1616,6 +1632,10 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
                        cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B);
        }
 
+       req->shared_buf =
+               cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
+                           (1 << HCLGE_TC0_PRI_BUF_EN_B));
+
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
@@ -1782,18 +1802,22 @@ int hclge_buffer_alloc(struct hclge_dev *hdev)
                return ret;
        }
 
-       ret = hclge_rx_priv_wl_config(hdev);
-       if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "could not configure rx private waterline %d\n", ret);
-               return ret;
-       }
+       if (hnae3_dev_dcb_supported(hdev)) {
+               ret = hclge_rx_priv_wl_config(hdev);
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "could not configure rx private waterline %d\n",
+                               ret);
+                       return ret;
+               }
 
-       ret = hclge_common_thrd_config(hdev);
-       if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "could not configure common threshold %d\n", ret);
-               return ret;
+               ret = hclge_common_thrd_config(hdev);
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "could not configure common threshold %d\n",
+                               ret);
+                       return ret;
+               }
        }
 
        ret = hclge_common_wl_config(hdev);
@@ -2582,6 +2606,7 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
        u16 tc_valid[HCLGE_MAX_TC_NUM];
        u16 tc_size[HCLGE_MAX_TC_NUM];
        u32 *rss_indir = NULL;
+       u16 rss_size = 0, roundup_size;
        const u8 *key;
        int i, ret, j;
 
@@ -2596,7 +2621,13 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
        for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
                for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
                        vport[j].rss_indirection_tbl[i] =
-                               i % hdev->rss_size_max;
+                               i % vport[j].alloc_rss_size;
+
+                       /* vport 0 is for PF */
+                       if (j != 0)
+                               continue;
+
+                       rss_size = vport[j].alloc_rss_size;
                        rss_indir[i] = vport[j].rss_indirection_tbl[i];
                }
        }
@@ -2613,42 +2644,32 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
        if (ret)
                goto err;
 
+       /* Each TC have the same queue size, and tc_size set to hardware is
+        * the log2 of roundup power of two of rss_size, the acutal queue
+        * size is limited by indirection table.
+        */
+       if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
+               dev_err(&hdev->pdev->dev,
+                       "Configure rss tc size failed, invalid TC_SIZE = %d\n",
+                       rss_size);
+               ret = -EINVAL;
+               goto err;
+       }
+
+       roundup_size = roundup_pow_of_two(rss_size);
+       roundup_size = ilog2(roundup_size);
+
        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
-               if (hdev->hw_tc_map & BIT(i))
-                       tc_valid[i] = 1;
-               else
-                       tc_valid[i] = 0;
+               tc_valid[i] = 0;
 
-               switch (hdev->rss_size_max) {
-               case HCLGE_RSS_TC_SIZE_0:
-                       tc_size[i] = 0;
-                       break;
-               case HCLGE_RSS_TC_SIZE_1:
-                       tc_size[i] = 1;
-                       break;
-               case HCLGE_RSS_TC_SIZE_2:
-                       tc_size[i] = 2;
-                       break;
-               case HCLGE_RSS_TC_SIZE_3:
-                       tc_size[i] = 3;
-                       break;
-               case HCLGE_RSS_TC_SIZE_4:
-                       tc_size[i] = 4;
-                       break;
-               case HCLGE_RSS_TC_SIZE_5:
-                       tc_size[i] = 5;
-                       break;
-               case HCLGE_RSS_TC_SIZE_6:
-                       tc_size[i] = 6;
-                       break;
-               case HCLGE_RSS_TC_SIZE_7:
-                       tc_size[i] = 7;
-                       break;
-               default:
-                       break;
-               }
-               tc_offset[i] = hdev->rss_size_max * i;
+               if (!(hdev->hw_tc_map & BIT(i)))
+                       continue;
+
+               tc_valid[i] = 1;
+               tc_size[i] = roundup_size;
+               tc_offset[i] = rss_size * i;
        }
+
        ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
 
 err:
@@ -2679,7 +2700,11 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
                               hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
                hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
                               HCLGE_TQP_ID_S,  node->tqp_index);
+               hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
+                              HCLGE_INT_GL_IDX_S,
+                              hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
                req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
+               req->vfid = vport->vport_id;
 
                if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
                        req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -2763,8 +2788,12 @@ static int hclge_unmap_ring_from_vector(
                               hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
                hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
                               HCLGE_TQP_ID_S,  node->tqp_index);
+               hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
+                              HCLGE_INT_GL_IDX_S,
+                              hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
 
                req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
+               req->vfid = vport->vport_id;
 
                if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
                        req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -2778,7 +2807,7 @@ static int hclge_unmap_ring_from_vector(
                        }
                        i = 0;
                        hclge_cmd_setup_basic_desc(&desc,
-                                                  HCLGE_OPC_ADD_RING_TO_VECTOR,
+                                                  HCLGE_OPC_DEL_RING_TO_VECTOR,
                                                   false);
                        req->int_vector_id = vector_id;
                }
@@ -3665,6 +3694,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
 {
 #define HCLGE_VLAN_TYPE_VF_TABLE   0
 #define HCLGE_VLAN_TYPE_PORT_TABLE 1
+       struct hnae3_handle *handle;
        int ret;
 
        ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
@@ -3674,8 +3704,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
 
        ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
                                         true);
+       if (ret)
+               return ret;
 
-       return ret;
+       handle = &hdev->vport[0].nic;
+       return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
 }
 
 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
@@ -3920,8 +3953,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
                                goto err;
 
                        if (hdev->roce_client &&
-                           hnae_get_bit(hdev->ae_dev->flag,
-                                        HNAE_DEV_SUPPORT_ROCE_B)) {
+                           hnae3_dev_roce_supported(hdev)) {
                                struct hnae3_client *rc = hdev->roce_client;
 
                                ret = hclge_init_roce_base_info(vport);
@@ -3944,8 +3976,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
 
                        break;
                case HNAE3_CLIENT_ROCE:
-                       if (hnae_get_bit(hdev->ae_dev->flag,
-                                        HNAE_DEV_SUPPORT_ROCE_B)) {
+                       if (hnae3_dev_roce_supported(hdev)) {
                                hdev->roce_client = client;
                                vport->roce.client = client;
                        }
@@ -4057,7 +4088,6 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
        struct pci_dev *pdev = ae_dev->pdev;
-       const struct pci_device_id *id;
        struct hclge_dev *hdev;
        int ret;
 
@@ -4072,10 +4102,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
        hdev->ae_dev = ae_dev;
        ae_dev->priv = hdev;
 
-       id = pci_match_id(roce_pci_tbl, ae_dev->pdev);
-       if (id)
-               hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1);
-
        ret = hclge_pci_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "PCI init failed\n");
@@ -4138,12 +4164,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
                return ret;
        }
 
-       ret = hclge_rss_init_hw(hdev);
-       if (ret) {
-               dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
-               return  ret;
-       }
-
        ret = hclge_init_vlan_config(hdev);
        if (ret) {
                dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -4156,6 +4176,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
                return ret;
        }
 
+       ret = hclge_rss_init_hw(hdev);
+       if (ret) {
+               dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
+               return ret;
+       }
+
        setup_timer(&hdev->service_timer, hclge_service_timer,
                    (unsigned long)hdev);
        INIT_WORK(&hdev->service_task, hclge_service_task);
index edb10ad075eb2520a3f671d8062f043ddb1c8280..9fcfd9395424538870f4b837ad9ade21476867ae 100644 (file)
@@ -176,7 +176,6 @@ struct hclge_pg_info {
 struct hclge_tc_info {
        u8 tc_id;
        u8 tc_sch_mode;         /* 0: sp; 1: dwrr */
-       u8 up;
        u8 pgid;
        u32 bw_limit;
 };
@@ -197,6 +196,7 @@ struct hclge_tm_info {
        u8 num_tc;
        u8 num_pg;      /* It must be 1 if vNET-Base schd */
        u8 pg_dwrr[HCLGE_PG_NUM];
+       u8 prio_tc[HNAE3_MAX_USER_PRIO];
        struct hclge_pg_info pg_info[HCLGE_PG_NUM];
        struct hclge_tc_info tc_info[HNAE3_MAX_TC];
        enum hclge_fc_mode fc_mode;
@@ -477,6 +477,7 @@ struct hclge_vport {
        u8  rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
        /* User configured lookup table entries */
        u8  rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
+       u16 alloc_rss_size;
 
        u16 qs_offset;
        u16 bw_limit;           /* VSI BW Limit (0 = disabled) */
index 1c577d268f008b4d39b35164b9205ba35eaefdaa..73a75d7cc5517819b8135324dbc481a0aa993423 100644 (file)
@@ -128,9 +128,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
 {
        u8 tc;
 
-       for (tc = 0; tc < hdev->tm_info.num_tc; tc++)
-               if (hdev->tm_info.tc_info[tc].up == pri_id)
-                       break;
+       tc = hdev->tm_info.prio_tc[pri_id];
 
        if (tc >= hdev->tm_info.num_tc)
                return -EINVAL;
@@ -158,7 +156,7 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)
 
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
 
-       for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) {
+       for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
                ret = hclge_fill_pri_array(hdev, pri, pri_id);
                if (ret)
                        return ret;
@@ -280,11 +278,11 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
 
        shap_cfg_cmd->pg_id = pg_id;
 
-       hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
-       hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
-       hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
-       hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
-       hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
+       hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
+       hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
+       hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
+       hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
+       hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
 
        return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
@@ -307,11 +305,11 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
 
        shap_cfg_cmd->pri_id = pri_id;
 
-       hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
-       hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
-       hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
-       hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
-       hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
+       hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
+       hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
+       hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
+       hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
+       hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
 
        return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
@@ -397,6 +395,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
                        kinfo->num_tqps / kinfo->num_tc);
        vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
        vport->dwrr = 100;  /* 100 percent as init */
+       vport->alloc_rss_size = kinfo->rss_size;
 
        for (i = 0; i < kinfo->num_tc; i++) {
                if (hdev->hw_tc_map & BIT(i)) {
@@ -404,16 +403,17 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
                        kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
                        kinfo->tc_info[i].tqp_count = kinfo->rss_size;
                        kinfo->tc_info[i].tc = i;
-                       kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up;
                } else {
                        /* Set to default queue if TC is disable */
                        kinfo->tc_info[i].enable = false;
                        kinfo->tc_info[i].tqp_offset = 0;
                        kinfo->tc_info[i].tqp_count = 1;
                        kinfo->tc_info[i].tc = 0;
-                       kinfo->tc_info[i].up = 0;
                }
        }
+
+       memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
+              FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
 }
 
 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
@@ -435,12 +435,15 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                hdev->tm_info.tc_info[i].tc_id = i;
                hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
-               hdev->tm_info.tc_info[i].up = i;
                hdev->tm_info.tc_info[i].pgid = 0;
                hdev->tm_info.tc_info[i].bw_limit =
                        hdev->tm_info.pg_info[0].bw_limit;
        }
 
+       for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+               hdev->tm_info.prio_tc[i] =
+                       (i >= hdev->tm_info.num_tc) ? 0 : i;
+
        hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
 }
 
@@ -976,6 +979,10 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
        if (ret)
                return ret;
 
+       /* Only DCB-supported dev supports qset back pressure setting */
+       if (!hnae3_dev_dcb_supported(hdev))
+               return 0;
+
        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                ret = hclge_tm_qs_bp_cfg(hdev, i);
                if (ret)
index 7e67337dfaf2c36e8c0ecc79f68aaa3f7bc7a32c..85158b0d73fe395ffa90de916c789e5afca5d15a 100644 (file)
@@ -94,10 +94,10 @@ struct hclge_bp_to_qs_map_cmd {
        u32 rsvd1;
 };
 
-#define hclge_tm_set_feild(dest, string, val) \
+#define hclge_tm_set_field(dest, string, val) \
                        hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
                                       (HCLGE_TM_SHAP_##string##_LSH), val)
-#define hclge_tm_get_feild(src, string) \
+#define hclge_tm_get_field(src, string) \
                        hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
                                       (HCLGE_TM_SHAP_##string##_LSH))
 
index 1c3e29447891644f48214aee3edceb9efd0ba1ef..35369e1c8036f3abbade76b63eed2a103170b4c7 100644 (file)
@@ -41,11 +41,16 @@ static struct hnae3_client client;
 static const struct pci_device_id hns3_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
-       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
-       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
-       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
-       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
-       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
+        HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
+        HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
+        HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
+        HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
+        HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
        /* required last entry */
        {0, }
 };
@@ -1348,6 +1353,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        ae_dev->pdev = pdev;
+       ae_dev->flag = ent->driver_data;
        ae_dev->dev_type = HNAE3_DEV_KNIC;
        pci_set_drvdata(pdev, ae_dev);
 
@@ -2705,10 +2711,11 @@ static void hns3_init_mac_addr(struct net_device *netdev)
                eth_hw_addr_random(netdev);
                dev_warn(priv->dev, "using random MAC address %pM\n",
                         netdev->dev_addr);
-               /* Also copy this new MAC address into hdev */
-               if (h->ae_algo->ops->set_mac_addr)
-                       h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
        }
+
+       if (h->ae_algo->ops->set_mac_addr)
+               h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
+
 }
 
 static void hns3_nic_set_priv_ops(struct net_device *netdev)
index 2c74baa2398a016fb9468f8a1b4b3aa5d0dc2a05..fff09dcf9e3463f8b10800d2754a4631770d2d63 100644 (file)
@@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
        unsigned long flags;
 
        MAL_DBG2(mal, "poll(%d)" NL, budget);
- again:
+
        /* Process TX skbs */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
@@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget)
                        spin_lock_irqsave(&mal->lock, flags);
                        mal_disable_eob_irq(mal);
                        spin_unlock_irqrestore(&mal->lock, flags);
-                       goto again;
                }
                mc->ops->poll_tx(mc->dev);
        }
index cb8182f4fdfa1725f8d4024741e00219376d7326..c66abd476023a401b14c53abf7026e3147f13f62 100644 (file)
@@ -1093,11 +1093,12 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
  * places them in a descriptor array, scrq_arr
  */
 
-static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
-                            union sub_crq *scrq_arr)
+static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+                           union sub_crq *scrq_arr)
 {
        union sub_crq hdr_desc;
        int tmp_len = len;
+       int num_descs = 0;
        u8 *data, *cur;
        int tmp;
 
@@ -1126,7 +1127,10 @@ static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
                tmp_len -= tmp;
                *scrq_arr = hdr_desc;
                scrq_arr++;
+               num_descs++;
        }
+
+       return num_descs;
 }
 
 /**
@@ -1144,16 +1148,12 @@ static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
                                int *num_entries, u8 hdr_field)
 {
        int hdr_len[3] = {0, 0, 0};
-       int tot_len, len;
+       int tot_len;
        u8 *hdr_data = txbuff->hdr_data;
 
        tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
                                 txbuff->hdr_data);
-       len = tot_len;
-       len -= 24;
-       if (len > 0)
-               num_entries += len % 29 ? len / 29 + 1 : len / 29;
-       create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
+       *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
                         txbuff->indir_arr + 1);
 }
 
index ec8aa4562cc90a90dff844872278722b24daec3c..3b3983a1ffbba1d6795ead832c5aceb801023969 100644 (file)
@@ -1824,11 +1824,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        int i;
-       char *p = NULL;
        const struct e1000_stats *stat = e1000_gstrings_stats;
 
        e1000_update_stats(adapter);
-       for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+       for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
+               char *p;
+
                switch (stat->type) {
                case NETDEV_STATS:
                        p = (char *)netdev + stat->stat_offset;
@@ -1839,15 +1840,13 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
                default:
                        WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
                                  stat->type, i);
-                       break;
+                       continue;
                }
 
                if (stat->sizeof_stat == sizeof(u64))
                        data[i] = *(u64 *)p;
                else
                        data[i] = *(u32 *)p;
-
-               stat++;
        }
 /* BUG_ON(i != E1000_STATS_LEN); */
 }
index 98375e1e1185e7b8136515d0d3e2a90464495ccd..1982f7917a8d5d68776b2e052ac1df809f11c065 100644 (file)
@@ -520,8 +520,6 @@ void e1000_down(struct e1000_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        u32 rctl, tctl;
 
-       netif_carrier_off(netdev);
-
        /* disable receives in the hardware */
        rctl = er32(RCTL);
        ew32(RCTL, rctl & ~E1000_RCTL_EN);
@@ -537,6 +535,15 @@ void e1000_down(struct e1000_adapter *adapter)
        E1000_WRITE_FLUSH();
        msleep(10);
 
+       /* Set the carrier off after transmits have been disabled in the
+        * hardware, to avoid race conditions with e1000_watchdog() (which
+        * may be running concurrently to us, checking for the carrier
+        * bit to decide whether it should enable transmits again). Such
+        * a race condition would result into transmission being disabled
+        * in the hardware until the next IFF_DOWN+IFF_UP cycle.
+        */
+       netif_carrier_off(netdev);
+
        napi_disable(&adapter->napi);
 
        e1000_irq_disable(adapter);
index 57505b1df98dfb65e5033808c05c4cf9954600de..d591b3e6bd7c511d974c20e2a2e27e44816fa84f 100644 (file)
@@ -298,7 +298,7 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
 }
 
 /**
- * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking
+ * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  * @data: word read from the Shadow RAM
index 1519dfb851d01a3628c3ad8411be96e3c2e0643e..120c68f78951dd1ae7a2218b4b64c049aee40340 100644 (file)
@@ -1037,6 +1037,32 @@ reset_latency:
        return false;
 }
 
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+                              struct i40e_rx_buffer *old_buff)
+{
+       struct i40e_rx_buffer *new_buff;
+       u16 nta = rx_ring->next_to_alloc;
+
+       new_buff = &rx_ring->rx_bi[nta];
+
+       /* update, and store next to alloc */
+       nta++;
+       rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+       /* transfer page from old buffer to new buffer */
+       new_buff->dma           = old_buff->dma;
+       new_buff->page          = old_buff->page;
+       new_buff->page_offset   = old_buff->page_offset;
+       new_buff->pagecnt_bias  = old_buff->pagecnt_bias;
+}
+
 /**
  * i40e_rx_is_programming_status - check for programming status descriptor
  * @qw: qword representing status_error_len in CPU ordering
@@ -1071,15 +1097,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
                                          union i40e_rx_desc *rx_desc,
                                          u64 qw)
 {
-       u32 ntc = rx_ring->next_to_clean + 1;
+       struct i40e_rx_buffer *rx_buffer;
+       u32 ntc = rx_ring->next_to_clean;
        u8 id;
 
        /* fetch, update, and store next to clean */
+       rx_buffer = &rx_ring->rx_bi[ntc++];
        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;
 
        prefetch(I40E_RX_DESC(rx_ring, ntc));
 
+       /* place unused page back on the ring */
+       i40e_reuse_rx_page(rx_ring, rx_buffer);
+       rx_ring->rx_stats.page_reuse_count++;
+
+       /* clear contents of buffer_info */
+       rx_buffer->page = NULL;
+
        id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
                  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
 
@@ -1638,32 +1673,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
        return false;
 }
 
-/**
- * i40e_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
-                              struct i40e_rx_buffer *old_buff)
-{
-       struct i40e_rx_buffer *new_buff;
-       u16 nta = rx_ring->next_to_alloc;
-
-       new_buff = &rx_ring->rx_bi[nta];
-
-       /* update, and store next to alloc */
-       nta++;
-       rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
-       /* transfer page from old buffer to new buffer */
-       new_buff->dma           = old_buff->dma;
-       new_buff->page          = old_buff->page;
-       new_buff->page_offset   = old_buff->page_offset;
-       new_buff->pagecnt_bias  = old_buff->pagecnt_bias;
-}
-
 /**
  * i40e_page_is_reusable - check if any reuse is possible
  * @page: page struct to check
@@ -2093,6 +2102,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 
                if (unlikely(i40e_rx_is_programming_status(qword))) {
                        i40e_clean_programming_status(rx_ring, rx_desc, qword);
+                       cleaned_count++;
                        continue;
                }
                size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
@@ -2260,7 +2270,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
                goto enable_int;
        }
 
-       if (ITR_IS_DYNAMIC(tx_itr_setting)) {
+       if (ITR_IS_DYNAMIC(rx_itr_setting)) {
                rx = i40e_set_new_dynamic_itr(&q_vector->rx);
                rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
        }
index fd4a46b03cc8317f44f0c4d3ee5141dd05858244..ea69af267d63522ae7f545c2cbb8a7a6e601adbc 100644 (file)
@@ -5326,7 +5326,7 @@ dma_error:
                                       DMA_TO_DEVICE);
                dma_unmap_len_set(tx_buffer, len, 0);
 
-               if (i--)
+               if (i-- == 0)
                        i += tx_ring->count;
                tx_buffer = &tx_ring->tx_buffer_info[i];
        }
index 523f9d05a810f175582e5b1474f4d398bfa0aea2..8a32eb7d47b9ba88f97ad25be9374e68020bfb29 100644 (file)
@@ -175,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
  **/
 static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
 {
-#ifndef CONFIG_SPARC
-       u32 regval;
-       u32 i;
-#endif
        s32 ret_val;
 
        ret_val = ixgbe_start_hw_generic(hw);
-
-#ifndef CONFIG_SPARC
-       /* Disable relaxed ordering */
-       for (i = 0; ((i < hw->mac.max_tx_queues) &&
-            (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
-               regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
-               regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
-       }
-
-       for (i = 0; ((i < hw->mac.max_rx_queues) &&
-            (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
-               regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-               regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-                           IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
-               IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
-       }
-#endif
        if (ret_val)
                return ret_val;
 
index 2c19070d2a0b08dee8b47a3d98bf54971741cb78..6e6ab6f6875ebc188fc11a6d9f8b1b21dea3fb59 100644 (file)
@@ -366,25 +366,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
        }
        IXGBE_WRITE_FLUSH(hw);
 
-#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
-       /* Disable relaxed ordering */
-       for (i = 0; i < hw->mac.max_tx_queues; i++) {
-               u32 regval;
-
-               regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
-               regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
-       }
-
-       for (i = 0; i < hw->mac.max_rx_queues; i++) {
-               u32 regval;
-
-               regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-               regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-                           IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
-               IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
-       }
-#endif
        return 0;
 }
 
index 72c565712a5f83106fe92ee7aa6e2f674e89efa2..c3e7a8191128dea542db9208a8076b4cf190e60e 100644 (file)
@@ -1048,7 +1048,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_ring *temp_ring;
-       int i, err = 0;
+       int i, j, err = 0;
        u32 new_rx_count, new_tx_count;
 
        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -1085,8 +1085,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
        }
 
        /* allocate temporary buffer to store rings in */
-       i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
-       i = max_t(int, i, adapter->num_xdp_queues);
+       i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
+                 adapter->num_rx_queues);
        temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
 
        if (!temp_ring) {
@@ -1118,8 +1118,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                        }
                }
 
-               for (i = 0; i < adapter->num_xdp_queues; i++) {
-                       memcpy(&temp_ring[i], adapter->xdp_ring[i],
+               for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+                       memcpy(&temp_ring[i], adapter->xdp_ring[j],
                               sizeof(struct ixgbe_ring));
 
                        temp_ring[i].count = new_tx_count;
@@ -1139,10 +1139,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                        memcpy(adapter->tx_ring[i], &temp_ring[i],
                               sizeof(struct ixgbe_ring));
                }
-               for (i = 0; i < adapter->num_xdp_queues; i++) {
-                       ixgbe_free_tx_resources(adapter->xdp_ring[i]);
+               for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+                       ixgbe_free_tx_resources(adapter->xdp_ring[j]);
 
-                       memcpy(adapter->xdp_ring[i], &temp_ring[i],
+                       memcpy(adapter->xdp_ring[j], &temp_ring[i],
                               sizeof(struct ixgbe_ring));
                }
 
index d962368d08d0ff4c25032e3d0b3b2f51963a39d7..6d5f31e943583df77f5fa1e6aa3e2703fff33518 100644 (file)
@@ -4881,7 +4881,7 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
                                IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
                return;
 
-       vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask;
+       vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
        IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
 
        if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
@@ -8020,29 +8020,23 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
        return 0;
 dma_error:
        dev_err(tx_ring->dev, "TX DMA map failed\n");
-       tx_buffer = &tx_ring->tx_buffer_info[i];
 
        /* clear dma mappings for failed tx_buffer_info map */
-       while (tx_buffer != first) {
+       for (;;) {
+               tx_buffer = &tx_ring->tx_buffer_info[i];
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_page(tx_ring->dev,
                                       dma_unmap_addr(tx_buffer, dma),
                                       dma_unmap_len(tx_buffer, len),
                                       DMA_TO_DEVICE);
                dma_unmap_len_set(tx_buffer, len, 0);
-
-               if (i--)
+               if (tx_buffer == first)
+                       break;
+               if (i == 0)
                        i += tx_ring->count;
-               tx_buffer = &tx_ring->tx_buffer_info[i];
+               i--;
        }
 
-       if (dma_unmap_len(tx_buffer, len))
-               dma_unmap_single(tx_ring->dev,
-                                dma_unmap_addr(tx_buffer, dma),
-                                dma_unmap_len(tx_buffer, len),
-                                DMA_TO_DEVICE);
-       dma_unmap_len_set(tx_buffer, len, 0);
-
        dev_kfree_skb_any(first->skb);
        first->skb = NULL;
 
@@ -8529,6 +8523,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
                return ixgbe_ptp_set_ts_config(adapter, req);
        case SIOCGHWTSTAMP:
                return ixgbe_ptp_get_ts_config(adapter, req);
+       case SIOCGMIIPHY:
+               if (!adapter->hw.phy.ops.read_reg)
+                       return -EOPNOTSUPP;
+               /* fall through */
        default:
                return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
        }
index dd0ee2691c863edab216c24e1e227bdb9dc84306..a37af5813f33758d1ed8debf6bcd77590363f349 100644 (file)
 #define     MVPP2_GMAC_INBAND_AN_MASK          BIT(0)
 #define     MVPP2_GMAC_FLOW_CTRL_MASK          GENMASK(2, 1)
 #define     MVPP2_GMAC_PCS_ENABLE_MASK         BIT(3)
-#define     MVPP2_GMAC_PORT_RGMII_MASK         BIT(4)
+#define     MVPP2_GMAC_INTERNAL_CLK_MASK       BIT(4)
 #define     MVPP2_GMAC_DISABLE_PADDING         BIT(5)
 #define     MVPP2_GMAC_PORT_RESET_MASK         BIT(6)
 #define MVPP2_GMAC_AUTONEG_CONFIG              0xc
@@ -676,6 +676,7 @@ enum mvpp2_tag_type {
 #define MVPP2_PRS_RI_L3_MCAST                  BIT(15)
 #define MVPP2_PRS_RI_L3_BCAST                  (BIT(15) | BIT(16))
 #define MVPP2_PRS_RI_IP_FRAG_MASK              0x20000
+#define MVPP2_PRS_RI_IP_FRAG_TRUE              BIT(17)
 #define MVPP2_PRS_RI_UDF3_MASK                 0x300000
 #define MVPP2_PRS_RI_UDF3_RX_SPECIAL           BIT(21)
 #define MVPP2_PRS_RI_L4_PROTO_MASK             0x1c00000
@@ -792,6 +793,7 @@ struct mvpp2 {
        struct clk *pp_clk;
        struct clk *gop_clk;
        struct clk *mg_clk;
+       struct clk *axi_clk;
 
        /* List of pointers to port structures */
        struct mvpp2_port **port_list;
@@ -1165,6 +1167,11 @@ struct mvpp2_bm_pool {
        u32 port_map;
 };
 
+#define IS_TSO_HEADER(txq_pcpu, addr) \
+       ((addr) >= (txq_pcpu)->tso_headers_dma && \
+        (addr) < (txq_pcpu)->tso_headers_dma + \
+        (txq_pcpu)->size * TSO_HEADER_SIZE)
+
 /* Queue modes */
 #define MVPP2_QDIST_SINGLE_MODE        0
 #define MVPP2_QDIST_MULTI_MODE 1
@@ -1532,7 +1539,7 @@ static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
        int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
        u16 tcam_data;
 
-       tcam_data = (8 << pe->tcam.byte[off + 1]) | pe->tcam.byte[off];
+       tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
        if (tcam_data != data)
                return false;
        return true;
@@ -2315,7 +2322,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
            (proto != IPPROTO_IGMP))
                return -EINVAL;
 
-       /* Fragmented packet */
+       /* Not fragmented packet */
        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                                        MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
@@ -2334,8 +2341,12 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
-       mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
-                                ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+       mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+       mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
+                                    MVPP2_PRS_TCAM_PROTO_MASK_L);
+       mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
+                                    MVPP2_PRS_TCAM_PROTO_MASK);
 
        mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
@@ -2346,7 +2357,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
        mvpp2_prs_hw_write(priv, &pe);
 
-       /* Not fragmented packet */
+       /* Fragmented packet */
        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                                        MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
@@ -2358,8 +2369,11 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
        pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
 
-       mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
-       mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
+       mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
+                                ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+       mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
+       mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
 
        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
@@ -2600,8 +2614,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
        /* place holders only - no ports */
        mvpp2_prs_mac_drop_all_set(priv, 0, false);
        mvpp2_prs_mac_promisc_set(priv, 0, false);
-       mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
-       mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
+       mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
+       mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
 }
 
 /* Set default entries for various types of dsa packets */
@@ -3382,7 +3396,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
        struct mvpp2_prs_entry *pe;
        int tid;
 
-       pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+       pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
        if (!pe)
                return NULL;
        mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
@@ -3444,7 +3458,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
                if (tid < 0)
                        return tid;
 
-               pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+               pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
                if (!pe)
                        return -ENOMEM;
                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
@@ -4591,7 +4605,6 @@ static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
                val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
        } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
                val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
-               val |= MVPP2_GMAC_PORT_RGMII_MASK;
        }
        writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
 
@@ -5313,8 +5326,9 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
                struct mvpp2_txq_pcpu_buf *tx_buf =
                        txq_pcpu->buffs + txq_pcpu->txq_get_index;
 
-               dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
-                                tx_buf->size, DMA_TO_DEVICE);
+               if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
+                       dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
+                                        tx_buf->size, DMA_TO_DEVICE);
                if (tx_buf->skb)
                        dev_kfree_skb_any(tx_buf->skb);
 
@@ -5601,7 +5615,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 
                txq_pcpu->tso_headers =
                        dma_alloc_coherent(port->dev->dev.parent,
-                                          MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE,
+                                          txq_pcpu->size * TSO_HEADER_SIZE,
                                           &txq_pcpu->tso_headers_dma,
                                           GFP_KERNEL);
                if (!txq_pcpu->tso_headers)
@@ -5615,7 +5629,7 @@ cleanup:
                kfree(txq_pcpu->buffs);
 
                dma_free_coherent(port->dev->dev.parent,
-                                 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+                                 txq_pcpu->size * TSO_HEADER_SIZE,
                                  txq_pcpu->tso_headers,
                                  txq_pcpu->tso_headers_dma);
        }
@@ -5639,7 +5653,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
                kfree(txq_pcpu->buffs);
 
                dma_free_coherent(port->dev->dev.parent,
-                                 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+                                 txq_pcpu->size * TSO_HEADER_SIZE,
                                  txq_pcpu->tso_headers,
                                  txq_pcpu->tso_headers_dma);
        }
@@ -6204,12 +6218,15 @@ static inline void
 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
                  struct mvpp2_tx_desc *desc)
 {
+       struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+
        dma_addr_t buf_dma_addr =
                mvpp2_txdesc_dma_addr_get(port, desc);
        size_t buf_sz =
                mvpp2_txdesc_size_get(port, desc);
-       dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
-                        buf_sz, DMA_TO_DEVICE);
+       if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
+               dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
+                                buf_sz, DMA_TO_DEVICE);
        mvpp2_txq_desc_put(txq);
 }
 
@@ -6482,7 +6499,7 @@ out:
        }
 
        /* Finalize TX processing */
-       if (txq_pcpu->count >= txq->done_pkts_coal)
+       if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
                mvpp2_txq_done(port, txq, txq_pcpu);
 
        /* Set the timer in case not all frags were processed */
@@ -7496,7 +7513,7 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
 /* Ports initialization */
 static int mvpp2_port_probe(struct platform_device *pdev,
                            struct device_node *port_node,
-                           struct mvpp2 *priv)
+                           struct mvpp2 *priv, int index)
 {
        struct device_node *phy_node;
        struct phy *comphy;
@@ -7670,7 +7687,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        }
        netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
 
-       priv->port_list[id] = port;
+       priv->port_list[index] = port;
        return 0;
 
 err_free_port_pcpu:
@@ -7963,6 +7980,18 @@ static int mvpp2_probe(struct platform_device *pdev)
                err = clk_prepare_enable(priv->mg_clk);
                if (err < 0)
                        goto err_gop_clk;
+
+               priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
+               if (IS_ERR(priv->axi_clk)) {
+                       err = PTR_ERR(priv->axi_clk);
+                       if (err == -EPROBE_DEFER)
+                               goto err_gop_clk;
+                       priv->axi_clk = NULL;
+               } else {
+                       err = clk_prepare_enable(priv->axi_clk);
+                       if (err < 0)
+                               goto err_gop_clk;
+               }
        }
 
        /* Get system's tclk rate */
@@ -8005,16 +8034,19 @@ static int mvpp2_probe(struct platform_device *pdev)
        }
 
        /* Initialize ports */
+       i = 0;
        for_each_available_child_of_node(dn, port_node) {
-               err = mvpp2_port_probe(pdev, port_node, priv);
+               err = mvpp2_port_probe(pdev, port_node, priv, i);
                if (err < 0)
                        goto err_mg_clk;
+               i++;
        }
 
        platform_set_drvdata(pdev, priv);
        return 0;
 
 err_mg_clk:
+       clk_disable_unprepare(priv->axi_clk);
        if (priv->hw_version == MVPP22)
                clk_disable_unprepare(priv->mg_clk);
 err_gop_clk:
@@ -8052,6 +8084,7 @@ static int mvpp2_remove(struct platform_device *pdev)
                                  aggr_txq->descs_dma);
        }
 
+       clk_disable_unprepare(priv->axi_clk);
        clk_disable_unprepare(priv->mg_clk);
        clk_disable_unprepare(priv->pp_clk);
        clk_disable_unprepare(priv->gop_clk);
index ff60cf7342ca5a4170a477ad7fff0ead65cf5df8..fc281712869b2e1c5c3321b46d1e8c5c562fcb74 100644 (file)
@@ -77,35 +77,41 @@ static void add_delayed_event(struct mlx5_priv *priv,
        list_add_tail(&delayed_event->list, &priv->waiting_events_list);
 }
 
-static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx,
-                                     struct mlx5_core_dev *dev,
-                                     struct mlx5_priv *priv)
+static void delayed_event_release(struct mlx5_device_context *dev_ctx,
+                                 struct mlx5_priv *priv)
 {
+       struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
        struct mlx5_delayed_event *de;
        struct mlx5_delayed_event *n;
+       struct list_head temp;
 
-       /* stop delaying events */
-       priv->is_accum_events = false;
+       INIT_LIST_HEAD(&temp);
+
+       spin_lock_irq(&priv->ctx_lock);
 
-       /* fire all accumulated events before new event comes */
-       list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
+       priv->is_accum_events = false;
+       list_splice_init(&priv->waiting_events_list, &temp);
+       if (!dev_ctx->context)
+               goto out;
+       list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
                dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
+
+out:
+       spin_unlock_irq(&priv->ctx_lock);
+
+       list_for_each_entry_safe(de, n, &temp, list) {
                list_del(&de->list);
                kfree(de);
        }
 }
 
-static void cleanup_delayed_evets(struct mlx5_priv *priv)
+/* accumulating events that can come after mlx5_ib calls to
+ * ib_register_device, till adding that interface to the events list.
+ */
+static void delayed_event_start(struct mlx5_priv *priv)
 {
-       struct mlx5_delayed_event *de;
-       struct mlx5_delayed_event *n;
-
        spin_lock_irq(&priv->ctx_lock);
-       priv->is_accum_events = false;
-       list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
-               list_del(&de->list);
-               kfree(de);
-       }
+       priv->is_accum_events = true;
        spin_unlock_irq(&priv->ctx_lock);
 }
 
@@ -122,11 +128,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
                return;
 
        dev_ctx->intf = intf;
-       /* accumulating events that can come after mlx5_ib calls to
-        * ib_register_device, till adding that interface to the events list.
-        */
 
-       priv->is_accum_events = true;
+       delayed_event_start(priv);
 
        dev_ctx->context = intf->add(dev);
        set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
@@ -137,8 +140,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
                spin_lock_irq(&priv->ctx_lock);
                list_add_tail(&dev_ctx->list, &priv->ctx_list);
 
-               fire_delayed_event_locked(dev_ctx, dev, priv);
-
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
                if (dev_ctx->intf->pfault) {
                        if (priv->pfault) {
@@ -150,11 +151,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
                }
 #endif
                spin_unlock_irq(&priv->ctx_lock);
-       } else {
-               kfree(dev_ctx);
-                /* delete all accumulated events */
-               cleanup_delayed_evets(priv);
        }
+
+       delayed_event_release(dev_ctx, priv);
+
+       if (!dev_ctx->context)
+               kfree(dev_ctx);
 }
 
 static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
@@ -205,17 +207,21 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
        if (!dev_ctx)
                return;
 
+       delayed_event_start(priv);
        if (intf->attach) {
                if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
-                       return;
+                       goto out;
                intf->attach(dev, dev_ctx->context);
                set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
        } else {
                if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
-                       return;
+                       goto out;
                dev_ctx->context = intf->add(dev);
                set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
        }
+
+out:
+       delayed_event_release(dev_ctx, priv);
 }
 
 void mlx5_attach_device(struct mlx5_core_dev *dev)
@@ -414,8 +420,14 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
        if (priv->is_accum_events)
                add_delayed_event(priv, dev, event, param);
 
+       /* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
+        * still in priv->ctx_list. In this case, only notify the dev_ctx if its
+        * ADDED or ATTACHED bits are set.
+        */
        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
-               if (dev_ctx->intf->event)
+               if (dev_ctx->intf->event &&
+                   (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
+                    test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
                        dev_ctx->intf->event(dev, dev_ctx->context, event, param);
 
        spin_unlock_irqrestore(&priv->ctx_lock, flags);
index 1e3a6c3e41323d02bf787bb559741bc2a16a678a..80eef4163f52e61d7f511cdd1aaba9dd6898fcd8 100644 (file)
@@ -139,7 +139,7 @@ TRACE_EVENT(mlx5_fs_del_fg,
        {MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"}
 
 TRACE_EVENT(mlx5_fs_set_fte,
-           TP_PROTO(const struct fs_fte *fte, bool new_fte),
+           TP_PROTO(const struct fs_fte *fte, int new_fte),
            TP_ARGS(fte, new_fte),
            TP_STRUCT__entry(
                __field(const struct fs_fte *, fte)
@@ -149,7 +149,7 @@ TRACE_EVENT(mlx5_fs_set_fte,
                __field(u32, action)
                __field(u32, flow_tag)
                __field(u8,  mask_enable)
-               __field(bool, new_fte)
+               __field(int, new_fte)
                __array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
                __array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
                __array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc))
index c1d384fca4dc1195a1d0b76677176b7dfcbad538..51c4cc00a186589734121efc796f2606c0e2c3fe 100644 (file)
 #define MLX5E_CEE_STATE_UP    1
 #define MLX5E_CEE_STATE_DOWN  0
 
+enum {
+       MLX5E_VENDOR_TC_GROUP_NUM = 7,
+       MLX5E_LOWEST_PRIO_GROUP   = 0,
+};
+
 /* If dcbx mode is non-host set the dcbx mode to host.
  */
 static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
@@ -85,6 +90,9 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       u8 tc_group[IEEE_8021QAZ_MAX_TCS];
+       bool is_tc_group_6_exist = false;
+       bool is_zero_bw_ets_tc = false;
        int err = 0;
        int i;
 
@@ -96,37 +104,64 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
                err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
                if (err)
                        return err;
-       }
 
-       for (i = 0; i < ets->ets_cap; i++) {
+               err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
+               if (err)
+                       return err;
+
                err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
                if (err)
                        return err;
+
+               if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
+                   tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
+                       is_zero_bw_ets_tc = true;
+
+               if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
+                       is_tc_group_6_exist = true;
+       }
+
+       /* Report 0% ets tc if exists */
+       if (is_zero_bw_ets_tc) {
+               for (i = 0; i < ets->ets_cap; i++)
+                       if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
+                               ets->tc_tx_bw[i] = 0;
+       }
+
+       /* Update tc_tsa based on fw setting */
+       for (i = 0; i < ets->ets_cap; i++) {
                if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
                        priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
+               else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
+                        !is_tc_group_6_exist)
+                       priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
        }
-
        memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
 
        return err;
 }
 
-enum {
-       MLX5E_VENDOR_TC_GROUP_NUM = 7,
-       MLX5E_ETS_TC_GROUP_NUM    = 0,
-};
-
 static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
 {
        bool any_tc_mapped_to_ets = false;
+       bool ets_zero_bw = false;
        int strict_group;
        int i;
 
-       for (i = 0; i <= max_tc; i++)
-               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+       for (i = 0; i <= max_tc; i++) {
+               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
                        any_tc_mapped_to_ets = true;
+                       if (!ets->tc_tx_bw[i])
+                               ets_zero_bw = true;
+               }
+       }
 
-       strict_group = any_tc_mapped_to_ets ? 1 : 0;
+       /* strict group has higher priority than ets group */
+       strict_group = MLX5E_LOWEST_PRIO_GROUP;
+       if (any_tc_mapped_to_ets)
+               strict_group++;
+       if (ets_zero_bw)
+               strict_group++;
 
        for (i = 0; i <= max_tc; i++) {
                switch (ets->tc_tsa[i]) {
@@ -137,7 +172,9 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
                        tc_group[i] = strict_group++;
                        break;
                case IEEE_8021QAZ_TSA_ETS:
-                       tc_group[i] = MLX5E_ETS_TC_GROUP_NUM;
+                       tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
+                       if (ets->tc_tx_bw[i] && ets_zero_bw)
+                               tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
                        break;
                }
        }
@@ -146,8 +183,22 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
 static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
                                 u8 *tc_group, int max_tc)
 {
+       int bw_for_ets_zero_bw_tc = 0;
+       int last_ets_zero_bw_tc = -1;
+       int num_ets_zero_bw = 0;
        int i;
 
+       for (i = 0; i <= max_tc; i++) {
+               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
+                   !ets->tc_tx_bw[i]) {
+                       num_ets_zero_bw++;
+                       last_ets_zero_bw_tc = i;
+               }
+       }
+
+       if (num_ets_zero_bw)
+               bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
+
        for (i = 0; i <= max_tc; i++) {
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_VENDOR:
@@ -157,12 +208,26 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
                        tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
                        break;
                case IEEE_8021QAZ_TSA_ETS:
-                       tc_tx_bw[i] = ets->tc_tx_bw[i];
+                       tc_tx_bw[i] = ets->tc_tx_bw[i] ?
+                                     ets->tc_tx_bw[i] :
+                                     bw_for_ets_zero_bw_tc;
                        break;
                }
        }
+
+       /* Make sure the total bw for ets zero bw group is 100% */
+       if (last_ets_zero_bw_tc != -1)
+               tc_tx_bw[last_ets_zero_bw_tc] +=
+                       MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
 }
 
+/* If there are ETS BW 0,
+ *   Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%.
+ *   Set group #0 to all the ETS BW 0 tcs and
+ *     equally splits the 100% BW between them
+ *   Report both group #0 and #1 as ETS type.
+ *     All the tcs in group #0 will be reported with 0% BW.
+ */
 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
@@ -188,7 +253,6 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
                return err;
 
        memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
-
        return err;
 }
 
@@ -209,17 +273,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
        }
 
        /* Validate Bandwidth Sum */
-       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
-                       if (!ets->tc_tx_bw[i]) {
-                               netdev_err(netdev,
-                                          "Failed to validate ETS: BW 0 is illegal\n");
-                               return -EINVAL;
-                       }
-
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
                        bw_sum += ets->tc_tx_bw[i];
-               }
-       }
 
        if (bw_sum != 0 && bw_sum != 100) {
                netdev_err(netdev,
@@ -533,8 +589,7 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
 static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
                                      int pgid, u8 *bw_pct)
 {
-       struct mlx5e_priv *priv = netdev_priv(netdev);
-       struct mlx5_core_dev *mdev = priv->mdev;
+       struct ieee_ets ets;
 
        if (pgid >= CEE_DCBX_MAX_PGS) {
                netdev_err(netdev,
@@ -542,8 +597,8 @@ static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
                return;
        }
 
-       if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct))
-               *bw_pct = 0;
+       mlx5e_dcbnl_ieee_getets(netdev, &ets);
+       *bw_pct = ets.tc_tx_bw[pgid];
 }
 
 static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
@@ -739,8 +794,6 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
                ets.prio_tc[i] = i;
        }
 
-       memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa));
-
        /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
        ets.prio_tc[0] = 1;
        ets.prio_tc[1] = 0;
index f11fd07ac4dd17e7c916b12736a6b9823284f062..850cdc980ab5a5e9d21b85d50c6386def3c173bb 100644 (file)
@@ -291,7 +291,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
        priv->fs.vlan.filter_disabled = false;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
-       mlx5e_del_any_vid_rules(priv);
+       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
 }
 
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
@@ -302,7 +302,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
        priv->fs.vlan.filter_disabled = true;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
-       mlx5e_add_any_vid_rules(priv);
+       mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
 }
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
index dfc29720ab77475ef421067a44bb679ef94e24f3..cc11bbbd0309d465819abe501859da971239cb0f 100644 (file)
@@ -184,7 +184,6 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
        struct mlx5e_sw_stats temp, *s = &temp;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
-       u64 tx_offload_none = 0;
        int i, j;
 
        memset(s, 0, sizeof(*s));
@@ -199,6 +198,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
                s->rx_lro_bytes += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_complete += rq_stats->csum_complete;
+               s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_xdp_drop += rq_stats->xdp_drop;
                s->rx_xdp_tx += rq_stats->xdp_tx;
@@ -229,14 +229,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
                        s->tx_queue_dropped     += sq_stats->dropped;
                        s->tx_xmit_more         += sq_stats->xmit_more;
                        s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
-                       tx_offload_none         += sq_stats->csum_none;
+                       s->tx_csum_none         += sq_stats->csum_none;
+                       s->tx_csum_partial      += sq_stats->csum_partial;
                }
        }
 
-       /* Update calculated offload counters */
-       s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
-       s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
-
        s->link_down_events_phy = MLX5_GET(ppcnt_reg,
                                priv->stats.pport.phy_counters,
                                counter_set.phys_layer_cntrs.link_down_events);
@@ -3333,8 +3330,8 @@ static int mlx5e_handle_feature(struct net_device *netdev,
 
        err = feature_handler(netdev, enable);
        if (err) {
-               netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
-                          enable ? "Enable" : "Disable", feature, err);
+               netdev_err(netdev, "%s feature %pNF failed, err %d\n",
+                          enable ? "Enable" : "Disable", &feature, err);
                return err;
        }
 
index f1dd638384d38348d10fc02eacf6ea188038f381..15a1687483cc5f6ed9b2943152ae1f54f9283617 100644 (file)
@@ -627,6 +627,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 
        if (lro) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
+               rq->stats.csum_unnecessary++;
                return;
        }
 
@@ -644,7 +645,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                        skb->csum_level = 1;
                        skb->encapsulation = 1;
                        rq->stats.csum_unnecessary_inner++;
+                       return;
                }
+               rq->stats.csum_unnecessary++;
                return;
        }
 csum_none:
index 6d199ffb1c0b54802542d8ee92ddaa066ecf209c..f8637213afc0f78b826c67b3fc7608c7079da4c7 100644 (file)
@@ -68,6 +68,7 @@ struct mlx5e_sw_stats {
        u64 rx_xdp_drop;
        u64 rx_xdp_tx;
        u64 rx_xdp_tx_full;
+       u64 tx_csum_none;
        u64 tx_csum_partial;
        u64 tx_csum_partial_inner;
        u64 tx_queue_stopped;
@@ -108,6 +109,7 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
@@ -339,6 +341,7 @@ struct mlx5e_rq_stats {
        u64 packets;
        u64 bytes;
        u64 csum_complete;
+       u64 csum_unnecessary;
        u64 csum_unnecessary_inner;
        u64 csum_none;
        u64 lro_packets;
@@ -363,6 +366,7 @@ static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
@@ -392,6 +396,7 @@ struct mlx5e_sq_stats {
        u64 tso_bytes;
        u64 tso_inner_packets;
        u64 tso_inner_bytes;
+       u64 csum_partial;
        u64 csum_partial_inner;
        u64 nop;
        /* less likely accessed in data path */
@@ -408,6 +413,7 @@ static const struct counter_desc sq_stats_desc[] = {
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
index da503e6411da07374f3250d450fd131c64c63c88..9ba1f72060aae4c57a55c9d7a4c049e1a6f8a69b 100644 (file)
@@ -78,9 +78,11 @@ struct mlx5e_tc_flow {
 };
 
 struct mlx5e_tc_flow_parse_attr {
+       struct ip_tunnel_info tun_info;
        struct mlx5_flow_spec spec;
        int num_mod_hdr_actions;
        void *mod_hdr_actions;
+       int mirred_ifindex;
 };
 
 enum {
@@ -322,6 +324,12 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow);
 
+static int mlx5e_attach_encap(struct mlx5e_priv *priv,
+                             struct ip_tunnel_info *tun_info,
+                             struct net_device *mirred_dev,
+                             struct net_device **encap_dev,
+                             struct mlx5e_tc_flow *flow);
+
 static struct mlx5_flow_handle *
 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -329,9 +337,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
-       struct mlx5_flow_handle *rule;
+       struct net_device *out_dev, *encap_dev = NULL;
+       struct mlx5_flow_handle *rule = NULL;
+       struct mlx5e_rep_priv *rpriv;
+       struct mlx5e_priv *out_priv;
        int err;
 
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+               out_dev = __dev_get_by_index(dev_net(priv->netdev),
+                                            attr->parse_attr->mirred_ifindex);
+               err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
+                                        out_dev, &encap_dev, flow);
+               if (err) {
+                       rule = ERR_PTR(err);
+                       if (err != -EAGAIN)
+                               goto err_attach_encap;
+               }
+               out_priv = netdev_priv(encap_dev);
+               rpriv = out_priv->ppriv;
+               attr->out_rep = rpriv->rep;
+       }
+
        err = mlx5_eswitch_add_vlan_action(esw, attr);
        if (err) {
                rule = ERR_PTR(err);
@@ -347,10 +373,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                }
        }
 
-       rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
-       if (IS_ERR(rule))
-               goto err_add_rule;
-
+       /* we get here if (1) there's no error (rule being null) or when
+        * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
+        */
+       if (rule != ERR_PTR(-EAGAIN)) {
+               rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
+               if (IS_ERR(rule))
+                       goto err_add_rule;
+       }
        return rule;
 
 err_add_rule:
@@ -361,6 +391,7 @@ err_mod_hdr:
 err_add_vlan:
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
                mlx5e_detach_encap(priv, flow);
+err_attach_encap:
        return rule;
 }
 
@@ -389,6 +420,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e)
 {
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5_esw_flow_attr *esw_attr;
        struct mlx5e_tc_flow *flow;
        int err;
 
@@ -404,10 +437,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
        mlx5e_rep_queue_neigh_stats_work(priv);
 
        list_for_each_entry(flow, &e->flows, encap) {
-               flow->esw_attr->encap_id = e->encap_id;
-               flow->rule = mlx5e_tc_add_fdb_flow(priv,
-                                                  flow->esw_attr->parse_attr,
-                                                  flow);
+               esw_attr = flow->esw_attr;
+               esw_attr->encap_id = e->encap_id;
+               flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
                if (IS_ERR(flow->rule)) {
                        err = PTR_ERR(flow->rule);
                        mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
@@ -421,15 +453,13 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
 void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e)
 {
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_tc_flow *flow;
-       struct mlx5_fc *counter;
 
        list_for_each_entry(flow, &e->flows, encap) {
                if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                        flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
-                       counter = mlx5_flow_rule_counter(flow->rule);
-                       mlx5_del_flow_rules(flow->rule);
-                       mlx5_fc_destroy(priv->mdev, counter);
+                       mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
                }
        }
 
@@ -1317,6 +1347,69 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda
        return true;
 }
 
+static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
+                                         struct tcf_exts *exts)
+{
+       const struct tc_action *a;
+       bool modify_ip_header;
+       LIST_HEAD(actions);
+       u8 htype, ip_proto;
+       void *headers_v;
+       u16 ethertype;
+       int nkeys, i;
+
+       headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+       ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
+
+       /* for non-IP we only re-write MACs, so we're okay */
+       if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
+               goto out_ok;
+
+       modify_ip_header = false;
+       tcf_exts_to_list(exts, &actions);
+       list_for_each_entry(a, &actions, list) {
+               if (!is_tcf_pedit(a))
+                       continue;
+
+               nkeys = tcf_pedit_nkeys(a);
+               for (i = 0; i < nkeys; i++) {
+                       htype = tcf_pedit_htype(a, i);
+                       if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
+                           htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
+                               modify_ip_header = true;
+                               break;
+                       }
+               }
+       }
+
+       ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
+       if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+               pr_info("can't offload re-write of ip proto %d\n", ip_proto);
+               return false;
+       }
+
+out_ok:
+       return true;
+}
+
+static bool actions_match_supported(struct mlx5e_priv *priv,
+                                   struct tcf_exts *exts,
+                                   struct mlx5e_tc_flow_parse_attr *parse_attr,
+                                   struct mlx5e_tc_flow *flow)
+{
+       u32 actions;
+
+       if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+               actions = flow->esw_attr->action;
+       else
+               actions = flow->nic_attr->action;
+
+       if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+               return modify_header_match_supported(&parse_attr->spec, exts);
+
+       return true;
+}
+
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5e_tc_flow_parse_attr *parse_attr,
                                struct mlx5e_tc_flow *flow)
@@ -1378,6 +1471,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                return -EINVAL;
        }
 
+       if (!actions_match_supported(priv, exts, parse_attr, flow))
+               return -EOPNOTSUPP;
+
        return 0;
 }
 
@@ -1564,7 +1660,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
                break;
        default:
                err = -EOPNOTSUPP;
-               goto out;
+               goto free_encap;
        }
        fl4.flowi4_tos = tun_key->tos;
        fl4.daddr = tun_key->u.ipv4.dst;
@@ -1573,7 +1669,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
        err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
                                      &fl4, &n, &ttl);
        if (err)
-               goto out;
+               goto free_encap;
 
        /* used by mlx5e_detach_encap to lookup a neigh hash table
         * entry in the neigh hash table when a user deletes a rule
@@ -1590,7 +1686,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
         */
        err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
        if (err)
-               goto out;
+               goto free_encap;
 
        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
@@ -1630,8 +1726,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 
 destroy_neigh_entry:
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-out:
+free_encap:
        kfree(encap_header);
+out:
        if (n)
                neigh_release(n);
        return err;
@@ -1668,7 +1765,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
                break;
        default:
                err = -EOPNOTSUPP;
-               goto out;
+               goto free_encap;
        }
 
        fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
@@ -1678,7 +1775,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
        err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
                                      &fl6, &n, &ttl);
        if (err)
-               goto out;
+               goto free_encap;
 
        /* used by mlx5e_detach_encap to lookup a neigh hash table
         * entry in the neigh hash table when a user deletes a rule
@@ -1695,7 +1792,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
         */
        err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
        if (err)
-               goto out;
+               goto free_encap;
 
        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
@@ -1736,8 +1833,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 
 destroy_neigh_entry:
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-out:
+free_encap:
        kfree(encap_header);
+out:
        if (n)
                neigh_release(n);
        return err;
@@ -1791,6 +1889,7 @@ vxlan_encap_offload_err:
                }
        }
 
+       /* must verify if encap is valid or not */
        if (found)
                goto attach_flow;
 
@@ -1817,6 +1916,8 @@ attach_flow:
        *encap_dev = e->out_dev;
        if (e->flags & MLX5_ENCAP_ENTRY_VALID)
                attr->encap_id = e->encap_id;
+       else
+               err = -EAGAIN;
 
        return err;
 
@@ -1871,7 +1972,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_mirred_egress_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
-                       struct net_device *out_dev, *encap_dev = NULL;
+                       struct net_device *out_dev;
                        struct mlx5e_priv *out_priv;
 
                        out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -1884,17 +1985,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                rpriv = out_priv->ppriv;
                                attr->out_rep = rpriv->rep;
                        } else if (encap) {
-                               err = mlx5e_attach_encap(priv, info,
-                                                        out_dev, &encap_dev, flow);
-                               if (err && err != -EAGAIN)
-                                       return err;
+                               parse_attr->mirred_ifindex = ifindex;
+                               parse_attr->tun_info = *info;
+                               attr->parse_attr = parse_attr;
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
                                        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
-                               out_priv = netdev_priv(encap_dev);
-                               rpriv = out_priv->ppriv;
-                               attr->out_rep = rpriv->rep;
-                               attr->parse_attr = parse_attr;
+                               /* attr->out_rep is resolved when we handle encap */
                        } else {
                                pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
                                       priv->netdev->name, out_dev->name);
@@ -1934,6 +2031,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                return -EINVAL;
        }
+
+       if (!actions_match_supported(priv, exts, parse_attr, flow))
+               return -EOPNOTSUPP;
+
        return err;
 }
 
@@ -1972,7 +2073,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
        if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
                err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
                if (err < 0)
-                       goto err_handle_encap_flow;
+                       goto err_free;
                flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
        } else {
                err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
@@ -1983,10 +2084,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
 
        if (IS_ERR(flow->rule)) {
                err = PTR_ERR(flow->rule);
-               goto err_free;
+               if (err != -EAGAIN)
+                       goto err_free;
        }
 
-       flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+       if (err != -EAGAIN)
+               flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+
        err = rhashtable_insert_fast(&tc->ht, &flow->node,
                                     tc->ht_params);
        if (err)
@@ -2000,16 +2104,6 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
 err_del_rule:
        mlx5e_tc_del_flow(priv, flow);
 
-err_handle_encap_flow:
-       if (err == -EAGAIN) {
-               err = rhashtable_insert_fast(&tc->ht, &flow->node,
-                                            tc->ht_params);
-               if (err)
-                       mlx5e_tc_del_flow(priv, flow);
-               else
-                       return 0;
-       }
-
 err_free:
        kvfree(parse_attr);
        kfree(flow);
index fee43e40fa16a105de58981a40be26b320332090..1d6925d4369afd39df702da0c77177aa3888a6f3 100644 (file)
@@ -193,6 +193,7 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
                        sq->stats.csum_partial_inner++;
                } else {
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+                       sq->stats.csum_partial++;
                }
        } else
                sq->stats.csum_none++;
index e37453d838dbb669c76c9506fb3afd3723788a85..c0fd2212e89087df103c30afd1cea321dca1cef3 100644 (file)
@@ -71,11 +71,11 @@ int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
        return 0;
 }
 
-int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps)
+int mlx5_fpga_caps(struct mlx5_core_dev *dev)
 {
        u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0};
 
-       return mlx5_core_access_reg(dev, in, sizeof(in), caps,
+       return mlx5_core_access_reg(dev, in, sizeof(in), dev->caps.fpga,
                                    MLX5_ST_SZ_BYTES(fpga_cap),
                                    MLX5_REG_FPGA_CAP, 0, 0);
 }
index 94bdfd47c3f094a167edc185468a3f8d10c1a1c0..d05233c9b4f6c0ecfccf3868de233165c043ef58 100644 (file)
@@ -65,7 +65,7 @@ struct mlx5_fpga_qp_counters {
        u64 rx_total_drop;
 };
 
-int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps);
+int mlx5_fpga_caps(struct mlx5_core_dev *dev);
 int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query);
 int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op);
 int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
index 9034e9960a761fa7245cbfab9dd66cf0c434a522..dc8970346521d24c32e082fdff43b7e12937202f 100644 (file)
@@ -139,8 +139,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
        if (err)
                goto out;
 
-       err = mlx5_fpga_caps(fdev->mdev,
-                            fdev->mdev->caps.hca_cur[MLX5_CAP_FPGA]);
+       err = mlx5_fpga_caps(fdev->mdev);
        if (err)
                goto out;
 
index e0d0efd903bc9c4b4fe87f617e2bcd32ab54099c..36ecc2b2e1873a065a376d98a699b99ede01b062 100644 (file)
@@ -293,6 +293,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
        }
 
        if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+               int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
+                                       log_max_flow_counter,
+                                       ft->type));
                int list_size = 0;
 
                list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -305,12 +308,17 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
                        in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
                        list_size++;
                }
+               if (list_size > max_list_size) {
+                       err = -EINVAL;
+                       goto err_out;
+               }
 
                MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
                         list_size);
        }
 
        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+err_out:
        kvfree(in);
        return err;
 }
index 5509a752f98e7bebecb8fb870df59afda17d9a5a..48dd78975062c1c3db8d75e560a1a482931f6e3a 100644 (file)
@@ -52,6 +52,7 @@ enum fs_flow_table_type {
        FS_FT_FDB             = 0X4,
        FS_FT_SNIFFER_RX        = 0X5,
        FS_FT_SNIFFER_TX        = 0X6,
+       FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
 };
 
 enum fs_flow_table_op_mod {
@@ -260,4 +261,14 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
 #define fs_for_each_dst(pos, fte)                      \
        fs_list_for_each_entry(pos, &(fte)->node.children)
 
+#define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) (             \
+       (type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) :         \
+       (type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) :           \
+       (type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) :         \
+       (type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) :           \
+       (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) :         \
+       (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) :         \
+       (BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\
+       )
+
 #endif
index 8aea0a065e5636badb85c5a5f8d33c73bdac2502..db86e1506c8b67fa8849940c4adc958fb783ac92 100644 (file)
@@ -356,10 +356,11 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
 {
        struct mlx5_core_health *health = &dev->priv.health;
+       unsigned long flags;
 
-       spin_lock(&health->wq_lock);
+       spin_lock_irqsave(&health->wq_lock, flags);
        set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
-       spin_unlock(&health->wq_lock);
+       spin_unlock_irqrestore(&health->wq_lock, flags);
        cancel_delayed_work_sync(&dev->priv.health.recover_work);
 }
 
index 85298051a3e4fcf74767196dcc6660114222b4cf..145e392ab84973b0fa632c8c09895069ab49865a 100644 (file)
@@ -572,12 +572,13 @@ void mlx5_rdma_netdev_free(struct net_device *netdev)
 {
        struct mlx5e_priv          *priv    = mlx5i_epriv(netdev);
        const struct mlx5e_profile *profile = priv->profile;
+       struct mlx5_core_dev       *mdev    = priv->mdev;
 
        mlx5e_detach_netdev(priv);
        profile->cleanup(priv);
        destroy_workqueue(priv->wq);
        free_netdev(netdev);
 
-       mlx5e_destroy_mdev_resources(priv->mdev);
+       mlx5e_destroy_mdev_resources(mdev);
 }
 EXPORT_SYMBOL(mlx5_rdma_netdev_free);
index 1975d4388d4f77d2380209ad6bd6bfd9733e402e..e07061f565d6432d1c6c88b78468e038f20572cc 100644 (file)
@@ -677,6 +677,27 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
 
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+                            u8 tc, u8 *tc_group)
+{
+       u32 out[MLX5_ST_SZ_DW(qetc_reg)];
+       void *ets_tcn_conf;
+       int err;
+
+       err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
+       if (err)
+               return err;
+
+       ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out,
+                                   tc_configuration[tc]);
+
+       *tc_group = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
+                            group);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
+
 int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
 {
        u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
index 6c48e9959b65478759d3e50243ed5d90c3180076..2a8b529ce6dd176cbc29b9bb4b74cd1d1c48f671 100644 (file)
@@ -109,7 +109,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
                                mlx5_core_warn(dev,
                                               "failed to restore VF %d settings, err %d\n",
                                               vf, err);
-                       continue;
+                               continue;
                        }
                }
                mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
index 9d5e7cf288bef2b1eb603843c7b57945918aeb72..f3315bc874adf8228f6277936c961d7afca95353 100644 (file)
@@ -96,6 +96,7 @@ struct mlxsw_core {
        const struct mlxsw_bus *bus;
        void *bus_priv;
        const struct mlxsw_bus_info *bus_info;
+       struct workqueue_struct *emad_wq;
        struct list_head rx_listener_list;
        struct list_head event_listener_list;
        struct {
@@ -465,7 +466,7 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
 {
        unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
 
-       mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
+       queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
 }
 
 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
@@ -587,12 +588,18 @@ static const struct mlxsw_listener mlxsw_emad_rx_listener =
 
 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 {
+       struct workqueue_struct *emad_wq;
        u64 tid;
        int err;
 
        if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
                return 0;
 
+       emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+       if (!emad_wq)
+               return -ENOMEM;
+       mlxsw_core->emad_wq = emad_wq;
+
        /* Set the upper 32 bits of the transaction ID field to a random
         * number. This allows us to discard EMADs addressed to other
         * devices.
@@ -619,6 +626,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 err_emad_trap_set:
        mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
                                   mlxsw_core);
+       destroy_workqueue(mlxsw_core->emad_wq);
        return err;
 }
 
@@ -631,6 +639,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
        mlxsw_core->emad.use_emad = false;
        mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
                                   mlxsw_core);
+       destroy_workqueue(mlxsw_core->emad_wq);
 }
 
 static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
index cc27c5de5a1dd83060508910a9e99b8353a7c2be..4afc8486eb9a7ee5242d58393fbba643e2b75d74 100644 (file)
@@ -6401,6 +6401,36 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
        mlxsw_reg_mgpc_opcode_set(payload, opcode);
 }
 
+/* TIGCR - Tunneling IPinIP General Configuration Register
+ * -------------------------------------------------------
+ * The TIGCR register is used for setting up the IPinIP Tunnel configuration.
+ */
+#define MLXSW_REG_TIGCR_ID 0xA801
+#define MLXSW_REG_TIGCR_LEN 0x10
+
+MLXSW_REG_DEFINE(tigcr, MLXSW_REG_TIGCR_ID, MLXSW_REG_TIGCR_LEN);
+
+/* reg_tigcr_ipip_ttlc
+ * For IPinIP Tunnel encapsulation: whether to copy the ttl from the packet
+ * header.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tigcr, ttlc, 0x04, 8, 1);
+
+/* reg_tigcr_ipip_ttl_uc
+ * The TTL for IPinIP Tunnel encapsulation of unicast packets if
+ * reg_tigcr_ipip_ttlc is unset.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tigcr, ttl_uc, 0x04, 0, 8);
+
+static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc)
+{
+       MLXSW_REG_ZERO(tigcr, payload);
+       mlxsw_reg_tigcr_ttlc_set(payload, ttlc);
+       mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc);
+}
+
 /* SBPR - Shared Buffer Pools Register
  * -----------------------------------
  * The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -6881,6 +6911,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
        MLXSW_REG(mcc),
        MLXSW_REG(mcda),
        MLXSW_REG(mgpc),
+       MLXSW_REG(tigcr),
        MLXSW_REG(sbpr),
        MLXSW_REG(sbcm),
        MLXSW_REG(sbpm),
index 2cfb3f5d092dbf80acaea7481248ecf11ebdd7bc..5189022a1c8c335c42901b5d288bbdf830512f46 100644 (file)
@@ -2723,6 +2723,7 @@ static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
                mlxsw_sp_nexthop_rif_fini(nh);
                break;
        case MLXSW_SP_NEXTHOP_TYPE_IPIP:
+               mlxsw_sp_nexthop_rif_fini(nh);
                mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
                break;
        }
@@ -2742,7 +2743,11 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
            router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
                                                     MLXSW_SP_L3_PROTO_IPV4)) {
                nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
-               return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+               err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+               if (err)
+                       return err;
+               mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
+               return 0;
        }
 
        nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -3500,20 +3505,6 @@ static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_fib *fib)
 {
-       struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
-       struct mlxsw_sp_lpm_tree *lpm_tree;
-
-       /* Aggregate prefix lengths across all virtual routers to make
-        * sure we only have used prefix lengths in the LPM tree.
-        */
-       mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
-       lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
-                                        fib->proto);
-       if (IS_ERR(lpm_tree))
-               goto err_tree_get;
-       mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
-
-err_tree_get:
        if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
                return;
        mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
@@ -4009,7 +4000,11 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
            router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
                                                     MLXSW_SP_L3_PROTO_IPV6)) {
                nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
-               return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+               err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+               if (err)
+                       return err;
+               mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
+               return 0;
        }
 
        nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -5068,6 +5063,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
        vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
        if (IS_ERR(vr))
                return ERR_CAST(vr);
+       vr->rif_count++;
 
        err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
        if (err)
@@ -5099,7 +5095,6 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
 
        mlxsw_sp_rif_counters_alloc(rif);
        mlxsw_sp->router->rifs[rif_index] = rif;
-       vr->rif_count++;
 
        return rif;
 
@@ -5110,6 +5105,7 @@ err_fid_get:
        kfree(rif);
 err_rif_alloc:
 err_rif_index_alloc:
+       vr->rif_count--;
        mlxsw_sp_vr_put(vr);
        return ERR_PTR(err);
 }
@@ -5124,7 +5120,6 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
        mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
        vr = &mlxsw_sp->router->vrs[rif->vr_id];
 
-       vr->rif_count--;
        mlxsw_sp->router->rifs[rif->rif_index] = NULL;
        mlxsw_sp_rif_counters_free(rif);
        ops->deconfigure(rif);
@@ -5132,6 +5127,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
                /* Loopback RIFs are not associated with a FID. */
                mlxsw_sp_fid_put(fid);
        kfree(rif);
+       vr->rif_count--;
        mlxsw_sp_vr_put(vr);
 }
 
@@ -5900,11 +5896,20 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
        kfree(mlxsw_sp->router->rifs);
 }
 
+static int
+mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
+{
+       char tigcr_pl[MLXSW_REG_TIGCR_LEN];
+
+       mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
+}
+
 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
 {
        mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
        INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
-       return 0;
+       return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
 }
 
 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
index db9750695dc7f63cf8d9861026995098aa9cbcf0..8ea9320014ee78141f71f883452bb81926e208c6 100644 (file)
@@ -110,6 +110,8 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
         */
        if (!switchdev_port_same_parent_id(in_dev, out_dev))
                return -EOPNOTSUPP;
+       if (!nfp_netdev_is_nfp_repr(out_dev))
+               return -EOPNOTSUPP;
 
        output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
        if (!output->port)
index 1c0187f0af51f87b070c21a825b00e4a48887428..e118b5f2399669f172aaf6f80be6f6c221a235d0 100644 (file)
@@ -1180,10 +1180,14 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
 {
        void *frag;
 
-       if (!dp->xdp_prog)
+       if (!dp->xdp_prog) {
                frag = netdev_alloc_frag(dp->fl_bufsz);
-       else
-               frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
+       } else {
+               struct page *page;
+
+               page = alloc_page(GFP_KERNEL | __GFP_COLD);
+               frag = page ? page_address(page) : NULL;
+       }
        if (!frag) {
                nn_dp_warn(dp, "Failed to alloc receive page frag\n");
                return NULL;
@@ -1203,10 +1207,14 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
 {
        void *frag;
 
-       if (!dp->xdp_prog)
+       if (!dp->xdp_prog) {
                frag = napi_alloc_frag(dp->fl_bufsz);
-       else
-               frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
+       } else {
+               struct page *page;
+
+               page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+               frag = page ? page_address(page) : NULL;
+       }
        if (!frag) {
                nn_dp_warn(dp, "Failed to alloc receive page frag\n");
                return NULL;
index 07969f06df102706ebae26ff052fae4ffa132d80..dc016dfec64d653946d0f5be3314d597b189f9ff 100644 (file)
@@ -464,7 +464,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 
                do {
                        start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
-                       *data++ = nn->r_vecs[i].rx_pkts;
+                       data[0] = nn->r_vecs[i].rx_pkts;
                        tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
                        tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
                        tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
@@ -472,14 +472,16 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 
                do {
                        start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
-                       *data++ = nn->r_vecs[i].tx_pkts;
-                       *data++ = nn->r_vecs[i].tx_busy;
+                       data[1] = nn->r_vecs[i].tx_pkts;
+                       data[2] = nn->r_vecs[i].tx_busy;
                        tmp[3] = nn->r_vecs[i].hw_csum_tx;
                        tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
                        tmp[5] = nn->r_vecs[i].tx_gather;
                        tmp[6] = nn->r_vecs[i].tx_lso;
                } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
 
+               data += 3;
+
                for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
                        gathered_stats[j] += tmp[j];
        }
index bbe24639aa5a6a85d2c96130eb89bf9a599be635..c8c6231b87f3305ae570d6c5277d91415ff696e8 100644 (file)
@@ -88,6 +88,8 @@ static void emac_set_msglevel(struct net_device *netdev, u32 data)
 static int emac_get_sset_count(struct net_device *netdev, int sset)
 {
        switch (sset) {
+       case ETH_SS_PRIV_FLAGS:
+               return 1;
        case ETH_SS_STATS:
                return EMAC_STATS_LEN;
        default:
@@ -100,6 +102,10 @@ static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
        unsigned int i;
 
        switch (stringset) {
+       case ETH_SS_PRIV_FLAGS:
+               strcpy(data, "single-pause-mode");
+               break;
+
        case ETH_SS_STATS:
                for (i = 0; i < EMAC_STATS_LEN; i++) {
                        strlcpy(data, emac_ethtool_stat_strings[i],
@@ -230,6 +236,27 @@ static int emac_get_regs_len(struct net_device *netdev)
        return EMAC_MAX_REG_SIZE * sizeof(u32);
 }
 
+#define EMAC_PRIV_ENABLE_SINGLE_PAUSE  BIT(0)
+
+static int emac_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+       struct emac_adapter *adpt = netdev_priv(netdev);
+
+       adpt->single_pause_mode = !!(flags & EMAC_PRIV_ENABLE_SINGLE_PAUSE);
+
+       if (netif_running(netdev))
+               return emac_reinit_locked(adpt);
+
+       return 0;
+}
+
+static u32 emac_get_priv_flags(struct net_device *netdev)
+{
+       struct emac_adapter *adpt = netdev_priv(netdev);
+
+       return adpt->single_pause_mode ? EMAC_PRIV_ENABLE_SINGLE_PAUSE : 0;
+}
+
 static const struct ethtool_ops emac_ethtool_ops = {
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
@@ -253,6 +280,9 @@ static const struct ethtool_ops emac_ethtool_ops = {
 
        .get_regs_len    = emac_get_regs_len,
        .get_regs        = emac_get_regs,
+
+       .set_priv_flags = emac_set_priv_flags,
+       .get_priv_flags = emac_get_priv_flags,
 };
 
 void emac_set_ethtool_ops(struct net_device *netdev)
index bcd4708b374574fb06faf28d9b0a6cc90bc9c56d..3ed9033e56dbe9231583b8063128eb775abc9671 100644 (file)
@@ -551,6 +551,28 @@ static void emac_mac_start(struct emac_adapter *adpt)
        mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL |
                 DEBUG_MODE | SINGLE_PAUSE_MODE);
 
+       /* Enable single-pause-frame mode if requested.
+        *
+        * If enabled, the EMAC will send a single pause frame when the RX
+        * queue is full.  This normally leads to packet loss because
+        * the pause frame disables the remote MAC only for 33ms (the quanta),
+        * and then the remote MAC continues sending packets even though
+        * the RX queue is still full.
+        *
+        * If disabled, the EMAC sends a pause frame every 31ms until the RX
+        * queue is no longer full.  Normally, this is the preferred
+        * method of operation.  However, when the system is hung (e.g.
+        * cores are halted), the EMAC interrupt handler is never called
+        * and so the RX queue fills up quickly and stays full.  The resulting
+        * non-stop "flood" of pause frames sometimes has the effect of
+        * disabling nearby switches.  In some cases, other nearby switches
+        * are also affected, shutting down the entire network.
+        *
+        * The user can enable or disable single-pause-frame mode
+        * via ethtool.
+        */
+       mac |= adpt->single_pause_mode ? SINGLE_PAUSE_MODE : 0;
+
        writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1);
 
        writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL);
@@ -876,7 +898,8 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
 
                curr_rxbuf->dma_addr =
                        dma_map_single(adpt->netdev->dev.parent, skb->data,
-                                      curr_rxbuf->length, DMA_FROM_DEVICE);
+                                      adpt->rxbuf_size, DMA_FROM_DEVICE);
+
                ret = dma_mapping_error(adpt->netdev->dev.parent,
                                        curr_rxbuf->dma_addr);
                if (ret) {
index 60850bfa3d32e5d574bddacda871cb75da2076fd..759543512117cbfcc628697a54dd7af4ab600c61 100644 (file)
@@ -443,6 +443,9 @@ static void emac_init_adapter(struct emac_adapter *adpt)
 
        /* default to automatic flow control */
        adpt->automatic = true;
+
+       /* Disable single-pause-frame mode by default */
+       adpt->single_pause_mode = false;
 }
 
 /* Get the clock */
index 8ee4ec6aef2e4379060f0726a75ab8fd8ad1690c..d7c9f44209d499cbd4f7ee7be32ca692f580b12c 100644 (file)
@@ -363,6 +363,9 @@ struct emac_adapter {
        bool                            tx_flow_control;
        bool                            rx_flow_control;
 
+       /* True == use single-pause-frame mode. */
+       bool                            single_pause_mode;
+
        /* Ring parameter */
        u8                              tpd_burst;
        u8                              rfd_burst;
index 98f22551eb455a686fcdb8baada068fcfb6e89ae..1e33aea59f505db5afd9fe19a018225c6ee34792 100644 (file)
@@ -51,10 +51,7 @@ struct rmnet_walk_data {
 
 static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
 {
-       rx_handler_func_t *rx_handler;
-
-       rx_handler = rcu_dereference(real_dev->rx_handler);
-       return (rx_handler == rmnet_rx_handler);
+       return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
 }
 
 /* Needs rtnl lock */
index ca22f2898664617656f7fa6e4e98df1e7aa3bc26..d24b47b8e0b27e0f44243f5a1011779c0ebd09f9 100644 (file)
@@ -2135,11 +2135,12 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
        if (likely(RTL_R16(IntrStatus) & RxAckBits))
                work_done += rtl8139_rx(dev, tp, budget);
 
-       if (work_done < budget && napi_complete_done(napi, work_done)) {
+       if (work_done < budget) {
                unsigned long flags;
 
                spin_lock_irqsave(&tp->lock, flags);
-               RTL_W16_F(IntrMask, rtl8139_intr_mask);
+               if (napi_complete_done(napi, work_done))
+                       RTL_W16_F(IntrMask, rtl8139_intr_mask);
                spin_unlock_irqrestore(&tp->lock, flags);
        }
        spin_unlock(&tp->rx_lock);
index e03fcf914690c9a9e8fae548c4702f402d698f47..a3c949ea7d1a24bd8d04bf4ff75805359d838f0e 100644 (file)
@@ -8491,8 +8491,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                rtl8168_driver_start(tp);
        }
 
-       device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
-
        if (pci_dev_run_wake(pdev))
                pm_runtime_put_noidle(&pdev->dev);
 
index a63ef82e7c72d079bb831ab433c2c43af173d2c0..dfae3c9d57c6d49d9de53635180a73e6521199fc 100644 (file)
@@ -139,40 +139,52 @@ rocker_tlv_start(struct rocker_desc_info *desc_info)
 int rocker_tlv_put(struct rocker_desc_info *desc_info,
                   int attrtype, int attrlen, const void *data);
 
-static inline int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
-                                   int attrtype, u8 value)
+static inline int
+rocker_tlv_put_u8(struct rocker_desc_info *desc_info, int attrtype, u8 value)
 {
-       return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
+       u8 tmp = value; /* work around GCC PR81715 */
+
+       return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &tmp);
 }
 
-static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
-                                    int attrtype, u16 value)
+static inline int
+rocker_tlv_put_u16(struct rocker_desc_info *desc_info, int attrtype, u16 value)
 {
-       return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
+       u16 tmp = value;
+
+       return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &tmp);
 }
 
-static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
-                                     int attrtype, __be16 value)
+static inline int
+rocker_tlv_put_be16(struct rocker_desc_info *desc_info, int attrtype, __be16 value)
 {
-       return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
+       __be16 tmp = value;
+
+       return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &tmp);
 }
 
-static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
-                                    int attrtype, u32 value)
+static inline int
+rocker_tlv_put_u32(struct rocker_desc_info *desc_info, int attrtype, u32 value)
 {
-       return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
+       u32 tmp = value;
+
+       return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &tmp);
 }
 
-static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
-                                     int attrtype, __be32 value)
+static inline int
+rocker_tlv_put_be32(struct rocker_desc_info *desc_info, int attrtype, __be32 value)
 {
-       return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
+       __be32 tmp = value;
+
+       return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &tmp);
 }
 
-static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
-                                    int attrtype, u64 value)
+static inline int
+rocker_tlv_put_u64(struct rocker_desc_info *desc_info, int attrtype, u64 value)
 {
-       return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
+       u64 tmp = value;
+
+       return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &tmp);
 }
 
 static inline struct rocker_tlv *
index dd6a2f9791cc11a390d71bcb5a1b071cd1bca068..3256e5cbad2717c0d3928109cbb677c095e3247c 100644 (file)
@@ -74,7 +74,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
                plat_dat->axi->axi_wr_osr_lmt--;
        }
 
-       if (of_property_read_u32(np, "read,read-requests",
+       if (of_property_read_u32(np, "snps,read-requests",
                                 &plat_dat->axi->axi_rd_osr_lmt)) {
                /**
                 * Since the register has a reset value of 1, if property
@@ -511,6 +511,7 @@ static struct platform_driver dwc_eth_dwmac_driver = {
        .remove = dwc_eth_dwmac_remove,
        .driver = {
                .name           = "dwc-eth-dwmac",
+               .pm             = &stmmac_pltfr_pm_ops,
                .of_match_table = dwc_eth_dwmac_match,
        },
 };
index 99823f54696a1887ba4ef5f36e5863b12806bc8f..13133b30b575e74a081f35158e360d7eee8b28ab 100644 (file)
@@ -83,6 +83,117 @@ struct rk_priv_data {
        (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
         ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
 
+#define RK3128_GRF_MAC_CON0    0x0168
+#define RK3128_GRF_MAC_CON1    0x016c
+
+/* RK3128_GRF_MAC_CON0 */
+#define RK3128_GMAC_TXCLK_DLY_ENABLE   GRF_BIT(14)
+#define RK3128_GMAC_TXCLK_DLY_DISABLE  GRF_CLR_BIT(14)
+#define RK3128_GMAC_RXCLK_DLY_ENABLE   GRF_BIT(15)
+#define RK3128_GMAC_RXCLK_DLY_DISABLE  GRF_CLR_BIT(15)
+#define RK3128_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
+#define RK3128_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+/* RK3128_GRF_MAC_CON1 */
+#define RK3128_GMAC_PHY_INTF_SEL_RGMII \
+               (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
+#define RK3128_GMAC_PHY_INTF_SEL_RMII  \
+               (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
+#define RK3128_GMAC_FLOW_CTRL          GRF_BIT(9)
+#define RK3128_GMAC_FLOW_CTRL_CLR      GRF_CLR_BIT(9)
+#define RK3128_GMAC_SPEED_10M          GRF_CLR_BIT(10)
+#define RK3128_GMAC_SPEED_100M         GRF_BIT(10)
+#define RK3128_GMAC_RMII_CLK_25M       GRF_BIT(11)
+#define RK3128_GMAC_RMII_CLK_2_5M      GRF_CLR_BIT(11)
+#define RK3128_GMAC_CLK_125M           (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
+#define RK3128_GMAC_CLK_25M            (GRF_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_CLK_2_5M           (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_RMII_MODE          GRF_BIT(14)
+#define RK3128_GMAC_RMII_MODE_CLR      GRF_CLR_BIT(14)
+
+static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
+                               int tx_delay, int rx_delay)
+{
+       struct device *dev = &bsp_priv->pdev->dev;
+
+       if (IS_ERR(bsp_priv->grf)) {
+               dev_err(dev, "Missing rockchip,grf property\n");
+               return;
+       }
+
+       regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+                    RK3128_GMAC_PHY_INTF_SEL_RGMII |
+                    RK3128_GMAC_RMII_MODE_CLR);
+       regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
+                    DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
+                    RK3128_GMAC_CLK_RX_DL_CFG(rx_delay) |
+                    RK3128_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+       struct device *dev = &bsp_priv->pdev->dev;
+
+       if (IS_ERR(bsp_priv->grf)) {
+               dev_err(dev, "Missing rockchip,grf property\n");
+               return;
+       }
+
+       regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+                    RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
+}
+
+static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+       struct device *dev = &bsp_priv->pdev->dev;
+
+       if (IS_ERR(bsp_priv->grf)) {
+               dev_err(dev, "Missing rockchip,grf property\n");
+               return;
+       }
+
+       if (speed == 10)
+               regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+                            RK3128_GMAC_CLK_2_5M);
+       else if (speed == 100)
+               regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+                            RK3128_GMAC_CLK_25M);
+       else if (speed == 1000)
+               regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+                            RK3128_GMAC_CLK_125M);
+       else
+               dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+       struct device *dev = &bsp_priv->pdev->dev;
+
+       if (IS_ERR(bsp_priv->grf)) {
+               dev_err(dev, "Missing rockchip,grf property\n");
+               return;
+       }
+
+       if (speed == 10) {
+               regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+                            RK3128_GMAC_RMII_CLK_2_5M |
+                            RK3128_GMAC_SPEED_10M);
+       } else if (speed == 100) {
+               regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+                            RK3128_GMAC_RMII_CLK_25M |
+                            RK3128_GMAC_SPEED_100M);
+       } else {
+               dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+       }
+}
+
+static const struct rk_gmac_ops rk3128_ops = {
+       .set_to_rgmii = rk3128_set_to_rgmii,
+       .set_to_rmii = rk3128_set_to_rmii,
+       .set_rgmii_speed = rk3128_set_rgmii_speed,
+       .set_rmii_speed = rk3128_set_rmii_speed,
+};
+
 #define RK3228_GRF_MAC_CON0    0x0900
 #define RK3228_GRF_MAC_CON1    0x0904
 
@@ -1313,6 +1424,7 @@ static int rk_gmac_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
 
 static const struct of_device_id rk_gmac_dwmac_match[] = {
+       { .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
        { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
        { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
        { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops },
index c4407e8e39a35a523bda481a9cc1787ab24bc608..2f7d7ec59962a7050a278d53e2241024566bb66d 100644 (file)
@@ -296,6 +296,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
 {
        void __iomem *ioaddr = hw->pcsr;
        unsigned int pmt = 0;
+       u32 config;
 
        if (mode & WAKE_MAGIC) {
                pr_debug("GMAC: WOL Magic frame\n");
@@ -306,6 +307,12 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
                pmt |= power_down | global_unicast | wake_up_frame_en;
        }
 
+       if (pmt) {
+               /* The receiver must be enabled for WOL before powering down */
+               config = readl(ioaddr + GMAC_CONFIG);
+               config |= GMAC_CONFIG_RE;
+               writel(config, ioaddr + GMAC_CONFIG);
+       }
        writel(pmt, ioaddr + GMAC_PMT);
 }
 
index e0ef02f9503bae027268b1b058f9875b6c1365a7..4b286e27c4ca5cdbbb7c457e31bef1b2e9e7bd94 100644 (file)
@@ -275,7 +275,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
                                        goto exit;
                                i++;
 
-                       } while ((ret == 1) || (i < 10));
+                       } while ((ret == 1) && (i < 10));
 
                        if (i == 10)
                                ret = -EBUSY;
index 67af0bdd7f10f01ecd3adf87b59105cdd07ab40e..7516ca210855b49b30378fb0200e32949ca58e49 100644 (file)
@@ -34,7 +34,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
 
        err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
                                 !(value & DMA_BUS_MODE_SFT_RESET),
-                                100000, 10000);
+                                10000, 100000);
        if (err)
                return -EBUSY;
 
index 1763e48c84e2090678f4ffc1b55cf1d7a62382e4..16bd509290844b7854437ecb97507126b6153413 100644 (file)
@@ -473,19 +473,18 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                                   struct dma_desc *np, struct sk_buff *skb)
 {
        struct skb_shared_hwtstamps *shhwtstamp = NULL;
+       struct dma_desc *desc = p;
        u64 ns;
 
        if (!priv->hwts_rx_en)
                return;
+       /* For GMAC4, the valid timestamp is from CTX next desc. */
+       if (priv->plat->has_gmac4)
+               desc = np;
 
        /* Check if timestamp is available */
-       if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
-               /* For GMAC4, the valid timestamp is from CTX next desc. */
-               if (priv->plat->has_gmac4)
-                       ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
-               else
-                       ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
-
+       if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
+               ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
@@ -1800,12 +1799,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
 {
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        unsigned int bytes_compl = 0, pkts_compl = 0;
-       unsigned int entry = tx_q->dirty_tx;
+       unsigned int entry;
 
        netif_tx_lock(priv->dev);
 
        priv->xstats.tx_clean++;
 
+       entry = tx_q->dirty_tx;
        while (entry != tx_q->cur_tx) {
                struct sk_buff *skb = tx_q->tx_skbuff[entry];
                struct dma_desc *p;
@@ -3333,6 +3333,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                                 * them in stmmac_rx_refill() function so that
                                 * device can reuse it.
                                 */
+                               dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
                                rx_q->rx_skbuff[entry] = NULL;
                                dma_unmap_single(priv->device,
                                                 rx_q->rx_skbuff_dma[entry],
index a366b3747eeb58aebf730fde9c44c0a94af70491..6383695004a53ef14e6c72beba5b4f8b1677d5f5 100644 (file)
@@ -150,6 +150,13 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
        plat->rx_queues_to_use = 1;
        plat->tx_queues_to_use = 1;
 
+       /* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
+        * to always set this, otherwise Queue will be classified as AVB
+        * (because MTL_QUEUE_AVB = 0).
+        */
+       plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
+       plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
+
        rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
        if (!rx_node)
                return;
@@ -315,6 +322,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
                { .compatible = "allwinner,sun8i-h3-emac" },
                { .compatible = "allwinner,sun8i-v3s-emac" },
                { .compatible = "allwinner,sun50i-a64-emac" },
+               {},
        };
 
        /* If phy-handle property is passed from DT, use it as the PHY */
index f6404074b7b053e2af682c7e25ad36cec21ebe3b..ed51018a813e7ba6354d296e0d6c9fba3a1f76a1 100644 (file)
@@ -113,13 +113,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
 
 static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
 {
-#ifdef __BIG_ENDIAN
-       return (vni[0] == tun_id[2]) &&
-              (vni[1] == tun_id[1]) &&
-              (vni[2] == tun_id[0]);
-#else
        return !memcmp(vni, &tun_id[5], 3);
-#endif
 }
 
 static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
index d98cdfb1536b5600c0b2e962a2e5e003496ad129..5176be76ca7d57771d82829f8bd08b64dbdc7773 100644 (file)
@@ -150,6 +150,8 @@ struct netvsc_device_info {
        u32  num_chn;
        u32  send_sections;
        u32  recv_sections;
+       u32  send_section_size;
+       u32  recv_section_size;
 };
 
 enum rndis_device_state {
index a5511b7326af646618658060c8d289b60fb4883b..8d5077fb04929cb362e7266b45b04c1369ed1284 100644 (file)
@@ -76,9 +76,6 @@ static struct netvsc_device *alloc_net_device(void)
        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 
-       net_device->recv_section_size = NETVSC_RECV_SECTION_SIZE;
-       net_device->send_section_size = NETVSC_SEND_SECTION_SIZE;
-
        init_completion(&net_device->channel_init_wait);
        init_waitqueue_head(&net_device->subchan_open);
        INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
@@ -262,7 +259,7 @@ static int netvsc_init_buf(struct hv_device *device,
        int ret = 0;
 
        /* Get receive buffer area. */
-       buf_size = device_info->recv_sections * net_device->recv_section_size;
+       buf_size = device_info->recv_sections * device_info->recv_section_size;
        buf_size = roundup(buf_size, PAGE_SIZE);
 
        net_device->recv_buf = vzalloc(buf_size);
@@ -344,7 +341,7 @@ static int netvsc_init_buf(struct hv_device *device,
                goto cleanup;
 
        /* Now setup the send buffer. */
-       buf_size = device_info->send_sections * net_device->send_section_size;
+       buf_size = device_info->send_sections * device_info->send_section_size;
        buf_size = round_up(buf_size, PAGE_SIZE);
 
        net_device->send_buf = vzalloc(buf_size);
index d4902ee5f260f2df2fa03d680c3c7ea0d79e5dc8..a32ae02e1b6cb6fc2975b0e124aacb42370c0c5d 100644 (file)
@@ -848,7 +848,9 @@ static int netvsc_set_channels(struct net_device *net,
        device_info.num_chn = count;
        device_info.ring_size = ring_size;
        device_info.send_sections = nvdev->send_section_cnt;
+       device_info.send_section_size = nvdev->send_section_size;
        device_info.recv_sections = nvdev->recv_section_cnt;
+       device_info.recv_section_size = nvdev->recv_section_size;
 
        rndis_filter_device_remove(dev, nvdev);
 
@@ -963,7 +965,9 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        device_info.ring_size = ring_size;
        device_info.num_chn = nvdev->num_chn;
        device_info.send_sections = nvdev->send_section_cnt;
+       device_info.send_section_size = nvdev->send_section_size;
        device_info.recv_sections = nvdev->recv_section_cnt;
+       device_info.recv_section_size = nvdev->recv_section_size;
 
        rndis_filter_device_remove(hdev, nvdev);
 
@@ -1485,7 +1489,9 @@ static int netvsc_set_ringparam(struct net_device *ndev,
        device_info.num_chn = nvdev->num_chn;
        device_info.ring_size = ring_size;
        device_info.send_sections = new_tx;
+       device_info.send_section_size = nvdev->send_section_size;
        device_info.recv_sections = new_rx;
+       device_info.recv_section_size = nvdev->recv_section_size;
 
        netif_device_detach(ndev);
        was_opened = rndis_filter_opened(nvdev);
@@ -1934,7 +1940,9 @@ static int netvsc_probe(struct hv_device *dev,
        device_info.ring_size = ring_size;
        device_info.num_chn = VRSS_CHANNEL_DEFAULT;
        device_info.send_sections = NETVSC_DEFAULT_TX;
+       device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
        device_info.recv_sections = NETVSC_DEFAULT_RX;
+       device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
 
        nvdev = rndis_filter_device_add(dev, &device_info);
        if (IS_ERR(nvdev)) {
index 5dea2063dbc878d1f45a56c7cfea470f820ec601..0bcc07f346c3ecb26e5d8354adb1fcd547733e84 100644 (file)
@@ -197,8 +197,8 @@ static int ipvtap_init(void)
 {
        int err;
 
-       err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap");
-
+       err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap",
+                             THIS_MODULE);
        if (err)
                goto out1;
 
index 98e4deaa3a6a1c2f89d55e8f2db54b6fc93380be..5ab1b8849c30496966be05115f752bfcf3385950 100644 (file)
@@ -742,6 +742,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
        sg_init_table(sg, ret);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(ret < 0)) {
+               aead_request_free(req);
                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
                return ERR_PTR(ret);
@@ -954,6 +955,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
        sg_init_table(sg, ret);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(ret < 0)) {
+               aead_request_free(req);
                kfree_skb(skb);
                return ERR_PTR(ret);
        }
index c2d0ea2fb01933d46e43b84fbf255901f18e5b91..cba5cb3b849a78c7073d5545bd1f8067f6e1db5f 100644 (file)
@@ -204,8 +204,8 @@ static int macvtap_init(void)
 {
        int err;
 
-       err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap");
-
+       err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap",
+                             THIS_MODULE);
        if (err)
                goto out1;
 
index a9d16a3af514ead8f96a34db3d05478785cdf6d6..cd931cf9dcc262d3a1ccb4d03b5022b2180f1266 100644 (file)
@@ -160,15 +160,6 @@ config MDIO_XGENE
 
 endif
 
-menuconfig PHYLIB
-       tristate "PHY Device support and infrastructure"
-       depends on NETDEVICES
-       select MDIO_DEVICE
-       help
-         Ethernet controllers are usually attached to PHY
-         devices.  This option provides infrastructure for
-         managing PHY devices.
-
 config PHYLINK
        tristate
        depends on NETDEVICES
@@ -179,6 +170,15 @@ config PHYLINK
          configuration links, PHYs, and Serdes links with MAC level
          autonegotiation modes.
 
+menuconfig PHYLIB
+       tristate "PHY Device support and infrastructure"
+       depends on NETDEVICES
+       select MDIO_DEVICE
+       help
+         Ethernet controllers are usually attached to PHY
+         devices.  This option provides infrastructure for
+         managing PHY devices.
+
 if PHYLIB
 
 config SWPHY
index e842d2cd1ee750f8930028370c1c7fac5a52dc77..2b1e67bc1e736ceb33f7afa8462f5a4858b522df 100644 (file)
@@ -373,7 +373,8 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
                cmd->base.port = PORT_BNC;
        else
                cmd->base.port = PORT_MII;
-
+       cmd->base.transceiver = phy_is_internal(phydev) ?
+                               XCVR_INTERNAL : XCVR_EXTERNAL;
        cmd->base.phy_address = phydev->mdio.addr;
        cmd->base.autoneg = phydev->autoneg;
        cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
index 8cf0c5901f95870fc613edba0289594a5d40640a..67f25ac29025c53903cc724fac62efdd94828510 100644 (file)
@@ -879,7 +879,7 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
 {
        const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
        char *irq_str;
-       char irq_num[4];
+       char irq_num[8];
 
        switch(phydev->irq) {
        case PHY_POLL:
index d15dd3938ba82624fd693b5a6f77301bb6d26bde..2e5150b0b8d52c5dd784a3df1818962d64972898 100644 (file)
@@ -44,7 +44,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
        priv->phy_drv->read_status(phydev);
 
        val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
-       val &= XILINX_GMII2RGMII_SPEED_MASK;
+       val &= ~XILINX_GMII2RGMII_SPEED_MASK;
 
        if (phydev->speed == SPEED_1000)
                val |= BMCR_SPEED1000;
index a404552555d488c832e7758293d7d4c1e229e679..e365866600ba048f31664dddafccf314d640385f 100644 (file)
@@ -120,7 +120,7 @@ struct ppp {
        int             n_channels;     /* how many channels are attached 54 */
        spinlock_t      rlock;          /* lock for receive side 58 */
        spinlock_t      wlock;          /* lock for transmit side 5c */
-       int             *xmit_recursion __percpu; /* xmit recursion detect */
+       int __percpu    *xmit_recursion; /* xmit recursion detect */
        int             mru;            /* max receive unit 60 */
        unsigned int    flags;          /* control bits 64 */
        unsigned int    xstate;         /* transmit state bits 68 */
@@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
 
 static int ppp_dev_init(struct net_device *dev)
 {
+       struct ppp *ppp;
+
        netdev_lockdep_set_classes(dev);
+
+       ppp = netdev_priv(dev);
+       /* Let the netdevice take a reference on the ppp file. This ensures
+        * that ppp_destroy_interface() won't run before the device gets
+        * unregistered.
+        */
+       atomic_inc(&ppp->file.refcnt);
+
        return 0;
 }
 
@@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev)
        wake_up_interruptible(&ppp->file.rwait);
 }
 
+static void ppp_dev_priv_destructor(struct net_device *dev)
+{
+       struct ppp *ppp;
+
+       ppp = netdev_priv(dev);
+       if (atomic_dec_and_test(&ppp->file.refcnt))
+               ppp_destroy_interface(ppp);
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
        .ndo_init        = ppp_dev_init,
        .ndo_uninit      = ppp_dev_uninit,
@@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev)
        dev->tx_queue_len = 3;
        dev->type = ARPHRD_PPP;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+       dev->priv_destructor = ppp_dev_priv_destructor;
        netif_keep_dst(dev);
 }
 
index 21b71ae947fdbc2e3bbf7d5ef7c17d17aa560ba2..1b10fcc6a58ddeb660c93817b9972d0ef4783441 100644 (file)
@@ -517,6 +517,10 @@ static int tap_open(struct inode *inode, struct file *file)
                                             &tap_proto, 0);
        if (!q)
                goto err;
+       if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
+               sk_free(&q->sk);
+               goto err;
+       }
 
        RCU_INIT_POINTER(q->sock.wq, &q->wq);
        init_waitqueue_head(&q->wq.wait);
@@ -540,22 +544,18 @@ static int tap_open(struct inode *inode, struct file *file)
        if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
                sock_set_flag(&q->sk, SOCK_ZEROCOPY);
 
-       err = -ENOMEM;
-       if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL))
-               goto err_array;
-
        err = tap_set_queue(tap, file, q);
-       if (err)
-               goto err_queue;
+       if (err) {
+               /* tap_sock_destruct() will take care of freeing skb_array */
+               goto err_put;
+       }
 
        dev_put(tap->dev);
 
        rtnl_unlock();
        return err;
 
-err_queue:
-       skb_array_cleanup(&q->skb_array);
-err_array:
+err_put:
        sock_put(&q->sk);
 err:
        if (tap)
@@ -1249,8 +1249,8 @@ static int tap_list_add(dev_t major, const char *device_name)
        return 0;
 }
 
-int tap_create_cdev(struct cdev *tap_cdev,
-                   dev_t *tap_major, const char *device_name)
+int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
+                   const char *device_name, struct module *module)
 {
        int err;
 
@@ -1259,6 +1259,7 @@ int tap_create_cdev(struct cdev *tap_cdev,
                goto out1;
 
        cdev_init(tap_cdev, &tap_fops);
+       tap_cdev->owner = module;
        err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
        if (err)
                goto out2;
index 3c9985f299503ea65dad7eb3b47e2ab3bef87800..5550f56cb895f7cb5a2c3ff5e93be79bd012e627 100644 (file)
@@ -1286,6 +1286,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
        buflen += SKB_DATA_ALIGN(len + pad);
        rcu_read_unlock();
 
+       alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
        if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
                return ERR_PTR(-ENOMEM);
 
@@ -1496,11 +1497,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        switch (tun->flags & TUN_TYPE_MASK) {
        case IFF_TUN:
                if (tun->flags & IFF_NO_PI) {
-                       switch (skb->data[0] & 0xf0) {
-                       case 0x40:
+                       u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
+
+                       switch (ip_version) {
+                       case 4:
                                pi.proto = htons(ETH_P_IP);
                                break;
-                       case 0x60:
+                       case 6:
                                pi.proto = htons(ETH_P_IPV6);
                                break;
                        default:
@@ -2025,6 +2028,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
                if (!dev)
                        return -ENOMEM;
+               err = dev_get_valid_name(net, dev, name);
+               if (err < 0)
+                       goto err_free_dev;
 
                dev_net_set(dev, net);
                dev->rtnl_link_ops = &tun_link_ops;
index 8ab281b478f23bd98d71b896a0c00c4fdba7dacc..3e7a3ac3a36236054b897bc7beda770a765a95e5 100644 (file)
@@ -54,11 +54,19 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc)
                desc->bInterfaceProtocol == 3);
 }
 
+static int is_novatel_rndis(struct usb_interface_descriptor *desc)
+{
+       return (desc->bInterfaceClass == USB_CLASS_MISC &&
+               desc->bInterfaceSubClass == 4 &&
+               desc->bInterfaceProtocol == 1);
+}
+
 #else
 
 #define is_rndis(desc)         0
 #define is_activesync(desc)    0
 #define is_wireless_rndis(desc)        0
+#define is_novatel_rndis(desc) 0
 
 #endif
 
@@ -150,7 +158,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
         */
        rndis = (is_rndis(&intf->cur_altsetting->desc) ||
                 is_activesync(&intf->cur_altsetting->desc) ||
-                is_wireless_rndis(&intf->cur_altsetting->desc));
+                is_wireless_rndis(&intf->cur_altsetting->desc) ||
+                is_novatel_rndis(&intf->cur_altsetting->desc));
 
        memset(info, 0, sizeof(*info));
        info->control = intf;
@@ -547,9 +556,12 @@ static const struct driver_info wwan_info = {
 #define REALTEK_VENDOR_ID      0x0bda
 #define SAMSUNG_VENDOR_ID      0x04e8
 #define LENOVO_VENDOR_ID       0x17ef
+#define LINKSYS_VENDOR_ID      0x13b1
 #define NVIDIA_VENDOR_ID       0x0955
 #define HP_VENDOR_ID           0x03f0
 #define MICROSOFT_VENDOR_ID    0x045e
+#define UBLOX_VENDOR_ID                0x1546
+#define TPLINK_VENDOR_ID       0x2357
 
 static const struct usb_device_id      products[] = {
 /* BLACKLIST !!
@@ -737,6 +749,15 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+#if IS_ENABLED(CONFIG_USB_RTL8152)
+/* Linksys USB3GIGV1 Ethernet Adapter */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+#endif
+
 /* ThinkPad USB-C Dock (based on Realtek RTL8153) */
 {
        USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
@@ -793,6 +814,13 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+       /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* WHITELIST!!!
  *
  * CDC Ether uses two interfaces, not necessarily consecutive.
@@ -843,12 +871,30 @@ static const struct usb_device_id products[] = {
        USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x81ba, USB_CLASS_COMM,
                        USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
        .driver_info = (kernel_ulong_t)&wwan_info,
+}, {
+       /* Huawei ME906 and ME909 */
+       USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x15c1, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET,
+                                     USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long)&wwan_info,
 }, {
        /* ZTE modules */
        USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM,
                                      USB_CDC_SUBCLASS_ETHERNET,
                                      USB_CDC_PROTO_NONE),
        .driver_info = (unsigned long)&zte_cdc_info,
+}, {
+       /* U-blox TOBY-L2 */
+       USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1143, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET,
+                                     USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long)&wwan_info,
+}, {
+       /* U-blox SARA-U2 */
+       USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1104, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET,
+                                     USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long)&wwan_info,
 }, {
        USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
                        USB_CDC_PROTO_NONE),
index b99a7fb09f8e31827a725151b415967699cdfa27..0161f77641fac8eafc1284a5457a7ccb273efc39 100644 (file)
@@ -1265,30 +1265,45 @@ static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
                                      struct ethtool_eeprom *ee, u8 *data)
 {
        struct lan78xx_net *dev = netdev_priv(netdev);
+       int ret;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret)
+               return ret;
 
        ee->magic = LAN78XX_EEPROM_MAGIC;
 
-       return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
+       ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
+
+       usb_autopm_put_interface(dev->intf);
+
+       return ret;
 }
 
 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
                                      struct ethtool_eeprom *ee, u8 *data)
 {
        struct lan78xx_net *dev = netdev_priv(netdev);
+       int ret;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret)
+               return ret;
 
-       /* Allow entire eeprom update only */
-       if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
-           (ee->offset == 0) &&
-           (ee->len == 512) &&
-           (data[0] == EEPROM_INDICATOR))
-               return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+       /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
+        * to load data from EEPROM
+        */
+       if (ee->magic == LAN78XX_EEPROM_MAGIC)
+               ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
        else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
                 (ee->offset == 0) &&
                 (ee->len == 512) &&
                 (data[0] == OTP_INDICATOR_1))
-               return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
+               ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
+
+       usb_autopm_put_interface(dev->intf);
 
-       return -EINVAL;
+       return ret;
 }
 
 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
@@ -2434,7 +2449,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
        /* LAN7801 only has RGMII mode */
        if (dev->chipid == ID_REV_CHIP_ID_7801_)
                buf &= ~MAC_CR_GMII_EN_;
-       buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
        ret = lan78xx_write_reg(dev, MAC_CR, buf);
 
        ret = lan78xx_read_reg(dev, MAC_TX, &buf);
index ceb78e2ea4f0898ea2d4fbaeb4e8bc7e3439a077..d51d9abf7986b203350167d6a2fcdcfcddf8b972 100644 (file)
@@ -613,7 +613,9 @@ enum rtl8152_flags {
 #define VENDOR_ID_MICROSOFT            0x045e
 #define VENDOR_ID_SAMSUNG              0x04e8
 #define VENDOR_ID_LENOVO               0x17ef
+#define VENDOR_ID_LINKSYS              0x13b1
 #define VENDOR_ID_NVIDIA               0x0955
+#define VENDOR_ID_TPLINK               0x2357
 
 #define MCU_TYPE_PLA                   0x0100
 #define MCU_TYPE_USB                   0x0000
@@ -5316,7 +5318,9 @@ static const struct usb_device_id rtl8152_table[] = {
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x720c)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7214)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
        {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK,  0x0601)},
        {}
 };
 
index a151f267aebb9c47312f724729e4c2755cb7512b..b807c91abe1da94fc16c7eca39c0de3a1e2070a3 100644 (file)
@@ -632,6 +632,10 @@ static const struct usb_device_id  products [] = {
        /* RNDIS for tethering */
        USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
        .driver_info = (unsigned long) &rndis_info,
+}, {
+       /* Novatel Verizon USB730L */
+       USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1),
+       .driver_info = (unsigned long) &rndis_info,
 },
        { },            // END
 };
index c9c711dcd0e6bb9d7ce988f42bc7e6dd7877a37b..a89b5685e68b36d5735bc9591f97f621440cb424 100644 (file)
@@ -652,7 +652,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
        struct device *dev = i2400m_dev(i2400m);
        struct {
                struct i2400m_bootrom_header cmd;
-               u8 cmd_payload[chunk_len];
+               u8 cmd_payload[];
        } __packed *buf;
        struct i2400m_bootrom_header ack;
 
index bc1633945a564e850152a415ce04b3f4500bd51d..195dafb98131443a7b678e2fb82aa81c55f6591f 100644 (file)
@@ -3396,9 +3396,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
 
 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
 
-#ifdef CONFIG_PM
-
-static int ath10k_pci_pm_suspend(struct device *dev)
+static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
 {
        struct ath10k *ar = dev_get_drvdata(dev);
        int ret;
@@ -3414,7 +3412,7 @@ static int ath10k_pci_pm_suspend(struct device *dev)
        return ret;
 }
 
-static int ath10k_pci_pm_resume(struct device *dev)
+static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
 {
        struct ath10k *ar = dev_get_drvdata(dev);
        int ret;
@@ -3433,7 +3431,6 @@ static int ath10k_pci_pm_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
                         ath10k_pci_pm_suspend,
                         ath10k_pci_pm_resume);
-#endif
 
 static struct pci_driver ath10k_pci_driver = {
        .name = "ath10k_pci",
index aaed4ab503ad16c6f4de0e3ccec5b83c3f85c6a1..4157c90ad9736b9b20ab086585eca22ff80a60c9 100644 (file)
@@ -980,7 +980,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
 
        eth_broadcast_addr(params_le->bssid);
        params_le->bss_type = DOT11_BSSTYPE_ANY;
-       params_le->scan_type = 0;
+       params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
        params_le->channel_num = 0;
        params_le->nprobes = cpu_to_le32(-1);
        params_le->active_time = cpu_to_le32(-1);
@@ -988,12 +988,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
        params_le->home_time = cpu_to_le32(-1);
        memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
 
-       /* if request is null exit so it will be all channel broadcast scan */
-       if (!request)
-               return;
-
        n_ssids = request->n_ssids;
        n_channels = request->n_channels;
+
        /* Copy channel array if applicable */
        brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
                  n_channels);
@@ -1030,16 +1027,8 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
                        ptr += sizeof(ssid_le);
                }
        } else {
-               brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids);
-               if ((request->ssids) && request->ssids->ssid_len) {
-                       brcmf_dbg(SCAN, "SSID %s len=%d\n",
-                                 params_le->ssid_le.SSID,
-                                 request->ssids->ssid_len);
-                       params_le->ssid_le.SSID_len =
-                               cpu_to_le32(request->ssids->ssid_len);
-                       memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
-                               request->ssids->ssid_len);
-               }
+               brcmf_dbg(SCAN, "Performing passive scan\n");
+               params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
        }
        /* Adding mask to channel numbers */
        params_le->channel_num =
@@ -3162,6 +3151,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
        struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
        s32 status;
        struct brcmf_escan_result_le *escan_result_le;
+       u32 escan_buflen;
        struct brcmf_bss_info_le *bss_info_le;
        struct brcmf_bss_info_le *bss = NULL;
        u32 bi_length;
@@ -3181,11 +3171,23 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
 
        if (status == BRCMF_E_STATUS_PARTIAL) {
                brcmf_dbg(SCAN, "ESCAN Partial result\n");
+               if (e->datalen < sizeof(*escan_result_le)) {
+                       brcmf_err("invalid event data length\n");
+                       goto exit;
+               }
                escan_result_le = (struct brcmf_escan_result_le *) data;
                if (!escan_result_le) {
                        brcmf_err("Invalid escan result (NULL pointer)\n");
                        goto exit;
                }
+               escan_buflen = le32_to_cpu(escan_result_le->buflen);
+               if (escan_buflen > BRCMF_ESCAN_BUF_SIZE ||
+                   escan_buflen > e->datalen ||
+                   escan_buflen < sizeof(*escan_result_le)) {
+                       brcmf_err("Invalid escan buffer length: %d\n",
+                                 escan_buflen);
+                       goto exit;
+               }
                if (le16_to_cpu(escan_result_le->bss_count) != 1) {
                        brcmf_err("Invalid bss_count %d: ignoring\n",
                                  escan_result_le->bss_count);
@@ -3202,9 +3204,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
                }
 
                bi_length = le32_to_cpu(bss_info_le->length);
-               if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
-                                       WL_ESCAN_RESULTS_FIXED_SIZE)) {
-                       brcmf_err("Invalid bss_info length %d: ignoring\n",
+               if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) {
+                       brcmf_err("Ignoring invalid bss_info length: %d\n",
                                  bi_length);
                        goto exit;
                }
index 4eb1e1ce9aceccc9214657fa6081ba5327c021ca..ef72baf6dd969c2478a2138ea57b9f691fdf12d8 100644 (file)
@@ -429,7 +429,8 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
        if (code != BRCMF_E_IF && !fweh->evt_handler[code])
                return;
 
-       if (datalen > BRCMF_DCMD_MAXLEN)
+       if (datalen > BRCMF_DCMD_MAXLEN ||
+           datalen + sizeof(*event_packet) > packet_len)
                return;
 
        if (in_interrupt())
index 8391989b188297b2736b1c1a2375a97f84e0dd4a..e0d22fedb2b45932f04a35458f44408804b23d90 100644 (file)
 #define BRCMF_SCAN_PARAMS_COUNT_MASK   0x0000ffff
 #define BRCMF_SCAN_PARAMS_NSSID_SHIFT  16
 
+/* scan type definitions */
+#define BRCMF_SCANTYPE_DEFAULT         0xFF
+#define BRCMF_SCANTYPE_ACTIVE          0
+#define BRCMF_SCANTYPE_PASSIVE         1
+
 #define BRCMF_WSEC_MAX_PSK_LEN         32
 #define        BRCMF_WSEC_PASSPHRASE           BIT(0)
 
index b3aab2fe96eb79f751d46c0f309f33eb6042d025..ef685465f80ad6f7ae84b0763ca8afd4d8f1ea73 100644 (file)
@@ -14764,8 +14764,8 @@ static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
 }
 
 static void
-wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys,
-                      u8 len)
+wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, const u8 *events,
+                      const u8 *dlys, u8 len)
 {
        u32 t1_offset, t2_offset;
        u8 ctr;
@@ -15240,16 +15240,16 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
 static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
 {
        u16 currband;
-       s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
-       s8 *lna1_gain_db = NULL;
-       s8 *lna1_gain_db_2 = NULL;
-       s8 *lna2_gain_db = NULL;
-       s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
-       s8 *tia_gain_db;
-       s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
-       s8 *tia_gainbits;
-       u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
-       u16 *rfseq_init_gain;
+       static const s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
+       const s8 *lna1_gain_db = NULL;
+       const s8 *lna1_gain_db_2 = NULL;
+       const s8 *lna2_gain_db = NULL;
+       static const s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
+       const s8 *tia_gain_db;
+       static const s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
+       const s8 *tia_gainbits;
+       static const u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
+       const u16 *rfseq_init_gain;
        u16 init_gaincode;
        u16 clip1hi_gaincode;
        u16 clip1md_gaincode = 0;
@@ -15310,10 +15310,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
 
                        if ((freq <= 5080) || (freq == 5825)) {
 
-                               s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
-                               s8 lna1A_gain_db_2_rev7[] = {
-                                       11, 17, 22, 25};
-                               s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+                               static const s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
+                               static const s8 lna1A_gain_db_2_rev7[] = { 11, 17, 22, 25};
+                               static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
 
                                crsminu_th = 0x3e;
                                lna1_gain_db = lna1A_gain_db_rev7;
@@ -15321,10 +15320,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
                                lna2_gain_db = lna2A_gain_db_rev7;
                        } else if ((freq >= 5500) && (freq <= 5700)) {
 
-                               s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
-                               s8 lna1A_gain_db_2_rev7[] = {
-                                       12, 18, 22, 26};
-                               s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
+                               static const s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
+                               static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+                               static const s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
 
                                crsminu_th = 0x45;
                                clip1md_gaincode_B = 0x14;
@@ -15335,10 +15333,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
                                lna2_gain_db = lna2A_gain_db_rev7;
                        } else {
 
-                               s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
-                               s8 lna1A_gain_db_2_rev7[] = {
-                                       12, 18, 22, 26};
-                               s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+                               static const s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
+                               static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+                               static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
 
                                crsminu_th = 0x41;
                                lna1_gain_db = lna1A_gain_db_rev7;
@@ -15450,65 +15447,65 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
                NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
                NPHY_RFSEQ_CMD_SET_HPF_BW
        };
-       u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
-       s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
-       s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
-       s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
-       s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
-       s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
-       s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
-       s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
-       s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
-       s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
-       s8 *lna1_gain_db = NULL;
-       s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
-       s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
-       s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
-       s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
-       s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
-       s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
-       s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
-       s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
-       s8 *lna2_gain_db = NULL;
-       s8 tiaG_gain_db[] = {
+       static const u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
+       static const s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
+       static const s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
+       static const s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
+       static const s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
+       static const s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
+       static const s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
+       static const s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
+       static const s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
+       static const s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
+       const s8 *lna1_gain_db = NULL;
+       static const s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
+       static const s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
+       static const s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
+       static const s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
+       static const s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
+       static const s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
+       static const s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
+       static const s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
+       const s8 *lna2_gain_db = NULL;
+       static const s8 tiaG_gain_db[] = {
                0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
-       s8 tiaA_gain_db[] = {
+       static const s8 tiaA_gain_db[] = {
                0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
-       s8 tiaA_gain_db_rev4[] = {
+       static const s8 tiaA_gain_db_rev4[] = {
                0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-       s8 tiaA_gain_db_rev5[] = {
+       static const s8 tiaA_gain_db_rev5[] = {
                0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-       s8 tiaA_gain_db_rev6[] = {
+       static const s8 tiaA_gain_db_rev6[] = {
                0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-       s8 *tia_gain_db;
-       s8 tiaG_gainbits[] = {
+       const s8 *tia_gain_db;
+       static const s8 tiaG_gainbits[] = {
                0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
-       s8 tiaA_gainbits[] = {
+       static const s8 tiaA_gainbits[] = {
                0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
-       s8 tiaA_gainbits_rev4[] = {
+       static const s8 tiaA_gainbits_rev4[] = {
                0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-       s8 tiaA_gainbits_rev5[] = {
+       static const s8 tiaA_gainbits_rev5[] = {
                0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-       s8 tiaA_gainbits_rev6[] = {
+       static const s8 tiaA_gainbits_rev6[] = {
                0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-       s8 *tia_gainbits;
-       s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
-       s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
-       u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
-       u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
-       u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
-       u16 rfseqG_init_gain_rev5_elna[] = {
+       const s8 *tia_gainbits;
+       static const s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
+       static const s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
+       static const u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
+       static const u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
+       static const u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
+       static const u16 rfseqG_init_gain_rev5_elna[] = {
                0x013f, 0x013f, 0x013f, 0x013f };
-       u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
-       u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
-       u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
-       u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
-       u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
-       u16 rfseqA_init_gain_rev4_elna[] = {
+       static const u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
+       static const u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
+       static const u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
+       static const u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
+       static const u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
+       static const u16 rfseqA_init_gain_rev4_elna[] = {
                0x314f, 0x314f, 0x314f, 0x314f };
-       u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
-       u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
-       u16 *rfseq_init_gain;
+       static const u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
+       static const u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
+       const u16 *rfseq_init_gain;
        u16 initG_gaincode = 0x627e;
        u16 initG_gaincode_rev4 = 0x527e;
        u16 initG_gaincode_rev5 = 0x427e;
@@ -15538,10 +15535,10 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
        u16 clip1mdA_gaincode_rev6 = 0x2084;
        u16 clip1md_gaincode = 0;
        u16 clip1loG_gaincode = 0x0074;
-       u16 clip1loG_gaincode_rev5[] = {
+       static const u16 clip1loG_gaincode_rev5[] = {
                0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
        };
-       u16 clip1loG_gaincode_rev6[] = {
+       static const u16 clip1loG_gaincode_rev6[] = {
                0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
        };
        u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
@@ -16066,7 +16063,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
 
 static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
 {
-       u8 rfseq_rx2tx_events[] = {
+       static const u8 rfseq_rx2tx_events[] = {
                NPHY_RFSEQ_CMD_NOP,
                NPHY_RFSEQ_CMD_RXG_FBW,
                NPHY_RFSEQ_CMD_TR_SWITCH,
@@ -16076,7 +16073,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
                NPHY_RFSEQ_CMD_EXT_PA
        };
        u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
-       u8 rfseq_tx2rx_events[] = {
+       static const u8 rfseq_tx2rx_events[] = {
                NPHY_RFSEQ_CMD_NOP,
                NPHY_RFSEQ_CMD_EXT_PA,
                NPHY_RFSEQ_CMD_TX_GAIN,
@@ -16085,8 +16082,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
                NPHY_RFSEQ_CMD_RXG_FBW,
                NPHY_RFSEQ_CMD_CLR_HIQ_DIS
        };
-       u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
-       u8 rfseq_tx2rx_events_rev3[] = {
+       static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
+       static const u8 rfseq_tx2rx_events_rev3[] = {
                NPHY_REV3_RFSEQ_CMD_EXT_PA,
                NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
                NPHY_REV3_RFSEQ_CMD_TX_GAIN,
@@ -16096,7 +16093,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
                NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
                NPHY_REV3_RFSEQ_CMD_END
        };
-       u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+       static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
        u8 rfseq_rx2tx_events_rev3[] = {
                NPHY_REV3_RFSEQ_CMD_NOP,
                NPHY_REV3_RFSEQ_CMD_RXG_FBW,
@@ -16110,7 +16107,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
        };
        u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
 
-       u8 rfseq_rx2tx_events_rev3_ipa[] = {
+       static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
                NPHY_REV3_RFSEQ_CMD_NOP,
                NPHY_REV3_RFSEQ_CMD_RXG_FBW,
                NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
@@ -16121,15 +16118,15 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
                NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
                NPHY_REV3_RFSEQ_CMD_END
        };
-       u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
-       u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
+       static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+       static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
 
        s16 alpha0, alpha1, alpha2;
        s16 beta0, beta1, beta2;
        u32 leg_data_weights, ht_data_weights, nss1_data_weights,
            stbc_data_weights;
        u8 chan_freq_range = 0;
-       u16 dac_control = 0x0002;
+       static const u16 dac_control = 0x0002;
        u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 };
        u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 };
        u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
@@ -16139,8 +16136,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
        u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
        u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
        u16 *aux_adc_gain;
-       u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
-       u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
+       static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
+       static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
        s32 min_nvar_val = 0x18d;
        s32 min_nvar_offset_6mbps = 20;
        u8 pdetrange;
@@ -16151,9 +16148,9 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
        u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77;
        u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77;
        u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77;
-       u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
-       u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
-       u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+       static const u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
+       static const u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+       static const u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
        u16 ipalvlshift_3p3_war_en = 0;
        u16 rccal_bcap_val, rccal_scap_val;
        u16 rccal_tx20_11b_bcap = 0;
@@ -24291,13 +24288,13 @@ static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
        u16 bbmult;
        u16 tblentry;
 
-       struct nphy_txiqcal_ladder ladder_lo[] = {
+       static const struct nphy_txiqcal_ladder ladder_lo[] = {
                {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
                {25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
                {25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
        };
 
-       struct nphy_txiqcal_ladder ladder_iq[] = {
+       static const struct nphy_txiqcal_ladder ladder_iq[] = {
                {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
                {25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
                {100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
@@ -25773,67 +25770,67 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
        u16 cal_gain[2];
        struct nphy_iqcal_params cal_params[2];
        u32 tbl_len;
-       void *tbl_ptr;
+       const void *tbl_ptr;
        bool ladder_updated[2];
        u8 mphase_cal_lastphase = 0;
        int bcmerror = 0;
        bool phyhang_avoid_state = false;
 
-       u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+       static const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
                0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901,
                0x1902,
                0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607,
                0x6407
        };
 
-       u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+       static const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
                0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400,
                0x3200,
                0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406,
                0x6407
        };
 
-       u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+       static const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
                0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201,
                0x1202,
                0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207,
                0x4707
        };
 
-       u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+       static const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
                0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900,
                0x2300,
                0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706,
                0x4707
        };
 
-       u16 tbl_tx_iqlo_cal_startcoefs[] = {
+       static const u16 tbl_tx_iqlo_cal_startcoefs[] = {
                0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
                0x0000
        };
 
-       u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+       static const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
                0x8123, 0x8264, 0x8086, 0x8245, 0x8056,
                0x9123, 0x9264, 0x9086, 0x9245, 0x9056
        };
 
-       u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+       static const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
                0x8101, 0x8253, 0x8053, 0x8234, 0x8034,
                0x9101, 0x9253, 0x9053, 0x9234, 0x9034
        };
 
-       u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
+       static const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
                0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
                0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
                0x0000
        };
 
-       u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+       static const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
                0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234,
                0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234
        };
 
-       u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+       static const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
                0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223,
                0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223
        };
index 45e2efc70d19e5f44c7a5e2a1cde2cf9a7448f91..ce741beec1fcf7c779cc993c4f6b240151257028 100644 (file)
@@ -309,6 +309,7 @@ const struct iwl_cfg iwl3168_2ac_cfg = {
        .nvm_calib_ver = IWL3168_TX_POWER_VERSION,
        .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
        .dccm_len = IWL7265_DCCM_LEN,
+       .nvm_type = IWL_NVM_SDP,
 };
 
 const struct iwl_cfg iwl7265_2ac_cfg = {
index 2e6c52664ceedddfcba3ad1a20954328348d121b..c2a5936ccede26dbbd10512a21ef5da162e4b2d7 100644 (file)
@@ -164,7 +164,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
        .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,       \
        .thermal_params = &iwl8000_tt_params,                           \
        .apmg_not_supported = true,                                     \
-       .ext_nvm = true,                                                \
+       .nvm_type = IWL_NVM_EXT,                                        \
        .dbgc_supported = true
 
 #define IWL_DEVICE_8000                                                        \
index 2babe0a1f18bcfb675fa21e089dff3b97f873b50..e8b5ff42f5a8f3711d0d27bcf64790d895384368 100644 (file)
@@ -148,7 +148,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
        .vht_mu_mimo_supported = true,                                  \
        .mac_addr_from_csr = true,                                      \
        .rf_id = true,                                                  \
-       .ext_nvm = true,                                                \
+       .nvm_type = IWL_NVM_EXT,                                        \
        .dbgc_supported = true
 
 const struct iwl_cfg iwl9160_2ac_cfg = {
index 76ba1f8bc72feb51e96ac060fe8d5afaf1bab4f2..a440140ed8dda3d889629238af0d1aff4af725b4 100644 (file)
@@ -133,7 +133,7 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
        .use_tfh = true,                                                \
        .rf_id = true,                                                  \
        .gen2 = true,                                                   \
-       .ext_nvm = true,                                                \
+       .nvm_type = IWL_NVM_EXT,                                        \
        .dbgc_supported = true
 
 const struct iwl_cfg iwla000_2ac_cfg_hr = {
index 00bc7a25dece29bd80392383eef7186aa8b961c0..3fd07bc80f54dd77410f4911e991b9460ca7ebb0 100644 (file)
@@ -108,6 +108,7 @@ enum iwl_nvm_access_target {
  * @NVM_SECTION_TYPE_REGULATORY: regulatory section
  * @NVM_SECTION_TYPE_CALIBRATION: calibration section
  * @NVM_SECTION_TYPE_PRODUCTION: production section
+ * @NVM_SECTION_TYPE_REGULATORY_SDP: regulatory section used by 3168 series
  * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section
  * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section
  * @NVM_MAX_NUM_SECTIONS: number of sections
@@ -117,6 +118,7 @@ enum iwl_nvm_section_type {
        NVM_SECTION_TYPE_REGULATORY = 3,
        NVM_SECTION_TYPE_CALIBRATION = 4,
        NVM_SECTION_TYPE_PRODUCTION = 5,
+       NVM_SECTION_TYPE_REGULATORY_SDP = 8,
        NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
        NVM_SECTION_TYPE_PHY_SKU = 12,
        NVM_MAX_NUM_SECTIONS = 13,
index 6afc7a799892f424c64934f67b949d4c9beaa48d..f5dd7d83cd0a8eb8da5fc150ebb3d035c2c03a97 100644 (file)
@@ -1086,7 +1086,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
 
        if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
                /* stop recording */
-               iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+               iwl_fw_dbg_stop_recording(fwrt);
 
                iwl_fw_error_dump(fwrt);
 
@@ -1104,10 +1104,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
                u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
                u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);
 
-               /* stop recording */
-               iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
-               udelay(100);
-               iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
+               iwl_fw_dbg_stop_recording(fwrt);
                /* wait before we collect the data till the DBGC stop */
                udelay(500);
 
index 0f810ea89d31fa6394e80b90bac0dab38754aaae..9c889a32fe2424941d9bceb89b8cd1f593e4f3ab 100644 (file)
@@ -68,6 +68,8 @@
 #include <linux/workqueue.h>
 #include <net/cfg80211.h>
 #include "runtime.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
 #include "file.h"
 #include "error-dump.h"
 
@@ -194,8 +196,21 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
                                        iwl_fw_dbg_get_trigger((fwrt)->fw,\
                                                               (trig)))
 
+static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
+{
+       if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+       } else {
+               iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
+               udelay(100);
+               iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
+       }
+}
+
 static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
 {
+       iwl_fw_dbg_stop_recording(fwrt);
+
        fwrt->dump.conf = FW_DBG_INVALID;
 }
 
index 3e057b539d5b76dede3bf18de696003e041ace7a..71cb1ecde0f72b4b73d236d3028d5dde8bda4457 100644 (file)
@@ -108,6 +108,18 @@ enum iwl_led_mode {
        IWL_LED_DISABLE,
 };
 
+/**
+ * enum iwl_nvm_type - nvm formats
+ * @IWL_NVM: the regular format
+ * @IWL_NVM_EXT: extended NVM format
+ * @IWL_NVM_SDP: NVM format used by 3168 series
+ */
+enum iwl_nvm_type {
+       IWL_NVM,
+       IWL_NVM_EXT,
+       IWL_NVM_SDP,
+};
+
 /*
  * This is the threshold value of plcp error rate per 100mSecs.  It is
  * used to set and check for the validity of plcp_delta.
@@ -320,7 +332,7 @@ struct iwl_pwr_tx_backoff {
  * @integrated: discrete or integrated
  * @gen2: a000 and on transport operation
  * @cdb: CDB support
- * @ext_nvm: extended NVM format
+ * @nvm_type: see &enum iwl_nvm_type
  *
  * We enable the driver to be backward compatible wrt. hardware features.
  * API differences in uCode shouldn't be handled here but through TLVs
@@ -342,6 +354,7 @@ struct iwl_cfg {
        const struct iwl_tt_params *thermal_params;
        enum iwl_device_family device_family;
        enum iwl_led_mode led_mode;
+       enum iwl_nvm_type nvm_type;
        u32 max_data_size;
        u32 max_inst_size;
        netdev_features_t features;
@@ -369,7 +382,6 @@ struct iwl_cfg {
            use_tfh:1,
            gen2:1,
            cdb:1,
-           ext_nvm:1,
            dbgc_supported:1;
        u8 valid_tx_ant;
        u8 valid_rx_ant;
index 3014beef48730dbd077c55b453e8e14c4bfb4c22..c3a5d8ccc95e942443dbf74e2ac6e21b2f09dfcb 100644 (file)
@@ -77,7 +77,7 @@
 #include "iwl-csr.h"
 
 /* NVM offsets (in words) definitions */
-enum wkp_nvm_offsets {
+enum nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        SUBSYSTEM_ID = 0x0A,
        HW_ADDR = 0x15,
@@ -92,7 +92,10 @@ enum wkp_nvm_offsets {
 
        /* NVM calibration section offset (in words) definitions */
        NVM_CALIB_SECTION = 0x2B8,
-       XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
+       XTAL_CALIB = 0x316 - NVM_CALIB_SECTION,
+
+       /* NVM REGULATORY -Section offset (in words) definitions */
+       NVM_CHANNELS_SDP = 0,
 };
 
 enum ext_nvm_offsets {
@@ -206,8 +209,36 @@ enum iwl_nvm_channel_flags {
        NVM_CHANNEL_DC_HIGH             = BIT(12),
 };
 
+static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
+                                              int chan, u16 flags)
+{
 #define CHECK_AND_PRINT_I(x)   \
-       ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
+       ((flags & NVM_CHANNEL_##x) ? " " #x : "")
+
+       if (!(flags & NVM_CHANNEL_VALID)) {
+               IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n",
+                             chan, flags);
+               return;
+       }
+
+       /* Note: already can print up to 101 characters, 110 is the limit! */
+       IWL_DEBUG_DEV(dev, level,
+                     "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+                     chan, flags,
+                     CHECK_AND_PRINT_I(VALID),
+                     CHECK_AND_PRINT_I(IBSS),
+                     CHECK_AND_PRINT_I(ACTIVE),
+                     CHECK_AND_PRINT_I(RADAR),
+                     CHECK_AND_PRINT_I(INDOOR_ONLY),
+                     CHECK_AND_PRINT_I(GO_CONCURRENT),
+                     CHECK_AND_PRINT_I(UNIFORM),
+                     CHECK_AND_PRINT_I(20MHZ),
+                     CHECK_AND_PRINT_I(40MHZ),
+                     CHECK_AND_PRINT_I(80MHZ),
+                     CHECK_AND_PRINT_I(160MHZ),
+                     CHECK_AND_PRINT_I(DC_HIGH));
+#undef CHECK_AND_PRINT_I
+}
 
 static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
                                 u16 nvm_flags, const struct iwl_cfg *cfg)
@@ -215,7 +246,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
        u32 flags = IEEE80211_CHAN_NO_HT40;
        u32 last_5ghz_ht = LAST_5GHZ_HT;
 
-       if (cfg->ext_nvm)
+       if (cfg->nvm_type == IWL_NVM_EXT)
                last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
 
        if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
@@ -268,7 +299,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
        int num_of_ch, num_2ghz_channels;
        const u8 *nvm_chan;
 
-       if (!cfg->ext_nvm) {
+       if (cfg->nvm_type != IWL_NVM_EXT) {
                num_of_ch = IWL_NUM_CHANNELS;
                nvm_chan = &iwl_nvm_channels[0];
                num_2ghz_channels = NUM_2GHZ_CHANNELS;
@@ -302,12 +333,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                         * supported, hence we still want to add them to
                         * the list of supported channels to cfg80211.
                         */
-                       IWL_DEBUG_EEPROM(dev,
-                                        "Ch. %d Flags %x [%sGHz] - No traffic\n",
-                                        nvm_chan[ch_idx],
-                                        ch_flags,
-                                        (ch_idx >= num_2ghz_channels) ?
-                                        "5.2" : "2.4");
+                       iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
+                                                   nvm_chan[ch_idx], ch_flags);
                        continue;
                }
 
@@ -337,27 +364,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                else
                        channel->flags = 0;
 
-               IWL_DEBUG_EEPROM(dev,
-                                "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n",
-                                channel->hw_value,
-                                is_5ghz ? "5.2" : "2.4",
-                                ch_flags,
-                                CHECK_AND_PRINT_I(VALID),
-                                CHECK_AND_PRINT_I(IBSS),
-                                CHECK_AND_PRINT_I(ACTIVE),
-                                CHECK_AND_PRINT_I(RADAR),
-                                CHECK_AND_PRINT_I(INDOOR_ONLY),
-                                CHECK_AND_PRINT_I(GO_CONCURRENT),
-                                CHECK_AND_PRINT_I(UNIFORM),
-                                CHECK_AND_PRINT_I(20MHZ),
-                                CHECK_AND_PRINT_I(40MHZ),
-                                CHECK_AND_PRINT_I(80MHZ),
-                                CHECK_AND_PRINT_I(160MHZ),
-                                CHECK_AND_PRINT_I(DC_HIGH),
-                                channel->max_power,
-                                ((ch_flags & NVM_CHANNEL_IBSS) &&
-                                 !(ch_flags & NVM_CHANNEL_RADAR))
-                                       ? "" : "not ");
+               iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
+                                           channel->hw_value, ch_flags);
+               IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
+                                channel->hw_value, channel->max_power);
        }
 
        return n_channels;
@@ -484,7 +494,7 @@ IWL_EXPORT_SYMBOL(iwl_init_sbands);
 static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
                       const __le16 *phy_sku)
 {
-       if (!cfg->ext_nvm)
+       if (cfg->nvm_type != IWL_NVM_EXT)
                return le16_to_cpup(nvm_sw + SKU);
 
        return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
@@ -492,7 +502,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
 
 static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
 {
-       if (!cfg->ext_nvm)
+       if (cfg->nvm_type != IWL_NVM_EXT)
                return le16_to_cpup(nvm_sw + NVM_VERSION);
        else
                return le32_to_cpup((__le32 *)(nvm_sw +
@@ -502,7 +512,7 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
 static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
                             const __le16 *phy_sku)
 {
-       if (!cfg->ext_nvm)
+       if (cfg->nvm_type != IWL_NVM_EXT)
                return le16_to_cpup(nvm_sw + RADIO_CFG);
 
        return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
@@ -513,7 +523,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
 {
        int n_hw_addr;
 
-       if (!cfg->ext_nvm)
+       if (cfg->nvm_type != IWL_NVM_EXT)
                return le16_to_cpup(nvm_sw + N_HW_ADDRS);
 
        n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
@@ -525,7 +535,7 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
                              struct iwl_nvm_data *data,
                              u32 radio_cfg)
 {
-       if (!cfg->ext_nvm) {
+       if (cfg->nvm_type != IWL_NVM_EXT) {
                data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
                data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
                data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
@@ -634,7 +644,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
 {
        if (cfg->mac_addr_from_csr) {
                iwl_set_hw_address_from_csr(trans, data);
-       } else if (!cfg->ext_nvm) {
+       } else if (cfg->nvm_type != IWL_NVM_EXT) {
                const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
 
                /* The byte order is little endian 16 bit, meaning 214365 */
@@ -706,7 +716,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        u16 lar_config;
        const __le16 *ch_section;
 
-       if (!cfg->ext_nvm)
+       if (cfg->nvm_type != IWL_NVM_EXT)
                data = kzalloc(sizeof(*data) +
                               sizeof(struct ieee80211_channel) *
                               IWL_NUM_CHANNELS,
@@ -740,7 +750,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
 
-       if (!cfg->ext_nvm) {
+       if (cfg->nvm_type != IWL_NVM_EXT) {
                /* Checking for required sections */
                if (!nvm_calib) {
                        IWL_ERR(trans,
@@ -748,11 +758,15 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                        kfree(data);
                        return NULL;
                }
+
+               ch_section = cfg->nvm_type == IWL_NVM_SDP ?
+                            &regulatory[NVM_CHANNELS_SDP] :
+                            &nvm_sw[NVM_CHANNELS];
+
                /* in family 8000 Xtal calibration values moved to OTP */
                data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
                data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
                lar_enabled = true;
-               ch_section = &nvm_sw[NVM_CHANNELS];
        } else {
                u16 lar_offset = data->nvm_version < 0xE39 ?
                                 NVM_LAR_OFFSET_OLD :
@@ -786,7 +800,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
        u32 flags = NL80211_RRF_NO_HT40;
        u32 last_5ghz_ht = LAST_5GHZ_HT;
 
-       if (cfg->ext_nvm)
+       if (cfg->nvm_type == IWL_NVM_EXT)
                last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
 
        if (ch_idx < NUM_2GHZ_CHANNELS &&
@@ -834,7 +848,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        int ch_idx;
        u16 ch_flags;
        u32 reg_rule_flags, prev_reg_rule_flags = 0;
-       const u8 *nvm_chan = cfg->ext_nvm ?
+       const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
                             iwl_ext_nvm_channels : iwl_nvm_channels;
        struct ieee80211_regdomain *regd;
        int size_of_regd;
@@ -843,7 +857,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        int center_freq, prev_center_freq = 0;
        int valid_rules = 0;
        bool new_rule;
-       int max_num_ch = cfg->ext_nvm ?
+       int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
                         IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS;
 
        if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
@@ -873,12 +887,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                new_rule = false;
 
                if (!(ch_flags & NVM_CHANNEL_VALID)) {
-                       IWL_DEBUG_DEV(dev, IWL_DL_LAR,
-                                     "Ch. %d Flags %x [%sGHz] - No traffic\n",
-                                     nvm_chan[ch_idx],
-                                     ch_flags,
-                                     (ch_idx >= NUM_2GHZ_CHANNELS) ?
-                                     "5.2" : "2.4");
+                       iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
+                                                   nvm_chan[ch_idx], ch_flags);
                        continue;
                }
 
@@ -914,31 +924,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                prev_center_freq = center_freq;
                prev_reg_rule_flags = reg_rule_flags;
 
-               IWL_DEBUG_DEV(dev, IWL_DL_LAR,
-                             "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s%s%s%s(0x%02x)\n",
-                             center_freq,
-                             band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
-                             CHECK_AND_PRINT_I(VALID),
-                             CHECK_AND_PRINT_I(IBSS),
-                             CHECK_AND_PRINT_I(ACTIVE),
-                             CHECK_AND_PRINT_I(RADAR),
-                             CHECK_AND_PRINT_I(INDOOR_ONLY),
-                             CHECK_AND_PRINT_I(GO_CONCURRENT),
-                             CHECK_AND_PRINT_I(UNIFORM),
-                             CHECK_AND_PRINT_I(20MHZ),
-                             CHECK_AND_PRINT_I(40MHZ),
-                             CHECK_AND_PRINT_I(80MHZ),
-                             CHECK_AND_PRINT_I(160MHZ),
-                             CHECK_AND_PRINT_I(DC_HIGH),
-                             ch_flags);
-               IWL_DEBUG_DEV(dev, IWL_DL_LAR,
-                             "Ch. %d [%sGHz] reg_flags 0x%x: %s\n",
-                             center_freq,
-                             band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
-                             reg_rule_flags,
-                             ((ch_flags & NVM_CHANNEL_ACTIVE) &&
-                              !(ch_flags & NVM_CHANNEL_RADAR))
-                                        ? "Ad-Hoc" : "");
+               iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
+                                           nvm_chan[ch_idx], ch_flags);
        }
 
        regd->n_reg_rules = valid_rules;
index 5de19ea1057571f0395c4cc6b4296d60aca7c204..b205a7bfb828dc075c9019d92d0cd2b4fa16bc45 100644 (file)
@@ -2167,7 +2167,7 @@ out:
         * 1. We are not using a unified image
         * 2. We are using a unified image but had an error while exiting D3
         */
-       set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+       set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
        set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
        /*
         * When switching images we return 1, which causes mac80211
index 15f2d826bb4b20cd561bb6674a2590790fa47afe..a9ac872226fdf79d87d6bc8b9643d9d6d86cd793 100644 (file)
@@ -1077,6 +1077,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
        mvm->vif_count = 0;
        mvm->rx_ba_sessions = 0;
        mvm->fwrt.dump.conf = FW_DBG_INVALID;
+       mvm->monitor_on = false;
 
        /* keep statistics ticking */
        iwl_mvm_accu_radio_stats(mvm);
@@ -1437,6 +1438,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
                mvm->p2p_device_vif = vif;
        }
 
+       if (vif->type == NL80211_IFTYPE_MONITOR)
+               mvm->monitor_on = true;
+
        iwl_mvm_vif_dbgfs_register(mvm, vif);
        goto out_unlock;
 
@@ -1526,6 +1530,9 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
        iwl_mvm_power_update_mac(mvm);
        iwl_mvm_mac_ctxt_remove(mvm, vif);
 
+       if (vif->type == NL80211_IFTYPE_MONITOR)
+               mvm->monitor_on = false;
+
 out_release:
        mutex_unlock(&mvm->mutex);
 }
@@ -1546,6 +1553,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
        struct iwl_mvm_mc_iter_data *data = _data;
        struct iwl_mvm *mvm = data->mvm;
        struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+       struct iwl_host_cmd hcmd = {
+               .id = MCAST_FILTER_CMD,
+               .flags = CMD_ASYNC,
+               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+       };
        int ret, len;
 
        /* if we don't have free ports, mcast frames will be dropped */
@@ -1560,7 +1572,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
        memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
        len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
+       hcmd.len[0] = len;
+       hcmd.data[0] = cmd;
+
+       ret = iwl_mvm_send_cmd(mvm, &hcmd);
        if (ret)
                IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
@@ -1635,6 +1650,12 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
        if (!cmd)
                goto out;
 
+       if (changed_flags & FIF_ALLMULTI)
+               cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
+
+       if (cmd->pass_all)
+               cmd->count = 0;
+
        iwl_mvm_recalc_multicast(mvm);
 out:
        mutex_unlock(&mvm->mutex);
@@ -2563,7 +2584,7 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
                         * queues, so we should never get a second deferred
                         * frame for the RA/TID.
                         */
-                       iwl_mvm_start_mac_queues(mvm, info->hw_queue);
+                       iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
                        ieee80211_free_txskb(mvm->hw, skb);
                }
        }
@@ -3975,6 +3996,43 @@ out_unlock:
        return ret;
 }
 
+static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
+{
+       if (drop) {
+               if (iwl_mvm_has_new_tx_api(mvm))
+                       /* TODO new tx api */
+                       WARN_ONCE(1,
+                                 "Need to implement flush TX queue\n");
+               else
+                       iwl_mvm_flush_tx_path(mvm,
+                               iwl_mvm_flushable_queues(mvm) & queues,
+                               0);
+       } else {
+               if (iwl_mvm_has_new_tx_api(mvm)) {
+                       struct ieee80211_sta *sta;
+                       int i;
+
+                       mutex_lock(&mvm->mutex);
+
+                       for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+                               sta = rcu_dereference_protected(
+                                               mvm->fw_id_to_mac_id[i],
+                                               lockdep_is_held(&mvm->mutex));
+                               if (IS_ERR_OR_NULL(sta))
+                                       continue;
+
+                               iwl_mvm_wait_sta_queues_empty(mvm,
+                                               iwl_mvm_sta_from_mac80211(sta));
+                       }
+
+                       mutex_unlock(&mvm->mutex);
+               } else {
+                       iwl_trans_wait_tx_queues_empty(mvm->trans,
+                                                      queues);
+               }
+       }
+}
+
 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif, u32 queues, bool drop)
 {
@@ -3985,7 +4043,12 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
        int i;
        u32 msk = 0;
 
-       if (!vif || vif->type != NL80211_IFTYPE_STATION)
+       if (!vif) {
+               iwl_mvm_flush_no_vif(mvm, queues, drop);
+               return;
+       }
+
+       if (vif->type != NL80211_IFTYPE_STATION)
                return;
 
        /* Make sure we're done with the deferred traffic before flushing */
index 83303bac0e4babf6b1d3d41d1f0fc167b80c871e..949e6341829908a8e8cc684ac616121dfd267428 100644 (file)
@@ -1015,6 +1015,9 @@ struct iwl_mvm {
        bool drop_bcn_ap_mode;
 
        struct delayed_work cs_tx_unblock_dwork;
+
+       /* does a monitor vif exist (only one can exist hence bool) */
+       bool monitor_on;
 #ifdef CONFIG_ACPI
        struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM];
        struct iwl_mvm_geo_profile geo_profiles[IWL_NUM_GEO_PROFILES];
@@ -1159,7 +1162,7 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
         * Enable LAR only if it is supported by the FW (TLV) &&
         * enabled in the NVM
         */
-       if (mvm->cfg->ext_nvm)
+       if (mvm->cfg->nvm_type == IWL_NVM_EXT)
                return nvm_lar && tlv_lar;
        else
                return tlv_lar;
index 422aa6be99328b2f7dfe66db69c724bbe3824291..fb25b6f29323888b46789374077b4d4c1895ba05 100644 (file)
@@ -295,18 +295,24 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        const __be16 *hw;
        const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
        bool lar_enabled;
+       int regulatory_type;
 
        /* Checking for required sections */
-       if (!mvm->trans->cfg->ext_nvm) {
+       if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
                if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
                    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
                        IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
                        return NULL;
                }
        } else {
+               if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP)
+                       regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP;
+               else
+                       regulatory_type = NVM_SECTION_TYPE_REGULATORY;
+
                /* SW and REGULATORY sections are mandatory */
                if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
-                   !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
+                   !mvm->nvm_sections[regulatory_type].data) {
                        IWL_ERR(mvm,
                                "Can't parse empty family 8000 OTP/NVM sections\n");
                        return NULL;
@@ -330,11 +336,14 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data;
        sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
        calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
-       regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
        mac_override =
                (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
        phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
 
+       regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ?
+               (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
+               (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
+
        lar_enabled = !iwlwifi_mod_params.lar_disable &&
                      fw_has_capa(&mvm->fw->ucode_capa,
                                  IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
@@ -394,7 +403,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
        IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
 
        /* Maximal size depends on NVM version */
-       if (!mvm->trans->cfg->ext_nvm)
+       if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT)
                max_section_size = IWL_MAX_NVM_SECTION_SIZE;
        else
                max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
@@ -465,7 +474,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
                        break;
                }
 
-               if (!mvm->trans->cfg->ext_nvm) {
+               if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
                        section_size =
                                2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
                        section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
@@ -740,7 +749,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
        struct ieee80211_regdomain *regd;
        char mcc[3];
 
-       if (mvm->cfg->ext_nvm) {
+       if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
                tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
                                      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
                nvm_lar = mvm->nvm_data->lar_enabled;
index ba7bd049d3d4e0238c0dd81e598ce097972d762e..0fe723ca844eeeadeef04f453124b51b23a784b1 100644 (file)
@@ -661,7 +661,8 @@ static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
            (lq_sta->tx_agg_tid_en & BIT(tid)) &&
            (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) {
                IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid);
-               rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta);
+               if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0)
+                       tid_data->state = IWL_AGG_QUEUED;
        }
 }
 
index 184c749766f29455bc57bf71ecb84b905a6354a7..2d14a58cbdd7e1cc2e7eaeb20eebc7242e0e5993 100644 (file)
@@ -244,7 +244,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
                return 0;
 
        default:
-               IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
+               /* Expected in monitor (not having the keys) */
+               if (!mvm->monitor_on)
+                       IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
        }
 
        return 0;
index 67ffd9774712b26c4a25d921a043959e12cfd113..248699c2c4bff0981a680eb4e161ffd5da261795 100644 (file)
@@ -277,7 +277,9 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                stats->flag |= RX_FLAG_DECRYPTED;
                return 0;
        default:
-               IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
+               /* Expected in monitor (not having the keys) */
+               if (!mvm->monitor_on)
+                       IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
        }
 
        return 0;
@@ -672,11 +674,12 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
         * If there was a significant jump in the nssn - adjust.
         * If the SN is smaller than the NSSN it might need to first go into
         * the reorder buffer, in which case we just release up to it and the
-        * rest of the function will take of storing it and releasing up to the
-        * nssn
+        * rest of the function will take care of storing it and releasing up to
+        * the nssn
         */
        if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
-                               buffer->buf_size)) {
+                               buffer->buf_size) ||
+           !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
                u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
 
                iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
index 50983615dce673c6a96365a34ddc3fe9ae3cfa70..774122fed454fbb4d1d109a18a53634baf6f83e2 100644 (file)
@@ -555,7 +555,7 @@ static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
        struct iwl_host_cmd cmd = {
                .id = SCAN_OFFLOAD_ABORT_CMD,
        };
-       u32 status;
+       u32 status = CAN_ABORT_STATUS;
 
        ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
        if (ret)
index 411a2055dc451d2ce18421bd4c520b9068bba942..c4a343534c5ead89793b6375660cb9238aeb86ed 100644 (file)
@@ -1285,7 +1285,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
 {
        struct iwl_mvm_add_sta_cmd cmd;
        int ret;
-       u32 status;
+       u32 status = ADD_STA_SUCCESS;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -2385,8 +2385,10 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
                return -EINVAL;
 
-       if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
-               IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
+       if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
+           mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
+               IWL_ERR(mvm,
+                       "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
                        mvmsta->tid_data[tid].state);
                return -ENXIO;
        }
index d138938065136fc0dbaff623a005c4e529e5be94..aedabe101cf0f09681a7c61cfa6d6eb326b033a6 100644 (file)
@@ -281,6 +281,7 @@ struct iwl_mvm_vif;
  * These states relate to a specific RA / TID.
  *
  * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_QUEUED: aggregation start work has been queued
  * @IWL_AGG_STARTING: aggregation are starting (between start and oper)
  * @IWL_AGG_ON: aggregation session is up
  * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
@@ -290,6 +291,7 @@ struct iwl_mvm_vif;
  */
 enum iwl_mvm_agg_state {
        IWL_AGG_OFF = 0,
+       IWL_AGG_QUEUED,
        IWL_AGG_STARTING,
        IWL_AGG_ON,
        IWL_EMPTYING_HW_QUEUE_ADDBA,
index 8876c2abc440ea9fd5e698f1ac6cc45e0d05848f..1232f63278eb64167263e55255cb95b54c27258d 100644 (file)
@@ -529,6 +529,7 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
 
        lockdep_assert_held(&mvm->mutex);
 
+       status = 0;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
                                                       CTDP_CONFIG_CMD),
                                          sizeof(cmd), &cmd, &status);
@@ -630,7 +631,7 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
 
        if (!iwl_mvm_firmware_running(mvm) ||
            mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
-               ret = -EIO;
+               ret = -ENODATA;
                goto out;
        }
 
index 172b5e63d3fbebaaa63e7d6968c212b1f32f1a22..6f2e2af23219a27f856b9bb0bd3ce7cf0647856d 100644 (file)
@@ -564,8 +564,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_ADHOC:
                /*
-                * Handle legacy hostapd as well, where station will be added
-                * only just before sending the association response.
+                * Non-bufferable frames use the broadcast station, thus they
+                * use the probe queue.
                 * Also take care of the case where we send a deauth to a
                 * station that we don't have, or similarly an association
                 * response (with non-success status) for a station we can't
@@ -573,9 +573,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
                 * Also, disassociate frames might happen, particular with
                 * reason 7 ("Class 3 frame received from nonassociated STA").
                 */
-               if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
-                   ieee80211_is_deauth(fc) || ieee80211_is_assoc_resp(fc) ||
-                   ieee80211_is_disassoc(fc))
+               if (ieee80211_is_mgmt(fc) &&
+                   (!ieee80211_is_bufferable_mmpdu(fc) ||
+                    ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
                        return mvm->probe_queue;
                if (info->hw_queue == info->control.vif->cab_queue)
                        return mvmvif->cab_queue;
index 856fa6e8327ed4d6d2a3b9148c9aa3445ffb775c..a450bc6bc77453b368aecef35b19971241e8994b 100644 (file)
@@ -115,6 +115,8 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
 
        vif = qtnf_netdev_get_priv(wdev->netdev);
 
+       qtnf_scan_done(vif->mac, true);
+
        if (qtnf_cmd_send_del_intf(vif))
                pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
                       vif->vifid);
@@ -335,6 +337,8 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
        int ret;
 
+       qtnf_scan_done(vif->mac, true);
+
        ret = qtnf_cmd_send_stop_ap(vif);
        if (ret) {
                pr_err("VIF%u.%u: failed to stop AP operation in FW\n",
@@ -570,8 +574,6 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
            !qtnf_sta_list_lookup(&vif->sta_list, params->mac))
                return 0;
 
-       qtnf_scan_done(vif->mac, true);
-
        ret = qtnf_cmd_send_del_sta(vif, params);
        if (ret)
                pr_err("VIF%u.%u: failed to delete STA %pM\n",
@@ -1134,8 +1136,9 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev)
                }
 
                vif->sta_state = QTNF_STA_DISCONNECTED;
-               qtnf_scan_done(mac, true);
        }
+
+       qtnf_scan_done(mac, true);
 }
 
 void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif)
index 6a4af52522b8d526e0f4cfad6dcfeb0cede83efc..66db26613b1fb16b065f2dc43739fcb2e1230e65 100644 (file)
@@ -34,6 +34,9 @@ static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted)
                .aborted = aborted,
        };
 
+       if (timer_pending(&mac->scan_timeout))
+               del_timer_sync(&mac->scan_timeout);
+
        mutex_lock(&mac->mac_lock);
 
        if (mac->scan_req) {
index 0fc2814eafad255dfa7ee1074f084fd4016840e5..43d2e7fd6e0211766d4140841a2450f74c1a0788 100644 (file)
@@ -345,8 +345,6 @@ qtnf_event_handle_scan_complete(struct qtnf_wmac *mac,
                return -EINVAL;
        }
 
-       if (timer_pending(&mac->scan_timeout))
-               del_timer_sync(&mac->scan_timeout);
        qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED);
 
        return 0;
index 502e72b7cdcc1d9410bd3694c55799d306d0a1a3..69131965a298fd784420830d2c0a782874b1633d 100644 (file)
@@ -661,14 +661,18 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
        struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
        dma_addr_t txbd_paddr, skb_paddr;
        struct qtnf_tx_bd *txbd;
+       unsigned long flags;
        int len, i;
        u32 info;
        int ret = 0;
 
+       spin_lock_irqsave(&priv->tx0_lock, flags);
+
        if (!qtnf_tx_queue_ready(priv)) {
                if (skb->dev)
                        netif_stop_queue(skb->dev);
 
+               spin_unlock_irqrestore(&priv->tx0_lock, flags);
                return NETDEV_TX_BUSY;
        }
 
@@ -717,8 +721,10 @@ tx_done:
                dev_kfree_skb_any(skb);
        }
 
-       qtnf_pcie_data_tx_reclaim(priv);
        priv->tx_done_count++;
+       spin_unlock_irqrestore(&priv->tx0_lock, flags);
+
+       qtnf_pcie_data_tx_reclaim(priv);
 
        return NETDEV_TX_OK;
 }
@@ -1247,6 +1253,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME);
        init_completion(&bus->request_firmware_complete);
        mutex_init(&bus->bus_lock);
+       spin_lock_init(&pcie_priv->tx0_lock);
        spin_lock_init(&pcie_priv->irq_lock);
        spin_lock_init(&pcie_priv->tx_reclaim_lock);
 
index e76a23716ee019a4f49d38d81d2daa25b59adcb0..86ac1ccedb52fa1fa98620b25345f729bb244c69 100644 (file)
@@ -34,6 +34,8 @@ struct qtnf_pcie_bus_priv {
 
        /* lock for tx reclaim operations */
        spinlock_t tx_reclaim_lock;
+       /* lock for tx0 operations */
+       spinlock_t tx0_lock;
        u8 msi_enabled;
        int mps;
 
index 4f73012978e945869c6310d59996a2f7fc02b511..1d431d4bf6d26197516941d3de54ea1fffc9218b 100644 (file)
@@ -1122,7 +1122,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
        }
        if (0 == tmp) {
                read_addr = REG_DBI_RDATA + addr % 4;
-               ret = rtl_read_byte(rtlpriv, read_addr);
+               ret = rtl_read_word(rtlpriv, read_addr);
        }
        return ret;
 }
index ee8ed9da00ade809a70ac80800f9006edd8f535c..4491ca5aee906c1b29342bc12c692e175d857505 100644 (file)
@@ -486,7 +486,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 
        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
 
-       dev->min_mtu = 0;
+       dev->min_mtu = ETH_MIN_MTU;
        dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
 
        /*
index 523387e71a8021b81beb46407132def2638fdb48..8b8689c6d8877863dddad9f9c10215d311f5fa6f 100644 (file)
@@ -1316,7 +1316,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        netdev->features |= netdev->hw_features;
 
        netdev->ethtool_ops = &xennet_ethtool_ops;
-       netdev->min_mtu = 0;
+       netdev->min_mtu = ETH_MIN_MTU;
        netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
        SET_NETDEV_DEV(netdev, &dev->dev);
 
index 1427a386a033e72f1f8ec7b4fe02471e9d5d92fb..3e4d1e7998dacb13df8638763eb4fa0181e73357 100644 (file)
@@ -1417,6 +1417,15 @@ static int btt_claim_class(struct device *dev)
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct nd_namespace_index *nsindex;
 
+               /*
+                * If any of the DIMMs do not support labels the only
+                * possible BTT format is v1.
+                */
+               if (!ndd) {
+                       loop_bitmask = 0;
+                       break;
+               }
+
                nsindex = to_namespace_index(ndd, ndd->ns_current);
                if (nsindex == NULL)
                        loop_bitmask |= 1;
index acc816b67582f30524ad19f66843b071dfcef6ae..5a14cc7f28ee75e930f8aca11e35443812720536 100644 (file)
@@ -134,8 +134,6 @@ static inline bool nvme_req_needs_retry(struct request *req)
                return false;
        if (nvme_req(req)->status & NVME_SC_DNR)
                return false;
-       if (jiffies - req->start_time >= req->timeout)
-               return false;
        if (nvme_req(req)->retries >= nvme_max_retries)
                return false;
        return true;
@@ -2138,7 +2136,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 
        if (a == &dev_attr_uuid.attr) {
-               if (uuid_is_null(&ns->uuid) ||
+               if (uuid_is_null(&ns->uuid) &&
                    !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
                        return 0;
        }
@@ -2590,7 +2588,7 @@ static void nvme_async_event_work(struct work_struct *work)
                container_of(work, struct nvme_ctrl, async_event_work);
 
        spin_lock_irq(&ctrl->lock);
-       while (ctrl->event_limit > 0) {
+       while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
                int aer_idx = --ctrl->event_limit;
 
                spin_unlock_irq(&ctrl->lock);
@@ -2677,7 +2675,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
                /*FALLTHRU*/
        case NVME_SC_ABORT_REQ:
                ++ctrl->event_limit;
-               queue_work(nvme_wq, &ctrl->async_event_work);
+               if (ctrl->state == NVME_CTRL_LIVE)
+                       queue_work(nvme_wq, &ctrl->async_event_work);
                break;
        default:
                break;
@@ -2692,7 +2691,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
                nvme_queue_scan(ctrl);
                break;
        case NVME_AER_NOTICE_FW_ACT_STARTING:
-               schedule_work(&ctrl->fw_act_work);
+               queue_work(nvme_wq, &ctrl->fw_act_work);
                break;
        default:
                dev_warn(ctrl->device, "async event result %08x\n", result);
index 47307752dc65d3b9e78f7ab736cfc33831c6522b..555c976cc2ee7aae729c6465003b5cdffa11d594 100644 (file)
@@ -565,6 +565,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
        opts->queue_size = NVMF_DEF_QUEUE_SIZE;
        opts->nr_io_queues = num_online_cpus();
        opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
+       opts->kato = NVME_DEFAULT_KATO;
 
        options = o = kstrdup(buf, GFP_KERNEL);
        if (!options)
@@ -655,21 +656,22 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                goto out;
                        }
 
-                       if (opts->discovery_nqn) {
-                               pr_err("Discovery controllers cannot accept keep_alive_tmo != 0\n");
-                               ret = -EINVAL;
-                               goto out;
-                       }
-
                        if (token < 0) {
                                pr_err("Invalid keep_alive_tmo %d\n", token);
                                ret = -EINVAL;
                                goto out;
-                       } else if (token == 0) {
+                       } else if (token == 0 && !opts->discovery_nqn) {
                                /* Allowed for debug */
                                pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
                        }
                        opts->kato = token;
+
+                       if (opts->discovery_nqn && opts->kato) {
+                               pr_err("Discovery controllers cannot accept KATO != 0\n");
+                               ret = -EINVAL;
+                               goto out;
+                       }
+
                        break;
                case NVMF_OPT_CTRL_LOSS_TMO:
                        if (match_int(args, &token)) {
@@ -762,8 +764,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
        uuid_copy(&opts->host->id, &hostid);
 
 out:
-       if (!opts->discovery_nqn && !opts->kato)
-               opts->kato = NVME_DEFAULT_KATO;
        kfree(options);
        return ret;
 }
index d2e882c0f4968e40eb256a3b0f08c8c200611810..be49d0f793816cae0a9629665230acf248ab85ec 100644 (file)
@@ -1376,7 +1376,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
        if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
                status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
        else if (freq->status)
-               status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
+               status = cpu_to_le16(NVME_SC_INTERNAL << 1);
 
        /*
         * For the linux implementation, if we have an unsuccesful
@@ -1404,7 +1404,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
                 */
                if (freq->transferred_length !=
                        be32_to_cpu(op->cmd_iu.data_len)) {
-                       status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
+                       status = cpu_to_le16(NVME_SC_INTERNAL << 1);
                        goto done;
                }
                result.u64 = 0;
@@ -1421,7 +1421,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
                                        freq->transferred_length ||
                             op->rsp_iu.status_code ||
                             sqe->common.command_id != cqe->command_id)) {
-                       status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
+                       status = cpu_to_le16(NVME_SC_INTERNAL << 1);
                        goto done;
                }
                result = cqe->result;
@@ -1429,7 +1429,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
                break;
 
        default:
-               status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
+               status = cpu_to_le16(NVME_SC_INTERNAL << 1);
                goto done;
        }
 
@@ -1989,16 +1989,17 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
         * as well as those by FC-NVME spec.
         */
        WARN_ON_ONCE(sqe->common.metadata);
-       WARN_ON_ONCE(sqe->common.dptr.prp1);
-       WARN_ON_ONCE(sqe->common.dptr.prp2);
        sqe->common.flags |= NVME_CMD_SGL_METABUF;
 
        /*
-        * format SQE DPTR field per FC-NVME rules
-        *    type=data block descr; subtype=offset;
-        *    offset is currently 0.
+        * format SQE DPTR field per FC-NVME rules:
+        *    type=0x5     Transport SGL Data Block Descriptor
+        *    subtype=0xA  Transport-specific value
+        *    address=0
+        *    length=length of the data series
         */
-       sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
+       sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
+                                       NVME_SGL_FMT_TRANSPORT_A;
        sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
        sqe->rw.dptr.sgl.addr = 0;
 
@@ -2544,10 +2545,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
        nvme_fc_abort_aen_ops(ctrl);
 
        /* wait for all io that had to be aborted */
-       spin_lock_irqsave(&ctrl->lock, flags);
+       spin_lock_irq(&ctrl->lock);
        wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
        ctrl->flags &= ~FCCTRL_TERMIO;
-       spin_unlock_irqrestore(&ctrl->lock, flags);
+       spin_unlock_irq(&ctrl->lock);
 
        nvme_fc_term_aen_ops(ctrl);
 
@@ -2733,7 +2734,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 {
        struct nvme_fc_ctrl *ctrl;
        unsigned long flags;
-       int ret, idx;
+       int ret, idx, retry;
 
        if (!(rport->remoteport.port_role &
            (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
@@ -2759,6 +2760,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        ctrl->rport = rport;
        ctrl->dev = lport->dev;
        ctrl->cnum = idx;
+       init_waitqueue_head(&ctrl->ioabort_wait);
 
        get_device(ctrl->dev);
        kref_init(&ctrl->ref);
@@ -2824,9 +2826,37 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
        spin_unlock_irqrestore(&rport->lock, flags);
 
-       ret = nvme_fc_create_association(ctrl);
+       /*
+        * It's possible that transactions used to create the association
+        * may fail. Examples: CreateAssociation LS or CreateIOConnection
+        * LS gets dropped/corrupted/fails; or a frame gets dropped or a
+        * command times out for one of the actions to init the controller
+        * (Connect, Get/Set_Property, Set_Features, etc). Many of these
+        * transport errors (frame drop, LS failure) inherently must kill
+        * the association. The transport is coded so that any command used
+        * to create the association (prior to a LIVE state transition
+        * while NEW or RECONNECTING) will fail if it completes in error or
+        * times out.
+        *
+        * As such: as the connect request was mostly likely due to a
+        * udev event that discovered the remote port, meaning there is
+        * not an admin or script there to restart if the connect
+        * request fails, retry the initial connection creation up to
+        * three times before giving up and declaring failure.
+        */
+       for (retry = 0; retry < 3; retry++) {
+               ret = nvme_fc_create_association(ctrl);
+               if (!ret)
+                       break;
+       }
+
        if (ret) {
+               /* couldn't schedule retry - fail out */
+               dev_err(ctrl->ctrl.device,
+                       "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
+
                ctrl->ctrl.opts = NULL;
+
                /* initiate nvme ctrl ref counting teardown */
                nvme_uninit_ctrl(&ctrl->ctrl);
                nvme_put_ctrl(&ctrl->ctrl);
index 4a2121335f48a0b4af31b413e04af8f9a6a52a2e..3f5a04c586cefdc8096469ba38d325004963b42d 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/once.h>
 #include <linux/pci.h>
 #include <linux/poison.h>
 #include <linux/t10-pi.h>
@@ -93,7 +94,7 @@ struct nvme_dev {
        struct mutex shutdown_lock;
        bool subsystem;
        void __iomem *cmb;
-       dma_addr_t cmb_dma_addr;
+       pci_bus_addr_t cmb_bus_addr;
        u64 cmb_size;
        u32 cmbsz;
        u32 cmbloc;
@@ -540,6 +541,20 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
 }
 #endif
 
+static void nvme_print_sgl(struct scatterlist *sgl, int nents)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sgl, sg, nents, i) {
+               dma_addr_t phys = sg_phys(sg);
+               pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
+                       "dma_address:%pad dma_length:%d\n",
+                       i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
+                       sg_dma_len(sg));
+       }
+}
+
 static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -622,19 +637,10 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
        return BLK_STS_OK;
 
  bad_sgl:
-       if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n",
-                               blk_rq_payload_bytes(req), iod->nents)) {
-               for_each_sg(iod->sg, sg, iod->nents, i) {
-                       dma_addr_t phys = sg_phys(sg);
-                       pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
-                              "dma_address:%pad dma_length:%d\n", i, &phys,
-                                       sg->offset, sg->length,
-                                       &sg_dma_address(sg),
-                                       sg_dma_len(sg));
-               }
-       }
+       WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
+                       "Invalid SGL for payload:%d nents:%d\n",
+                       blk_rq_payload_bytes(req), iod->nents);
        return BLK_STS_IOERR;
-
 }
 
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -1220,7 +1226,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
        if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
                unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
                                                      dev->ctrl.page_size);
-               nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
+               nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
                nvmeq->sq_cmds_io = dev->cmb + offset;
        } else {
                nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
@@ -1313,11 +1319,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
        if (result < 0)
                goto release_cq;
 
+       nvme_init_queue(nvmeq, qid);
        result = queue_request_irq(nvmeq);
        if (result < 0)
                goto release_sq;
 
-       nvme_init_queue(nvmeq, qid);
        return result;
 
  release_sq:
@@ -1464,6 +1470,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
                return result;
 
        nvmeq->cq_vector = 0;
+       nvme_init_queue(nvmeq, 0);
        result = queue_request_irq(nvmeq);
        if (result) {
                nvmeq->cq_vector = -1;
@@ -1520,7 +1527,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
        resource_size_t bar_size;
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        void __iomem *cmb;
-       dma_addr_t dma_addr;
+       int bar;
 
        dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
        if (!(NVME_CMB_SZ(dev->cmbsz)))
@@ -1533,7 +1540,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
        szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
        size = szu * NVME_CMB_SZ(dev->cmbsz);
        offset = szu * NVME_CMB_OFST(dev->cmbloc);
-       bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
+       bar = NVME_CMB_BIR(dev->cmbloc);
+       bar_size = pci_resource_len(pdev, bar);
 
        if (offset > bar_size)
                return NULL;
@@ -1546,12 +1554,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
        if (size > bar_size - offset)
                size = bar_size - offset;
 
-       dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
-       cmb = ioremap_wc(dma_addr, size);
+       cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
        if (!cmb)
                return NULL;
 
-       dev->cmb_dma_addr = dma_addr;
+       dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
        dev->cmb_size = size;
        return cmb;
 }
@@ -2156,7 +2163,6 @@ static void nvme_reset_work(struct work_struct *work)
        if (result)
                goto out;
 
-       nvme_init_queue(dev->queues[0], 0);
        result = nvme_alloc_admin_tags(dev);
        if (result)
                goto out;
index 58983000964be4c2ce3280ca9f4dda768979ed76..87bac27ec64bfecae52da09b7862d0570c50b284 100644 (file)
@@ -571,6 +571,12 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
        if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
                return;
 
+       if (nvme_rdma_queue_idx(queue) == 0) {
+               nvme_rdma_free_qe(queue->device->dev,
+                       &queue->ctrl->async_event_sqe,
+                       sizeof(struct nvme_command), DMA_TO_DEVICE);
+       }
+
        nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
 }
@@ -739,8 +745,6 @@ out:
 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
                bool remove)
 {
-       nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
-                       sizeof(struct nvme_command), DMA_TO_DEVICE);
        nvme_rdma_stop_queue(&ctrl->queues[0]);
        if (remove) {
                blk_cleanup_queue(ctrl->ctrl.admin_q);
@@ -765,8 +769,10 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 
        if (new) {
                ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
-               if (IS_ERR(ctrl->ctrl.admin_tagset))
+               if (IS_ERR(ctrl->ctrl.admin_tagset)) {
+                       error = PTR_ERR(ctrl->ctrl.admin_tagset);
                        goto out_free_queue;
+               }
 
                ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
                if (IS_ERR(ctrl->ctrl.admin_q)) {
@@ -846,8 +852,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 
        if (new) {
                ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
-               if (IS_ERR(ctrl->ctrl.tagset))
+               if (IS_ERR(ctrl->ctrl.tagset)) {
+                       ret = PTR_ERR(ctrl->ctrl.tagset);
                        goto out_free_io_queues;
+               }
 
                ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
                if (IS_ERR(ctrl->ctrl.connect_q)) {
@@ -942,7 +950,12 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        }
 
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
-       WARN_ON_ONCE(!changed);
+       if (!changed) {
+               /* state change failure is ok if we're in DELETING state */
+               WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
+               return;
+       }
+
        ctrl->ctrl.nr_reconnects = 0;
 
        nvme_start_ctrl(&ctrl->ctrl);
@@ -962,7 +975,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                        struct nvme_rdma_ctrl, err_work);
 
-       nvme_stop_ctrl(&ctrl->ctrl);
+       nvme_stop_keep_alive(&ctrl->ctrl);
 
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
index 7c23eaf8e5639c14b5824fad26bdd4d6642e45b1..645ba7eee35db7a66a0249d39c7adba514173229 100644 (file)
@@ -387,13 +387,22 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
 
 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
 {
+       u32 old_sqhd, new_sqhd;
+       u16 sqhd;
+
        if (status)
                nvmet_set_status(req, status);
 
-       /* XXX: need to fill in something useful for sq_head */
-       req->rsp->sq_head = 0;
-       if (likely(req->sq)) /* may happen during early failure */
-               req->rsp->sq_id = cpu_to_le16(req->sq->qid);
+       if (req->sq->size) {
+               do {
+                       old_sqhd = req->sq->sqhd;
+                       new_sqhd = (old_sqhd + 1) % req->sq->size;
+               } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
+                                       old_sqhd);
+       }
+       sqhd = req->sq->sqhd & 0x0000FFFF;
+       req->rsp->sq_head = cpu_to_le16(sqhd);
+       req->rsp->sq_id = cpu_to_le16(req->sq->qid);
        req->rsp->command_id = req->cmd->common.command_id;
 
        if (req->ns)
@@ -420,6 +429,7 @@ void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
                u16 qid, u16 size)
 {
+       sq->sqhd = 0;
        sq->qid = qid;
        sq->size = size;
 
index 859a66725291d62bbf6f12a06b8486a9c5edbcc2..db3bf6b8bf9ee63581661310aa98820a7341445b 100644 (file)
@@ -109,9 +109,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
                pr_warn("queue already connected!\n");
                return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
        }
+       if (!sqsize) {
+               pr_warn("queue size zero!\n");
+               return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+       }
 
-       nvmet_cq_setup(ctrl, req->cq, qid, sqsize);
-       nvmet_sq_setup(ctrl, req->sq, qid, sqsize);
+       /* note: convert queue size from 0's-based value to 1's-based value */
+       nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
+       nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
        return 0;
 }
 
index 421e43bf1dd78f5d15f1ffb7cf88d36575ec3503..58e010bdda3ea5c155da5c268f542a6939e5c786 100644 (file)
@@ -148,7 +148,7 @@ struct nvmet_fc_tgt_assoc {
        u32                             a_id;
        struct nvmet_fc_tgtport         *tgtport;
        struct list_head                a_list;
-       struct nvmet_fc_tgt_queue       *queues[NVMET_NR_QUEUES];
+       struct nvmet_fc_tgt_queue       *queues[NVMET_NR_QUEUES + 1];
        struct kref                     ref;
 };
 
@@ -608,7 +608,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
        unsigned long flags;
        int ret;
 
-       if (qid >= NVMET_NR_QUEUES)
+       if (qid > NVMET_NR_QUEUES)
                return NULL;
 
        queue = kzalloc((sizeof(*queue) +
@@ -783,6 +783,9 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
        u16 qid = nvmet_fc_getqueueid(connection_id);
        unsigned long flags;
 
+       if (qid > NVMET_NR_QUEUES)
+               return NULL;
+
        spin_lock_irqsave(&tgtport->lock, flags);
        list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
                if (association_id == assoc->association_id) {
@@ -888,7 +891,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
        int i;
 
        spin_lock_irqsave(&tgtport->lock, flags);
-       for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
+       for (i = NVMET_NR_QUEUES; i >= 0; i--) {
                queue = assoc->queues[i];
                if (queue) {
                        if (!nvmet_fc_tgt_q_get(queue))
@@ -1910,8 +1913,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
                        spin_lock_irqsave(&fod->flock, flags);
                        fod->writedataactive = false;
                        spin_unlock_irqrestore(&fod->flock, flags);
-                       nvmet_req_complete(&fod->req,
-                                       NVME_SC_FC_TRANSPORT_ERROR);
+                       nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
                } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
                        fcpreq->fcp_error = ret;
                        fcpreq->transferred_length = 0;
@@ -1929,8 +1931,7 @@ __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
        /* if in the middle of an io and we need to tear down */
        if (abort) {
                if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
-                       nvmet_req_complete(&fod->req,
-                                       NVME_SC_FC_TRANSPORT_ERROR);
+                       nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
                        return true;
                }
 
@@ -1968,8 +1969,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
                        fod->abort = true;
                        spin_unlock(&fod->flock);
 
-                       nvmet_req_complete(&fod->req,
-                                       NVME_SC_FC_TRANSPORT_ERROR);
+                       nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
                        return;
                }
 
@@ -2533,13 +2533,17 @@ nvmet_fc_remove_port(struct nvmet_port *port)
 {
        struct nvmet_fc_tgtport *tgtport = port->priv;
        unsigned long flags;
+       bool matched = false;
 
        spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
        if (tgtport->port == port) {
-               nvmet_fc_tgtport_put(tgtport);
+               matched = true;
                tgtport->port = NULL;
        }
        spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+       if (matched)
+               nvmet_fc_tgtport_put(tgtport);
 }
 
 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
index 1cb9847ec261182dd5fdc6da836d14fa1503ad9b..7b75d9de55ab0d33939bf314a81ee01e26a6d1b1 100644 (file)
@@ -224,8 +224,6 @@ struct fcloop_nport {
        struct fcloop_lport *lport;
        struct list_head nport_list;
        struct kref ref;
-       struct completion rport_unreg_done;
-       struct completion tport_unreg_done;
        u64 node_name;
        u64 port_name;
        u32 port_role;
@@ -576,7 +574,7 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
        tfcp_req->aborted = true;
        spin_unlock(&tfcp_req->reqlock);
 
-       tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED;
+       tfcp_req->status = NVME_SC_INTERNAL;
 
        /*
         * nothing more to do. If io wasn't active, the transport should
@@ -630,6 +628,32 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
        schedule_work(&inireq->iniwork);
 }
 
+static void
+fcloop_nport_free(struct kref *ref)
+{
+       struct fcloop_nport *nport =
+               container_of(ref, struct fcloop_nport, ref);
+       unsigned long flags;
+
+       spin_lock_irqsave(&fcloop_lock, flags);
+       list_del(&nport->nport_list);
+       spin_unlock_irqrestore(&fcloop_lock, flags);
+
+       kfree(nport);
+}
+
+static void
+fcloop_nport_put(struct fcloop_nport *nport)
+{
+       kref_put(&nport->ref, fcloop_nport_free);
+}
+
+static int
+fcloop_nport_get(struct fcloop_nport *nport)
+{
+       return kref_get_unless_zero(&nport->ref);
+}
+
 static void
 fcloop_localport_delete(struct nvme_fc_local_port *localport)
 {
@@ -644,8 +668,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 {
        struct fcloop_rport *rport = remoteport->private;
 
-       /* release any threads waiting for the unreg to complete */
-       complete(&rport->nport->rport_unreg_done);
+       fcloop_nport_put(rport->nport);
 }
 
 static void
@@ -653,8 +676,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
 {
        struct fcloop_tport *tport = targetport->private;
 
-       /* release any threads waiting for the unreg to complete */
-       complete(&tport->nport->tport_unreg_done);
+       fcloop_nport_put(tport->nport);
 }
 
 #define        FCLOOP_HW_QUEUES                4
@@ -722,6 +744,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
                goto out_free_opts;
        }
 
+       memset(&pinfo, 0, sizeof(pinfo));
        pinfo.node_name = opts->wwnn;
        pinfo.port_name = opts->wwpn;
        pinfo.port_role = opts->roles;
@@ -804,32 +827,6 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
        return ret ? ret : count;
 }
 
-static void
-fcloop_nport_free(struct kref *ref)
-{
-       struct fcloop_nport *nport =
-               container_of(ref, struct fcloop_nport, ref);
-       unsigned long flags;
-
-       spin_lock_irqsave(&fcloop_lock, flags);
-       list_del(&nport->nport_list);
-       spin_unlock_irqrestore(&fcloop_lock, flags);
-
-       kfree(nport);
-}
-
-static void
-fcloop_nport_put(struct fcloop_nport *nport)
-{
-       kref_put(&nport->ref, fcloop_nport_free);
-}
-
-static int
-fcloop_nport_get(struct fcloop_nport *nport)
-{
-       return kref_get_unless_zero(&nport->ref);
-}
-
 static struct fcloop_nport *
 fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
 {
@@ -938,6 +935,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
        if (!nport)
                return -EIO;
 
+       memset(&pinfo, 0, sizeof(pinfo));
        pinfo.node_name = nport->node_name;
        pinfo.port_name = nport->port_name;
        pinfo.port_role = nport->port_role;
@@ -979,24 +977,12 @@ __unlink_remote_port(struct fcloop_nport *nport)
 }
 
 static int
-__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
+__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
 {
-       int ret;
-
        if (!rport)
                return -EALREADY;
 
-       init_completion(&nport->rport_unreg_done);
-
-       ret = nvme_fc_unregister_remoteport(rport->remoteport);
-       if (ret)
-               return ret;
-
-       wait_for_completion(&nport->rport_unreg_done);
-
-       fcloop_nport_put(nport);
-
-       return ret;
+       return nvme_fc_unregister_remoteport(rport->remoteport);
 }
 
 static ssize_t
@@ -1029,7 +1015,7 @@ fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
        if (!nport)
                return -ENOENT;
 
-       ret = __wait_remoteport_unreg(nport, rport);
+       ret = __remoteport_unreg(nport, rport);
 
        return ret ? ret : count;
 }
@@ -1086,24 +1072,12 @@ __unlink_target_port(struct fcloop_nport *nport)
 }
 
 static int
-__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
+__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
 {
-       int ret;
-
        if (!tport)
                return -EALREADY;
 
-       init_completion(&nport->tport_unreg_done);
-
-       ret = nvmet_fc_unregister_targetport(tport->targetport);
-       if (ret)
-               return ret;
-
-       wait_for_completion(&nport->tport_unreg_done);
-
-       fcloop_nport_put(nport);
-
-       return ret;
+       return nvmet_fc_unregister_targetport(tport->targetport);
 }
 
 static ssize_t
@@ -1136,7 +1110,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
        if (!nport)
                return -ENOENT;
 
-       ret = __wait_targetport_unreg(nport, tport);
+       ret = __targetport_unreg(nport, tport);
 
        return ret ? ret : count;
 }
@@ -1223,11 +1197,11 @@ static void __exit fcloop_exit(void)
 
                spin_unlock_irqrestore(&fcloop_lock, flags);
 
-               ret = __wait_targetport_unreg(nport, tport);
+               ret = __targetport_unreg(nport, tport);
                if (ret)
                        pr_warn("%s: Failed deleting target port\n", __func__);
 
-               ret = __wait_remoteport_unreg(nport, rport);
+               ret = __remoteport_unreg(nport, rport);
                if (ret)
                        pr_warn("%s: Failed deleting remote port\n", __func__);
 
index 7d261ab894f47b56addb93fe95beee8fda37b8d3..87e429bfcd8a0c918f2aae018c247bb0014d3d0b 100644 (file)
@@ -74,6 +74,7 @@ struct nvmet_sq {
        struct percpu_ref       ref;
        u16                     qid;
        u16                     size;
+       u32                     sqhd;
        struct completion       free_done;
        struct completion       confirm_done;
 };
index de54c7f5048af4005dbfc51c04579813e9635455..d12e5de78e700018dc58965157b21ff2639f34c5 100644 (file)
@@ -135,7 +135,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
 
        /* Stop the user from writing */
        if (pos >= nvmem->size)
-               return 0;
+               return -EFBIG;
 
        if (count < nvmem->word_size)
                return -EINVAL;
@@ -789,6 +789,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
                return ERR_PTR(-EINVAL);
 
        nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
+       of_node_put(nvmem_np);
        if (IS_ERR(nvmem))
                return ERR_CAST(nvmem);
 
index 260d33c0f26c9b290ec2c7f2d03237ebbad7da02..63897531cd75e66b2a62a2e872e74232e6b0b1ac 100644 (file)
@@ -1781,8 +1781,12 @@ bool of_console_check(struct device_node *dn, char *name, int index)
 {
        if (!dn || dn != of_stdout || console_set_on_cmdline)
                return false;
-       return !add_preferred_console(name, index,
-                                     kstrdup(of_stdout_options, GFP_KERNEL));
+
+       /*
+        * XXX: cast `options' to char pointer to suppress complication
+        * warnings: printk, UART and console drivers expect char pointer.
+        */
+       return !add_preferred_console(name, index, (char *)of_stdout_options);
 }
 EXPORT_SYMBOL_GPL(of_console_check);
 
index d94dd8b77abd5140d52c6dfbdff249409225cbec..98258583abb0b40529056767c91401296e0013d4 100644 (file)
@@ -44,7 +44,7 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id)
        return -EINVAL;
 }
 
-static void of_mdiobus_register_phy(struct mii_bus *mdio,
+static int of_mdiobus_register_phy(struct mii_bus *mdio,
                                    struct device_node *child, u32 addr)
 {
        struct phy_device *phy;
@@ -60,9 +60,13 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
        else
                phy = get_phy_device(mdio, addr, is_c45);
        if (IS_ERR(phy))
-               return;
+               return PTR_ERR(phy);
 
-       rc = irq_of_parse_and_map(child, 0);
+       rc = of_irq_get(child, 0);
+       if (rc == -EPROBE_DEFER) {
+               phy_device_free(phy);
+               return rc;
+       }
        if (rc > 0) {
                phy->irq = rc;
                mdio->irq[addr] = rc;
@@ -84,22 +88,23 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
        if (rc) {
                phy_device_free(phy);
                of_node_put(child);
-               return;
+               return rc;
        }
 
        dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
                child->name, addr);
+       return 0;
 }
 
-static void of_mdiobus_register_device(struct mii_bus *mdio,
-                                      struct device_node *child, u32 addr)
+static int of_mdiobus_register_device(struct mii_bus *mdio,
+                                     struct device_node *child, u32 addr)
 {
        struct mdio_device *mdiodev;
        int rc;
 
        mdiodev = mdio_device_create(mdio, addr);
        if (IS_ERR(mdiodev))
-               return;
+               return PTR_ERR(mdiodev);
 
        /* Associate the OF node with the device structure so it
         * can be looked up later.
@@ -112,11 +117,12 @@ static void of_mdiobus_register_device(struct mii_bus *mdio,
        if (rc) {
                mdio_device_free(mdiodev);
                of_node_put(child);
-               return;
+               return rc;
        }
 
        dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
                child->name, addr);
+       return 0;
 }
 
 /* The following is a list of PHY compatible strings which appear in
@@ -219,9 +225,11 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
                }
 
                if (of_mdiobus_child_is_phy(child))
-                       of_mdiobus_register_phy(mdio, child, addr);
+                       rc = of_mdiobus_register_phy(mdio, child, addr);
                else
-                       of_mdiobus_register_device(mdio, child, addr);
+                       rc = of_mdiobus_register_device(mdio, child, addr);
+               if (rc)
+                       goto unregister;
        }
 
        if (!scanphys)
@@ -242,12 +250,19 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
                        dev_info(&mdio->dev, "scan phy %s at address %i\n",
                                 child->name, addr);
 
-                       if (of_mdiobus_child_is_phy(child))
-                               of_mdiobus_register_phy(mdio, child, addr);
+                       if (of_mdiobus_child_is_phy(child)) {
+                               rc = of_mdiobus_register_phy(mdio, child, addr);
+                               if (rc)
+                                       goto unregister;
+                       }
                }
        }
 
        return 0;
+
+unregister:
+       mdiobus_unregister(mdio);
+       return rc;
 }
 EXPORT_SYMBOL(of_mdiobus_register);
 
index d507c3569a88acac9e796f1f172f412e5386347f..32771c2ced7bbc1df6f05a9f252e7a3682bd443c 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/sort.h>
 #include <linux/slab.h>
 
-#define MAX_RESERVED_REGIONS   16
+#define MAX_RESERVED_REGIONS   32
 static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
 static int reserved_mem_count;
 
index fbb72116e9d456b8a08672391de9e2cc144ed171..264c355ba1ffc5f9474dd5a1bcf9696fa9cf7d4b 100644 (file)
@@ -954,7 +954,7 @@ of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode)
        struct device_node *np;
 
        /* Get the parent of the port */
-       np = of_get_next_parent(to_of_node(fwnode));
+       np = of_get_parent(to_of_node(fwnode));
        if (!np)
                return NULL;
 
index 4ddc6e8f9fe7431ae24b7a2068ca2eb8a90e0b0e..f9308c2f22e6754d0b50fc627c11a96cf44af8f4 100644 (file)
@@ -251,9 +251,8 @@ err:
        return ret;
 }
 
-static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test)
+static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
 {
-       u8 irq;
        u8 msi_count;
        struct pci_epf *epf = epf_test->epf;
        struct pci_epc *epc = epf->epc;
@@ -262,7 +261,6 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test)
 
        reg->status |= STATUS_IRQ_RAISED;
        msi_count = pci_epc_get_msi(epc);
-       irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
        if (irq > msi_count || msi_count <= 0)
                pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
        else
@@ -289,6 +287,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
        reg->command = 0;
        reg->status = 0;
 
+       irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
+
        if (command & COMMAND_RAISE_LEGACY_IRQ) {
                reg->status = STATUS_IRQ_RAISED;
                pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
@@ -301,7 +301,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
                        reg->status |= STATUS_WRITE_FAIL;
                else
                        reg->status |= STATUS_WRITE_SUCCESS;
-               pci_epf_test_raise_irq(epf_test);
+               pci_epf_test_raise_irq(epf_test, irq);
                goto reset_handler;
        }
 
@@ -311,7 +311,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
                        reg->status |= STATUS_READ_SUCCESS;
                else
                        reg->status |= STATUS_READ_FAIL;
-               pci_epf_test_raise_irq(epf_test);
+               pci_epf_test_raise_irq(epf_test, irq);
                goto reset_handler;
        }
 
@@ -321,13 +321,12 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
                        reg->status |= STATUS_COPY_SUCCESS;
                else
                        reg->status |= STATUS_COPY_FAIL;
-               pci_epf_test_raise_irq(epf_test);
+               pci_epf_test_raise_irq(epf_test, irq);
                goto reset_handler;
        }
 
        if (command & COMMAND_RAISE_MSI_IRQ) {
                msi_count = pci_epc_get_msi(epc);
-               irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
                if (irq > msi_count || msi_count <= 0)
                        goto reset_handler;
                reg->status = STATUS_IRQ_RAISED;
index 89f4e3d072d7c10e14c3b8332408babf5a30f3d3..26ed0c08f20972e6d1d6ef09b4daf140dc17a193 100644 (file)
@@ -935,6 +935,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
        bridge->sysdata = pcie;
        bridge->busnr = 0;
        bridge->ops = &advk_pcie_ops;
+       bridge->map_irq = of_irq_parse_and_map_pci;
+       bridge->swizzle_irq = pci_common_swizzle;
 
        ret = pci_scan_root_bus_bridge(bridge);
        if (ret < 0) {
index 9c40da54f88a10aab4dde9ba705b187912101909..1987fec1f126f1ecc84c1a0f4c6bc21b2f639c6c 100644 (file)
@@ -233,6 +233,7 @@ struct tegra_msi {
        struct msi_controller chip;
        DECLARE_BITMAP(used, INT_PCI_MSI_NR);
        struct irq_domain *domain;
+       unsigned long pages;
        struct mutex lock;
        u64 phys;
        int irq;
@@ -1529,22 +1530,9 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
                goto err;
        }
 
-       /*
-        * The PCI host bridge on Tegra contains some logic that intercepts
-        * MSI writes, which means that the MSI target address doesn't have
-        * to point to actual physical memory. Rather than allocating one 4
-        * KiB page of system memory that's never used, we can simply pick
-        * an arbitrary address within an area reserved for system memory
-        * in the FPCI address map.
-        *
-        * However, in order to avoid confusion, we pick an address that
-        * doesn't map to physical memory. The FPCI address map reserves a
-        * 1012 GiB region for system memory and memory-mapped I/O. Since
-        * none of the Tegra SoCs that contain this PCI host bridge can
-        * address more than 16 GiB of system memory, the last 4 KiB of
-        * these 1012 GiB is a good candidate.
-        */
-       msi->phys = 0xfcfffff000;
+       /* setup AFI/FPCI range */
+       msi->pages = __get_free_pages(GFP_KERNEL, 0);
+       msi->phys = virt_to_phys((void *)msi->pages);
 
        afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
        afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
@@ -1596,6 +1584,8 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
        afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
        afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
 
+       free_pages(msi->pages, 0);
+
        if (msi->irq > 0)
                free_irq(msi->irq, pcie);
 
index 1eecfa301f7fb2efbfd025f70d39b7d159a42dde..8e075ea2743ef41b4ed08065f502527cbe804c82 100644 (file)
@@ -686,7 +686,7 @@ static ssize_t driver_override_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
-       char *driver_override, *old = pdev->driver_override, *cp;
+       char *driver_override, *old, *cp;
 
        /* We need to keep extra room for a newline */
        if (count >= (PAGE_SIZE - 1))
@@ -700,12 +700,15 @@ static ssize_t driver_override_store(struct device *dev,
        if (cp)
                *cp = '\0';
 
+       device_lock(dev);
+       old = pdev->driver_override;
        if (strlen(driver_override)) {
                pdev->driver_override = driver_override;
        } else {
                kfree(driver_override);
                pdev->driver_override = NULL;
        }
+       device_unlock(dev);
 
        kfree(old);
 
@@ -716,8 +719,12 @@ static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
+       ssize_t len;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+       device_lock(dev);
+       len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+       device_unlock(dev);
+       return len;
 }
 static DEVICE_ATTR_RW(driver_override);
 
index 0a9b78705ee810c9e18c6fa1f46551fc27374287..3303dd8d8eb5718f96de5326949ba89a60e9f32e 100644 (file)
@@ -235,6 +235,7 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
                ret = armpmu_register(pmu);
                if (ret) {
                        pr_warn("Failed to register PMU for CPU%d\n", cpu);
+                       kfree(pmu->name);
                        return ret;
                }
        }
index 73ebad6634a79955340de255fb846b6acc815922..89c887ea5557bcd0b7917b2d772bb7eabe0f91ff 100644 (file)
 #define     MVEBU_COMPHY_CONF6_40B             BIT(18)
 #define MVEBU_COMPHY_SELECTOR                  0x1140
 #define     MVEBU_COMPHY_SELECTOR_PHY(n)       ((n) * 0x4)
+#define MVEBU_COMPHY_PIPE_SELECTOR             0x1144
+#define     MVEBU_COMPHY_PIPE_SELECTOR_PIPE(n) ((n) * 0x4)
 
 #define MVEBU_COMPHY_LANES     6
 #define MVEBU_COMPHY_PORTS     3
@@ -468,13 +470,17 @@ static int mvebu_comphy_power_on(struct phy *phy)
 {
        struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
        struct mvebu_comphy_priv *priv = lane->priv;
-       int ret;
-       u32 mux, val;
+       int ret, mux;
+       u32 val;
 
        mux = mvebu_comphy_get_mux(lane->id, lane->port, lane->mode);
        if (mux < 0)
                return -ENOTSUPP;
 
+       regmap_read(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, &val);
+       val &= ~(0xf << MVEBU_COMPHY_PIPE_SELECTOR_PIPE(lane->id));
+       regmap_write(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, val);
+
        regmap_read(priv->regmap, MVEBU_COMPHY_SELECTOR, &val);
        val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id));
        val |= mux << MVEBU_COMPHY_SELECTOR_PHY(lane->id);
@@ -526,6 +532,10 @@ static int mvebu_comphy_power_off(struct phy *phy)
        val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id));
        regmap_write(priv->regmap, MVEBU_COMPHY_SELECTOR, val);
 
+       regmap_read(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, &val);
+       val &= ~(0xf << MVEBU_COMPHY_PIPE_SELECTOR_PIPE(lane->id));
+       regmap_write(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, val);
+
        return 0;
 }
 
@@ -576,8 +586,8 @@ static int mvebu_comphy_probe(struct platform_device *pdev)
                return PTR_ERR(priv->regmap);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(&pdev->dev, res);
-       if (!priv->base)
-               return -ENOMEM;
+       if (IS_ERR(priv->base))
+               return PTR_ERR(priv->base);
 
        for_each_available_child_of_node(pdev->dev.of_node, child) {
                struct mvebu_comphy_lane *lane;
index e3baad78521fbde8bf9d4e2fa657e7bab5c5a7dc..721a2a1c97ef43299c3e143744297764943b4b92 100644 (file)
@@ -27,6 +27,7 @@
 /* banks shared by multiple phys */
 #define SSUSB_SIFSLV_V1_SPLLC          0x000   /* shared by u3 phys */
 #define SSUSB_SIFSLV_V1_U2FREQ         0x100   /* shared by u2 phys */
+#define SSUSB_SIFSLV_V1_CHIP           0x300   /* shared by u3 phys */
 /* u2 phy bank */
 #define SSUSB_SIFSLV_V1_U2PHY_COM      0x000
 /* u3/pcie/sata phy banks */
@@ -762,7 +763,7 @@ static void phy_v1_banks_init(struct mtk_tphy *tphy,
        case PHY_TYPE_USB3:
        case PHY_TYPE_PCIE:
                u3_banks->spllc = tphy->sif_base + SSUSB_SIFSLV_V1_SPLLC;
-               u3_banks->chip = NULL;
+               u3_banks->chip = tphy->sif_base + SSUSB_SIFSLV_V1_CHIP;
                u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V1_U3PHYD;
                u3_banks->phya = instance->port_base + SSUSB_SIFSLV_V1_U3PHYA;
                break;
index 4d2c57f21d7682bb6b08f31a1dab0ba74ab0e4e2..a958c9bced019d2da7520977a26f5c10a81cdea4 100644 (file)
@@ -443,14 +443,34 @@ static inline int property_enable(struct rockchip_typec_phy *tcphy,
        return regmap_write(tcphy->grf_regs, reg->offset, val | mask);
 }
 
+static void tcphy_dp_aux_set_flip(struct rockchip_typec_phy *tcphy)
+{
+       u16 tx_ana_ctrl_reg_1;
+
+       /*
+        * Select the polarity of the xcvr:
+        * 1, Reverses the polarity (If TYPEC, Pulls ups aux_p and pull
+        * down aux_m)
+        * 0, Normal polarity (if TYPEC, pulls up aux_m and pulls down
+        * aux_p)
+        */
+       tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
+       if (!tcphy->flip)
+               tx_ana_ctrl_reg_1 |= BIT(12);
+       else
+               tx_ana_ctrl_reg_1 &= ~BIT(12);
+       writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+}
+
 static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
 {
+       u16 tx_ana_ctrl_reg_1;
        u16 rdata, rdata2, val;
 
        /* disable txda_cal_latch_en for rewrite the calibration values */
-       rdata = readl(tcphy->base + TX_ANA_CTRL_REG_1);
-       val = rdata & 0xdfff;
-       writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+       tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
+       tx_ana_ctrl_reg_1 &= ~BIT(13);
+       writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
 
        /*
         * read a resistor calibration code from CMN_TXPUCAL_CTRL[6:0] and
@@ -472,9 +492,8 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
         * Activate this signal for 1 clock cycle to sample new calibration
         * values.
         */
-       rdata = readl(tcphy->base + TX_ANA_CTRL_REG_1);
-       val = rdata | 0x2000;
-       writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+       tx_ana_ctrl_reg_1 |= BIT(13);
+       writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
        usleep_range(150, 200);
 
        /* set TX Voltage Level and TX Deemphasis to 0 */
@@ -482,8 +501,10 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
        /* re-enable decap */
        writel(0x100, tcphy->base + TX_ANA_CTRL_REG_2);
        writel(0x300, tcphy->base + TX_ANA_CTRL_REG_2);
-       writel(0x2008, tcphy->base + TX_ANA_CTRL_REG_1);
-       writel(0x2018, tcphy->base + TX_ANA_CTRL_REG_1);
+       tx_ana_ctrl_reg_1 |= BIT(3);
+       writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+       tx_ana_ctrl_reg_1 |= BIT(4);
+       writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
 
        writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
 
@@ -494,8 +515,10 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
        writel(0x1001, tcphy->base + TX_ANA_CTRL_REG_4);
 
        /* re-enables Bandgap reference for LDO */
-       writel(0x2098, tcphy->base + TX_ANA_CTRL_REG_1);
-       writel(0x2198, tcphy->base + TX_ANA_CTRL_REG_1);
+       tx_ana_ctrl_reg_1 |= BIT(7);
+       writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+       tx_ana_ctrl_reg_1 |= BIT(8);
+       writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
 
        /*
         * re-enables the transmitter pre-driver, driver data selection MUX,
@@ -505,27 +528,26 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
        writel(0x303, tcphy->base + TX_ANA_CTRL_REG_2);
 
        /*
-        * BIT 12: Controls auxda_polarity, which selects the polarity of the
-        * xcvr:
-        * 1, Reverses the polarity (If TYPEC, Pulls ups aux_p and pull
-        * down aux_m)
-        * 0, Normal polarity (if TYPE_C, pulls up aux_m and pulls down
-        * aux_p)
+        * Do some magic undocumented stuff, some of which appears to
+        * undo the "re-enables Bandgap reference for LDO" above.
         */
-       val = 0xa078;
-       if (!tcphy->flip)
-               val |= BIT(12);
-       writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+       tx_ana_ctrl_reg_1 |=  BIT(15);
+       tx_ana_ctrl_reg_1 &= ~BIT(8);
+       tx_ana_ctrl_reg_1 &= ~BIT(7);
+       tx_ana_ctrl_reg_1 |=  BIT(6);
+       tx_ana_ctrl_reg_1 |=  BIT(5);
+       writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
 
        writel(0, tcphy->base + TX_ANA_CTRL_REG_3);
        writel(0, tcphy->base + TX_ANA_CTRL_REG_4);
        writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
 
        /*
-        * Controls low_power_swing_en, set the voltage swing of the driver
-        * to 400mv. The values below are peak to peak (differential) values.
+        * Controls low_power_swing_en, don't set the voltage swing of the
+        * driver to 400mv. The values below are peak to peak (differential)
+        * values.
         */
-       writel(4, tcphy->base + TXDA_COEFF_CALC_CTRL);
+       writel(0, tcphy->base + TXDA_COEFF_CALC_CTRL);
        writel(0, tcphy->base + TXDA_CYA_AUXDA_CYA);
 
        /* Controls tx_high_z_tm_en */
@@ -555,6 +577,7 @@ static int tcphy_phy_init(struct rockchip_typec_phy *tcphy, u8 mode)
        reset_control_deassert(tcphy->tcphy_rst);
 
        property_enable(tcphy, &cfg->typec_conn_dir, tcphy->flip);
+       tcphy_dp_aux_set_flip(tcphy);
 
        tcphy_cfg_24m(tcphy);
 
@@ -685,8 +708,11 @@ static int rockchip_usb3_phy_power_on(struct phy *phy)
        if (tcphy->mode == new_mode)
                goto unlock_ret;
 
-       if (tcphy->mode == MODE_DISCONNECT)
-               tcphy_phy_init(tcphy, new_mode);
+       if (tcphy->mode == MODE_DISCONNECT) {
+               ret = tcphy_phy_init(tcphy, new_mode);
+               if (ret)
+                       goto unlock_ret;
+       }
 
        /* wait TCPHY for pipe ready */
        for (timeout = 0; timeout < 100; timeout++) {
@@ -760,10 +786,12 @@ static int rockchip_dp_phy_power_on(struct phy *phy)
         */
        if (new_mode == MODE_DFP_DP && tcphy->mode != MODE_DISCONNECT) {
                tcphy_phy_deinit(tcphy);
-               tcphy_phy_init(tcphy, new_mode);
+               ret = tcphy_phy_init(tcphy, new_mode);
        } else if (tcphy->mode == MODE_DISCONNECT) {
-               tcphy_phy_init(tcphy, new_mode);
+               ret = tcphy_phy_init(tcphy, new_mode);
        }
+       if (ret)
+               goto unlock_ret;
 
        ret = readx_poll_timeout(readl, tcphy->base + DP_MODE_CTL,
                                 val, val & DP_MODE_A2, 1000,
index 3cbcb2537657623444c65ab59aa6300b7b66526f..4307bf0013e186cd859b779aaaf4c61ddd662a6c 100644 (file)
@@ -454,6 +454,8 @@ tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
                char *name;
 
                name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
+               if (!name)
+                       return ERR_PTR(-ENOMEM);
                np = of_find_node_by_name(np, name);
                kfree(name);
        }
index 1778cf4f81c7853638b46139204d3ee71e3b396d..82cd8b08d71f5247c0595d1ba2be199e4fe6ecec 100644 (file)
@@ -100,6 +100,7 @@ config PINCTRL_AMD
        tristate "AMD GPIO pin control"
        depends on GPIOLIB
        select GPIOLIB_IRQCHIP
+       select PINMUX
        select PINCONF
        select GENERIC_PINCONF
        help
index 0944310225db92a8ee2c5011c7fb394044ccfd22..ff782445dfb75d1a8af9eed775b8e8f7b275fee3 100644 (file)
@@ -373,16 +373,12 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
        unsigned long events;
        unsigned offset;
        unsigned gpio;
-       unsigned int type;
 
        events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4);
        events &= mask;
        events &= pc->enabled_irq_map[bank];
        for_each_set_bit(offset, &events, 32) {
                gpio = (32 * bank) + offset;
-               /* FIXME: no clue why the code looks up the type here */
-               type = pc->irq_type[gpio];
-
                generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain,
                                                     gpio));
        }
index 04e929fd0ffee494cc744cf495e5acd9e437ea6b..fadbca907c7c7ea552ffb2b401a6fc154a4d1a2d 100644 (file)
@@ -1577,6 +1577,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
        struct gpio_chip *chip = &pctrl->chip;
        bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
        int ret, i, offset;
+       int irq_base;
 
        *chip = chv_gpio_chip;
 
@@ -1622,7 +1623,18 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
        /* Clear all interrupts */
        chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
 
-       ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
+       if (!need_valid_mask) {
+               irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
+                                               chip->ngpio, NUMA_NO_NODE);
+               if (irq_base < 0) {
+                       dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
+                       return irq_base;
+               }
+       } else {
+               irq_base = 0;
+       }
+
+       ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base,
                                   handle_bad_irq, IRQ_TYPE_NONE);
        if (ret) {
                dev_err(pctrl->dev, "failed to add IRQ chip\n");
index 3f6b34febbf11249cf2a30e400647038f4a66f33..433af328d9817a028f4bccce07492fbc6ac27a68 100644 (file)
@@ -534,8 +534,16 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
                                continue;
                        irq = irq_find_mapping(gc->irqdomain, irqnr + i);
                        generic_handle_irq(irq);
-                       /* Clear interrupt */
+
+                       /* Clear interrupt.
+                        * We must read the pin register again, in case the
+                        * value was changed while executing
+                        * generic_handle_irq() above.
+                        */
+                       raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+                       regval = readl(regs + i);
                        writel(regval, regs + i);
+                       raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
                        ret = IRQ_HANDLED;
                }
        }
index 3e40d4245512e286166ac1b838567f6c55a881a0..9c950bbf07bab62cf72dcb74cbed6fa20b04598b 100644 (file)
@@ -407,10 +407,10 @@ static int mcp23s08_get(struct gpio_chip *chip, unsigned offset)
        ret = mcp_read(mcp, MCP_GPIO, &status);
        if (ret < 0)
                status = 0;
-       else
+       else {
+               mcp->cached_gpio = status;
                status = !!(status & (1 << offset));
-
-       mcp->cached_gpio = status;
+       }
 
        mutex_unlock(&mcp->lock);
        return status;
index 85de30f93a9cc45c1fe0d634c7f687947e1a39bb..56a8195096a229c6975d3f78746ecbc4c6169660 100644 (file)
@@ -254,10 +254,12 @@ static int bl_update_status(struct backlight_device *b)
 {
        struct acpi_device *device = bl_get_data(b);
 
-       if (b->props.power == FB_BLANK_POWERDOWN)
-               call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
-       else
-               call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
+       if (fext) {
+               if (b->props.power == FB_BLANK_POWERDOWN)
+                       call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
+               else
+                       call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
+       }
 
        return set_lcd_level(device, b->props.brightness);
 }
index bb792a52248b63d1c16a4fefc67a32de0e58101b..e03fa31446ca83b107c220e438db53db38c44e80 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/suspend.h>
 #include <linux/acpi.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/spinlock.h>
 
 #include <asm/intel_pmc_ipc.h>
 
@@ -131,6 +132,7 @@ static struct intel_pmc_ipc_dev {
        /* gcr */
        void __iomem *gcr_mem_base;
        bool has_gcr_regs;
+       spinlock_t gcr_lock;
 
        /* punit */
        struct platform_device *punit_dev;
@@ -225,17 +227,17 @@ int intel_pmc_gcr_read(u32 offset, u32 *data)
 {
        int ret;
 
-       mutex_lock(&ipclock);
+       spin_lock(&ipcdev.gcr_lock);
 
        ret = is_gcr_valid(offset);
        if (ret < 0) {
-               mutex_unlock(&ipclock);
+               spin_unlock(&ipcdev.gcr_lock);
                return ret;
        }
 
        *data = readl(ipcdev.gcr_mem_base + offset);
 
-       mutex_unlock(&ipclock);
+       spin_unlock(&ipcdev.gcr_lock);
 
        return 0;
 }
@@ -255,17 +257,17 @@ int intel_pmc_gcr_write(u32 offset, u32 data)
 {
        int ret;
 
-       mutex_lock(&ipclock);
+       spin_lock(&ipcdev.gcr_lock);
 
        ret = is_gcr_valid(offset);
        if (ret < 0) {
-               mutex_unlock(&ipclock);
+               spin_unlock(&ipcdev.gcr_lock);
                return ret;
        }
 
        writel(data, ipcdev.gcr_mem_base + offset);
 
-       mutex_unlock(&ipclock);
+       spin_unlock(&ipcdev.gcr_lock);
 
        return 0;
 }
@@ -287,7 +289,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
        u32 new_val;
        int ret = 0;
 
-       mutex_lock(&ipclock);
+       spin_lock(&ipcdev.gcr_lock);
 
        ret = is_gcr_valid(offset);
        if (ret < 0)
@@ -309,7 +311,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
        }
 
 gcr_ipc_unlock:
-       mutex_unlock(&ipclock);
+       spin_unlock(&ipcdev.gcr_lock);
        return ret;
 }
 EXPORT_SYMBOL_GPL(intel_pmc_gcr_update);
@@ -480,52 +482,41 @@ static irqreturn_t ioc(int irq, void *dev_id)
 
 static int ipc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-       resource_size_t pci_resource;
+       struct intel_pmc_ipc_dev *pmc = &ipcdev;
        int ret;
-       int len;
 
-       ipcdev.dev = &pci_dev_get(pdev)->dev;
-       ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
+       /* Only one PMC is supported */
+       if (pmc->dev)
+               return -EBUSY;
 
-       ret = pci_enable_device(pdev);
+       pmc->irq_mode = IPC_TRIGGER_MODE_IRQ;
+
+       spin_lock_init(&ipcdev.gcr_lock);
+
+       ret = pcim_enable_device(pdev);
        if (ret)
                return ret;
 
-       ret = pci_request_regions(pdev, "intel_pmc_ipc");
+       ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
        if (ret)
                return ret;
 
-       pci_resource = pci_resource_start(pdev, 0);
-       len = pci_resource_len(pdev, 0);
-       if (!pci_resource || !len) {
-               dev_err(&pdev->dev, "Failed to get resource\n");
-               return -ENOMEM;
-       }
+       init_completion(&pmc->cmd_complete);
 
-       init_completion(&ipcdev.cmd_complete);
+       pmc->ipc_base = pcim_iomap_table(pdev)[0];
 
-       if (request_irq(pdev->irq, ioc, 0, "intel_pmc_ipc", &ipcdev)) {
+       ret = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_pmc_ipc",
+                               pmc);
+       if (ret) {
                dev_err(&pdev->dev, "Failed to request irq\n");
-               return -EBUSY;
+               return ret;
        }
 
-       ipcdev.ipc_base = ioremap_nocache(pci_resource, len);
-       if (!ipcdev.ipc_base) {
-               dev_err(&pdev->dev, "Failed to ioremap ipc base\n");
-               free_irq(pdev->irq, &ipcdev);
-               ret = -ENOMEM;
-       }
+       pmc->dev = &pdev->dev;
 
-       return ret;
-}
+       pci_set_drvdata(pdev, pmc);
 
-static void ipc_pci_remove(struct pci_dev *pdev)
-{
-       free_irq(pdev->irq, &ipcdev);
-       pci_release_regions(pdev);
-       pci_dev_put(pdev);
-       iounmap(ipcdev.ipc_base);
-       ipcdev.dev = NULL;
+       return 0;
 }
 
 static const struct pci_device_id ipc_pci_ids[] = {
@@ -540,7 +531,6 @@ static struct pci_driver ipc_pci_driver = {
        .name = "intel_pmc_ipc",
        .id_table = ipc_pci_ids,
        .probe = ipc_pci_probe,
-       .remove = ipc_pci_remove,
 };
 
 static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev,
@@ -850,17 +840,12 @@ static int ipc_plat_get_res(struct platform_device *pdev)
                return -ENXIO;
        }
        size = PLAT_RESOURCE_IPC_SIZE + PLAT_RESOURCE_GCR_SIZE;
+       res->end = res->start + size - 1;
+
+       addr = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(addr))
+               return PTR_ERR(addr);
 
-       if (!request_mem_region(res->start, size, pdev->name)) {
-               dev_err(&pdev->dev, "Failed to request ipc resource\n");
-               return -EBUSY;
-       }
-       addr = ioremap_nocache(res->start, size);
-       if (!addr) {
-               dev_err(&pdev->dev, "I/O memory remapping failed\n");
-               release_mem_region(res->start, size);
-               return -ENOMEM;
-       }
        ipcdev.ipc_base = addr;
 
        ipcdev.gcr_mem_base = addr + PLAT_RESOURCE_GCR_OFFSET;
@@ -917,12 +902,12 @@ MODULE_DEVICE_TABLE(acpi, ipc_acpi_ids);
 
 static int ipc_plat_probe(struct platform_device *pdev)
 {
-       struct resource *res;
        int ret;
 
        ipcdev.dev = &pdev->dev;
        ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
        init_completion(&ipcdev.cmd_complete);
+       spin_lock_init(&ipcdev.gcr_lock);
 
        ipcdev.irq = platform_get_irq(pdev, 0);
        if (ipcdev.irq < 0) {
@@ -939,11 +924,11 @@ static int ipc_plat_probe(struct platform_device *pdev)
        ret = ipc_create_pmc_devices();
        if (ret) {
                dev_err(&pdev->dev, "Failed to create pmc devices\n");
-               goto err_device;
+               return ret;
        }
 
-       if (request_irq(ipcdev.irq, ioc, IRQF_NO_SUSPEND,
-                       "intel_pmc_ipc", &ipcdev)) {
+       if (devm_request_irq(&pdev->dev, ipcdev.irq, ioc, IRQF_NO_SUSPEND,
+                            "intel_pmc_ipc", &ipcdev)) {
                dev_err(&pdev->dev, "Failed to request irq\n");
                ret = -EBUSY;
                goto err_irq;
@@ -960,40 +945,22 @@ static int ipc_plat_probe(struct platform_device *pdev)
 
        return 0;
 err_sys:
-       free_irq(ipcdev.irq, &ipcdev);
+       devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
 err_irq:
        platform_device_unregister(ipcdev.tco_dev);
        platform_device_unregister(ipcdev.punit_dev);
        platform_device_unregister(ipcdev.telemetry_dev);
-err_device:
-       iounmap(ipcdev.ipc_base);
-       res = platform_get_resource(pdev, IORESOURCE_MEM,
-                                   PLAT_RESOURCE_IPC_INDEX);
-       if (res) {
-               release_mem_region(res->start,
-                                  PLAT_RESOURCE_IPC_SIZE +
-                                  PLAT_RESOURCE_GCR_SIZE);
-       }
+
        return ret;
 }
 
 static int ipc_plat_remove(struct platform_device *pdev)
 {
-       struct resource *res;
-
        sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group);
-       free_irq(ipcdev.irq, &ipcdev);
+       devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
        platform_device_unregister(ipcdev.tco_dev);
        platform_device_unregister(ipcdev.punit_dev);
        platform_device_unregister(ipcdev.telemetry_dev);
-       iounmap(ipcdev.ipc_base);
-       res = platform_get_resource(pdev, IORESOURCE_MEM,
-                                   PLAT_RESOURCE_IPC_INDEX);
-       if (res) {
-               release_mem_region(res->start,
-                                  PLAT_RESOURCE_IPC_SIZE +
-                                  PLAT_RESOURCE_GCR_SIZE);
-       }
        ipcdev.dev = NULL;
        return 0;
 }
index 315a4be8dc1e64f429fb6bd5bab9700a0d254f29..9a68914100ad6b7e12c1464779b433d337cca7ee 100644 (file)
@@ -51,6 +51,8 @@ module_param(mbox_sel, byte, S_IRUGO);
 MODULE_PARM_DESC(mbox_sel,
                 "RIO Messaging MBOX Selection Mask (default: 0x0f = all)");
 
+static DEFINE_SPINLOCK(tsi721_maint_lock);
+
 static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
 static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
 
@@ -124,12 +126,15 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
        void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
        struct tsi721_dma_desc *bd_ptr;
        u32 rd_count, swr_ptr, ch_stat;
+       unsigned long flags;
        int i, err = 0;
        u32 op = do_wr ? MAINT_WR : MAINT_RD;
 
        if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
                return -EINVAL;
 
+       spin_lock_irqsave(&tsi721_maint_lock, flags);
+
        bd_ptr = priv->mdma.bd_base;
 
        rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
@@ -197,7 +202,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
         */
        swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
        iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
+
 err_out:
+       spin_unlock_irqrestore(&tsi721_maint_lock, flags);
 
        return err;
 }
index a3824baca2e51c920958731a9daf4ade575c1397..3ee9af83b63849d45d1412ef6247ed834c0d840e 100644 (file)
 #include <linux/rio.h>
 #include <linux/module.h>
 
-/*
- * These interrupt-safe spinlocks protect all accesses to RIO
- * configuration space and doorbell access.
- */
-static DEFINE_SPINLOCK(rio_config_lock);
-static DEFINE_SPINLOCK(rio_doorbell_lock);
-
 /*
  *  Wrappers for all RIO configuration access functions.  They just check
- *  alignment, do locking and call the low-level functions pointed to
- *  by rio_mport->ops.
+ *  alignment and call the low-level functions pointed to by rio_mport->ops.
  */
 
 #define RIO_8_BAD 0
@@ -44,13 +36,10 @@ int __rio_local_read_config_##size \
        (struct rio_mport *mport, u32 offset, type *value)              \
 {                                                                      \
        int res;                                                        \
-       unsigned long flags;                                            \
        u32 data = 0;                                                   \
        if (RIO_##size##_BAD) return RIO_BAD_SIZE;                      \
-       spin_lock_irqsave(&rio_config_lock, flags);                     \
        res = mport->ops->lcread(mport, mport->id, offset, len, &data); \
        *value = (type)data;                                            \
-       spin_unlock_irqrestore(&rio_config_lock, flags);                \
        return res;                                                     \
 }
 
@@ -67,13 +56,8 @@ int __rio_local_read_config_##size \
 int __rio_local_write_config_##size \
        (struct rio_mport *mport, u32 offset, type value)               \
 {                                                                      \
-       int res;                                                        \
-       unsigned long flags;                                            \
        if (RIO_##size##_BAD) return RIO_BAD_SIZE;                      \
-       spin_lock_irqsave(&rio_config_lock, flags);                     \
-       res = mport->ops->lcwrite(mport, mport->id, offset, len, value);\
-       spin_unlock_irqrestore(&rio_config_lock, flags);                \
-       return res;                                                     \
+       return mport->ops->lcwrite(mport, mport->id, offset, len, value);\
 }
 
 RIO_LOP_READ(8, u8, 1)
@@ -104,13 +88,10 @@ int rio_mport_read_config_##size \
        (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value)     \
 {                                                                      \
        int res;                                                        \
-       unsigned long flags;                                            \
        u32 data = 0;                                                   \
        if (RIO_##size##_BAD) return RIO_BAD_SIZE;                      \
-       spin_lock_irqsave(&rio_config_lock, flags);                     \
        res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \
        *value = (type)data;                                            \
-       spin_unlock_irqrestore(&rio_config_lock, flags);                \
        return res;                                                     \
 }
 
@@ -127,13 +108,9 @@ int rio_mport_read_config_##size \
 int rio_mport_write_config_##size \
        (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value)      \
 {                                                                      \
-       int res;                                                        \
-       unsigned long flags;                                            \
        if (RIO_##size##_BAD) return RIO_BAD_SIZE;                      \
-       spin_lock_irqsave(&rio_config_lock, flags);                     \
-       res = mport->ops->cwrite(mport, mport->id, destid, hopcount, offset, len, value); \
-       spin_unlock_irqrestore(&rio_config_lock, flags);                \
-       return res;                                                     \
+       return mport->ops->cwrite(mport, mport->id, destid, hopcount,   \
+                       offset, len, value);                            \
 }
 
 RIO_OP_READ(8, u8, 1)
@@ -162,14 +139,7 @@ EXPORT_SYMBOL_GPL(rio_mport_write_config_32);
  */
 int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data)
 {
-       int res;
-       unsigned long flags;
-
-       spin_lock_irqsave(&rio_doorbell_lock, flags);
-       res = mport->ops->dsend(mport, mport->id, destid, data);
-       spin_unlock_irqrestore(&rio_doorbell_lock, flags);
-
-       return res;
+       return mport->ops->dsend(mport, mport->id, destid, data);
 }
 
 EXPORT_SYMBOL_GPL(rio_mport_send_doorbell);
index d0e5d6ee882c4e041d8c3db4e41392d9bc6d90ab..e2c1988cd7c03d0b9cdddc544c736c2c7298928b 100644 (file)
@@ -523,7 +523,7 @@ int __init parse_cec_param(char *str)
        if (*str == '=')
                str++;
 
-       if (!strncmp(str, "cec_disable", 7))
+       if (!strcmp(str, "cec_disable"))
                ce_arr.disabled = 1;
        else
                return 0;
index f18b36dd57dd44a2f1d94302f6e1492ea6e54f88..376a99b7cf5da150794cbb3b2f1c72078985166c 100644 (file)
@@ -590,7 +590,7 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
                case AXP803_DCDC3:
                        return !!(reg & BIT(6));
                case AXP803_DCDC6:
-                       return !!(reg & BIT(7));
+                       return !!(reg & BIT(5));
                }
                break;
 
index ef2be56460fe2779765311473944c48e9bb9ba85..790a4a73ea2c8f94c3fb80c8567827d0e8b0860b 100644 (file)
@@ -29,7 +29,7 @@ static const struct regulator_ops rn5t618_reg_ops = {
 };
 
 #define REG(rid, ereg, emask, vreg, vmask, min, max, step)             \
-       [RN5T618_##rid] = {                                             \
+       {                                                               \
                .name           = #rid,                                 \
                .of_match       = of_match_ptr(#rid),                   \
                .regulators_node = of_match_ptr("regulators"),          \
index df63e44526ac42a814ee85c3286c9d1a9b45be48..bf04479456a050abb56290a71729a76f49a638b6 100644 (file)
@@ -109,6 +109,7 @@ config QCOM_Q6V5_PIL
        depends on OF && ARCH_QCOM
        depends on QCOM_SMEM
        depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+       depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
        select MFD_SYSCON
        select QCOM_RPROC_COMMON
        select QCOM_SCM
@@ -120,6 +121,7 @@ config QCOM_WCNSS_PIL
        tristate "Qualcomm WCNSS Peripheral Image Loader"
        depends on OF && ARCH_QCOM
        depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+       depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
        depends on QCOM_SMEM
        select QCOM_MDT_LOADER
        select QCOM_RPROC_COMMON
index 612d914033414e3b17dea2b8bac97584cb280f1c..633268e9d550de7001999052f2692239b4754f6b 100644 (file)
@@ -264,15 +264,14 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
                if (!(att->flags & ATT_OWN))
                        continue;
 
-               if (b > IMX7D_RPROC_MEM_MAX)
+               if (b >= IMX7D_RPROC_MEM_MAX)
                        break;
 
                priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
                                                     att->sa, att->size);
-               if (IS_ERR(priv->mem[b].cpu_addr)) {
+               if (!priv->mem[b].cpu_addr) {
                        dev_err(dev, "devm_ioremap_resource failed\n");
-                       err = PTR_ERR(priv->mem[b].cpu_addr);
-                       return err;
+                       return -ENOMEM;
                }
                priv->mem[b].sys_addr = att->sa;
                priv->mem[b].size = att->size;
@@ -296,7 +295,7 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
                        return err;
                }
 
-               if (b > IMX7D_RPROC_MEM_MAX)
+               if (b >= IMX7D_RPROC_MEM_MAX)
                        break;
 
                priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res);
index e0c393214264adad04a08fb6e48fc34266144e54..e2baecbb9dd3a08c71e28a72b45bb504a7b5a6e1 100644 (file)
@@ -34,11 +34,12 @@ config RESET_BERLIN
        help
          This enables the reset controller driver for Marvell Berlin SoCs.
 
-config RESET_HSDK_V1
-       bool "HSDK v1 Reset Driver"
-       default n
+config RESET_HSDK
+       bool "Synopsys HSDK Reset Driver"
+       depends on HAS_IOMEM
+       depends on ARC_SOC_HSDK || COMPILE_TEST
        help
-         This enables the reset controller driver for HSDK v1.
+         This enables the reset controller driver for HSDK board.
 
 config RESET_IMX7
        bool "i.MX7 Reset Driver" if COMPILE_TEST
index d368367110e55780c5154995011cd193c709ab49..af1c15c330b3069f5836b6f2522a46af85de8898 100644 (file)
@@ -5,7 +5,7 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/
 obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
 obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
 obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
-obj-$(CONFIG_RESET_HSDK_V1) += reset-hsdk-v1.o
+obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
 obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
 obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
 obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
similarity index 72%
rename from drivers/reset/reset-hsdk-v1.c
rename to drivers/reset/reset-hsdk.c
index bca13e4bf6223b799774b57545000f6c989ba097..8bce391c6943ba55edcb6292088e3d3cfc9d0ae5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2017 Synopsys.
  *
- * Synopsys HSDKv1 SDP reset driver.
+ * Synopsys HSDK Development platform reset driver.
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -18,9 +18,9 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 
-#define to_hsdkv1_rst(p)       container_of((p), struct hsdkv1_rst, rcdev)
+#define to_hsdk_rst(p) container_of((p), struct hsdk_rst, rcdev)
 
-struct hsdkv1_rst {
+struct hsdk_rst {
        void __iomem                    *regs_ctl;
        void __iomem                    *regs_rst;
        spinlock_t                      lock;
@@ -49,12 +49,12 @@ static const u32 rst_map[] = {
 #define CGU_IP_SW_RESET_RESET          BIT(0)
 #define SW_RESET_TIMEOUT               10000
 
-static void hsdkv1_reset_config(struct hsdkv1_rst *rst, unsigned long id)
+static void hsdk_reset_config(struct hsdk_rst *rst, unsigned long id)
 {
        writel(rst_map[id], rst->regs_ctl + CGU_SYS_RST_CTRL);
 }
 
-static int hsdkv1_reset_do(struct hsdkv1_rst *rst)
+static int hsdk_reset_do(struct hsdk_rst *rst)
 {
        u32 reg;
 
@@ -69,28 +69,28 @@ static int hsdkv1_reset_do(struct hsdkv1_rst *rst)
                !(reg & CGU_IP_SW_RESET_RESET), 5, SW_RESET_TIMEOUT);
 }
 
-static int hsdkv1_reset_reset(struct reset_controller_dev *rcdev,
+static int hsdk_reset_reset(struct reset_controller_dev *rcdev,
                              unsigned long id)
 {
-       struct hsdkv1_rst *rst = to_hsdkv1_rst(rcdev);
+       struct hsdk_rst *rst = to_hsdk_rst(rcdev);
        unsigned long flags;
        int ret;
 
        spin_lock_irqsave(&rst->lock, flags);
-       hsdkv1_reset_config(rst, id);
-       ret = hsdkv1_reset_do(rst);
+       hsdk_reset_config(rst, id);
+       ret = hsdk_reset_do(rst);
        spin_unlock_irqrestore(&rst->lock, flags);
 
        return ret;
 }
 
-static const struct reset_control_ops hsdkv1_reset_ops = {
-       .reset  = hsdkv1_reset_reset,
+static const struct reset_control_ops hsdk_reset_ops = {
+       .reset  = hsdk_reset_reset,
 };
 
-static int hsdkv1_reset_probe(struct platform_device *pdev)
+static int hsdk_reset_probe(struct platform_device *pdev)
 {
-       struct hsdkv1_rst *rst;
+       struct hsdk_rst *rst;
        struct resource *mem;
 
        rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL);
@@ -110,7 +110,7 @@ static int hsdkv1_reset_probe(struct platform_device *pdev)
        spin_lock_init(&rst->lock);
 
        rst->rcdev.owner = THIS_MODULE;
-       rst->rcdev.ops = &hsdkv1_reset_ops;
+       rst->rcdev.ops = &hsdk_reset_ops;
        rst->rcdev.of_node = pdev->dev.of_node;
        rst->rcdev.nr_resets = HSDK_MAX_RESETS;
        rst->rcdev.of_reset_n_cells = 1;
@@ -118,20 +118,20 @@ static int hsdkv1_reset_probe(struct platform_device *pdev)
        return reset_controller_register(&rst->rcdev);
 }
 
-static const struct of_device_id hsdkv1_reset_dt_match[] = {
-       { .compatible = "snps,hsdk-v1.0-reset" },
+static const struct of_device_id hsdk_reset_dt_match[] = {
+       { .compatible = "snps,hsdk-reset" },
        { },
 };
 
-static struct platform_driver hsdkv1_reset_driver = {
-       .probe  = hsdkv1_reset_probe,
+static struct platform_driver hsdk_reset_driver = {
+       .probe  = hsdk_reset_probe,
        .driver = {
-               .name = "hsdk-v1.0-reset",
-               .of_match_table = hsdkv1_reset_dt_match,
+               .name = "hsdk-reset",
+               .of_match_table = hsdk_reset_dt_match,
        },
 };
-builtin_platform_driver(hsdkv1_reset_driver);
+builtin_platform_driver(hsdk_reset_driver);
 
 MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");
-MODULE_DESCRIPTION("Synopsys HSDKv1 SDP reset driver");
+MODULE_DESCRIPTION("Synopsys HSDK SDP reset driver");
 MODULE_LICENSE("GPL v2");
index c60904ff40b830358a966b5121a9876a90d5ebe0..3907bbc9c6cf7eafd4210358c923423a8fd23a9c 100644 (file)
@@ -40,8 +40,9 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
        struct socfpga_reset_data *data = container_of(rcdev,
                                                     struct socfpga_reset_data,
                                                     rcdev);
-       int bank = id / BITS_PER_LONG;
-       int offset = id % BITS_PER_LONG;
+       int reg_width = sizeof(u32);
+       int bank = id / (reg_width * BITS_PER_BYTE);
+       int offset = id % (reg_width * BITS_PER_BYTE);
        unsigned long flags;
        u32 reg;
 
@@ -61,8 +62,9 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
                                                     struct socfpga_reset_data,
                                                     rcdev);
 
-       int bank = id / BITS_PER_LONG;
-       int offset = id % BITS_PER_LONG;
+       int reg_width = sizeof(u32);
+       int bank = id / (reg_width * BITS_PER_BYTE);
+       int offset = id % (reg_width * BITS_PER_BYTE);
        unsigned long flags;
        u32 reg;
 
@@ -81,8 +83,9 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev,
 {
        struct socfpga_reset_data *data = container_of(rcdev,
                                                struct socfpga_reset_data, rcdev);
-       int bank = id / BITS_PER_LONG;
-       int offset = id % BITS_PER_LONG;
+       int reg_width = sizeof(u32);
+       int bank = id / (reg_width * BITS_PER_BYTE);
+       int offset = id % (reg_width * BITS_PER_BYTE);
        u32 reg;
 
        reg = readl(data->membase + (bank * BANK_INCREMENT));
@@ -132,7 +135,7 @@ static int socfpga_reset_probe(struct platform_device *pdev)
        spin_lock_init(&data->lock);
 
        data->rcdev.owner = THIS_MODULE;
-       data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG;
+       data->rcdev.nr_resets = NR_BANKS * (sizeof(u32) * BITS_PER_BYTE);
        data->rcdev.ops = &socfpga_reset_ops;
        data->rcdev.of_node = pdev->dev.of_node;
 
index 5a5e927ea50f45ac307dc6ed0abfa4f8d4b3a815..5dcc9bf1c5bc5de65af2bfc5d778c4d494b20cee 100644 (file)
@@ -635,19 +635,18 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
        unsigned long flags;
 
        intent = kzalloc(sizeof(*intent), GFP_KERNEL);
-
        if (!intent)
                return NULL;
 
        intent->data = kzalloc(size, GFP_KERNEL);
        if (!intent->data)
-               return NULL;
+               goto free_intent;
 
        spin_lock_irqsave(&channel->intent_lock, flags);
        ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
        if (ret < 0) {
                spin_unlock_irqrestore(&channel->intent_lock, flags);
-               return NULL;
+               goto free_data;
        }
        spin_unlock_irqrestore(&channel->intent_lock, flags);
 
@@ -656,6 +655,12 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
        intent->reuse = reuseable;
 
        return intent;
+
+free_data:
+       kfree(intent->data);
+free_intent:
+       kfree(intent);
+       return NULL;
 }
 
 static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
@@ -1197,7 +1202,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
 
        ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
        if (ret)
-               return ret;
+               goto unlock;
 
        ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ);
        if (!ret) {
@@ -1207,6 +1212,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
                ret = channel->intent_req_result ? 0 : -ECANCELED;
        }
 
+unlock:
        mutex_unlock(&channel->intent_req_lock);
        return ret;
 }
index ea19b4ff87a2f00837abd83c8e22a9e21016ff21..29f35e29d4801f83aa74b0a590bdb4412a9caa7c 100644 (file)
@@ -1644,7 +1644,9 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
        dasd_schedule_device_bh(device);
        if (device->block) {
                dasd_schedule_block_bh(device->block);
-               blk_mq_run_hw_queues(device->block->request_queue, true);
+               if (device->block->request_queue)
+                       blk_mq_run_hw_queues(device->block->request_queue,
+                                            true);
        }
 }
 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
@@ -3759,7 +3761,9 @@ int dasd_generic_path_operational(struct dasd_device *device)
        dasd_schedule_device_bh(device);
        if (device->block) {
                dasd_schedule_block_bh(device->block);
-               blk_mq_run_hw_queues(device->block->request_queue, true);
+               if (device->block->request_queue)
+                       blk_mq_run_hw_queues(device->block->request_queue,
+                                            true);
                }
 
        if (!device->stopped)
@@ -4025,7 +4029,9 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
 
        if (device->block) {
                dasd_schedule_block_bh(device->block);
-               blk_mq_run_hw_queues(device->block->request_queue, true);
+               if (device->block->request_queue)
+                       blk_mq_run_hw_queues(device->block->request_queue,
+                                            true);
        }
 
        clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
index 2e7fd966c515107714a4e612aaec5f36a57747b8..eb51893c74a4ba4053fe8d15e064fbf42bed9845 100644 (file)
@@ -249,7 +249,7 @@ static void scm_request_requeue(struct scm_request *scmrq)
 static void scm_request_finish(struct scm_request *scmrq)
 {
        struct scm_blk_dev *bdev = scmrq->bdev;
-       int *error;
+       blk_status_t *error;
        int i;
 
        for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
@@ -415,7 +415,7 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
 
 static void scm_blk_request_done(struct request *req)
 {
-       int *error = blk_mq_rq_to_pdu(req);
+       blk_status_t *error = blk_mq_rq_to_pdu(req);
 
        blk_mq_end_request(req, *error);
 }
@@ -450,7 +450,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
        atomic_set(&bdev->queued_reqs, 0);
 
        bdev->tag_set.ops = &scm_mq_ops;
-       bdev->tag_set.cmd_size = sizeof(int);
+       bdev->tag_set.cmd_size = sizeof(blk_status_t);
        bdev->tag_set.nr_hw_queues = nr_requests;
        bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
        bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
index 489b583f263d2806db03a909b1c5d92002e7b848..e5c32f4b5287ebc4da569d403475d5624bac43ef 100644 (file)
@@ -1225,10 +1225,16 @@ static int device_is_disconnected(struct ccw_device *cdev)
 static int recovery_check(struct device *dev, void *data)
 {
        struct ccw_device *cdev = to_ccwdev(dev);
+       struct subchannel *sch;
        int *redo = data;
 
        spin_lock_irq(cdev->ccwlock);
        switch (cdev->private->state) {
+       case DEV_STATE_ONLINE:
+               sch = to_subchannel(cdev->dev.parent);
+               if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
+                       break;
+               /* fall through */
        case DEV_STATE_DISCONNECTED:
                CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
                              cdev->private->dev_id.ssid,
@@ -1260,7 +1266,7 @@ static void recovery_work_func(struct work_struct *unused)
                }
                spin_unlock_irq(&recovery_lock);
        } else
-               CIO_MSG_EVENT(4, "recovery: end\n");
+               CIO_MSG_EVENT(3, "recovery: end\n");
 }
 
 static DECLARE_WORK(recovery_work, recovery_work_func);
@@ -1274,11 +1280,11 @@ static void recovery_func(unsigned long data)
        schedule_work(&recovery_work);
 }
 
-static void ccw_device_schedule_recovery(void)
+void ccw_device_schedule_recovery(void)
 {
        unsigned long flags;
 
-       CIO_MSG_EVENT(4, "recovery: schedule\n");
+       CIO_MSG_EVENT(3, "recovery: schedule\n");
        spin_lock_irqsave(&recovery_lock, flags);
        if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
                recovery_phase = 0;
index ec497af99dd8acfb74c388f990201214ee02713e..69cb70f080a59a46d0c71dbd06a003b527e850d4 100644 (file)
@@ -134,6 +134,7 @@ void ccw_device_set_disconnected(struct ccw_device *cdev);
 void ccw_device_set_notoper(struct ccw_device *cdev);
 
 void ccw_device_set_timeout(struct ccw_device *, int);
+void ccw_device_schedule_recovery(void);
 
 /* Channel measurement facility related */
 void retry_set_schib(struct ccw_device *cdev);
index 12016e32e5193796b471eb5430281bea5b6423f7..f98ea674c3d8054390a5486802fedd482f0dbe16 100644 (file)
@@ -476,6 +476,17 @@ static void create_fake_irb(struct irb *irb, int type)
        }
 }
 
+static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
+{
+       struct subchannel *sch = to_subchannel(cdev->dev.parent);
+       u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;
+
+       if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
+               ccw_device_schedule_recovery();
+
+       cdev->private->path_broken_mask = broken_paths;
+}
+
 void ccw_device_verify_done(struct ccw_device *cdev, int err)
 {
        struct subchannel *sch;
@@ -508,6 +519,7 @@ callback:
                        memset(&cdev->private->irb, 0, sizeof(struct irb));
                }
                ccw_device_report_path_events(cdev);
+               ccw_device_handle_broken_paths(cdev);
                break;
        case -ETIME:
        case -EUSERS:
index 220f49145b2f9bf48e29f92dc06f94ab7a9a0ef2..9a1b56b2df3e5e3eac8e2ad35b5def3bb149b44c 100644 (file)
@@ -131,6 +131,8 @@ struct ccw_device_private {
                                   not operable */
        u8 path_gone_mask;      /* mask of paths, that became unavailable */
        u8 path_new_mask;       /* mask of paths, that became available */
+       u8 path_broken_mask;    /* mask of paths, which were found to be
+                                  unusable */
        struct {
                unsigned int fast:1;    /* post with "channel end" */
                unsigned int repall:1;  /* report every interrupt status */
index 82ac331d91254e72debc5d021808a6282e93dc1b..84752152d41fd682c5ae350ddb4bd3ac80d47cde 100644 (file)
@@ -357,6 +357,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 
        adapter->next_port_scan = jiffies;
 
+       adapter->erp_action.adapter = adapter;
+
        if (zfcp_qdio_setup(adapter))
                goto failed;
 
@@ -513,6 +515,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
        port->dev.groups = zfcp_port_attr_groups;
        port->dev.release = zfcp_port_release;
 
+       port->erp_action.adapter = adapter;
+       port->erp_action.port = port;
+
        if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
                kfree(port);
                goto err_out;
index 37408f5f81ce84e5f4f598069bbc417edff71469..ec2532ee1822ea6c068a12a42dce145aef45b333 100644 (file)
@@ -193,9 +193,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
                atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
                                &zfcp_sdev->status);
                erp_action = &zfcp_sdev->erp_action;
-               memset(erp_action, 0, sizeof(struct zfcp_erp_action));
-               erp_action->port = port;
-               erp_action->sdev = sdev;
+               WARN_ON_ONCE(erp_action->port != port);
+               WARN_ON_ONCE(erp_action->sdev != sdev);
                if (!(atomic_read(&zfcp_sdev->status) &
                      ZFCP_STATUS_COMMON_RUNNING))
                        act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -208,8 +207,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
                zfcp_erp_action_dismiss_port(port);
                atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
                erp_action = &port->erp_action;
-               memset(erp_action, 0, sizeof(struct zfcp_erp_action));
-               erp_action->port = port;
+               WARN_ON_ONCE(erp_action->port != port);
+               WARN_ON_ONCE(erp_action->sdev != NULL);
                if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
                        act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
                break;
@@ -219,7 +218,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
                zfcp_erp_action_dismiss_adapter(adapter);
                atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
                erp_action = &adapter->erp_action;
-               memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+               WARN_ON_ONCE(erp_action->port != NULL);
+               WARN_ON_ONCE(erp_action->sdev != NULL);
                if (!(atomic_read(&adapter->status) &
                      ZFCP_STATUS_COMMON_RUNNING))
                        act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -229,7 +229,11 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
                return NULL;
        }
 
-       erp_action->adapter = adapter;
+       WARN_ON_ONCE(erp_action->adapter != adapter);
+       memset(&erp_action->list, 0, sizeof(erp_action->list));
+       memset(&erp_action->timer, 0, sizeof(erp_action->timer));
+       erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
+       erp_action->fsf_req_id = 0;
        erp_action->action = need;
        erp_action->status = act_status;
 
index ec3ddd1d31d5f5be43fb70425dd3b8981a179f63..6cf8732627e012137514075ea4958fdd5cbf82e4 100644 (file)
@@ -115,10 +115,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
        struct zfcp_unit *unit;
        int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
 
+       zfcp_sdev->erp_action.adapter = adapter;
+       zfcp_sdev->erp_action.sdev = sdev;
+
        port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
        if (!port)
                return -ENXIO;
 
+       zfcp_sdev->erp_action.port = port;
+
        unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
        if (unit)
                put_device(&unit->dev);
index a64285ab0728f14c870db3d72b19190d516d44f3..af3e4d3f9735fdc3430eea0ea05cc1a78e2fa306 100644 (file)
@@ -699,13 +699,13 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
        int status;
 
        dresp = (struct aac_mount *) fib_data(fibptr);
-       if (!(fibptr->dev->supplement_adapter_info.supported_options2 &
-           AAC_OPTION_VARIABLE_BLOCK_SIZE))
+       if (!aac_supports_2T(fibptr->dev)) {
                dresp->mnt[0].capacityhigh = 0;
-       if ((le32_to_cpu(dresp->status) != ST_OK) ||
-           (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
-               _aac_probe_container2(context, fibptr);
-               return;
+               if ((le32_to_cpu(dresp->status) == ST_OK) &&
+                       (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
+                       _aac_probe_container2(context, fibptr);
+                       return;
+               }
        }
        scsicmd = (struct scsi_cmnd *) context;
 
index 92fabf2b0c24c4c24f79ae8626d80ef09853816d..403a639574e5ea10c5c8500141204ebc513bd7c1 100644 (file)
@@ -2701,6 +2701,11 @@ static inline int aac_is_src(struct aac_dev *dev)
        return 0;
 }
 
+static inline int aac_supports_2T(struct aac_dev *dev)
+{
+       return (dev->adapter_info.options & AAC_OPT_NEW_COMM_64);
+}
+
 char * get_container_type(unsigned type);
 extern int numacb;
 extern char aac_driver_version[];
index 97d269f1688863a90f0263c5668441650fd2cadc..1bc623ad3fafabd7025ce759c22aed725aa3bad5 100644 (file)
@@ -302,9 +302,11 @@ int aac_send_shutdown(struct aac_dev * dev)
                return -ENOMEM;
        aac_fib_init(fibctx);
 
-       mutex_lock(&dev->ioctl_mutex);
-       dev->adapter_shutdown = 1;
-       mutex_unlock(&dev->ioctl_mutex);
+       if (!dev->adapter_shutdown) {
+               mutex_lock(&dev->ioctl_mutex);
+               dev->adapter_shutdown = 1;
+               mutex_unlock(&dev->ioctl_mutex);
+       }
 
        cmd = (struct aac_close *) fib_data(fibctx);
        cmd->command = cpu_to_le32(VM_CloseAll);
index 87cc4a93e637e6db517c12b0c01bb212eb2e8445..c9252b138c1fe0e21d217b0fb305cc45afc1545a 100644 (file)
@@ -906,12 +906,14 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
 
        bus = aac_logical_to_phys(scmd_channel(cmd));
        cid = scmd_id(cmd);
-       info = &aac->hba_map[bus][cid];
-       if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
-           info->devtype != AAC_DEVTYPE_NATIVE_RAW)
+
+       if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
                return FAILED;
 
-       if (info->reset_state > 0)
+       info = &aac->hba_map[bus][cid];
+
+       if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
+           info->reset_state > 0)
                return FAILED;
 
        pr_err("%s: Host adapter reset request. SCSI hang ?\n",
@@ -962,12 +964,14 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
 
        bus = aac_logical_to_phys(scmd_channel(cmd));
        cid = scmd_id(cmd);
-       info = &aac->hba_map[bus][cid];
-       if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
-           info->devtype != AAC_DEVTYPE_NATIVE_RAW)
+
+       if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
                return FAILED;
 
-       if (info->reset_state > 0)
+       info = &aac->hba_map[bus][cid];
+
+       if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
+           info->reset_state > 0)
                return FAILED;
 
        pr_err("%s: Host adapter reset request. SCSI hang ?\n",
@@ -1547,8 +1551,9 @@ static void __aac_shutdown(struct aac_dev * aac)
 {
        int i;
 
+       mutex_lock(&aac->ioctl_mutex);
        aac->adapter_shutdown = 1;
-       aac_send_shutdown(aac);
+       mutex_unlock(&aac->ioctl_mutex);
 
        if (aac->aif_thread) {
                int i;
@@ -1561,7 +1566,11 @@ static void __aac_shutdown(struct aac_dev * aac)
                }
                kthread_stop(aac->thread);
        }
+
+       aac_send_shutdown(aac);
+
        aac_adapter_disable_int(aac);
+
        if (aac_is_src(aac)) {
                if (aac->max_msix > 1) {
                        for (i = 0; i < aac->max_msix; i++) {
index 48c2b2b34b7222ae656ab4f0c9113d59a0d58d24..0c9361c87ec8de8b853f6ccaa6132663a4b982bd 100644 (file)
@@ -740,6 +740,8 @@ static void aac_send_iop_reset(struct aac_dev *dev)
        aac_set_intx_mode(dev);
 
        src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
+
+       msleep(5000);
 }
 
 static void aac_send_hardware_soft_reset(struct aac_dev *dev)
index 690816f3c6af9a67ff38e801a4e9c440b763f114..421fe869a11ef0cbdb8130aa7b8ebdffbc775236 100644 (file)
@@ -2725,9 +2725,9 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
  * Params   : SCpnt  - command causing reset
  * Returns  : one of SCSI_RESET_ macros
  */
-int acornscsi_host_reset(struct Scsi_Host *shpnt)
+int acornscsi_host_reset(struct scsi_cmnd *SCpnt)
 {
-       AS_Host *host = (AS_Host *)shpnt->hostdata;
+       AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
        struct scsi_cmnd *SCptr;
     
     host->stats.resets += 1;
@@ -2741,7 +2741,7 @@ int acornscsi_host_reset(struct Scsi_Host *shpnt)
 
        printk(KERN_WARNING "acornscsi_reset: ");
        print_sbic_status(asr, ssr, host->scsi.phase);
-       for (devidx = 0; devidx < 9; devidx ++) {
+       for (devidx = 0; devidx < 9; devidx++)
            acornscsi_dumplog(host, devidx);
     }
 #endif
index 9abe81021484dc146755d0475a51ccd26231372d..4ed3d26ffdde809f457501abcbe9dee6ce644fe7 100644 (file)
@@ -4091,7 +4091,7 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h,
        memset(id_ctlr, 0, sizeof(*id_ctlr));
        rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
        if (!rc)
-               if (id_ctlr->configured_logical_drive_count < 256)
+               if (id_ctlr->configured_logical_drive_count < 255)
                        *nlocals = id_ctlr->configured_logical_drive_count;
                else
                        *nlocals = le16_to_cpu(
index 785fb42f66502a40e005c912d1cf9022a86ddb62..2799a6b08f736052a52ae7901a06fe64fc486211 100644 (file)
@@ -3767,7 +3767,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
         */
        if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
                pr_err("write_pending failed since: %d\n", vscsi->flags);
-               return 0;
+               return -EIO;
        }
 
        rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
index 520325867e2b4c05528bd89a7eeaccea2f5c6f94..31d31aad3de1d3fd0f2ff58d2141cabddec474bf 100644 (file)
@@ -383,11 +383,11 @@ static void fc_rport_work(struct work_struct *work)
                                fc_rport_enter_flogi(rdata);
                                mutex_unlock(&rdata->rp_mutex);
                        } else {
+                               mutex_unlock(&rdata->rp_mutex);
                                FC_RPORT_DBG(rdata, "work delete\n");
                                mutex_lock(&lport->disc.disc_mutex);
                                list_del_rcu(&rdata->peers);
                                mutex_unlock(&lport->disc.disc_mutex);
-                               mutex_unlock(&rdata->rp_mutex);
                                kref_put(&rdata->kref, fc_rport_destroy);
                        }
                } else {
index bd4605a34f54d655a09471243c8d4386b9e78b3d..f8dc1601efd5f1eb51b4d776087d6ea20534d09e 100644 (file)
@@ -1728,7 +1728,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 
        if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
                reason = FAILURE_SESSION_IN_RECOVERY;
-               sc->result = DID_REQUEUE;
+               sc->result = DID_REQUEUE << 16;
                goto fault;
        }
 
@@ -2851,9 +2851,6 @@ EXPORT_SYMBOL_GPL(iscsi_session_setup);
 /**
  * iscsi_session_teardown - destroy session, host, and cls_session
  * @cls_session: iscsi session
- *
- * The driver must have called iscsi_remove_session before
- * calling this.
  */
 void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 {
@@ -2863,6 +2860,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 
        iscsi_pool_free(&session->cmdpool);
 
+       iscsi_remove_session(cls_session);
+
        kfree(session->password);
        kfree(session->password_in);
        kfree(session->username);
@@ -2877,7 +2876,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
        kfree(session->portal_type);
        kfree(session->discovery_parent_type);
 
-       iscsi_destroy_session(cls_session);
+       iscsi_free_session(cls_session);
+
        iscsi_host_dec_session_cnt(shost);
        module_put(owner);
 }
index 7e7ae786121b6e8458f7a7aa8954141c77e223a5..100bc4c8798d76852adb9224edc763f70f0741ff 100644 (file)
@@ -6131,6 +6131,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                                "Extents and RPI headers enabled.\n");
                }
                mempool_free(mboxq, phba->mbox_mem_pool);
+               rc = -EIO;
                goto out_free_bsmbx;
        }
 
index 79ba3ce063a4f847ce8187d3290ecf5a309f62b9..23bdb1ca106e459355aed0fa7627d2665a7312a3 100644 (file)
@@ -884,7 +884,7 @@ out_err:
                                         wcqe->total_data_placed);
                        nCmd->transferred_length = 0;
                        nCmd->rcv_rsplen = 0;
-                       nCmd->status = NVME_SC_FC_TRANSPORT_ERROR;
+                       nCmd->status = NVME_SC_INTERNAL;
                }
        }
 
index 1f59e7a74c7b7b9dc7db660e6114420bc90ce8b7..6b33a1f24f56169a3a17803e5a6952da2e6445a6 100644 (file)
@@ -180,7 +180,7 @@ static void qla_nvme_sp_done(void *ptr, int res)
                goto rel;
 
        if (unlikely(res == QLA_FUNCTION_FAILED))
-               fd->status = NVME_SC_FC_TRANSPORT_ERROR;
+               fd->status = NVME_SC_INTERNAL;
        else
                fd->status = 0;
 
index 5b2437a5ea440a5d3a6836115bcf444595d8f7ef..3bd956d3bc5d9597f92b612c8aa27f3c73f6fca0 100644 (file)
@@ -3061,6 +3061,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            host->max_cmd_len, host->max_channel, host->max_lun,
            host->transportt, sht->vendor_id);
 
+       INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
+
        /* Set up the irqs */
        ret = qla2x00_request_irqs(ha, rsp);
        if (ret)
@@ -3223,7 +3225,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
         */
        qla2xxx_wake_dpc(base_vha);
 
-       INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
        INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
 
        if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
index 38942050b2656bd104abb465a79b29c4e7039d43..dab876c6547392c0ccb75047c4cb767640407834 100644 (file)
@@ -580,7 +580,8 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
                if (sshdr.asc == 0x20 || /* Invalid command operation code */
                    sshdr.asc == 0x21 || /* Logical block address out of range */
                    sshdr.asc == 0x24 || /* Invalid field in cdb */
-                   sshdr.asc == 0x26) { /* Parameter value invalid */
+                   sshdr.asc == 0x26 || /* Parameter value invalid */
+                   sshdr.asc == 0x27) { /* Write protected */
                        set_host_byte(scmd, DID_TARGET_FAILURE);
                }
                return SUCCESS;
index 9cf6a80fe29754fc93d96d41edb090db2fdd67f4..ad3ea24f08859fb167e7297c2cacef81d646fb00 100644 (file)
@@ -1379,8 +1379,6 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
 
        ret = scsi_setup_cmnd(sdev, req);
 out:
-       if (ret != BLKPREP_OK)
-               cmd->flags &= ~SCMD_INITIALIZED;
        return scsi_prep_return(q, req, ret);
 }
 
@@ -1900,7 +1898,6 @@ static int scsi_mq_prep_fn(struct request *req)
        struct scsi_device *sdev = req->q->queuedata;
        struct Scsi_Host *shost = sdev->host;
        struct scatterlist *sg;
-       int ret;
 
        scsi_init_command(sdev, cmd);
 
@@ -1934,10 +1931,7 @@ static int scsi_mq_prep_fn(struct request *req)
 
        blk_mq_start_request(req);
 
-       ret = scsi_setup_cmnd(sdev, req);
-       if (ret != BLK_STS_OK)
-               cmd->flags &= ~SCMD_INITIALIZED;
-       return ret;
+       return scsi_setup_cmnd(sdev, req);
 }
 
 static void scsi_mq_done(struct scsi_cmnd *cmd)
index e7818afeda2bea560101fe023983050d0b3086b2..15590a063ad94d00fe55076a51b8e49eca54dc9c 100644 (file)
@@ -956,6 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
        if (*bflags & BLIST_NO_DIF)
                sdev->no_dif = 1;
 
+       if (*bflags & BLIST_UNMAP_LIMIT_WS)
+               sdev->unmap_limit_for_ws = 1;
+
        sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
 
        if (*bflags & BLIST_TRY_VPD_PAGES)
index bf53356f41f0639e7c0e70393d91337a2bed2880..f796bd61f3f06505069ad4e21fed202003f9df1c 100644 (file)
@@ -1376,13 +1376,19 @@ static void __scsi_remove_target(struct scsi_target *starget)
        spin_lock_irqsave(shost->host_lock, flags);
  restart:
        list_for_each_entry(sdev, &shost->__devices, siblings) {
+               /*
+                * We cannot call scsi_device_get() here, as
+                * we might've been called from rmmod() causing
+                * scsi_device_get() to fail the module_is_live()
+                * check.
+                */
                if (sdev->channel != starget->channel ||
                    sdev->id != starget->id ||
-                   scsi_device_get(sdev))
+                   !get_device(&sdev->sdev_gendev))
                        continue;
                spin_unlock_irqrestore(shost->host_lock, flags);
                scsi_remove_device(sdev);
-               scsi_device_put(sdev);
+               put_device(&sdev->sdev_gendev);
                spin_lock_irqsave(shost->host_lock, flags);
                goto restart;
        }
index 3c6bc0081fcbe34afcfc5c398a5637a2d31ae3ab..8c46a6d536af26a9e083bcf0a614a3eedd5374e6 100644 (file)
@@ -2739,7 +2739,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
 
        list_for_each_entry(rport, &fc_host->rports, peers) {
 
-               if ((rport->port_state == FC_PORTSTATE_BLOCKED) &&
+               if ((rport->port_state == FC_PORTSTATE_BLOCKED ||
+                    rport->port_state == FC_PORTSTATE_NOTPRESENT) &&
                        (rport->channel == channel)) {
 
                        switch (fc_host->tgtid_bind_type) {
@@ -2876,7 +2877,6 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
                        memcpy(&rport->port_name, &ids->port_name,
                                sizeof(rport->port_name));
                        rport->port_id = ids->port_id;
-                       rport->roles = ids->roles;
                        rport->port_state = FC_PORTSTATE_ONLINE;
                        rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
 
@@ -2885,15 +2885,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
                                                fci->f->dd_fcrport_size);
                        spin_unlock_irqrestore(shost->host_lock, flags);
 
-                       if (ids->roles & FC_PORT_ROLE_FCP_TARGET) {
-                               scsi_target_unblock(&rport->dev, SDEV_RUNNING);
-
-                               /* initiate a scan of the target */
-                               spin_lock_irqsave(shost->host_lock, flags);
-                               rport->flags |= FC_RPORT_SCAN_PENDING;
-                               scsi_queue_work(shost, &rport->scan_work);
-                               spin_unlock_irqrestore(shost->host_lock, flags);
-                       }
+                       fc_remote_port_rolechg(rport, ids->roles);
                        return rport;
                }
        }
@@ -3328,6 +3320,9 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
 {
        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
 
+       if (WARN_ON_ONCE(!rport))
+               return FAST_IO_FAIL;
+
        return fc_block_rport(rport);
 }
 EXPORT_SYMBOL(fc_block_scsi_eh);
@@ -3571,7 +3566,7 @@ fc_vport_sched_delete(struct work_struct *work)
 static enum blk_eh_timer_return
 fc_bsg_job_timeout(struct request *req)
 {
-       struct bsg_job *job = (void *) req->special;
+       struct bsg_job *job = blk_mq_rq_to_pdu(req);
        struct Scsi_Host *shost = fc_bsg_to_shost(job);
        struct fc_rport *rport = fc_bsg_to_rport(job);
        struct fc_internal *i = to_fc_internal(shost->transportt);
index 8934f19bce8ea815d696189e7bb075c43cffaa5c..7404d26895f5b7de916f65e86c549790cf444d96 100644 (file)
@@ -2210,22 +2210,6 @@ void iscsi_free_session(struct iscsi_cls_session *session)
 }
 EXPORT_SYMBOL_GPL(iscsi_free_session);
 
-/**
- * iscsi_destroy_session - destroy iscsi session
- * @session: iscsi_session
- *
- * Can be called by a LLD or iscsi_transport. There must not be
- * any running connections.
- */
-int iscsi_destroy_session(struct iscsi_cls_session *session)
-{
-       iscsi_remove_session(session);
-       ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n");
-       iscsi_free_session(session);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(iscsi_destroy_session);
-
 /**
  * iscsi_create_conn - create iscsi class connection
  * @session: iscsi cls session
@@ -3689,7 +3673,7 @@ iscsi_if_rx(struct sk_buff *skb)
                uint32_t group;
 
                nlh = nlmsg_hdr(skb);
-               if (nlh->nlmsg_len < sizeof(*nlh) ||
+               if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
                    skb->len < nlh->nlmsg_len) {
                        break;
                }
index 11c1738c21000862718ec4f60a663409a11c0b6a..d175c5c5ccf87eba361aa1bad5d84b56ce64d5b0 100644 (file)
@@ -715,13 +715,21 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
                break;
 
        case SD_LBP_WS16:
-               max_blocks = min_not_zero(sdkp->max_ws_blocks,
-                                         (u32)SD_MAX_WS16_BLOCKS);
+               if (sdkp->device->unmap_limit_for_ws)
+                       max_blocks = sdkp->max_unmap_blocks;
+               else
+                       max_blocks = sdkp->max_ws_blocks;
+
+               max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
                break;
 
        case SD_LBP_WS10:
-               max_blocks = min_not_zero(sdkp->max_ws_blocks,
-                                         (u32)SD_MAX_WS10_BLOCKS);
+               if (sdkp->device->unmap_limit_for_ws)
+                       max_blocks = sdkp->max_unmap_blocks;
+               else
+                       max_blocks = sdkp->max_ws_blocks;
+
+               max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
                break;
 
        case SD_LBP_ZERO:
@@ -2915,8 +2923,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
                                sd_config_discard(sdkp, SD_LBP_WS16);
                        else if (sdkp->lbpws10)
                                sd_config_discard(sdkp, SD_LBP_WS10);
-                       else if (sdkp->lbpu && sdkp->max_unmap_blocks)
-                               sd_config_discard(sdkp, SD_LBP_UNMAP);
                        else
                                sd_config_discard(sdkp, SD_LBP_DISABLE);
                }
@@ -3101,8 +3107,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
                sd_read_security(sdkp, buffer);
        }
 
-       sdkp->first_scan = 0;
-
        /*
         * We now have all cache related info, determine how we deal
         * with flush requests.
@@ -3117,7 +3121,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
        q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
 
        /*
-        * Use the device's preferred I/O size for reads and writes
+        * Determine the device's preferred I/O size for reads and writes
         * unless the reported value is unreasonably small, large, or
         * garbage.
         */
@@ -3131,8 +3135,19 @@ static int sd_revalidate_disk(struct gendisk *disk)
                rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
                                      (sector_t)BLK_DEF_MAX_SECTORS);
 
-       /* Combine with controller limits */
-       q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
+       /* Do not exceed controller limit */
+       rw_max = min(rw_max, queue_max_hw_sectors(q));
+
+       /*
+        * Only update max_sectors if previously unset or if the current value
+        * exceeds the capabilities of the hardware.
+        */
+       if (sdkp->first_scan ||
+           q->limits.max_sectors > q->limits.max_dev_sectors ||
+           q->limits.max_sectors > q->limits.max_hw_sectors)
+               q->limits.max_sectors = rw_max;
+
+       sdkp->first_scan = 0;
 
        set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
        sd_config_write_same(sdkp);
index cf0e71db9e5146068294ab46fb1db1fa69addbc9..aa28874e8fb92f5090d64c9ceb9523fce224eabe 100644 (file)
@@ -828,6 +828,39 @@ static int max_sectors_bytes(struct request_queue *q)
        return max_sectors << 9;
 }
 
+static void
+sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
+{
+       Sg_request *srp;
+       int val;
+       unsigned int ms;
+
+       val = 0;
+       list_for_each_entry(srp, &sfp->rq_list, entry) {
+               if (val >= SG_MAX_QUEUE)
+                       break;
+               rinfo[val].req_state = srp->done + 1;
+               rinfo[val].problem =
+                       srp->header.masked_status &
+                       srp->header.host_status &
+                       srp->header.driver_status;
+               if (srp->done)
+                       rinfo[val].duration =
+                               srp->header.duration;
+               else {
+                       ms = jiffies_to_msecs(jiffies);
+                       rinfo[val].duration =
+                               (ms > srp->header.duration) ?
+                               (ms - srp->header.duration) : 0;
+               }
+               rinfo[val].orphan = srp->orphan;
+               rinfo[val].sg_io_owned = srp->sg_io_owned;
+               rinfo[val].pack_id = srp->header.pack_id;
+               rinfo[val].usr_ptr = srp->header.usr_ptr;
+               val++;
+       }
+}
+
 static long
 sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 {
@@ -1012,38 +1045,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
                        return -EFAULT;
                else {
                        sg_req_info_t *rinfo;
-                       unsigned int ms;
 
-                       rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
-                                                               GFP_KERNEL);
+                       rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
+                                       GFP_KERNEL);
                        if (!rinfo)
                                return -ENOMEM;
                        read_lock_irqsave(&sfp->rq_list_lock, iflags);
-                       val = 0;
-                       list_for_each_entry(srp, &sfp->rq_list, entry) {
-                               if (val >= SG_MAX_QUEUE)
-                                       break;
-                               memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
-                               rinfo[val].req_state = srp->done + 1;
-                               rinfo[val].problem =
-                                       srp->header.masked_status &
-                                       srp->header.host_status &
-                                       srp->header.driver_status;
-                               if (srp->done)
-                                       rinfo[val].duration =
-                                               srp->header.duration;
-                               else {
-                                       ms = jiffies_to_msecs(jiffies);
-                                       rinfo[val].duration =
-                                               (ms > srp->header.duration) ?
-                                               (ms - srp->header.duration) : 0;
-                               }
-                               rinfo[val].orphan = srp->orphan;
-                               rinfo[val].sg_io_owned = srp->sg_io_owned;
-                               rinfo[val].pack_id = srp->header.pack_id;
-                               rinfo[val].usr_ptr = srp->header.usr_ptr;
-                               val++;
-                       }
+                       sg_fill_request_table(sfp, rinfo);
                        read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
                        result = __copy_to_user(p, rinfo,
                                                SZ_SG_REQ_INFO * SG_MAX_QUEUE);
index 6c7d7a460689917d577973f7bb47a1e408470210..568e1c65aa82cb77a92b7df5ff066513c3d959a8 100644 (file)
 /* A3700_SPI_IF_TIME_REG */
 #define A3700_SPI_CLK_CAPT_EDGE                BIT(7)
 
-/* Flags and macros for struct a3700_spi */
-#define A3700_INSTR_CNT                        1
-#define A3700_ADDR_CNT                 3
-#define A3700_DUMMY_CNT                        1
-
 struct a3700_spi {
        struct spi_master *master;
        void __iomem *base;
@@ -117,9 +112,6 @@ struct a3700_spi {
        u8 byte_len;
        u32 wait_mask;
        struct completion done;
-       u32 addr_cnt;
-       u32 instr_cnt;
-       size_t hdr_cnt;
 };
 
 static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset)
@@ -161,7 +153,7 @@ static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi,
 }
 
 static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
-                                 unsigned int pin_mode)
+                                 unsigned int pin_mode, bool receiving)
 {
        u32 val;
 
@@ -177,6 +169,9 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
                break;
        case SPI_NBITS_QUAD:
                val |= A3700_SPI_DATA_PIN1;
+               /* RX during address reception uses 4-pin */
+               if (receiving)
+                       val |= A3700_SPI_ADDR_PIN;
                break;
        default:
                dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode);
@@ -392,7 +387,8 @@ static bool a3700_spi_wait_completion(struct spi_device *spi)
 
        spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
 
-       return true;
+       /* Timeout was reached */
+       return false;
 }
 
 static bool a3700_spi_transfer_wait(struct spi_device *spi,
@@ -446,59 +442,43 @@ static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
 
 static void a3700_spi_header_set(struct a3700_spi *a3700_spi)
 {
-       u32 instr_cnt = 0, addr_cnt = 0, dummy_cnt = 0;
+       unsigned int addr_cnt;
        u32 val = 0;
 
        /* Clear the header registers */
        spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0);
        spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0);
        spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0);
+       spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
 
        /* Set header counters */
        if (a3700_spi->tx_buf) {
-               if (a3700_spi->buf_len <= a3700_spi->instr_cnt) {
-                       instr_cnt = a3700_spi->buf_len;
-               } else if (a3700_spi->buf_len <= (a3700_spi->instr_cnt +
-                                                 a3700_spi->addr_cnt)) {
-                       instr_cnt = a3700_spi->instr_cnt;
-                       addr_cnt = a3700_spi->buf_len - instr_cnt;
-               } else if (a3700_spi->buf_len <= a3700_spi->hdr_cnt) {
-                       instr_cnt = a3700_spi->instr_cnt;
-                       addr_cnt = a3700_spi->addr_cnt;
-                       /* Need to handle the normal write case with 1 byte
-                        * data
-                        */
-                       if (!a3700_spi->tx_buf[instr_cnt + addr_cnt])
-                               dummy_cnt = a3700_spi->buf_len - instr_cnt -
-                                           addr_cnt;
+               /*
+                * when tx data is not 4 bytes aligned, there will be unexpected
+                * bytes out of SPI output register, since it always shifts out
+                * as whole 4 bytes. This might cause incorrect transaction with
+                * some devices. To avoid that, use SPI header count feature to
+                * transfer up to 3 bytes of data first, and then make the rest
+                * of data 4-byte aligned.
+                */
+               addr_cnt = a3700_spi->buf_len % 4;
+               if (addr_cnt) {
+                       val = (addr_cnt & A3700_SPI_ADDR_CNT_MASK)
+                               << A3700_SPI_ADDR_CNT_BIT;
+                       spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
+
+                       /* Update the buffer length to be transferred */
+                       a3700_spi->buf_len -= addr_cnt;
+
+                       /* transfer 1~3 bytes through address count */
+                       val = 0;
+                       while (addr_cnt--) {
+                               val = (val << 8) | a3700_spi->tx_buf[0];
+                               a3700_spi->tx_buf++;
+                       }
+                       spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
                }
-               val |= ((instr_cnt & A3700_SPI_INSTR_CNT_MASK)
-                       << A3700_SPI_INSTR_CNT_BIT);
-               val |= ((addr_cnt & A3700_SPI_ADDR_CNT_MASK)
-                       << A3700_SPI_ADDR_CNT_BIT);
-               val |= ((dummy_cnt & A3700_SPI_DUMMY_CNT_MASK)
-                       << A3700_SPI_DUMMY_CNT_BIT);
        }
-       spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
-
-       /* Update the buffer length to be transferred */
-       a3700_spi->buf_len -= (instr_cnt + addr_cnt + dummy_cnt);
-
-       /* Set Instruction */
-       val = 0;
-       while (instr_cnt--) {
-               val = (val << 8) | a3700_spi->tx_buf[0];
-               a3700_spi->tx_buf++;
-       }
-       spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, val);
-
-       /* Set Address */
-       val = 0;
-       while (addr_cnt--) {
-               val = (val << 8) | a3700_spi->tx_buf[0];
-               a3700_spi->tx_buf++;
-       }
-       spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
 }
 
 static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
@@ -512,35 +492,12 @@ static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
 static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
 {
        u32 val;
-       int i = 0;
 
        while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
-               val = 0;
-               if (a3700_spi->buf_len >= 4) {
-                       val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
-                       spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
-
-                       a3700_spi->buf_len -= 4;
-                       a3700_spi->tx_buf += 4;
-               } else {
-                       /*
-                        * If the remained buffer length is less than 4-bytes,
-                        * we should pad the write buffer with all ones. So that
-                        * it avoids overwrite the unexpected bytes following
-                        * the last one.
-                        */
-                       val = GENMASK(31, 0);
-                       while (a3700_spi->buf_len) {
-                               val &= ~(0xff << (8 * i));
-                               val |= *a3700_spi->tx_buf++ << (8 * i);
-                               i++;
-                               a3700_spi->buf_len--;
-
-                               spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG,
-                                            val);
-                       }
-                       break;
-               }
+               val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
+               spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+               a3700_spi->buf_len -= 4;
+               a3700_spi->tx_buf += 4;
        }
 
        return 0;
@@ -645,15 +602,18 @@ static int a3700_spi_transfer_one(struct spi_master *master,
        a3700_spi->rx_buf  = xfer->rx_buf;
        a3700_spi->buf_len = xfer->len;
 
-       /* SPI transfer headers */
-       a3700_spi_header_set(a3700_spi);
-
        if (xfer->tx_buf)
                nbits = xfer->tx_nbits;
        else if (xfer->rx_buf)
                nbits = xfer->rx_nbits;
 
-       a3700_spi_pin_mode_set(a3700_spi, nbits);
+       a3700_spi_pin_mode_set(a3700_spi, nbits, xfer->rx_buf ? true : false);
+
+       /* Flush the FIFOs */
+       a3700_spi_fifo_flush(a3700_spi);
+
+       /* Transfer first bytes of data when buffer is not 4-byte aligned */
+       a3700_spi_header_set(a3700_spi);
 
        if (xfer->rx_buf) {
                /* Set read data length */
@@ -733,16 +693,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
                                dev_err(&spi->dev, "wait wfifo empty timed out\n");
                                return -ETIMEDOUT;
                        }
-               } else {
-                       /*
-                        * If the instruction in SPI_INSTR does not require data
-                        * to be written to the SPI device, wait until SPI_RDY
-                        * is 1 for the SPI interface to be in idle.
-                        */
-                       if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
-                               dev_err(&spi->dev, "wait xfer ready timed out\n");
-                               return -ETIMEDOUT;
-                       }
+               }
+
+               if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
+                       dev_err(&spi->dev, "wait xfer ready timed out\n");
+                       return -ETIMEDOUT;
                }
 
                val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
@@ -834,10 +789,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
        memset(spi, 0, sizeof(struct a3700_spi));
 
        spi->master = master;
-       spi->instr_cnt = A3700_INSTR_CNT;
-       spi->addr_cnt = A3700_ADDR_CNT;
-       spi->hdr_cnt = A3700_INSTR_CNT + A3700_ADDR_CNT +
-                      A3700_DUMMY_CNT;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        spi->base = devm_ioremap_resource(dev, res);
index 6ef6c44f39f5f7e591710029814ca03128455299..a172ab299e80316ee904a1c9277a36755699e4d4 100644 (file)
@@ -1250,7 +1250,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
                        goto qspi_probe_err;
                }
        } else {
-               goto qspi_probe_err;
+               goto qspi_resource_err;
        }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
@@ -1272,7 +1272,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
                qspi->base[CHIP_SELECT]  = devm_ioremap_resource(dev, res);
                if (IS_ERR(qspi->base[CHIP_SELECT])) {
                        ret = PTR_ERR(qspi->base[CHIP_SELECT]);
-                       goto qspi_probe_err;
+                       goto qspi_resource_err;
                }
        }
 
@@ -1280,7 +1280,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
                                GFP_KERNEL);
        if (!qspi->dev_ids) {
                ret = -ENOMEM;
-               goto qspi_probe_err;
+               goto qspi_resource_err;
        }
 
        for (val = 0; val < num_irqs; val++) {
@@ -1369,8 +1369,9 @@ qspi_reg_err:
        bcm_qspi_hw_uninit(qspi);
        clk_disable_unprepare(qspi->clk);
 qspi_probe_err:
-       spi_master_put(master);
        kfree(qspi->dev_ids);
+qspi_resource_err:
+       spi_master_put(master);
        return ret;
 }
 /* probe function to be called by SoC specific platform driver probe */
index 680cdf549506143d6041736f43552f868e3bf6db..ba9743fa2326d11689384e47c84da4068c9fc521 100644 (file)
@@ -263,8 +263,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
         * no need to check it there.
         * However, we need to ensure the following calculations.
         */
-       if ((div < SPI_MBR_DIV_MIN) &&
-           (div > SPI_MBR_DIV_MAX))
+       if (div < SPI_MBR_DIV_MIN ||
+           div > SPI_MBR_DIV_MAX)
                return -EINVAL;
 
        /* Determine the first power of 2 greater than or equal to div */
index 6e65524cbfd9b8f7e1ff21a4c304908531476f21..e8b5a5e21b2e692e8af31e93ee3102b0c26872d9 100644 (file)
@@ -45,7 +45,6 @@
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/spi.h>
-#define SPI_DYN_FIRST_BUS_NUM 0
 
 static DEFINE_IDR(spi_master_idr);
 
@@ -2086,7 +2085,7 @@ int spi_register_controller(struct spi_controller *ctlr)
        struct device           *dev = ctlr->dev.parent;
        struct boardinfo        *bi;
        int                     status = -ENODEV;
-       int                     id;
+       int                     id, first_dynamic;
 
        if (!dev)
                return -ENODEV;
@@ -2116,9 +2115,15 @@ int spi_register_controller(struct spi_controller *ctlr)
                }
        }
        if (ctlr->bus_num < 0) {
+               first_dynamic = of_alias_get_highest_id("spi");
+               if (first_dynamic < 0)
+                       first_dynamic = 0;
+               else
+                       first_dynamic++;
+
                mutex_lock(&board_lock);
-               id = idr_alloc(&spi_master_idr, ctlr, SPI_DYN_FIRST_BUS_NUM, 0,
-                              GFP_KERNEL);
+               id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
+                              0, GFP_KERNEL);
                mutex_unlock(&board_lock);
                if (WARN(id < 0, "couldn't get idr"))
                        return id;
index d11c6de9c777f979a2a59b05cd82fc8ed0c54e0b..6150d2780e22c57479c95d59afaea1cbc2b385a5 100644 (file)
@@ -223,11 +223,9 @@ static int ad7192_setup(struct ad7192_state *st,
        struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
        unsigned long long scale_uv;
        int i, ret, id;
-       u8 ones[6];
 
        /* reset the serial interface */
-       memset(&ones, 0xFF, 6);
-       ret = spi_write(st->sd.spi, &ones, 6);
+       ret = ad_sd_reset(&st->sd, 48);
        if (ret < 0)
                goto out;
        usleep_range(500, 1000); /* Wait for at least 500us */
index 1691760339da0f021db0d36e3c0d1e7dcef03323..02573c517d9d13cc8d95250e0de0f8623d381482 100644 (file)
@@ -172,7 +172,7 @@ static int ade7759_spi_read_reg_40(struct device *dev,
                                reg_address);
                goto error_ret;
        }
-       *val = ((u64)st->rx[1] << 32) | (st->rx[2] << 24) |
+       *val = ((u64)st->rx[1] << 32) | ((u64)st->rx[2] << 24) |
                (st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5];
 
 error_ret:
index d96f4512224ffb11fd13900422d6da1498ab8697..b55e5ebba8b4194df4392e259062e9b483be491d 100644 (file)
@@ -400,10 +400,10 @@ static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
                                        struct media_link, list);
                ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
                if (ret)
-                       break;
+                       return ret;
        }
 
-       return ret;
+       return 0;
 }
 
 /* async subdev complete notifier */
index 13eaf16ecd16a26fcd94aeca0e522f54e4ebfd1b..87595c594b12091432f22f7135b1b61713002bda 100644 (file)
@@ -496,8 +496,12 @@ static int spinand_program_page(struct spi_device *spi_nand,
        if (!wbuf)
                return -ENOMEM;
 
-       enable_read_hw_ecc = 0;
-       spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf);
+       enable_read_hw_ecc = 1;
+       retval = spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf);
+       if (retval < 0) {
+               dev_err(&spi_nand->dev, "ecc error on read page!!!\n");
+               return retval;
+       }
 
        for (i = offset, j = 0; i < len; i++, j++)
                wbuf[i] &= buf[j];
index c4b1b218ea38f8d197b5393492e2f690158737f6..290b419aa9dd71358c479eacaa5fbffc662af7c2 100644 (file)
@@ -570,12 +570,6 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value)
                dev_dbg(&spi->dev, "set: DIO mapping");
        #endif
 
-       // check DIO number
-       if (DIONumber > 5) {
-               dev_dbg(&spi->dev, "set: illegal input param");
-               return -EINVAL;
-       }
-
        switch (DIONumber) {
        case 0: mask=MASK_DIO0; shift=SHIFT_DIO0; regaddr=REG_DIOMAPPING1; break;
        case 1: mask=MASK_DIO1; shift=SHIFT_DIO1; regaddr=REG_DIOMAPPING1; break;
@@ -583,6 +577,9 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value)
        case 3: mask=MASK_DIO3; shift=SHIFT_DIO3; regaddr=REG_DIOMAPPING1; break;
        case 4: mask=MASK_DIO4; shift=SHIFT_DIO4; regaddr=REG_DIOMAPPING2; break;
        case 5: mask=MASK_DIO5; shift=SHIFT_DIO5; regaddr=REG_DIOMAPPING2; break;
+       default:
+               dev_dbg(&spi->dev, "set: illegal input param");
+               return -EINVAL;
        }
 
        // read reg
index 6b778206a1a36ffc12e040a1922126007802052a..cb8a95aabd6c218c8c73471c03398e4d526d458c 100644 (file)
@@ -119,9 +119,8 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
 
 void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
 {
-       rtw_free_mlme_priv_ie_data(pmlmepriv);
-
        if (pmlmepriv) {
+               rtw_free_mlme_priv_ie_data(pmlmepriv);
                if (pmlmepriv->free_bss_buf) {
                        vfree(pmlmepriv->free_bss_buf);
                }
index 92277457aba4efc6d77061765cf4cfaa44adce27..ce1dd6f9036fd6caff1716ef06a8838e9e136be4 100644 (file)
@@ -311,6 +311,8 @@ static ssize_t proc_set_cam(struct file *file, const char __user *buffer, size_t
 
                if (num < 2)
                        return count;
+               if (id >= TOTAL_CAM_ENTRY)
+                       return -EINVAL;
 
                if (strcmp("c", cmd) == 0) {
                        _clear_cam_entry(adapter, id);
index 5f84526cb5b5b89afb9e7a7c3753e346b5b91e4b..edbf6af1c8b70a76749e7554bff3f718a8a431ad 100644 (file)
@@ -2901,11 +2901,11 @@ halmac_update_datapack_88xx(struct halmac_adapter *halmac_adapter,
        if (halmac_adapter->fw_version.h2c_version < 4)
                return HALMAC_RET_FW_NO_SUPPORT;
 
+       driver_adapter = halmac_adapter->driver_adapter;
+
        HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
                        "[TRACE]%s ==========>\n", __func__);
 
-       driver_adapter = halmac_adapter->driver_adapter;
-
        HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
                        "[TRACE]%s <==========\n", __func__);
 
index f33024e4d853de78ae699f50a6b77b177c1a54ae..544f638ed3efb94a6639e214da9bc10f2c6aee23 100644 (file)
@@ -1618,10 +1618,11 @@ halmac_send_h2c_set_pwr_mode_88xx(struct halmac_adapter *halmac_adapter,
        void *driver_adapter = NULL;
        enum halmac_ret_status status = HALMAC_RET_SUCCESS;
 
+       driver_adapter = halmac_adapter->driver_adapter;
+
        HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
                        "%s!!\n", __func__);
 
-       driver_adapter = halmac_adapter->driver_adapter;
        h2c_header = h2c_buff;
        h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX;
 
@@ -1713,10 +1714,11 @@ halmac_media_status_rpt_88xx(struct halmac_adapter *halmac_adapter, u8 op_mode,
        void *driver_adapter = NULL;
        enum halmac_ret_status status = HALMAC_RET_SUCCESS;
 
+       driver_adapter = halmac_adapter->driver_adapter;
+
        HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
                        "halmac_send_h2c_set_pwr_mode_88xx!!\n");
 
-       driver_adapter = halmac_adapter->driver_adapter;
        h2c_header = H2c_buff;
        h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX;
 
@@ -2143,10 +2145,11 @@ halmac_func_ctrl_ch_switch_88xx(struct halmac_adapter *halmac_adapter,
        enum halmac_cmd_process_status *process_status =
                &halmac_adapter->halmac_state.scan_state_set.process_status;
 
+       driver_adapter = halmac_adapter->driver_adapter;
+
        HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
                        "halmac_ctrl_ch_switch!!\n");
 
-       driver_adapter = halmac_adapter->driver_adapter;
        halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
 
        if (halmac_transition_scan_state_88xx(
@@ -2276,15 +2279,13 @@ enum halmac_ret_status halmac_send_h2c_update_bcn_parse_info_88xx(
 {
        u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
        u16 h2c_seq_mum = 0;
-       void *driver_adapter = NULL;
+       void *driver_adapter = halmac_adapter->driver_adapter;
        struct halmac_h2c_header_info h2c_header_info;
        enum halmac_ret_status status = HALMAC_RET_SUCCESS;
 
        HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
                        "%s!!\n", __func__);
 
-       driver_adapter = halmac_adapter->driver_adapter;
-
        UPDATE_BEACON_PARSING_INFO_SET_FUNC_EN(h2c_buff, bcn_ie_info->func_en);
        UPDATE_BEACON_PARSING_INFO_SET_SIZE_TH(h2c_buff, bcn_ie_info->size_th);
        UPDATE_BEACON_PARSING_INFO_SET_TIMEOUT(h2c_buff, bcn_ie_info->timeout);
index 67956e24779ce6fea015da416422cdf5c834bf18..56f7be6af1f695a873c9acfe6619d3ec8dafd496 100644 (file)
@@ -1376,6 +1376,8 @@ static void reset_highlight_buffers(struct vc_data *);
 
 static int read_all_key;
 
+static int in_keyboard_notifier;
+
 static void start_read_all_timer(struct vc_data *vc, int command);
 
 enum {
@@ -1408,7 +1410,10 @@ static void read_all_doc(struct vc_data *vc)
        cursor_track = read_all_mode;
        spk_reset_index_count(0);
        if (get_sentence_buf(vc, 0) == -1) {
-               kbd_fakekey2(vc, RA_DOWN_ARROW);
+               del_timer(&cursor_timer);
+               if (!in_keyboard_notifier)
+                       speakup_fake_down_arrow();
+               start_read_all_timer(vc, RA_DOWN_ARROW);
        } else {
                say_sentence_num(0, 0);
                synth_insert_next_index(0);
@@ -2212,8 +2217,10 @@ static int keyboard_notifier_call(struct notifier_block *nb,
        int ret = NOTIFY_OK;
        static int keycode;     /* to hold the current keycode */
 
+       in_keyboard_notifier = 1;
+
        if (vc->vc_mode == KD_GRAPHICS)
-               return ret;
+               goto out;
 
        /*
         * First, determine whether we are handling a fake keypress on
@@ -2225,7 +2232,7 @@ static int keyboard_notifier_call(struct notifier_block *nb,
         */
 
        if (speakup_fake_key_pressed())
-               return ret;
+               goto out;
 
        switch (code) {
        case KBD_KEYCODE:
@@ -2266,6 +2273,8 @@ static int keyboard_notifier_call(struct notifier_block *nb,
                        break;
                }
        }
+out:
+       in_keyboard_notifier = 0;
        return ret;
 }
 
index 74cce4f1a7bd7340a30ef13bece954da4f69a7c0..27ecf6fb49fd940021f6dedc25cf3a5ef849097a 100644 (file)
@@ -1826,7 +1826,7 @@ static __init int visorutil_spar_detect(void)
        return 0;
 }
 
-static int init_unisys(void)
+static int __init init_unisys(void)
 {
        int result;
 
@@ -1841,7 +1841,7 @@ static int init_unisys(void)
        return 0;
 };
 
-static void exit_unisys(void)
+static void __exit exit_unisys(void)
 {
        acpi_bus_unregister_driver(&unisys_acpi_driver);
 }
index 5f3d8f2339e34834d11edfa8de1d5819e3e32b4f..4be864dbd41c9f4eb63f03361a0dab1e13c54a67 100644 (file)
@@ -390,8 +390,7 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
                        __func__, instance);
                instance->alsa_stream = alsa_stream;
                alsa_stream->instance = instance;
-               ret = 0; // xxx todo -1;
-               goto err_free_mem;
+               return 0;
        }
 
        /* Initialize and create a VCHI connection */
@@ -401,16 +400,15 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
                        LOG_ERR("%s: failed to initialise VCHI instance (ret=%d)\n",
                                __func__, ret);
 
-                       ret = -EIO;
-                       goto err_free_mem;
+                       return -EIO;
                }
                ret = vchi_connect(NULL, 0, vchi_instance);
                if (ret) {
                        LOG_ERR("%s: failed to connect VCHI instance (ret=%d)\n",
                                __func__, ret);
 
-                       ret = -EIO;
-                       goto err_free_mem;
+                       kfree(vchi_instance);
+                       return -EIO;
                }
                initted = 1;
        }
@@ -421,19 +419,16 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
        if (IS_ERR(instance)) {
                LOG_ERR("%s: failed to initialize audio service\n", __func__);
 
-               ret = PTR_ERR(instance);
-               goto err_free_mem;
+               /* vchi_instance is retained for use the next time. */
+               return PTR_ERR(instance);
        }
 
        instance->alsa_stream = alsa_stream;
        alsa_stream->instance = instance;
 
        LOG_DBG(" success !\n");
-       ret = 0;
-err_free_mem:
-       kfree(vchi_instance);
 
-       return ret;
+       return 0;
 }
 
 int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream)
index 0159ca4407d8a8faeba3918e260986fdc1c120c8..be08849175ea3622321136f3be66df8705e40772 100644 (file)
@@ -612,18 +612,20 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
                        if (head_bytes > actual)
                                head_bytes = actual;
 
-                       memcpy((char *)page_address(pages[0]) +
+                       memcpy((char *)kmap(pages[0]) +
                                pagelist->offset,
                                fragments,
                                head_bytes);
+                       kunmap(pages[0]);
                }
                if ((actual >= 0) && (head_bytes < actual) &&
                        (tail_bytes != 0)) {
-                       memcpy((char *)page_address(pages[num_pages - 1]) +
+                       memcpy((char *)kmap(pages[num_pages - 1]) +
                                ((pagelist->offset + actual) &
                                (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
                                fragments + g_cache_line_size,
                                tail_bytes);
+                       kunmap(pages[num_pages - 1]);
                }
 
                down(&g_free_fragments_mutex);
index 1c0c9553bc050ecc8b6ad3a5dddab8150398a842..7dd38047ba2352d5dde2845170ecfc3e149ec27d 100644 (file)
@@ -246,11 +246,11 @@ struct mxser_port {
        unsigned char err_shadow;
 
        struct async_icount icount; /* kernel counters for 4 input interrupts */
-       int timeout;
+       unsigned int timeout;
 
        int read_status_mask;
        int ignore_status_mask;
-       int xmit_fifo_size;
+       unsigned int xmit_fifo_size;
        int xmit_head;
        int xmit_tail;
        int xmit_cnt;
@@ -572,8 +572,9 @@ static void mxser_dtr_rts(struct tty_port *port, int on)
 static int mxser_set_baud(struct tty_struct *tty, long newspd)
 {
        struct mxser_port *info = tty->driver_data;
-       int quot = 0, baud;
+       unsigned int quot = 0, baud;
        unsigned char cval;
+       u64 timeout;
 
        if (!info->ioaddr)
                return -1;
@@ -594,8 +595,13 @@ static int mxser_set_baud(struct tty_struct *tty, long newspd)
                quot = 0;
        }
 
-       info->timeout = ((info->xmit_fifo_size * HZ * 10 * quot) / info->baud_base);
-       info->timeout += HZ / 50;       /* Add .02 seconds of slop */
+       /*
+        * worst case (128 * 1000 * 10 * 18432) needs 35 bits, so divide in the
+        * u64 domain
+        */
+       timeout = (u64)info->xmit_fifo_size * HZ * 10 * quot;
+       do_div(timeout, info->baud_base);
+       info->timeout = timeout + HZ / 50; /* Add .02 seconds of slop */
 
        if (quot) {
                info->MCR |= UART_MCR_DTR;
index 583c9a0c7eccd439b3c457c6c9c767976c49deda..8c48c3784831b3605158ef38b15f17d1dae65b93 100644 (file)
@@ -507,9 +507,14 @@ static void bcm_uart_set_termios(struct uart_port *port,
 {
        unsigned int ctl, baud, quot, ier;
        unsigned long flags;
+       int tries;
 
        spin_lock_irqsave(&port->lock, flags);
 
+       /* Drain the hot tub fully before we power it off for the winter. */
+       for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--)
+               mdelay(10);
+
        /* disable uart while changing speed */
        bcm_uart_disable(port);
        bcm_uart_flush(port);
index 849c1f9991cec8e0102719a90fe3325e90f2d72f..f0252184291ed4a3ab49ab03ce634e7be8588bf4 100644 (file)
@@ -1276,7 +1276,6 @@ static void rx_dma_timer_init(struct lpuart_port *sport)
 static int lpuart_startup(struct uart_port *port)
 {
        struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
-       int ret;
        unsigned long flags;
        unsigned char temp;
 
@@ -1291,11 +1290,6 @@ static int lpuart_startup(struct uart_port *port)
        sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
                UARTPFIFO_FIFOSIZE_MASK) + 1);
 
-       ret = devm_request_irq(port->dev, port->irq, lpuart_int, 0,
-                               DRIVER_NAME, sport);
-       if (ret)
-               return ret;
-
        spin_lock_irqsave(&sport->port.lock, flags);
 
        lpuart_setup_watermark(sport);
@@ -1333,7 +1327,6 @@ static int lpuart_startup(struct uart_port *port)
 static int lpuart32_startup(struct uart_port *port)
 {
        struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
-       int ret;
        unsigned long flags;
        unsigned long temp;
 
@@ -1346,11 +1339,6 @@ static int lpuart32_startup(struct uart_port *port)
        sport->rxfifo_size = 0x1 << (((temp >> UARTFIFO_RXSIZE_OFF) &
                UARTFIFO_FIFOSIZE_MASK) - 1);
 
-       ret = devm_request_irq(port->dev, port->irq, lpuart32_int, 0,
-                               DRIVER_NAME, sport);
-       if (ret)
-               return ret;
-
        spin_lock_irqsave(&sport->port.lock, flags);
 
        lpuart32_setup_watermark(sport);
@@ -1380,8 +1368,6 @@ static void lpuart_shutdown(struct uart_port *port)
 
        spin_unlock_irqrestore(&port->lock, flags);
 
-       devm_free_irq(port->dev, port->irq, sport);
-
        if (sport->lpuart_dma_rx_use) {
                del_timer_sync(&sport->lpuart_timer);
                lpuart_dma_rx_free(&sport->port);
@@ -1400,7 +1386,6 @@ static void lpuart_shutdown(struct uart_port *port)
 
 static void lpuart32_shutdown(struct uart_port *port)
 {
-       struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
        unsigned long temp;
        unsigned long flags;
 
@@ -1413,8 +1398,6 @@ static void lpuart32_shutdown(struct uart_port *port)
        lpuart32_write(port, temp, UARTCTRL);
 
        spin_unlock_irqrestore(&port->lock, flags);
-
-       devm_free_irq(port->dev, port->irq, sport);
 }
 
 static void
@@ -2212,16 +2195,22 @@ static int lpuart_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, &sport->port);
 
-       if (lpuart_is_32(sport))
+       if (lpuart_is_32(sport)) {
                lpuart_reg.cons = LPUART32_CONSOLE;
-       else
+               ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0,
+                                       DRIVER_NAME, sport);
+       } else {
                lpuart_reg.cons = LPUART_CONSOLE;
+               ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0,
+                                       DRIVER_NAME, sport);
+       }
+
+       if (ret)
+               goto failed_irq_request;
 
        ret = uart_add_one_port(&lpuart_reg, &sport->port);
-       if (ret) {
-               clk_disable_unprepare(sport->clk);
-               return ret;
-       }
+       if (ret)
+               goto failed_attach_port;
 
        sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
        if (!sport->dma_tx_chan)
@@ -2240,6 +2229,11 @@ static int lpuart_probe(struct platform_device *pdev)
        }
 
        return 0;
+
+failed_attach_port:
+failed_irq_request:
+       clk_disable_unprepare(sport->clk);
+       return ret;
 }
 
 static int lpuart_remove(struct platform_device *pdev)
index cdd2f942317c59fe4ec04c022a004d871f551fde..b9c7a904c1eaf7716d2137d5bcded2a8c805c17d 100644 (file)
@@ -889,7 +889,16 @@ static int sccnxp_probe(struct platform_device *pdev)
                        goto err_out;
                uartclk = 0;
        } else {
-               clk_prepare_enable(clk);
+               ret = clk_prepare_enable(clk);
+               if (ret)
+                       goto err_out;
+
+               ret = devm_add_action_or_reset(&pdev->dev,
+                               (void(*)(void *))clk_disable_unprepare,
+                               clk);
+               if (ret)
+                       goto err_out;
+
                uartclk = clk_get_rate(clk);
        }
 
@@ -988,7 +997,7 @@ static int sccnxp_probe(struct platform_device *pdev)
        uart_unregister_driver(&s->uart);
 err_out:
        if (!IS_ERR(s->regulator))
-               return regulator_disable(s->regulator);
+               regulator_disable(s->regulator);
 
        return ret;
 }
index 2fe216b276e29ee54d6dd804555f27668407875a..84a8ac2a779f6ba7d4faf9df4d132421ac440c0a 100644 (file)
@@ -694,10 +694,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
        tty_set_termios_ldisc(tty, disc);
        retval = tty_ldisc_open(tty, tty->ldisc);
        if (retval) {
-               if (!WARN_ON(disc == N_TTY)) {
-                       tty_ldisc_put(tty->ldisc);
-                       tty->ldisc = NULL;
-               }
+               tty_ldisc_put(tty->ldisc);
+               tty->ldisc = NULL;
        }
        return retval;
 }
@@ -752,8 +750,9 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)
 
        if (tty->ldisc) {
                if (reinit) {
-                       if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0)
-                               tty_ldisc_reinit(tty, N_TTY);
+                       if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0 &&
+                           tty_ldisc_reinit(tty, N_TTY) < 0)
+                               WARN_ON(tty_ldisc_reinit(tty, N_NULL) < 0);
                } else
                        tty_ldisc_kill(tty);
        }
index 5e056064259c85432ebe9f3d9eb12616e2aa1271..18c923a4c16e1f41c1893c6b5e7a79a3ed5df6c7 100644 (file)
@@ -1832,6 +1832,9 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
        .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
        },
+       { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
+       .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
+       },
 
        { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
        .driver_info = CLEAR_HALT_CONDITIONS,
index 5aacea1978a5f38b5a7663ef7bcd71f4b1c311d3..3e865dbf878c74b2e904925798cd603e5cd0e979 100644 (file)
@@ -190,8 +190,10 @@ static void wdm_in_callback(struct urb *urb)
        /*
         * only set a new error if there is no previous error.
         * Errors are only cleared during read/open
+        * Avoid propagating -EPIPE (stall) to userspace since it is
+        * better handled as an empty read
         */
-       if (desc->rerr  == 0)
+       if (desc->rerr == 0 && status != -EPIPE)
                desc->rerr = status;
 
        if (length + desc->length > desc->wMaxCommand) {
index 4be52c602e9b7a7de6a76ecb4be686cb12ec9950..883549ee946cb5e206623bfc79d199620078a6bb 100644 (file)
@@ -643,15 +643,23 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 
                } else if (header->bDescriptorType ==
                                USB_DT_INTERFACE_ASSOCIATION) {
+                       struct usb_interface_assoc_descriptor *d;
+
+                       d = (struct usb_interface_assoc_descriptor *)header;
+                       if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
+                               dev_warn(ddev,
+                                        "config %d has an invalid interface association descriptor of length %d, skipping\n",
+                                        cfgno, d->bLength);
+                               continue;
+                       }
+
                        if (iad_num == USB_MAXIADS) {
                                dev_warn(ddev, "found more Interface "
                                               "Association Descriptors "
                                               "than allocated for in "
                                               "configuration %d\n", cfgno);
                        } else {
-                               config->intf_assoc[iad_num] =
-                                       (struct usb_interface_assoc_descriptor
-                                       *)header;
+                               config->intf_assoc[iad_num] = d;
                                iad_num++;
                        }
 
@@ -852,7 +860,7 @@ int usb_get_configuration(struct usb_device *dev)
                }
 
                if (dev->quirks & USB_QUIRK_DELAY_INIT)
-                       msleep(100);
+                       msleep(200);
 
                result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
                    bigbuffer, length);
@@ -952,10 +960,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
        for (i = 0; i < num; i++) {
                buffer += length;
                cap = (struct usb_dev_cap_header *)buffer;
-               length = cap->bLength;
 
-               if (total_len < length)
+               if (total_len < sizeof(*cap) || total_len < cap->bLength) {
+                       dev->bos->desc->bNumDeviceCaps = i;
                        break;
+               }
+               length = cap->bLength;
                total_len -= length;
 
                if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
index 318bb3b966879644644126062fb4c33b1392a6ef..e9326f31db8d4367cb80e813954573c983a49a08 100644 (file)
@@ -140,6 +140,9 @@ module_param(usbfs_memory_mb, uint, 0644);
 MODULE_PARM_DESC(usbfs_memory_mb,
                "maximum MB allowed for usbfs buffers (0 = no limit)");
 
+/* Hard limit, necessary to avoid arithmetic overflow */
+#define USBFS_XFER_MAX         (UINT_MAX / 2 - 1000000)
+
 static atomic64_t usbfs_memory_usage;  /* Total memory currently allocated */
 
 /* Check whether it's okay to allocate more memory for a transfer */
@@ -1460,6 +1463,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
                                USBDEVFS_URB_ZERO_PACKET |
                                USBDEVFS_URB_NO_INTERRUPT))
                return -EINVAL;
+       if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
+               return -EINVAL;
        if (uurb->buffer_length > 0 && !uurb->buffer)
                return -EINVAL;
        if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL &&
index 41eaf0b5251800c570736baf4775c35072e4d9fc..e9ce6bb0b22d15587efaff059b3c5c2c29681430 100644 (file)
@@ -2710,13 +2710,16 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
        if (!(portstatus & USB_PORT_STAT_CONNECTION))
                return -ENOTCONN;
 
-       /* bomb out completely if the connection bounced.  A USB 3.0
-        * connection may bounce if multiple warm resets were issued,
+       /* Retry if connect change is set but status is still connected.
+        * A USB 3.0 connection may bounce if multiple warm resets were issued,
         * but the device may have successfully re-connected. Ignore it.
         */
        if (!hub_is_superspeed(hub->hdev) &&
-                       (portchange & USB_PORT_STAT_C_CONNECTION))
-               return -ENOTCONN;
+           (portchange & USB_PORT_STAT_C_CONNECTION)) {
+               usb_clear_port_feature(hub->hdev, port1,
+                                      USB_PORT_FEAT_C_CONNECTION);
+               return -EAGAIN;
+       }
 
        if (!(portstatus & USB_PORT_STAT_ENABLE))
                return -EBUSY;
@@ -4838,7 +4841,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
                        goto loop;
 
                if (udev->quirks & USB_QUIRK_DELAY_INIT)
-                       msleep(1000);
+                       msleep(2000);
 
                /* consecutive bus-powered hubs aren't reliable; they can
                 * violate the voltage drop budget.  if the new child has
index 4c38ea41ae969e6fac8ced209a655957322ba36f..371a07d874a370bc70ad6ef1499debac8b34b6fe 100644 (file)
@@ -2069,6 +2069,10 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
                        elength = 1;
                        goto next_desc;
                }
+               if ((buflen < elength) || (elength < 3)) {
+                       dev_err(&intf->dev, "invalid descriptor buffer length\n");
+                       break;
+               }
                if (buffer[1] != USB_DT_CS_INTERFACE) {
                        dev_err(&intf->dev, "skipping garbage\n");
                        goto next_desc;
index 82806e3112028f335a90ec34c0283a02c3f87e9b..a6aaf2f193a464450c96fd30f9acf9f1503eecb1 100644 (file)
@@ -221,6 +221,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Corsair Strafe RGB */
        { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
 
+       /* MIDI keyboard WORLDE MINI */
+       { USB_DEVICE(0x1c75, 0x0204), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* Acer C120 LED Projector */
        { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
 
index 4cef7d4f9cd060736ee53df87d2ca7b5dd931632..a26d1fde0f5e4b6f3613387da40aaca00f937477 100644 (file)
@@ -177,6 +177,7 @@ static const struct of_device_id of_dwc3_simple_match[] = {
        { .compatible = "rockchip,rk3399-dwc3" },
        { .compatible = "xlnx,zynqmp-dwc3" },
        { .compatible = "cavium,octeon-7130-usb-uctl" },
+       { .compatible = "sprd,sc9860-dwc3" },
        { /* Sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
index 827e376bfa97106f1ccbeefcad40bc2a05357c8b..75e6cb044eb2b96d173a2c716bb29f5f4fd7f703 100644 (file)
@@ -990,6 +990,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
                                         DWC3_TRBCTL_CONTROL_DATA,
                                         true);
 
+               req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
+
                /* Now prepare one extra TRB to align transfer size */
                dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
                                         maxpacket - rem,
@@ -1015,6 +1017,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
                                         DWC3_TRBCTL_CONTROL_DATA,
                                         true);
 
+               req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
+
                /* Now prepare one extra TRB to align transfer size */
                dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
                                         0, DWC3_TRBCTL_CONTROL_DATA,
@@ -1029,6 +1033,9 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
                dwc3_ep0_prepare_one_trb(dep, req->request.dma,
                                req->request.length, DWC3_TRBCTL_CONTROL_DATA,
                                false);
+
+               req->trb = &dwc->ep0_trb[dep->trb_enqueue];
+
                ret = dwc3_ep0_start_trans(dep);
        }
 
index dd74c99d6ce15f6a961d095f82c4a29bb560dc50..5d061b3d8224aaa0b0a88c5c700454d8256b25bc 100644 (file)
@@ -2026,6 +2026,8 @@ static DEVICE_ATTR_RO(suspended);
 static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
 {
        struct usb_composite_dev        *cdev = get_gadget_data(gadget);
+       struct usb_gadget_strings       *gstr = cdev->driver->strings[0];
+       struct usb_string               *dev_str = gstr->strings;
 
        /* composite_disconnect() must already have been called
         * by the underlying peripheral controller driver!
@@ -2045,6 +2047,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
 
        composite_dev_cleanup(cdev);
 
+       if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer)
+               dev_str[USB_GADGET_MANUFACTURER_IDX].s = "";
+
        kfree(cdev->def_manufacturer);
        kfree(cdev);
        set_gadget_data(gadget, NULL);
index a22a892de7b7e0ae3a1d816aa943bcac053880a4..aeb9f3c4052157e9a99d9c5dceef026970ea76f3 100644 (file)
@@ -1143,11 +1143,12 @@ static struct configfs_attribute *interf_grp_attrs[] = {
        NULL
 };
 
-int usb_os_desc_prepare_interf_dir(struct config_group *parent,
-                                  int n_interf,
-                                  struct usb_os_desc **desc,
-                                  char **names,
-                                  struct module *owner)
+struct config_group *usb_os_desc_prepare_interf_dir(
+               struct config_group *parent,
+               int n_interf,
+               struct usb_os_desc **desc,
+               char **names,
+               struct module *owner)
 {
        struct config_group *os_desc_group;
        struct config_item_type *os_desc_type, *interface_type;
@@ -1159,7 +1160,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
 
        char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL);
        if (!vlabuf)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group);
        os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type);
@@ -1184,7 +1185,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
                configfs_add_default_group(&d->group, os_desc_group);
        }
 
-       return 0;
+       return os_desc_group;
 }
 EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir);
 
index 36c468c4f5e90be54e7356d48dc2037476a62994..540d5e92ed2259149f736f4bd60e154ccff6bb37 100644 (file)
@@ -5,11 +5,12 @@
 
 void unregister_gadget_item(struct config_item *item);
 
-int usb_os_desc_prepare_interf_dir(struct config_group *parent,
-                                  int n_interf,
-                                  struct usb_os_desc **desc,
-                                  char **names,
-                                  struct module *owner);
+struct config_group *usb_os_desc_prepare_interf_dir(
+               struct config_group *parent,
+               int n_interf,
+               struct usb_os_desc **desc,
+               char **names,
+               struct module *owner);
 
 static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item)
 {
index 9990944a724584c57603a489f411a81d17aef99b..8b342587f8ad6eb3d79d25d602b741592ec911ab 100644 (file)
@@ -46,7 +46,8 @@
 static void ffs_data_get(struct ffs_data *ffs);
 static void ffs_data_put(struct ffs_data *ffs);
 /* Creates new ffs_data object. */
-static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
+static struct ffs_data *__must_check ffs_data_new(const char *dev_name)
+       __attribute__((malloc));
 
 /* Opened counter handling. */
 static void ffs_data_opened(struct ffs_data *ffs);
@@ -780,11 +781,12 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
                                         struct usb_request *req)
 {
        struct ffs_io_data *io_data = req->context;
+       struct ffs_data *ffs = io_data->ffs;
 
        ENTER();
 
        INIT_WORK(&io_data->work, ffs_user_copy_worker);
-       schedule_work(&io_data->work);
+       queue_work(ffs->io_completion_wq, &io_data->work);
 }
 
 static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
@@ -1500,7 +1502,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
        if (unlikely(ret < 0))
                return ERR_PTR(ret);
 
-       ffs = ffs_data_new();
+       ffs = ffs_data_new(dev_name);
        if (unlikely(!ffs))
                return ERR_PTR(-ENOMEM);
        ffs->file_perms = data.perms;
@@ -1610,6 +1612,7 @@ static void ffs_data_put(struct ffs_data *ffs)
                BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
                       waitqueue_active(&ffs->ep0req_completion.wait) ||
                       waitqueue_active(&ffs->wait));
+               destroy_workqueue(ffs->io_completion_wq);
                kfree(ffs->dev_name);
                kfree(ffs);
        }
@@ -1642,7 +1645,7 @@ static void ffs_data_closed(struct ffs_data *ffs)
        ffs_data_put(ffs);
 }
 
-static struct ffs_data *ffs_data_new(void)
+static struct ffs_data *ffs_data_new(const char *dev_name)
 {
        struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
        if (unlikely(!ffs))
@@ -1650,6 +1653,12 @@ static struct ffs_data *ffs_data_new(void)
 
        ENTER();
 
+       ffs->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name);
+       if (!ffs->io_completion_wq) {
+               kfree(ffs);
+               return NULL;
+       }
+
        refcount_set(&ffs->ref, 1);
        atomic_set(&ffs->opened, 0);
        ffs->state = FFS_READ_DESCRIPTORS;
index d6bd0244b008aa93f91e63049fe96fbc0b9eccfe..5153e29870c3917fe4cdefc1e4c920536cdd4bf4 100644 (file)
@@ -307,8 +307,6 @@ struct fsg_common {
        struct completion       thread_notifier;
        struct task_struct      *thread_task;
 
-       /* Callback functions. */
-       const struct fsg_operations     *ops;
        /* Gadget's private data. */
        void                    *private_data;
 
@@ -2438,6 +2436,7 @@ static void handle_exception(struct fsg_common *common)
 static int fsg_main_thread(void *common_)
 {
        struct fsg_common       *common = common_;
+       int                     i;
 
        /*
         * Allow the thread to be killed by a signal, but set the signal mask
@@ -2476,21 +2475,16 @@ static int fsg_main_thread(void *common_)
        common->thread_task = NULL;
        spin_unlock_irq(&common->lock);
 
-       if (!common->ops || !common->ops->thread_exits
-        || common->ops->thread_exits(common) < 0) {
-               int i;
+       /* Eject media from all LUNs */
 
-               down_write(&common->filesem);
-               for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
-                       struct fsg_lun *curlun = common->luns[i];
-                       if (!curlun || !fsg_lun_is_open(curlun))
-                               continue;
+       down_write(&common->filesem);
+       for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
+               struct fsg_lun *curlun = common->luns[i];
 
+               if (curlun && fsg_lun_is_open(curlun))
                        fsg_lun_close(curlun);
-                       curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
-               }
-               up_write(&common->filesem);
        }
+       up_write(&common->filesem);
 
        /* Let fsg_unbind() know the thread has exited */
        complete_and_exit(&common->thread_notifier, 0);
@@ -2681,13 +2675,6 @@ void fsg_common_remove_luns(struct fsg_common *common)
 }
 EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
 
-void fsg_common_set_ops(struct fsg_common *common,
-                       const struct fsg_operations *ops)
-{
-       common->ops = ops;
-}
-EXPORT_SYMBOL_GPL(fsg_common_set_ops);
-
 void fsg_common_free_buffers(struct fsg_common *common)
 {
        _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
index d3902313b8ac44dd2a53ba16b346b0d585c3f2e1..dc05ca0c435969037811c75ac2b0e094cb66c155 100644 (file)
@@ -60,17 +60,6 @@ struct fsg_module_parameters {
 struct fsg_common;
 
 /* FSF callback functions */
-struct fsg_operations {
-       /*
-        * Callback function to call when thread exits.  If no
-        * callback is set or it returns value lower then zero MSF
-        * will force eject all LUNs it operates on (including those
-        * marked as non-removable or with prevent_medium_removal flag
-        * set).
-        */
-       int (*thread_exits)(struct fsg_common *common);
-};
-
 struct fsg_lun_opts {
        struct config_group group;
        struct fsg_lun *lun;
@@ -142,9 +131,6 @@ void fsg_common_remove_lun(struct fsg_lun *lun);
 
 void fsg_common_remove_luns(struct fsg_common *common);
 
-void fsg_common_set_ops(struct fsg_common *common,
-                       const struct fsg_operations *ops);
-
 int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
                          unsigned int id, const char *name,
                          const char **name_pfx);
index 8df244fc9d80daa4c7caf0da729bae2ac66137a2..ea0da35a44e2e9f3f2c0e11921568da68b70fafd 100644 (file)
@@ -555,6 +555,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
        size_t                  size;   /* Amount of data in a TX request. */
        size_t                  bytes_copied = 0;
        struct usb_request      *req;
+       int                     value;
 
        DBG(dev, "printer_write trying to send %d bytes\n", (int)len);
 
@@ -634,7 +635,11 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
                        return -EAGAIN;
                }
 
-               if (usb_ep_queue(dev->in_ep, req, GFP_ATOMIC)) {
+               /* here, we unlock, and only unlock, to avoid deadlock. */
+               spin_unlock(&dev->lock);
+               value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
+               spin_lock(&dev->lock);
+               if (value) {
                        list_add(&req->list, &dev->tx_reqs);
                        spin_unlock_irqrestore(&dev->lock, flags);
                        mutex_unlock(&dev->lock_printer_io);
index e1d5853ef1e429743637ab546b229a02ee49245c..c7c5b3ce1d98840d8d4e9122d60fd51ae75b330d 100644 (file)
@@ -908,6 +908,7 @@ static void rndis_free_inst(struct usb_function_instance *f)
                        free_netdev(opts->net);
        }
 
+       kfree(opts->rndis_interf_group);        /* single VLA chunk */
        kfree(opts);
 }
 
@@ -916,6 +917,7 @@ static struct usb_function_instance *rndis_alloc_inst(void)
        struct f_rndis_opts *opts;
        struct usb_os_desc *descs[1];
        char *names[1];
+       struct config_group *rndis_interf_group;
 
        opts = kzalloc(sizeof(*opts), GFP_KERNEL);
        if (!opts)
@@ -940,8 +942,14 @@ static struct usb_function_instance *rndis_alloc_inst(void)
        names[0] = "rndis";
        config_group_init_type_name(&opts->func_inst.group, "",
                                    &rndis_func_type);
-       usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
-                                      names, THIS_MODULE);
+       rndis_interf_group =
+               usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
+                                              names, THIS_MODULE);
+       if (IS_ERR(rndis_interf_group)) {
+               rndis_free_inst(&opts->func_inst);
+               return ERR_CAST(rndis_interf_group);
+       }
+       opts->rndis_interf_group = rndis_interf_group;
 
        return &opts->func_inst;
 }
index 540f1c48c1a8d1a8bf058609bc5f455e3cf1dc57..79f70ebf85dc351c77d25e3315619cce280ed544 100644 (file)
@@ -279,6 +279,7 @@ struct ffs_data {
        }                               file_perms;
 
        struct eventfd_ctx *ffs_eventfd;
+       struct workqueue_struct *io_completion_wq;
        bool no_disconnect;
        struct work_struct reset_work;
 
index a35ee3c2545d9591aa4ef6b6b947cbedbe3f0093..efdb7ac381d973c6cca8f4665ea4ab9ba0083294 100644 (file)
@@ -26,6 +26,7 @@ struct f_rndis_opts {
        bool                            bound;
        bool                            borrowed_net;
 
+       struct config_group             *rndis_interf_group;
        struct usb_os_desc              rndis_os_desc;
        char                            rndis_ext_compat_id[16];
 
index 684900fcfe24c3c5ab206568f24da857434a654f..5c28bee327e15440210eb33cfb427f7a1fd659b3 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/aio.h>
 #include <linux/uio.h>
 #include <linux/refcount.h>
-
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/moduleparam.h>
 
@@ -116,6 +116,7 @@ enum ep0_state {
 struct dev_data {
        spinlock_t                      lock;
        refcount_t                      count;
+       int                             udc_usage;
        enum ep0_state                  state;          /* P: lock */
        struct usb_gadgetfs_event       event [N_EVENT];
        unsigned                        ev_next;
@@ -513,9 +514,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
                INIT_WORK(&priv->work, ep_user_copy_worker);
                schedule_work(&priv->work);
        }
-       spin_unlock(&epdata->dev->lock);
 
        usb_ep_free_request(ep, req);
+       spin_unlock(&epdata->dev->lock);
        put_ep(epdata);
 }
 
@@ -939,9 +940,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
                        struct usb_request      *req = dev->req;
 
                        if ((retval = setup_req (ep, req, 0)) == 0) {
+                               ++dev->udc_usage;
                                spin_unlock_irq (&dev->lock);
                                retval = usb_ep_queue (ep, req, GFP_KERNEL);
                                spin_lock_irq (&dev->lock);
+                               --dev->udc_usage;
                        }
                        dev->state = STATE_DEV_CONNECTED;
 
@@ -983,11 +986,14 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
                                retval = -EIO;
                        else {
                                len = min (len, (size_t)dev->req->actual);
-// FIXME don't call this with the spinlock held ...
+                               ++dev->udc_usage;
+                               spin_unlock_irq(&dev->lock);
                                if (copy_to_user (buf, dev->req->buf, len))
                                        retval = -EFAULT;
                                else
                                        retval = len;
+                               spin_lock_irq(&dev->lock);
+                               --dev->udc_usage;
                                clean_req (dev->gadget->ep0, dev->req);
                                /* NOTE userspace can't yet choose to stall */
                        }
@@ -1131,6 +1137,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
                        retval = setup_req (dev->gadget->ep0, dev->req, len);
                        if (retval == 0) {
                                dev->state = STATE_DEV_CONNECTED;
+                               ++dev->udc_usage;
                                spin_unlock_irq (&dev->lock);
                                if (copy_from_user (dev->req->buf, buf, len))
                                        retval = -EFAULT;
@@ -1142,6 +1149,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
                                                GFP_KERNEL);
                                }
                                spin_lock_irq(&dev->lock);
+                               --dev->udc_usage;
                                if (retval < 0) {
                                        clean_req (dev->gadget->ep0, dev->req);
                                } else
@@ -1243,9 +1251,21 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
        struct usb_gadget       *gadget = dev->gadget;
        long ret = -ENOTTY;
 
-       if (gadget->ops->ioctl)
+       spin_lock_irq(&dev->lock);
+       if (dev->state == STATE_DEV_OPENED ||
+                       dev->state == STATE_DEV_UNBOUND) {
+               /* Not bound to a UDC */
+       } else if (gadget->ops->ioctl) {
+               ++dev->udc_usage;
+               spin_unlock_irq(&dev->lock);
+
                ret = gadget->ops->ioctl (gadget, code, value);
 
+               spin_lock_irq(&dev->lock);
+               --dev->udc_usage;
+       }
+       spin_unlock_irq(&dev->lock);
+
        return ret;
 }
 
@@ -1463,10 +1483,12 @@ delegate:
                                if (value < 0)
                                        break;
 
+                               ++dev->udc_usage;
                                spin_unlock (&dev->lock);
                                value = usb_ep_queue (gadget->ep0, dev->req,
                                                        GFP_KERNEL);
                                spin_lock (&dev->lock);
+                               --dev->udc_usage;
                                if (value < 0) {
                                        clean_req (gadget->ep0, dev->req);
                                        break;
@@ -1490,8 +1512,12 @@ delegate:
                req->length = value;
                req->zero = value < w_length;
 
+               ++dev->udc_usage;
                spin_unlock (&dev->lock);
                value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
+               spin_lock(&dev->lock);
+               --dev->udc_usage;
+               spin_unlock(&dev->lock);
                if (value < 0) {
                        DBG (dev, "ep_queue --> %d\n", value);
                        req->status = 0;
@@ -1518,21 +1544,24 @@ static void destroy_ep_files (struct dev_data *dev)
                /* break link to FS */
                ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
                list_del_init (&ep->epfiles);
+               spin_unlock_irq (&dev->lock);
+
                dentry = ep->dentry;
                ep->dentry = NULL;
                parent = d_inode(dentry->d_parent);
 
                /* break link to controller */
+               mutex_lock(&ep->lock);
                if (ep->state == STATE_EP_ENABLED)
                        (void) usb_ep_disable (ep->ep);
                ep->state = STATE_EP_UNBOUND;
                usb_ep_free_request (ep->ep, ep->req);
                ep->ep = NULL;
+               mutex_unlock(&ep->lock);
+
                wake_up (&ep->wait);
                put_ep (ep);
 
-               spin_unlock_irq (&dev->lock);
-
                /* break link to dcache */
                inode_lock(parent);
                d_delete (dentry);
@@ -1603,6 +1632,11 @@ gadgetfs_unbind (struct usb_gadget *gadget)
 
        spin_lock_irq (&dev->lock);
        dev->state = STATE_DEV_UNBOUND;
+       while (dev->udc_usage > 0) {
+               spin_unlock_irq(&dev->lock);
+               usleep_range(1000, 2000);
+               spin_lock_irq(&dev->lock);
+       }
        spin_unlock_irq (&dev->lock);
 
        destroy_ep_files (dev);
index e99ab57ee3e589a5df5ed8e0c4063ff6861158fb..fcba59782f265fc216d44d4b0394f79f08b320a7 100644 (file)
@@ -107,15 +107,6 @@ static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
 
 FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
 
-static unsigned long msg_registered;
-static void msg_cleanup(void);
-
-static int msg_thread_exits(struct fsg_common *common)
-{
-       msg_cleanup();
-       return 0;
-}
-
 static int msg_do_config(struct usb_configuration *c)
 {
        struct fsg_opts *opts;
@@ -154,9 +145,6 @@ static struct usb_configuration msg_config_driver = {
 
 static int msg_bind(struct usb_composite_dev *cdev)
 {
-       static const struct fsg_operations ops = {
-               .thread_exits = msg_thread_exits,
-       };
        struct fsg_opts *opts;
        struct fsg_config config;
        int status;
@@ -173,8 +161,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
        if (status)
                goto fail;
 
-       fsg_common_set_ops(opts->common, &ops);
-
        status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
        if (status)
                goto fail_set_cdev;
@@ -256,18 +242,12 @@ MODULE_LICENSE("GPL");
 
 static int __init msg_init(void)
 {
-       int ret;
-
-       ret = usb_composite_probe(&msg_driver);
-       set_bit(0, &msg_registered);
-
-       return ret;
+       return usb_composite_probe(&msg_driver);
 }
 module_init(msg_init);
 
-static void msg_cleanup(void)
+static void __exit msg_cleanup(void)
 {
-       if (test_and_clear_bit(0, &msg_registered))
-               usb_composite_unregister(&msg_driver);
+       usb_composite_unregister(&msg_driver);
 }
 module_exit(msg_cleanup);
index 7cd5c969fcbe9724ec0892403f1a5bd1c7d01320..1e9567091d86073f6e77f85021531de142c055a2 100644 (file)
@@ -273,6 +273,7 @@ config USB_SNP_CORE
 config USB_SNP_UDC_PLAT
        tristate "Synopsys USB 2.0 Device controller"
        depends on USB_GADGET && OF && HAS_DMA
+       depends on EXTCON || EXTCON=n
        select USB_GADGET_DUALSPEED
        select USB_SNP_CORE
        default ARCH_BCM_IPROC
index 98d71400f8a134442232196d47380fefb3b222fd..a884c022df7a5455e1f0c2746dd933afc55f65e3 100644 (file)
@@ -29,6 +29,8 @@
 #include <linux/of_gpio.h>
 
 #include "atmel_usba_udc.h"
+#define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \
+                          | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)
 
 #ifdef CONFIG_USB_GADGET_DEBUG_FS
 #include <linux/debugfs.h>
@@ -2361,7 +2363,7 @@ static int usba_udc_probe(struct platform_device *pdev)
                                        IRQ_NOAUTOEN);
                        ret = devm_request_threaded_irq(&pdev->dev,
                                        gpio_to_irq(udc->vbus_pin), NULL,
-                                       usba_vbus_irq_thread, IRQF_ONESHOT,
+                                       usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
                                        "atmel_usba_udc", udc);
                        if (ret) {
                                udc->vbus_pin = -ENODEV;
index 75c51ca4ee0f33cc419d36a3f928da093a30179d..d41d07aae0cec07294cd9917d8ccdbd57f1f29ca 100644 (file)
@@ -1320,8 +1320,7 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
        udc->dev.driver = &driver->driver;
        udc->gadget->dev.driver = &driver->driver;
 
-       if (driver->max_speed < udc->gadget->max_speed)
-               usb_gadget_udc_set_speed(udc, driver->max_speed);
+       usb_gadget_udc_set_speed(udc, driver->max_speed);
 
        ret = driver->bind(udc->gadget, driver);
        if (ret)
index a030d7923d7db4b4776f04eea94ca484c4e623d0..f04e91ef9e7c1c3fa5320659eb52dd20ccc1883d 100644 (file)
@@ -237,6 +237,8 @@ struct dummy_hcd {
 
        struct usb_device               *udev;
        struct list_head                urbp_list;
+       struct urbp                     *next_frame_urbp;
+
        u32                             stream_en_ep;
        u8                              num_stream[30 / 2];
 
@@ -253,11 +255,13 @@ struct dummy {
         */
        struct dummy_ep                 ep[DUMMY_ENDPOINTS];
        int                             address;
+       int                             callback_usage;
        struct usb_gadget               gadget;
        struct usb_gadget_driver        *driver;
        struct dummy_request            fifo_req;
        u8                              fifo_buf[FIFO_SIZE];
        u16                             devstatus;
+       unsigned                        ints_enabled:1;
        unsigned                        udc_suspended:1;
        unsigned                        pullup:1;
 
@@ -375,11 +379,10 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
                             USB_PORT_STAT_CONNECTION) == 0)
                                dum_hcd->port_status |=
                                        (USB_PORT_STAT_C_CONNECTION << 16);
-                       if ((dum_hcd->port_status &
-                            USB_PORT_STAT_ENABLE) == 1 &&
-                               (dum_hcd->port_status &
-                                USB_SS_PORT_LS_U0) == 1 &&
-                               dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
+                       if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) &&
+                           (dum_hcd->port_status &
+                            USB_PORT_STAT_LINK_STATE) == USB_SS_PORT_LS_U0 &&
+                           dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
                                dum_hcd->active = 1;
                }
        } else {
@@ -416,6 +419,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
 static void set_link_state(struct dummy_hcd *dum_hcd)
 {
        struct dummy *dum = dum_hcd->dum;
+       unsigned int power_bit;
 
        dum_hcd->active = 0;
        if (dum->pullup)
@@ -426,32 +430,43 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
                        return;
 
        set_link_state_by_speed(dum_hcd);
+       power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ?
+                       USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER);
 
        if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
             dum_hcd->active)
                dum_hcd->resuming = 0;
 
        /* Currently !connected or in reset */
-       if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
+       if ((dum_hcd->port_status & power_bit) == 0 ||
                        (dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
-               unsigned disconnect = USB_PORT_STAT_CONNECTION &
+               unsigned int disconnect = power_bit &
                                dum_hcd->old_status & (~dum_hcd->port_status);
-               unsigned reset = USB_PORT_STAT_RESET &
+               unsigned int reset = USB_PORT_STAT_RESET &
                                (~dum_hcd->old_status) & dum_hcd->port_status;
 
                /* Report reset and disconnect events to the driver */
-               if (dum->driver && (disconnect || reset)) {
+               if (dum->ints_enabled && (disconnect || reset)) {
                        stop_activity(dum);
+                       ++dum->callback_usage;
+                       spin_unlock(&dum->lock);
                        if (reset)
                                usb_gadget_udc_reset(&dum->gadget, dum->driver);
                        else
                                dum->driver->disconnect(&dum->gadget);
+                       spin_lock(&dum->lock);
+                       --dum->callback_usage;
                }
-       } else if (dum_hcd->active != dum_hcd->old_active) {
+       } else if (dum_hcd->active != dum_hcd->old_active &&
+                       dum->ints_enabled) {
+               ++dum->callback_usage;
+               spin_unlock(&dum->lock);
                if (dum_hcd->old_active && dum->driver->suspend)
                        dum->driver->suspend(&dum->gadget);
                else if (!dum_hcd->old_active &&  dum->driver->resume)
                        dum->driver->resume(&dum->gadget);
+               spin_lock(&dum->lock);
+               --dum->callback_usage;
        }
 
        dum_hcd->old_status = dum_hcd->port_status;
@@ -972,8 +987,11 @@ static int dummy_udc_start(struct usb_gadget *g,
         * can't enumerate without help from the driver we're binding.
         */
 
+       spin_lock_irq(&dum->lock);
        dum->devstatus = 0;
        dum->driver = driver;
+       dum->ints_enabled = 1;
+       spin_unlock_irq(&dum->lock);
 
        return 0;
 }
@@ -984,6 +1002,16 @@ static int dummy_udc_stop(struct usb_gadget *g)
        struct dummy            *dum = dum_hcd->dum;
 
        spin_lock_irq(&dum->lock);
+       dum->ints_enabled = 0;
+       stop_activity(dum);
+
+       /* emulate synchronize_irq(): wait for callbacks to finish */
+       while (dum->callback_usage > 0) {
+               spin_unlock_irq(&dum->lock);
+               usleep_range(1000, 2000);
+               spin_lock_irq(&dum->lock);
+       }
+
        dum->driver = NULL;
        spin_unlock_irq(&dum->lock);
 
@@ -1037,7 +1065,12 @@ static int dummy_udc_probe(struct platform_device *pdev)
        memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
        dum->gadget.name = gadget_name;
        dum->gadget.ops = &dummy_ops;
-       dum->gadget.max_speed = USB_SPEED_SUPER;
+       if (mod_data.is_super_speed)
+               dum->gadget.max_speed = USB_SPEED_SUPER;
+       else if (mod_data.is_high_speed)
+               dum->gadget.max_speed = USB_SPEED_HIGH;
+       else
+               dum->gadget.max_speed = USB_SPEED_FULL;
 
        dum->gadget.dev.parent = &pdev->dev;
        init_dummy_udc_hw(dum);
@@ -1246,6 +1279,8 @@ static int dummy_urb_enqueue(
 
        list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
        urb->hcpriv = urbp;
+       if (!dum_hcd->next_frame_urbp)
+               dum_hcd->next_frame_urbp = urbp;
        if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
                urb->error_count = 1;           /* mark as a new urb */
 
@@ -1521,6 +1556,8 @@ static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
        if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
                        dum->ss_hcd : dum->hs_hcd)))
                return NULL;
+       if (!dum->ints_enabled)
+               return NULL;
        if ((address & ~USB_DIR_IN) == 0)
                return &dum->ep[0];
        for (i = 1; i < DUMMY_ENDPOINTS; i++) {
@@ -1762,6 +1799,7 @@ static void dummy_timer(unsigned long _dum_hcd)
                spin_unlock_irqrestore(&dum->lock, flags);
                return;
        }
+       dum_hcd->next_frame_urbp = NULL;
 
        for (i = 0; i < DUMMY_ENDPOINTS; i++) {
                if (!ep_info[i].name)
@@ -1778,6 +1816,10 @@ restart:
                int                     type;
                int                     status = -EINPROGRESS;
 
+               /* stop when we reach URBs queued after the timer interrupt */
+               if (urbp == dum_hcd->next_frame_urbp)
+                       break;
+
                urb = urbp->urb;
                if (urb->unlinked)
                        goto return_urb;
@@ -1857,10 +1899,12 @@ restart:
                         * until setup() returns; no reentrancy issues etc.
                         */
                        if (value > 0) {
+                               ++dum->callback_usage;
                                spin_unlock(&dum->lock);
                                value = dum->driver->setup(&dum->gadget,
                                                &setup);
                                spin_lock(&dum->lock);
+                               --dum->callback_usage;
 
                                if (value >= 0) {
                                        /* no delays (max 64KB data stage) */
@@ -2561,8 +2605,6 @@ static struct hc_driver dummy_hcd = {
        .product_desc =         "Dummy host controller",
        .hcd_priv_size =        sizeof(struct dummy_hcd),
 
-       .flags =                HCD_USB3 | HCD_SHARED,
-
        .reset =                dummy_setup,
        .start =                dummy_start,
        .stop =                 dummy_stop,
@@ -2591,8 +2633,12 @@ static int dummy_hcd_probe(struct platform_device *pdev)
        dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
        dum = *((void **)dev_get_platdata(&pdev->dev));
 
-       if (!mod_data.is_super_speed)
+       if (mod_data.is_super_speed)
+               dummy_hcd.flags = HCD_USB3 | HCD_SHARED;
+       else if (mod_data.is_high_speed)
                dummy_hcd.flags = HCD_USB2;
+       else
+               dummy_hcd.flags = HCD_USB11;
        hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
        if (!hs_hcd)
                return -ENOMEM;
index df37c1e6e9d5cc3e5cd1a7f1a632d768c2460c48..63a206122058968a1fb8fec424c0ca8022a1f0f6 100644 (file)
@@ -1038,7 +1038,7 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
                        usb3_ep->ep.maxpacket);
        u8 *buf = usb3_req->req.buf + usb3_req->req.actual;
        u32 tmp = 0;
-       bool is_last;
+       bool is_last = !len ? true : false;
 
        if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0)
                return -EBUSY;
@@ -1059,7 +1059,8 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
                usb3_write(usb3, tmp, fifo_reg);
        }
 
-       is_last = usb3_is_transfer_complete(usb3_ep, usb3_req);
+       if (!is_last)
+               is_last = usb3_is_transfer_complete(usb3_ep, usb3_req);
        /* Send the data */
        usb3_set_px_con_send(usb3_ep, len, is_last);
 
@@ -1150,7 +1151,8 @@ static void usb3_start_pipe0(struct renesas_usb3_ep *usb3_ep,
                usb3_set_p0_con_for_ctrl_read_data(usb3);
        } else {
                usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD);
-               usb3_set_p0_con_for_ctrl_write_data(usb3);
+               if (usb3_req->req.length)
+                       usb3_set_p0_con_for_ctrl_write_data(usb3);
        }
 
        usb3_p0_xfer(usb3_ep, usb3_req);
@@ -2053,7 +2055,16 @@ static u32 usb3_calc_ramarea(int ram_size)
 static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep,
                                const struct usb_endpoint_descriptor *desc)
 {
-       return usb3_ep->rammap_val | PN_RAMMAP_MPKT(usb_endpoint_maxp(desc));
+       int i;
+       const u32 max_packet_array[] = {8, 16, 32, 64, 512};
+       u32 mpkt = PN_RAMMAP_MPKT(1024);
+
+       for (i = 0; i < ARRAY_SIZE(max_packet_array); i++) {
+               if (usb_endpoint_maxp(desc) <= max_packet_array[i])
+                       mpkt = PN_RAMMAP_MPKT(max_packet_array[i]);
+       }
+
+       return usb3_ep->rammap_val | mpkt;
 }
 
 static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep,
index 658d9d1f9ea3766c6236ab8d39e5fcd5604ce0d6..6dda3623a276d34f0926ee225a6becee16226a47 100644 (file)
@@ -447,7 +447,7 @@ static int usb_asmedia_wait_write(struct pci_dev *pdev)
                if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
                        return 0;
 
-               usleep_range(40, 60);
+               udelay(50);
        }
 
        dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
  *
  * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
  * It signals to the BIOS that the OS wants control of the host controller,
- * and then waits 5 seconds for the BIOS to hand over control.
+ * and then waits 1 second for the BIOS to hand over control.
  * If we timeout, assume the BIOS is broken and take control anyway.
  */
 static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
@@ -1069,9 +1069,9 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
        if (val & XHCI_HC_BIOS_OWNED) {
                writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
 
-               /* Wait for 5 seconds with 10 microsecond polling interval */
+               /* Wait for 1 second with 10 microsecond polling interval */
                timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
-                               0, 5000, 10);
+                               0, 1000000, 10);
 
                /* Assume a buggy BIOS and take HC ownership anyway */
                if (timeout) {
@@ -1100,7 +1100,7 @@ hc_init:
         * operational or runtime registers.  Wait 5 seconds and no more.
         */
        timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
-                       5000, 10);
+                       5000000, 10);
        /* Assume a buggy HC and start HC initialization anyway */
        if (timeout) {
                val = readl(op_reg_base + XHCI_STS_OFFSET);
index ad89a6d4111b45e0c048ee437ffd391f5af846e0..a2336deb5e360cc7fb66b52f8730aa0691852da2 100644 (file)
@@ -112,7 +112,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
 
        /* If PSI table exists, add the custom speed attributes from it */
        if (usb3_1 && xhci->usb3_rhub.psi_count) {
-               u32 ssp_cap_base, bm_attrib, psi;
+               u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
                int offset;
 
                ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
@@ -139,6 +139,15 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
                for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
                        psi = xhci->usb3_rhub.psi[i];
                        psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
+                       psi_exp = XHCI_EXT_PORT_PSIE(psi);
+                       psi_mant = XHCI_EXT_PORT_PSIM(psi);
+
+                       /* Shift to Gbps and set SSP Link BIT(14) if 10 Gbps */
+                       for (; psi_exp < 3; psi_exp++)
+                               psi_mant /= 1000;
+                       if (psi_mant >= 10)
+                               psi |= BIT(14);
+
                        if ((psi & PLT_MASK) == PLT_SYM) {
                        /* Symmetric, create SSA RX and TX from one PSI entry */
                                put_unaligned_le32(psi, &buf[offset]);
@@ -411,14 +420,25 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
                                                     GFP_NOWAIT);
                        if (!command) {
                                spin_unlock_irqrestore(&xhci->lock, flags);
-                               xhci_free_command(xhci, cmd);
-                               return -ENOMEM;
+                               ret = -ENOMEM;
+                               goto cmd_cleanup;
+                       }
+
+                       ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
+                                                      i, suspend);
+                       if (ret) {
+                               spin_unlock_irqrestore(&xhci->lock, flags);
+                               xhci_free_command(xhci, command);
+                               goto cmd_cleanup;
                        }
-                       xhci_queue_stop_endpoint(xhci, command, slot_id, i,
-                                                suspend);
                }
        }
-       xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+       ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+       if (ret) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               goto cmd_cleanup;
+       }
+
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -430,6 +450,8 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
                xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
                ret = -ETIME;
        }
+
+cmd_cleanup:
        xhci_free_command(xhci, cmd);
        return ret;
 }
@@ -1506,9 +1528,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                                t2 |= PORT_WKOC_E | PORT_WKCONN_E;
                                t2 &= ~PORT_WKDISC_E;
                        }
-                       if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
-                           (hcd->speed < HCD_USB3))
-                               t2 &= ~PORT_WAKE_BITS;
                } else
                        t2 &= ~PORT_WAKE_BITS;
 
index 8071c8fdd15e741b008af64075cda3c87072bfb4..76f392954733631c7b660cc806d87736fb2d8d15 100644 (file)
 #define PCI_DEVICE_ID_INTEL_APL_XHCI                   0x5aa8
 #define PCI_DEVICE_ID_INTEL_DNV_XHCI                   0x19d0
 
-#define PCI_DEVICE_ID_AMD_PROMONTORYA_4                        0x43b9
-#define PCI_DEVICE_ID_AMD_PROMONTORYA_3                        0x43ba
-#define PCI_DEVICE_ID_AMD_PROMONTORYA_2                        0x43bb
-#define PCI_DEVICE_ID_AMD_PROMONTORYA_1                        0x43bc
-
 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI               0x1142
 
 static const char hcd_name[] = "xhci_hcd";
@@ -142,13 +137,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        if (pdev->vendor == PCI_VENDOR_ID_AMD)
                xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 
-       if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
-               ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
-               (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
-               (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
-               (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
-               xhci->quirks |= XHCI_U2_DISABLE_WAKE;
-
        if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                xhci->quirks |= XHCI_LPM_SUPPORT;
                xhci->quirks |= XHCI_INTEL_HOST;
index 163bafde709f79bf52e8e483d6ba6de43125cc48..1cb6eaef4ae14efcee09e5adfe0e913e4e54525c 100644 (file)
@@ -178,14 +178,18 @@ static int xhci_plat_probe(struct platform_device *pdev)
         * 2. xhci_plat is child of a device from firmware (dwc3-plat)
         * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
         */
-       sysdev = &pdev->dev;
-       if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node)
-               sysdev = sysdev->parent;
+       for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) {
+               if (is_of_node(sysdev->fwnode) ||
+                       is_acpi_device_node(sysdev->fwnode))
+                       break;
 #ifdef CONFIG_PCI
-       else if (sysdev->parent && sysdev->parent->parent &&
-                sysdev->parent->parent->bus == &pci_bus_type)
-               sysdev = sysdev->parent->parent;
+               else if (sysdev->bus == &pci_bus_type)
+                       break;
 #endif
+       }
+
+       if (!sysdev)
+               sysdev = &pdev->dev;
 
        /* Try to set 64-bit DMA first */
        if (WARN_ON(!sysdev->dma_mask))
index a9443651ce0f32c4fafff6cdd7659a187f9923cd..82c746e2d85c0f7148cf780d9614eba3654f303d 100644 (file)
@@ -1309,6 +1309,7 @@ static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
 void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
 {
        struct xhci_command *cur_cmd, *tmp_cmd;
+       xhci->current_cmd = NULL;
        list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
                xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
 }
@@ -2579,15 +2580,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                (struct xhci_generic_trb *) ep_trb);
 
                /*
-                * No-op TRB should not trigger interrupts.
-                * If ep_trb is a no-op TRB, it means the
-                * corresponding TD has been cancelled. Just ignore
-                * the TD.
+                * No-op TRB could trigger interrupts in a case where
+                * a URB was killed and a STALL_ERROR happens right
+                * after the endpoint ring stopped. Reset the halted
+                * endpoint. Otherwise, the endpoint remains stalled
+                * indefinitely.
                 */
                if (trb_is_noop(ep_trb)) {
-                       xhci_dbg(xhci,
-                                "ep_trb is a no-op TRB. Skip it for slot %u ep %u\n",
-                                slot_id, ep_index);
+                       if (trb_comp_code == COMP_STALL_ERROR ||
+                           xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
+                                                             trb_comp_code))
+                               xhci_cleanup_halted_endpoint(xhci, slot_id,
+                                                            ep_index,
+                                                            ep_ring->stream_id,
+                                                            td, ep_trb,
+                                                            EP_HARD_RESET);
                        goto cleanup;
                }
 
index b2ff1ff1a02faff066374cafdf9fa5b531db5b81..51535ba2bcd425d6c039d30ed4b63283c5d39240 100644 (file)
@@ -1703,7 +1703,8 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
        if (xhci->quirks & XHCI_MTK_HOST) {
                ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
                if (ret < 0) {
-                       xhci_free_endpoint_ring(xhci, virt_dev, ep_index);
+                       xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
+                       virt_dev->eps[ep_index].new_ring = NULL;
                        return ret;
                }
        }
@@ -4804,7 +4805,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
                 */
                hcd->has_tt = 1;
        } else {
-               if (xhci->sbrn == 0x31) {
+               /* Some 3.1 hosts return sbrn 0x30, can't rely on sbrn alone */
+               if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) {
                        xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
                        hcd->speed = HCD_USB31;
                        hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
index 2abaa4d6d39daa74ff379020171efaf34a7e9197..2b48aa4f6b767935c64bc66855e43195dce5b2b9 100644 (file)
@@ -735,6 +735,8 @@ struct xhci_ep_ctx {
 #define EP_MAXPSTREAMS(p)      (((p) << 10) & EP_MAXPSTREAMS_MASK)
 /* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
 #define        EP_HAS_LSA              (1 << 15)
+/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */
+#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p)  (((p) >> 24) & 0xff)
 
 /* ep_info2 bitmasks */
 /*
@@ -1681,7 +1683,7 @@ struct xhci_bus_state {
 
 static inline unsigned int hcd_index(struct usb_hcd *hcd)
 {
-       if (hcd->speed == HCD_USB3)
+       if (hcd->speed >= HCD_USB3)
                return 0;
        else
                return 1;
@@ -1826,7 +1828,7 @@ struct xhci_hcd {
 /* For controller with a broken Port Disable implementation */
 #define XHCI_BROKEN_PORT_PED   (1 << 25)
 #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
-#define XHCI_U2_DISABLE_WAKE   (1 << 27)
+/* Reserved. It was XHCI_U2_DISABLE_WAKE */
 #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL        (1 << 28)
 
        unsigned int            num_active_eps;
@@ -2540,8 +2542,8 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq,
        u8 lsa;
        u8 hid;
 
-       esit = EP_MAX_ESIT_PAYLOAD_HI(info) << 16 |
-               EP_MAX_ESIT_PAYLOAD_LO(tx_info);
+       esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
+               CTX_TO_MAX_ESIT_PAYLOAD(tx_info);
 
        ep_state = info & EP_STATE_MASK;
        max_pstr = info & EP_MAXPSTREAMS_MASK;
index eee82ca55b7b383bc507c39af7e64bb66a63c6a8..b3fc602b2e247ea150a49c13323ae66cde2b6b4b 100644 (file)
@@ -202,12 +202,13 @@ found:
                        return tmp;
        }
 
-       if (in) {
+       if (in)
                dev->in_pipe = usb_rcvbulkpipe(udev,
                        in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+       if (out)
                dev->out_pipe = usb_sndbulkpipe(udev,
                        out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
-       }
+
        if (iso_in) {
                dev->iso_in = &iso_in->desc;
                dev->in_iso_pipe = usb_rcvisocpipe(udev,
@@ -1964,6 +1965,9 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param,
        int                     status = 0;
        struct urb              *urbs[param->sglen];
 
+       if (!param->sglen || param->iterations > UINT_MAX / param->sglen)
+               return -EINVAL;
+
        memset(&context, 0, sizeof(context));
        context.count = param->iterations * param->sglen;
        context.dev = dev;
@@ -2087,6 +2091,8 @@ usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param)
 
        if (param->iterations <= 0)
                return -EINVAL;
+       if (param->sglen > MAX_SGLEN)
+               return -EINVAL;
        /*
         * Just a bunch of test cases that every HCD is expected to handle.
         *
index 029692053dd3a4e50d47742751d3697f6e4d7a1e..ff5a1a8989d5a5ad5ad599463d6d34533cf13f09 100644 (file)
@@ -906,7 +906,7 @@ b_host:
         */
        if (int_usb & MUSB_INTR_RESET) {
                handled = IRQ_HANDLED;
-               if (devctl & MUSB_DEVCTL_HM) {
+               if (is_host_active(musb)) {
                        /*
                         * When BABBLE happens what we can depends on which
                         * platform MUSB is running, because some platforms
@@ -916,9 +916,7 @@ b_host:
                         * drop the session.
                         */
                        dev_err(musb->controller, "Babble\n");
-
-                       if (is_host_active(musb))
-                               musb_recover_from_babble(musb);
+                       musb_recover_from_babble(musb);
                } else {
                        musb_dbg(musb, "BUS RESET as %s",
                                usb_otg_state_string(musb->xceiv->otg->state));
@@ -1861,22 +1859,22 @@ static void musb_pm_runtime_check_session(struct musb *musb)
                MUSB_DEVCTL_HR;
        switch (devctl & ~s) {
        case MUSB_QUIRK_B_INVALID_VBUS_91:
-               if (musb->quirk_retries--) {
+               if (musb->quirk_retries && !musb->flush_irq_work) {
                        musb_dbg(musb,
                                 "Poll devctl on invalid vbus, assume no session");
                        schedule_delayed_work(&musb->irq_work,
                                              msecs_to_jiffies(1000));
-
+                       musb->quirk_retries--;
                        return;
                }
                /* fall through */
        case MUSB_QUIRK_A_DISCONNECT_19:
-               if (musb->quirk_retries--) {
+               if (musb->quirk_retries && !musb->flush_irq_work) {
                        musb_dbg(musb,
                                 "Poll devctl on possible host mode disconnect");
                        schedule_delayed_work(&musb->irq_work,
                                              msecs_to_jiffies(1000));
-
+                       musb->quirk_retries--;
                        return;
                }
                if (!musb->session)
@@ -2681,8 +2679,15 @@ static int musb_suspend(struct device *dev)
 
        musb_platform_disable(musb);
        musb_disable_interrupts(musb);
+
+       musb->flush_irq_work = true;
+       while (flush_delayed_work(&musb->irq_work))
+               ;
+       musb->flush_irq_work = false;
+
        if (!(musb->io.quirks & MUSB_PRESERVE_SESSION))
                musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+
        WARN_ON(!list_empty(&musb->pending_list));
 
        spin_lock_irqsave(&musb->lock, flags);
index c748f4ac1154428428eee6853cd37c816a2ea910..20f4614178d98c3155f4a508fb12f594f2cd4b5d 100644 (file)
@@ -428,6 +428,8 @@ struct musb {
        unsigned                test_mode:1;
        unsigned                softconnect:1;
 
+       unsigned                flush_irq_work:1;
+
        u8                      address;
        u8                      test_mode_nr;
        u16                     ackpend;                /* ep0 */
index ba255280a624d6922b82d501bf96ce3933359f1d..1ec0a4947b6b887414afd633e540e68470650113 100644 (file)
 
 #define MUSB_DMA_NUM_CHANNELS 15
 
+#define DA8XX_USB_MODE         0x10
+#define DA8XX_USB_AUTOREQ      0x14
+#define DA8XX_USB_TEARDOWN     0x1c
+
+#define DA8XX_DMA_NUM_CHANNELS 4
+
 struct cppi41_dma_controller {
        struct dma_controller controller;
-       struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
-       struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
+       struct cppi41_dma_channel *rx_channel;
+       struct cppi41_dma_channel *tx_channel;
        struct hrtimer early_tx;
        struct list_head early_tx_list;
        u32 rx_mode;
        u32 tx_mode;
        u32 auto_req;
+
+       u32 tdown_reg;
+       u32 autoreq_reg;
+
+       void (*set_dma_mode)(struct cppi41_dma_channel *cppi41_channel,
+                            unsigned int mode);
+       u8 num_channels;
 };
 
 static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
@@ -349,6 +362,32 @@ static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
        }
 }
 
+static void da8xx_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
+               unsigned int mode)
+{
+       struct cppi41_dma_controller *controller = cppi41_channel->controller;
+       struct musb *musb = controller->controller.musb;
+       unsigned int shift;
+       u32 port;
+       u32 new_mode;
+       u32 old_mode;
+
+       old_mode = controller->tx_mode;
+       port = cppi41_channel->port_num;
+
+       shift = (port - 1) * 4;
+       if (!cppi41_channel->is_tx)
+               shift += 16;
+       new_mode = old_mode & ~(3 << shift);
+       new_mode |= mode << shift;
+
+       if (new_mode == old_mode)
+               return;
+       controller->tx_mode = new_mode;
+       musb_writel(musb->ctrl_base, DA8XX_USB_MODE, new_mode);
+}
+
+
 static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
                unsigned mode)
 {
@@ -364,8 +403,8 @@ static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
        if (new_mode == old_mode)
                return;
        controller->auto_req = new_mode;
-       musb_writel(controller->controller.musb->ctrl_base, USB_CTRL_AUTOREQ,
-                   new_mode);
+       musb_writel(controller->controller.musb->ctrl_base,
+                   controller->autoreq_reg, new_mode);
 }
 
 static bool cppi41_configure_channel(struct dma_channel *channel,
@@ -373,6 +412,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
                                dma_addr_t dma_addr, u32 len)
 {
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+       struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct dma_chan *dc = cppi41_channel->dc;
        struct dma_async_tx_descriptor *dma_desc;
        enum dma_transfer_direction direction;
@@ -398,7 +438,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
                        musb_writel(musb->ctrl_base,
                                RNDIS_REG(cppi41_channel->port_num), len);
                        /* gen rndis */
-                       cppi41_set_dma_mode(cppi41_channel,
+                       controller->set_dma_mode(cppi41_channel,
                                        EP_MODE_DMA_GEN_RNDIS);
 
                        /* auto req */
@@ -407,14 +447,15 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
                } else {
                        musb_writel(musb->ctrl_base,
                                        RNDIS_REG(cppi41_channel->port_num), 0);
-                       cppi41_set_dma_mode(cppi41_channel,
+                       controller->set_dma_mode(cppi41_channel,
                                        EP_MODE_DMA_TRANSPARENT);
                        cppi41_set_autoreq_mode(cppi41_channel,
                                        EP_MODE_AUTOREQ_NONE);
                }
        } else {
                /* fallback mode */
-               cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
+               controller->set_dma_mode(cppi41_channel,
+                               EP_MODE_DMA_TRANSPARENT);
                cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
                len = min_t(u32, packet_sz, len);
        }
@@ -445,7 +486,7 @@ static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
        struct cppi41_dma_channel *cppi41_channel = NULL;
        u8 ch_num = hw_ep->epnum - 1;
 
-       if (ch_num >= MUSB_DMA_NUM_CHANNELS)
+       if (ch_num >= controller->num_channels)
                return NULL;
 
        if (is_tx)
@@ -581,12 +622,13 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
 
        do {
                if (is_tx)
-                       musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+                       musb_writel(musb->ctrl_base, controller->tdown_reg,
+                                   tdbit);
                ret = dmaengine_terminate_all(cppi41_channel->dc);
        } while (ret == -EAGAIN);
 
        if (is_tx) {
-               musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+               musb_writel(musb->ctrl_base, controller->tdown_reg, tdbit);
 
                csr = musb_readw(epio, MUSB_TXCSR);
                if (csr & MUSB_TXCSR_TXPKTRDY) {
@@ -604,7 +646,7 @@ static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
        struct dma_chan *dc;
        int i;
 
-       for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
+       for (i = 0; i < ctrl->num_channels; i++) {
                dc = ctrl->tx_channel[i].dc;
                if (dc)
                        dma_release_channel(dc);
@@ -656,7 +698,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
                        goto err;
 
                ret = -EINVAL;
-               if (port > MUSB_DMA_NUM_CHANNELS || !port)
+               if (port > controller->num_channels || !port)
                        goto err;
                if (is_tx)
                        cppi41_channel = &controller->tx_channel[port - 1];
@@ -697,6 +739,8 @@ void cppi41_dma_controller_destroy(struct dma_controller *c)
 
        hrtimer_cancel(&controller->early_tx);
        cppi41_dma_controller_stop(controller);
+       kfree(controller->rx_channel);
+       kfree(controller->tx_channel);
        kfree(controller);
 }
 EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);
@@ -705,6 +749,7 @@ struct dma_controller *
 cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
 {
        struct cppi41_dma_controller *controller;
+       int channel_size;
        int ret = 0;
 
        if (!musb->controller->parent->of_node) {
@@ -727,12 +772,37 @@ cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
        controller->controller.is_compatible = cppi41_is_compatible;
        controller->controller.musb = musb;
 
+       if (musb->io.quirks & MUSB_DA8XX) {
+               controller->tdown_reg = DA8XX_USB_TEARDOWN;
+               controller->autoreq_reg = DA8XX_USB_AUTOREQ;
+               controller->set_dma_mode = da8xx_set_dma_mode;
+               controller->num_channels = DA8XX_DMA_NUM_CHANNELS;
+       } else {
+               controller->tdown_reg = USB_TDOWN;
+               controller->autoreq_reg = USB_CTRL_AUTOREQ;
+               controller->set_dma_mode = cppi41_set_dma_mode;
+               controller->num_channels = MUSB_DMA_NUM_CHANNELS;
+       }
+
+       channel_size = controller->num_channels *
+                       sizeof(struct cppi41_dma_channel);
+       controller->rx_channel = kzalloc(channel_size, GFP_KERNEL);
+       if (!controller->rx_channel)
+               goto rx_channel_alloc_fail;
+       controller->tx_channel = kzalloc(channel_size, GFP_KERNEL);
+       if (!controller->tx_channel)
+               goto tx_channel_alloc_fail;
+
        ret = cppi41_dma_controller_start(controller);
        if (ret)
                goto plat_get_fail;
        return &controller->controller;
 
 plat_get_fail:
+       kfree(controller->tx_channel);
+tx_channel_alloc_fail:
+       kfree(controller->rx_channel);
+rx_channel_alloc_fail:
        kfree(controller);
 kzalloc_fail:
        if (ret == -EPROBE_DEFER)
index c9a09b5bb6e59dca0a7cb7ee5516b333caf18466..dc353e24d53cb6993218dfc7bd1f566877ba851b 100644 (file)
@@ -297,6 +297,8 @@ static int sunxi_musb_exit(struct musb *musb)
        if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
                sunxi_sram_release(musb->controller->parent);
 
+       devm_usb_put_phy(glue->dev, glue->xceiv);
+
        return 0;
 }
 
index 5fe4a5704bde329caf492451be7fdb0593bb3548..ccc2bf5274b4ca3cbe45c8f6d2ad9a23210f1523 100644 (file)
@@ -329,6 +329,14 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
        unsigned long val;
        void __iomem *base = phy->regs;
 
+       /*
+        * The USB driver may have already initiated the phy clock
+        * disable so wait to see if the clock turns off and if not
+        * then proceed with gating the clock.
+        */
+       if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) == 0)
+               return;
+
        if (phy->is_legacy_phy) {
                val = readl(base + USB_SUSP_CTRL);
                val |= USB_SUSP_SET;
@@ -351,6 +359,15 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
        unsigned long val;
        void __iomem *base = phy->regs;
 
+       /*
+        * The USB driver may have already initiated the phy clock
+        * enable so wait to see if the clock turns on and if not
+        * then proceed with ungating the clock.
+        */
+       if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
+                              USB_PHY_CLK_VALID) == 0)
+               return;
+
        if (phy->is_legacy_phy) {
                val = readl(base + USB_SUSP_CTRL);
                val |= USB_SUSP_CLR;
index d1af831f43ebad07e86e1d62b0730eb0579ebb4c..50285b01da92ee0ee9ab60507800607ca207b8be 100644 (file)
@@ -282,11 +282,26 @@ static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
                              struct usbhs_fifo *fifo)
 {
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+       int ret = 0;
 
-       if (!usbhs_pipe_is_dcp(pipe))
-               usbhsf_fifo_barrier(priv, fifo);
+       if (!usbhs_pipe_is_dcp(pipe)) {
+               /*
+                * This driver checks the pipe condition first to avoid -EBUSY
+                * from usbhsf_fifo_barrier() with about 10 msec delay in
+                * the interrupt handler if the pipe is RX direction and empty.
+                */
+               if (usbhs_pipe_is_dir_in(pipe))
+                       ret = usbhs_pipe_is_accessible(pipe);
+               if (!ret)
+                       ret = usbhsf_fifo_barrier(priv, fifo);
+       }
 
-       usbhs_write(priv, fifo->ctr, BCLR);
+       /*
+        * if non-DCP pipe, this driver should set BCLR when
+        * usbhsf_fifo_barrier() returns 0.
+        */
+       if (!ret)
+               usbhs_write(priv, fifo->ctr, BCLR);
 }
 
 static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
@@ -842,9 +857,9 @@ static void xfer_work(struct work_struct *work)
                fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
 
        usbhs_pipe_running(pipe, 1);
-       usbhsf_dma_start(pipe, fifo);
        usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
        dma_async_issue_pending(chan);
+       usbhsf_dma_start(pipe, fifo);
        usbhs_pipe_enable(pipe);
 
 xfer_work_end:
index fdf89800ebc3fca8fd90ecac58c15cde813cb265..43a862a90a77592eda9eab2aa75be90a69981ffc 100644 (file)
@@ -186,6 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
        tty_kref_put(tty);
  reset_open_count:
        port->port.count = 0;
+       info->port = NULL;
        usb_autopm_put_interface(serial->interface);
  error_get_interface:
        usb_serial_put(serial);
@@ -265,7 +266,7 @@ static struct console usbcons = {
 
 void usb_serial_console_disconnect(struct usb_serial *serial)
 {
-       if (serial->port[0] == usbcons_info.port) {
+       if (serial->port[0] && serial->port[0] == usbcons_info.port) {
                usb_serial_console_exit();
                usb_serial_put(serial);
        }
index 2d945c9f975c04d5cd7909e00a37017a97e8062e..412f812522ee05a2627334015c08b481ea3a6722 100644 (file)
@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
        { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+       { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
        { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
        { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
        { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
@@ -352,6 +353,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
 #define CP210X_PARTNUM_CP2104  0x04
 #define CP210X_PARTNUM_CP2105  0x05
 #define CP210X_PARTNUM_CP2108  0x08
+#define CP210X_PARTNUM_UNKNOWN 0xFF
 
 /* CP210X_GET_COMM_STATUS returns these 0x13 bytes */
 struct cp210x_comm_status {
@@ -1491,8 +1493,11 @@ static int cp210x_attach(struct usb_serial *serial)
        result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
                                          CP210X_GET_PARTNUM, &priv->partnum,
                                          sizeof(priv->partnum));
-       if (result < 0)
-               goto err_free_priv;
+       if (result < 0) {
+               dev_warn(&serial->interface->dev,
+                        "querying part number failed\n");
+               priv->partnum = CP210X_PARTNUM_UNKNOWN;
+       }
 
        usb_set_serial_data(serial, priv);
 
@@ -1505,10 +1510,6 @@ static int cp210x_attach(struct usb_serial *serial)
        }
 
        return 0;
-err_free_priv:
-       kfree(priv);
-
-       return result;
 }
 
 static void cp210x_disconnect(struct usb_serial *serial)
index 1cec03799cdfbb60d369f9f454284110672f1a27..49d1b2d4606d3858c6b3c02104bd9dc16fa1f26f 100644 (file)
@@ -1015,6 +1015,8 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
        { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
+       { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
        { }                                     /* Terminating entry */
 };
 
index 4fcf1cecb6d721ad366666eec49774bcf9d00808..f9d15bd627855440c48bb01f224a852d12591425 100644 (file)
 #define ADI_GNICE_PID          0xF000
 #define ADI_GNICEPLUS_PID      0xF001
 
+/*
+ * Cypress WICED USB UART
+ */
+#define CYPRESS_VID                    0x04B4
+#define CYPRESS_WICED_BT_USB_PID       0x009B
+#define CYPRESS_WICED_WL_USB_PID       0xF900
+
 /*
  * Microchip Technology, Inc.
  *
index cc84da8dbb8495bc10bff34bcafd89a5960ec65e..14511d6a7d44db1e31fc216ac71ebc89a8c27837 100644 (file)
@@ -45,6 +45,7 @@ struct metrousb_private {
 static const struct usb_device_id id_table[] = {
        { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
        { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
+       { USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) },   /* MS7820 */
        { }, /* Terminating entry. */
 };
 MODULE_DEVICE_TABLE(usb, id_table);
index 54bfef13966ac0d0145223ddb88891ddbcb494df..ba672cf4e888cf1fe2116abc69d3c17ecf1ff385 100644 (file)
@@ -522,6 +522,7 @@ static void option_instat_callback(struct urb *urb);
 
 /* TP-LINK Incorporated products */
 #define TPLINK_VENDOR_ID                       0x2357
+#define TPLINK_PRODUCT_LTE                     0x000D
 #define TPLINK_PRODUCT_MA180                   0x0201
 
 /* Changhong products */
@@ -2011,6 +2012,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
        { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
        { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
+       { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) },      /* TP-Link LTE Module */
        { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000),                                 /* TP-Link MA260 */
index ebc0beea69d63efe165423799fd741214e99d15c..eb9928963a53c867c8d5707d895a3a0bc86637d8 100644 (file)
@@ -174,6 +174,10 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x413c, 0x81b3)},   /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
        {DEVICE_SWI(0x413c, 0x81b5)},   /* Dell Wireless 5811e QDL */
        {DEVICE_SWI(0x413c, 0x81b6)},   /* Dell Wireless 5811e QDL */
+       {DEVICE_SWI(0x413c, 0x81cf)},   /* Dell Wireless 5819 */
+       {DEVICE_SWI(0x413c, 0x81d0)},   /* Dell Wireless 5819 */
+       {DEVICE_SWI(0x413c, 0x81d1)},   /* Dell Wireless 5818 */
+       {DEVICE_SWI(0x413c, 0x81d2)},   /* Dell Wireless 5818 */
 
        /* Huawei devices */
        {DEVICE_HWI(0x03f0, 0x581d)},   /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
index 1a59f335b063e7e79f8ece2173f2f03574ce3a5b..a3ccb899df60c39dd537ae5d3664d937715d0338 100644 (file)
@@ -834,13 +834,25 @@ Retry_Sense:
                        if (result == USB_STOR_TRANSPORT_GOOD) {
                                srb->result = SAM_STAT_GOOD;
                                srb->sense_buffer[0] = 0x0;
+                       }
+
+                       /*
+                        * ATA-passthru commands use sense data to report
+                        * the command completion status, and often devices
+                        * return Check Condition status when nothing is
+                        * wrong.
+                        */
+                       else if (srb->cmnd[0] == ATA_16 ||
+                                       srb->cmnd[0] == ATA_12) {
+                               /* leave the data alone */
+                       }
 
                        /*
                         * If there was a problem, report an unspecified
                         * hardware error to prevent the higher layers from
                         * entering an infinite retry loop.
                         */
-                       else {
+                       else {
                                srb->result = DID_ERROR << 16;
                                if ((sshdr.response_code & 0x72) == 0x72)
                                        srb->sense_buffer[1] = HARDWARE_ERROR;
index f58caa9e6a27e6e1a7161a66e5e9a97698cc1e1a..a155cd02bce240db83742f0299ab8ea12a56e7a8 100644 (file)
@@ -9,7 +9,8 @@ static int uas_is_interface(struct usb_host_interface *intf)
                intf->desc.bInterfaceProtocol == USB_PR_UAS);
 }
 
-static int uas_find_uas_alt_setting(struct usb_interface *intf)
+static struct usb_host_interface *uas_find_uas_alt_setting(
+               struct usb_interface *intf)
 {
        int i;
 
@@ -17,10 +18,10 @@ static int uas_find_uas_alt_setting(struct usb_interface *intf)
                struct usb_host_interface *alt = &intf->altsetting[i];
 
                if (uas_is_interface(alt))
-                       return alt->desc.bAlternateSetting;
+                       return alt;
        }
 
-       return -ENODEV;
+       return NULL;
 }
 
 static int uas_find_endpoints(struct usb_host_interface *alt,
@@ -58,14 +59,14 @@ static int uas_use_uas_driver(struct usb_interface *intf,
        struct usb_device *udev = interface_to_usbdev(intf);
        struct usb_hcd *hcd = bus_to_hcd(udev->bus);
        unsigned long flags = id->driver_info;
-       int r, alt;
-
+       struct usb_host_interface *alt;
+       int r;
 
        alt = uas_find_uas_alt_setting(intf);
-       if (alt < 0)
+       if (!alt)
                return 0;
 
-       r = uas_find_endpoints(&intf->altsetting[alt], eps);
+       r = uas_find_endpoints(alt, eps);
        if (r < 0)
                return 0;
 
index cfb1e3bbd434715410e1f78914bb6ac3eb004b76..63cf981ed81cf8c30eadae32bc36c40499566bdc 100644 (file)
@@ -873,14 +873,14 @@ MODULE_DEVICE_TABLE(usb, uas_usb_ids);
 static int uas_switch_interface(struct usb_device *udev,
                                struct usb_interface *intf)
 {
-       int alt;
+       struct usb_host_interface *alt;
 
        alt = uas_find_uas_alt_setting(intf);
-       if (alt < 0)
-               return alt;
+       if (!alt)
+               return -ENODEV;
 
-       return usb_set_interface(udev,
-                       intf->altsetting[0].desc.bInterfaceNumber, alt);
+       return usb_set_interface(udev, alt->desc.bInterfaceNumber,
+                       alt->desc.bAlternateSetting);
 }
 
 static int uas_configure_endpoints(struct uas_dev_info *devinfo)
index 5a70c33ef0e0645d1c97a59021a6b400776dd473..eb06d88b41d6976b888317939bc2ae66a1394410 100644 (file)
@@ -1459,6 +1459,13 @@ UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_SANE_SENSE ),
 
+/* Reported by Kris Lindgren <kris.lindgren@gmail.com> */
+UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999,
+               "Seagate",
+               "External",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_WP_DETECT ),
+
 UNUSUAL_DEV(  0x0d49, 0x7310, 0x0000, 0x9999,
                "Maxtor",
                "USB to SATA",
index 35a1e777b4497ad0ad1cfa18db0834354732b128..9a53912bdfe9f73d4b7de6a1d3b74dd8a94dc8b6 100644 (file)
@@ -825,6 +825,8 @@ static int hwarc_probe(struct usb_interface *iface,
 
        if (iface->cur_altsetting->desc.bNumEndpoints < 1)
                return -ENODEV;
+       if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc))
+               return -ENODEV;
 
        result = -ENOMEM;
        uwb_rc = uwb_rc_alloc();
index 01c20a260a8b3f9d9a8ccc315d453f810eb3104c..39dd4ef53c77902db9f740ea3e88fd8bec5cb4ec 100644 (file)
@@ -302,18 +302,22 @@ static int uwbd(void *param)
 /** Start the UWB daemon */
 void uwbd_start(struct uwb_rc *rc)
 {
-       rc->uwbd.task = kthread_run(uwbd, rc, "uwbd");
-       if (rc->uwbd.task == NULL)
+       struct task_struct *task = kthread_run(uwbd, rc, "uwbd");
+       if (IS_ERR(task)) {
+               rc->uwbd.task = NULL;
                printk(KERN_ERR "UWB: Cannot start management daemon; "
                       "UWB won't work\n");
-       else
+       } else {
+               rc->uwbd.task = task;
                rc->uwbd.pid = rc->uwbd.task->pid;
+       }
 }
 
 /* Stop the UWB daemon and free any unprocessed events */
 void uwbd_stop(struct uwb_rc *rc)
 {
-       kthread_stop(rc->uwbd.task);
+       if (rc->uwbd.task)
+               kthread_stop(rc->uwbd.task);
        uwbd_flush(rc);
 }
 
index 82360594fa8e49bcbad179a6bb349564286cf203..57efbd3b053b37ca43816483dd3364c3c48a761e 100644 (file)
@@ -1024,6 +1024,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
        mutex_unlock(&priv->lock);
 
        if (use_ptemod) {
+               map->pages_vm_start = vma->vm_start;
                err = apply_to_page_range(vma->vm_mm, vma->vm_start,
                                          vma->vm_end - vma->vm_start,
                                          find_grant_ptes, map);
@@ -1061,7 +1062,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
                                            set_grant_ptes_as_special, NULL);
                }
 #endif
-               map->pages_vm_start = vma->vm_start;
        }
 
        return 0;
index e89136ab851e30c1aff97893615f72f2d5bf3cd7..b437fccd4e624e3b7a8972245e9e992fe77c2993 100644 (file)
@@ -57,7 +57,7 @@ static int register_balloon(struct device *dev);
 static void watch_target(struct xenbus_watch *watch,
                         const char *path, const char *token)
 {
-       unsigned long long new_target;
+       unsigned long long new_target, static_max;
        int err;
        static bool watch_fired;
        static long target_diff;
@@ -72,13 +72,20 @@ static void watch_target(struct xenbus_watch *watch,
         * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
         */
        new_target >>= PAGE_SHIFT - 10;
-       if (watch_fired) {
-               balloon_set_new_target(new_target - target_diff);
-               return;
+
+       if (!watch_fired) {
+               watch_fired = true;
+               err = xenbus_scanf(XBT_NIL, "memory", "static-max", "%llu",
+                                  &static_max);
+               if (err != 1)
+                       static_max = new_target;
+               else
+                       static_max >>= PAGE_SHIFT - 10;
+               target_diff = xen_pv_domain() ? 0
+                               : static_max - balloon_stats.target_pages;
        }
 
-       watch_fired = true;
-       target_diff = new_target - balloon_stats.target_pages;
+       balloon_set_new_target(new_target - target_diff);
 }
 static struct xenbus_watch target_watch = {
        .node = "memory/target",
index 5fbfd9cfb6d63e61c1c89e6a9deb071ce6de2d32..5b3d57fc82d39bc8fdbcfb89582589a2be854357 100644 (file)
@@ -169,6 +169,9 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
 static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
 {
        struct pci_bar_info *bar = data;
+       unsigned int pos = (offset - PCI_BASE_ADDRESS_0) / 4;
+       const struct resource *res = dev->resource;
+       u32 mask;
 
        if (unlikely(!bar)) {
                pr_warn(DRV_NAME ": driver data not found for %s\n",
@@ -179,7 +182,13 @@ static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
        /* A write to obtain the length must happen as a 32-bit write.
         * This does not (yet) support writing individual bytes
         */
-       if (value == ~0)
+       if (res[pos].flags & IORESOURCE_IO)
+               mask = ~PCI_BASE_ADDRESS_IO_MASK;
+       else if (pos && (res[pos - 1].flags & IORESOURCE_MEM_64))
+               mask = 0;
+       else
+               mask = ~PCI_BASE_ADDRESS_MEM_MASK;
+       if ((value | mask) == ~0U)
                bar->which = 1;
        else {
                u32 tmpval;
index 82a8866758ee0d5ac235430059f74a7bb56b09f8..a1c17000129ba1cb4465df0bc679a0756f8937a3 100644 (file)
@@ -519,64 +519,6 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
        return err;
 }
 
-static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
-                                    grant_ref_t *gnt_refs,
-                                    unsigned int nr_grefs,
-                                    void **vaddr)
-{
-       struct xenbus_map_node *node;
-       struct vm_struct *area;
-       pte_t *ptes[XENBUS_MAX_RING_GRANTS];
-       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
-       int err = GNTST_okay;
-       int i;
-       bool leaked;
-
-       *vaddr = NULL;
-
-       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-               return -EINVAL;
-
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (!node)
-               return -ENOMEM;
-
-       area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
-       if (!area) {
-               kfree(node);
-               return -ENOMEM;
-       }
-
-       for (i = 0; i < nr_grefs; i++)
-               phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
-
-       err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
-                               phys_addrs,
-                               GNTMAP_host_map | GNTMAP_contains_pte,
-                               &leaked);
-       if (err)
-               goto failed;
-
-       node->nr_handles = nr_grefs;
-       node->pv.area = area;
-
-       spin_lock(&xenbus_valloc_lock);
-       list_add(&node->next, &xenbus_valloc_pages);
-       spin_unlock(&xenbus_valloc_lock);
-
-       *vaddr = area->addr;
-       return 0;
-
-failed:
-       if (!leaked)
-               free_vm_area(area);
-       else
-               pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
-
-       kfree(node);
-       return err;
-}
-
 struct map_ring_valloc_hvm
 {
        unsigned int idx;
@@ -725,6 +667,65 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 }
 EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 
+#ifdef CONFIG_XEN_PV
+static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
+                                    grant_ref_t *gnt_refs,
+                                    unsigned int nr_grefs,
+                                    void **vaddr)
+{
+       struct xenbus_map_node *node;
+       struct vm_struct *area;
+       pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+       int err = GNTST_okay;
+       int i;
+       bool leaked;
+
+       *vaddr = NULL;
+
+       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+               return -EINVAL;
+
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (!node)
+               return -ENOMEM;
+
+       area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
+       if (!area) {
+               kfree(node);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < nr_grefs; i++)
+               phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
+
+       err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
+                               phys_addrs,
+                               GNTMAP_host_map | GNTMAP_contains_pte,
+                               &leaked);
+       if (err)
+               goto failed;
+
+       node->nr_handles = nr_grefs;
+       node->pv.area = area;
+
+       spin_lock(&xenbus_valloc_lock);
+       list_add(&node->next, &xenbus_valloc_pages);
+       spin_unlock(&xenbus_valloc_lock);
+
+       *vaddr = area->addr;
+       return 0;
+
+failed:
+       if (!leaked)
+               free_vm_area(area);
+       else
+               pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
+
+       kfree(node);
+       return err;
+}
+
 static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
 {
        struct xenbus_map_node *node;
@@ -788,6 +789,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
        return err;
 }
 
+static const struct xenbus_ring_ops ring_ops_pv = {
+       .map = xenbus_map_ring_valloc_pv,
+       .unmap = xenbus_unmap_ring_vfree_pv,
+};
+#endif
+
 struct unmap_ring_vfree_hvm
 {
        unsigned int idx;
@@ -916,11 +923,6 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
 }
 EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
 
-static const struct xenbus_ring_ops ring_ops_pv = {
-       .map = xenbus_map_ring_valloc_pv,
-       .unmap = xenbus_unmap_ring_vfree_pv,
-};
-
 static const struct xenbus_ring_ops ring_ops_hvm = {
        .map = xenbus_map_ring_valloc_hvm,
        .unmap = xenbus_unmap_ring_vfree_hvm,
@@ -928,8 +930,10 @@ static const struct xenbus_ring_ops ring_ops_hvm = {
 
 void __init xenbus_ring_ops_init(void)
 {
+#ifdef CONFIG_XEN_PV
        if (!xen_feature(XENFEAT_auto_translated_physmap))
                ring_ops = &ring_ops_pv;
        else
+#endif
                ring_ops = &ring_ops_hvm;
 }
index adaf6f6dd858cb9b0a6b0077782d068b3d024e0b..e1cbdfdb7c684fd24fdb6f25ee03f4e253e9ef58 100644 (file)
@@ -310,9 +310,13 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
 
        p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
 
-       if (unlikely(copied < len && !PageUptodate(page))) {
-               copied = 0;
-               goto out;
+       if (!PageUptodate(page)) {
+               if (unlikely(copied < len)) {
+                       copied = 0;
+                       goto out;
+               } else if (len == PAGE_SIZE) {
+                       SetPageUptodate(page);
+               }
        }
        /*
         * No need to use i_size_read() here, the i_size
index ce7181ea60fa9af516e2a8774fd1fc2fac38274a..a7c5a9861bef5a73b8f7377dfa9e045427b915c7 100644 (file)
@@ -54,7 +54,7 @@ typedef struct {
        int size;                       /* size of magic/mask */
        char *magic;                    /* magic or filename extension */
        char *mask;                     /* mask, NULL for exact match */
-       char *interpreter;              /* filename of interpreter */
+       const char *interpreter;        /* filename of interpreter */
        char *name;
        struct dentry *dentry;
        struct file *interp_file;
@@ -131,27 +131,26 @@ static int load_misc_binary(struct linux_binprm *bprm)
 {
        Node *fmt;
        struct file *interp_file = NULL;
-       char iname[BINPRM_BUF_SIZE];
-       const char *iname_addr = iname;
        int retval;
        int fd_binary = -1;
 
        retval = -ENOEXEC;
        if (!enabled)
-               goto ret;
+               return retval;
 
        /* to keep locking time low, we copy the interpreter string */
        read_lock(&entries_lock);
        fmt = check_file(bprm);
        if (fmt)
-               strlcpy(iname, fmt->interpreter, BINPRM_BUF_SIZE);
+               dget(fmt->dentry);
        read_unlock(&entries_lock);
        if (!fmt)
-               goto ret;
+               return retval;
 
        /* Need to be able to load the file after exec */
+       retval = -ENOENT;
        if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
-               return -ENOENT;
+               goto ret;
 
        if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) {
                retval = remove_arg_zero(bprm);
@@ -195,22 +194,22 @@ static int load_misc_binary(struct linux_binprm *bprm)
        bprm->argc++;
 
        /* add the interp as argv[0] */
-       retval = copy_strings_kernel(1, &iname_addr, bprm);
+       retval = copy_strings_kernel(1, &fmt->interpreter, bprm);
        if (retval < 0)
                goto error;
        bprm->argc++;
 
        /* Update interp in case binfmt_script needs it. */
-       retval = bprm_change_interp(iname, bprm);
+       retval = bprm_change_interp(fmt->interpreter, bprm);
        if (retval < 0)
                goto error;
 
-       if (fmt->flags & MISC_FMT_OPEN_FILE && fmt->interp_file) {
+       if (fmt->flags & MISC_FMT_OPEN_FILE) {
                interp_file = filp_clone_open(fmt->interp_file);
                if (!IS_ERR(interp_file))
                        deny_write_access(interp_file);
        } else {
-               interp_file = open_exec(iname);
+               interp_file = open_exec(fmt->interpreter);
        }
        retval = PTR_ERR(interp_file);
        if (IS_ERR(interp_file))
@@ -238,6 +237,7 @@ static int load_misc_binary(struct linux_binprm *bprm)
                goto error;
 
 ret:
+       dput(fmt->dentry);
        return retval;
 error:
        if (fd_binary > 0)
@@ -594,8 +594,13 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
 
 static void bm_evict_inode(struct inode *inode)
 {
+       Node *e = inode->i_private;
+
+       if (e && e->flags & MISC_FMT_OPEN_FILE)
+               filp_close(e->interp_file, NULL);
+
        clear_inode(inode);
-       kfree(inode->i_private);
+       kfree(e);
 }
 
 static void kill_node(Node *e)
@@ -603,24 +608,14 @@ static void kill_node(Node *e)
        struct dentry *dentry;
 
        write_lock(&entries_lock);
-       dentry = e->dentry;
-       if (dentry) {
-               list_del_init(&e->list);
-               e->dentry = NULL;
-       }
+       list_del_init(&e->list);
        write_unlock(&entries_lock);
 
-       if ((e->flags & MISC_FMT_OPEN_FILE) && e->interp_file) {
-               filp_close(e->interp_file, NULL);
-               e->interp_file = NULL;
-       }
-
-       if (dentry) {
-               drop_nlink(d_inode(dentry));
-               d_drop(dentry);
-               dput(dentry);
-               simple_release_fs(&bm_mnt, &entry_count);
-       }
+       dentry = e->dentry;
+       drop_nlink(d_inode(dentry));
+       d_drop(dentry);
+       dput(dentry);
+       simple_release_fs(&bm_mnt, &entry_count);
 }
 
 /* /<entry> */
@@ -665,7 +660,8 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
                root = file_inode(file)->i_sb->s_root;
                inode_lock(d_inode(root));
 
-               kill_node(e);
+               if (!list_empty(&e->list))
+                       kill_node(e);
 
                inode_unlock(d_inode(root));
                break;
@@ -794,7 +790,7 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer,
                inode_lock(d_inode(root));
 
                while (!list_empty(&entries))
-                       kill_node(list_entry(entries.next, Node, list));
+                       kill_node(list_first_entry(&entries, Node, list));
 
                inode_unlock(d_inode(root));
                break;
index afdf4e3cafc2aa5f1c1ff1dc0e8cfb256c6a3b89..7cde3f46ad263ab084aafaefa14161902f33d4f1 100644 (file)
@@ -19,7 +19,6 @@ static int load_script(struct linux_binprm *bprm)
        const char *i_arg, *i_name;
        char *cp;
        struct file *file;
-       char interp[BINPRM_BUF_SIZE];
        int retval;
 
        if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
@@ -55,7 +54,7 @@ static int load_script(struct linux_binprm *bprm)
                        break;
        }
        for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++);
-       if (*cp == '\0') 
+       if (*cp == '\0')
                return -ENOEXEC; /* No interpreter name found */
        i_name = cp;
        i_arg = NULL;
@@ -65,7 +64,6 @@ static int load_script(struct linux_binprm *bprm)
                *cp++ = '\0';
        if (*cp)
                i_arg = cp;
-       strcpy (interp, i_name);
        /*
         * OK, we've parsed out the interpreter name and
         * (optional) argument.
@@ -80,24 +78,27 @@ static int load_script(struct linux_binprm *bprm)
        if (retval)
                return retval;
        retval = copy_strings_kernel(1, &bprm->interp, bprm);
-       if (retval < 0) return retval; 
+       if (retval < 0)
+               return retval;
        bprm->argc++;
        if (i_arg) {
                retval = copy_strings_kernel(1, &i_arg, bprm);
-               if (retval < 0) return retval; 
+               if (retval < 0)
+                       return retval;
                bprm->argc++;
        }
        retval = copy_strings_kernel(1, &i_name, bprm);
-       if (retval) return retval; 
+       if (retval)
+               return retval;
        bprm->argc++;
-       retval = bprm_change_interp(interp, bprm);
+       retval = bprm_change_interp(i_name, bprm);
        if (retval < 0)
                return retval;
 
        /*
         * OK, now restart the process with the interpreter's dentry.
         */
-       file = open_exec(interp);
+       file = open_exec(i_name);
        if (IS_ERR(file))
                return PTR_ERR(file);
 
index 93d088ffc05c679c7f0f35b847eb1d4341fc3761..789f55e851aeffb6b1212403188638d12a1d2540 100644 (file)
@@ -716,10 +716,12 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
 
        set_page_writeback(page);
        result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
-       if (result)
+       if (result) {
                end_page_writeback(page);
-       else
+       } else {
+               clean_page_buffers(page);
                unlock_page(page);
+       }
        blk_queue_exit(bdev->bd_queue);
        return result;
 }
index b51d23f5cafa9eff1cfc9b1936817095a1b3b5e0..280384bf34f13b20b7a0817a36fe49a53d618285 100644 (file)
@@ -107,7 +107,8 @@ static void end_compressed_bio_read(struct bio *bio)
        struct inode *inode;
        struct page *page;
        unsigned long index;
-       int ret;
+       unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
+       int ret = 0;
 
        if (bio->bi_status)
                cb->errors = 1;
@@ -118,6 +119,21 @@ static void end_compressed_bio_read(struct bio *bio)
        if (!refcount_dec_and_test(&cb->pending_bios))
                goto out;
 
+       /*
+        * Record the correct mirror_num in cb->orig_bio so that
+        * read-repair can work properly.
+        */
+       ASSERT(btrfs_io_bio(cb->orig_bio));
+       btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
+       cb->mirror_num = mirror;
+
+       /*
+        * Some IO in this cb have failed, just skip checksum as there
+        * is no way it could be correct.
+        */
+       if (cb->errors == 1)
+               goto csum_failed;
+
        inode = cb->inode;
        ret = check_compressed_csum(BTRFS_I(inode), cb,
                                    (u64)bio->bi_iter.bi_sector << 9);
index 5a8933da39a75f504c5bb0329a2b62508be40042..8fc690384c585779a9a6838b08228b41468a49b6 100644 (file)
@@ -709,7 +709,6 @@ struct btrfs_delayed_root;
 #define BTRFS_FS_OPEN                          5
 #define BTRFS_FS_QUOTA_ENABLED                 6
 #define BTRFS_FS_QUOTA_ENABLING                        7
-#define BTRFS_FS_QUOTA_DISABLING               8
 #define BTRFS_FS_UPDATE_UUID_TREE_GEN          9
 #define BTRFS_FS_CREATING_FREE_SPACE_TREE      10
 #define BTRFS_FS_BTREE_ERR                     11
@@ -723,7 +722,7 @@ struct btrfs_delayed_root;
  * Indicate that a whole-filesystem exclusive operation is running
  * (device replace, resize, device add/delete, balance)
  */
-#define BTRFS_FS_EXCL_OP                       14
+#define BTRFS_FS_EXCL_OP                       16
 
 struct btrfs_fs_info {
        u8 fsid[BTRFS_FSID_SIZE];
index 487bbe4fb3c6e7a92156aec7060dd84e3c2525a0..dfdab849037b70887e9a61d5b1044a313658d437 100644 (file)
@@ -3643,7 +3643,14 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
        u64 flags;
 
        do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
-       backup_super_roots(fs_info);
+
+       /*
+        * max_mirrors == 0 indicates we're from commit_transaction,
+        * not from fsync where the tree roots in fs_info have not
+        * been consistent on disk.
+        */
+       if (max_mirrors == 0)
+               backup_super_roots(fs_info);
 
        sb = fs_info->super_for_commit;
        dev_item = &sb->dev_item;
index 3e5bb0cdd3cdd46dbc92273d370d9593963820e7..970190cd347edd5878479618222dc814b4736573 100644 (file)
@@ -2801,7 +2801,7 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
                }
        }
 
-       bio = btrfs_bio_alloc(bdev, sector << 9);
+       bio = btrfs_bio_alloc(bdev, (u64)sector << 9);
        bio_add_page(bio, page, page_size, offset);
        bio->bi_end_io = end_io_func;
        bio->bi_private = tree;
@@ -3471,8 +3471,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
        unsigned int write_flags = 0;
        unsigned long nr_written = 0;
 
-       if (wbc->sync_mode == WB_SYNC_ALL)
-               write_flags = REQ_SYNC;
+       write_flags = wbc_to_write_flags(wbc);
 
        trace___extent_writepage(page, inode, wbc);
 
@@ -3718,7 +3717,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
        unsigned long i, num_pages;
        unsigned long bio_flags = 0;
        unsigned long start, end;
-       unsigned int write_flags = (epd->sync_io ? REQ_SYNC : 0) | REQ_META;
+       unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
        int ret = 0;
 
        clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
@@ -4063,9 +4062,6 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
        if (epd->bio) {
                int ret;
 
-               bio_set_op_attrs(epd->bio, REQ_OP_WRITE,
-                                epd->sync_io ? REQ_SYNC : 0);
-
                ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
                BUG_ON(ret < 0); /* -ENOMEM */
                epd->bio = NULL;
index 128f3e58634f4a6821b4cf18547db075cfbe03d5..d94e3f68b9b134f8e3027dec70f1952c42e8ce52 100644 (file)
@@ -135,6 +135,18 @@ static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
                                                 const u64 offset,
                                                 const u64 bytes)
 {
+       unsigned long index = offset >> PAGE_SHIFT;
+       unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
+       struct page *page;
+
+       while (index <= end_index) {
+               page = find_get_page(inode->i_mapping, index);
+               index++;
+               if (!page)
+                       continue;
+               ClearPagePrivate2(page);
+               put_page(page);
+       }
        return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
                                            bytes - PAGE_SIZE, false);
 }
@@ -8357,11 +8369,8 @@ static void btrfs_endio_direct_read(struct bio *bio)
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
        blk_status_t err = bio->bi_status;
 
-       if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) {
+       if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
                err = btrfs_subio_endio_read(inode, io_bio, err);
-               if (!err)
-                       bio->bi_status = 0;
-       }
 
        unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
                      dip->logical_offset + dip->bytes - 1);
@@ -8369,7 +8378,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
 
        kfree(dip);
 
-       dio_bio->bi_status = bio->bi_status;
+       dio_bio->bi_status = err;
        dio_end_io(dio_bio);
 
        if (io_bio->end_io)
@@ -8387,6 +8396,7 @@ static void __endio_write_update_ordered(struct inode *inode,
        btrfs_work_func_t func;
        u64 ordered_offset = offset;
        u64 ordered_bytes = bytes;
+       u64 last_offset;
        int ret;
 
        if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
@@ -8398,6 +8408,7 @@ static void __endio_write_update_ordered(struct inode *inode,
        }
 
 again:
+       last_offset = ordered_offset;
        ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
                                                   &ordered_offset,
                                                   ordered_bytes,
@@ -8408,6 +8419,12 @@ again:
        btrfs_init_work(&ordered->work, func, finish_ordered_fn, NULL, NULL);
        btrfs_queue_work(wq, &ordered->work);
 out_test:
+       /*
+        * If btrfs_dec_test_ordered_pending does not find any ordered extent
+        * in the range, we can exit.
+        */
+       if (ordered_offset == last_offset)
+               return;
        /*
         * our bio might span multiple ordered extents.  If we haven't
         * completed the accounting for the whole dio, go back and try again
index d6715c2bcdc472d567bab9fd617e808fbcbd480f..6c7a49faf4e0636e67c9f818893975dc48d7436a 100644 (file)
@@ -2773,9 +2773,9 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
        }
        mutex_unlock(&fs_devices->device_list_mutex);
 
-       fi_args->nodesize = fs_info->super_copy->nodesize;
-       fi_args->sectorsize = fs_info->super_copy->sectorsize;
-       fi_args->clone_alignment = fs_info->super_copy->sectorsize;
+       fi_args->nodesize = fs_info->nodesize;
+       fi_args->sectorsize = fs_info->sectorsize;
+       fi_args->clone_alignment = fs_info->sectorsize;
 
        if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
                ret = -EFAULT;
@@ -3032,7 +3032,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
 out:
        if (ret)
                btrfs_cmp_data_free(cmp);
-       return 0;
+       return ret;
 }
 
 static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp)
@@ -4061,6 +4061,10 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
                ret = PTR_ERR(new_root);
                goto out;
        }
+       if (!is_fstree(new_root->objectid)) {
+               ret = -ENOENT;
+               goto out;
+       }
 
        path = btrfs_alloc_path();
        if (!path) {
index 5c8b61c86e61f9ed5445f5022a3b20f1134068bf..e172d4843eae2d8eb6f0d29dce38fb7f693f4ed0 100644 (file)
@@ -807,7 +807,6 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
        }
        ret = 0;
 out:
-       set_bit(BTRFS_FS_QUOTA_DISABLING, &root->fs_info->flags);
        btrfs_free_path(path);
        return ret;
 }
@@ -953,7 +952,6 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
        if (!fs_info->quota_root)
                goto out;
        clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
-       set_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags);
        btrfs_qgroup_wait_for_completion(fs_info, false);
        spin_lock(&fs_info->qgroup_lock);
        quota_root = fs_info->quota_root;
@@ -1307,6 +1305,8 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
                }
        }
        ret = del_qgroup_item(trans, quota_root, qgroupid);
+       if (ret && ret != -ENOENT)
+               goto out;
 
        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
@@ -2086,8 +2086,6 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
 
        if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
                set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
-       if (test_and_clear_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags))
-               clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
 
        spin_lock(&fs_info->qgroup_lock);
        while (!list_empty(&fs_info->dirty_qgroups)) {
index 3a49a3c2fca4549a8e6fd19cf4ec9de119aa0a5d..9841faef08ea72d0b50aa3ad3c881bd27bd7fc76 100644 (file)
@@ -2400,11 +2400,11 @@ void free_reloc_roots(struct list_head *list)
        while (!list_empty(list)) {
                reloc_root = list_entry(list->next, struct btrfs_root,
                                        root_list);
+               __del_reloc_root(reloc_root);
                free_extent_buffer(reloc_root->node);
                free_extent_buffer(reloc_root->commit_root);
                reloc_root->node = NULL;
                reloc_root->commit_root = NULL;
-               __del_reloc_root(reloc_root);
        }
 }
 
index 32b043ef8ac9a3e25f492e99cd4fe4ef347205cf..8fd195cfe81b97b92412b95ac645188eb9a5a693 100644 (file)
@@ -2630,7 +2630,7 @@ static int send_create_inode(struct send_ctx *sctx, u64 ino)
        } else {
                btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
                                (int)(mode & S_IFMT));
-               ret = -ENOTSUPP;
+               ret = -EOPNOTSUPP;
                goto out;
        }
 
index 35a128acfbd157b302386f9890d2a1de160c7be7..161694b660385a3ecedd758c964c1690d3e39386 100644 (file)
@@ -1135,7 +1135,7 @@ static int btrfs_fill_super(struct super_block *sb,
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
        sb->s_flags |= MS_POSIXACL;
 #endif
-       sb->s_flags |= MS_I_VERSION;
+       sb->s_flags |= SB_I_VERSION;
        sb->s_iflags |= SB_I_CGROUPWB;
 
        err = super_setup_bdi(sb);
index ad7f4bab640be36d9f43b8b2f3371a7bd871738f..c800d067fcbf0c8eec90c479eefe5df13e2dc945 100644 (file)
@@ -4181,6 +4181,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
        struct extent_map *em, *n;
        struct list_head extents;
        struct extent_map_tree *tree = &inode->extent_tree;
+       u64 logged_start, logged_end;
        u64 test_gen;
        int ret = 0;
        int num = 0;
@@ -4190,10 +4191,11 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
        down_write(&inode->dio_sem);
        write_lock(&tree->lock);
        test_gen = root->fs_info->last_trans_committed;
+       logged_start = start;
+       logged_end = end;
 
        list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
                list_del_init(&em->list);
-
                /*
                 * Just an arbitrary number, this can be really CPU intensive
                 * once we start getting a lot of extents, and really once we
@@ -4208,6 +4210,12 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
 
                if (em->generation <= test_gen)
                        continue;
+
+               if (em->start < logged_start)
+                       logged_start = em->start;
+               if ((em->start + em->len - 1) > logged_end)
+                       logged_end = em->start + em->len - 1;
+
                /* Need a ref to keep it from getting evicted from cache */
                refcount_inc(&em->refs);
                set_bit(EXTENT_FLAG_LOGGING, &em->flags);
@@ -4216,7 +4224,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
        }
 
        list_sort(NULL, &extents, extent_cmp);
-       btrfs_get_logged_extents(inode, logged_list, start, end);
+       btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end);
        /*
         * Some ordered extents started by fsync might have completed
         * before we could collect them into the list logged_list, which
index 0e8f16c305df192b412b86be4a69acb1a2236243..b39737568c223c208d92b2f4ab73ac0263ad3ff4 100644 (file)
@@ -6166,7 +6166,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
        map_length = length;
 
        btrfs_bio_counter_inc_blocked(fs_info);
-       ret = __btrfs_map_block(fs_info, bio_op(bio), logical,
+       ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
                                &map_length, &bbio, mirror_num, 1);
        if (ret) {
                btrfs_bio_counter_dec(fs_info);
index 157fe59fbabe5c9f5608d4e186154c8372643d6f..1978a8cb1cb1cf82b79f3a57ba9163c2343c94af 100644 (file)
@@ -1991,6 +1991,7 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
 retry:
        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
+               spin_unlock(&ci->i_ceph_lock);
                dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
                goto out;
        }
@@ -2008,8 +2009,10 @@ retry:
                        mutex_lock(&session->s_mutex);
                        goto retry;
                }
-               if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
+               if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
+                       spin_unlock(&ci->i_ceph_lock);
                        goto out;
+               }
 
                flushing = __mark_caps_flushing(inode, session, true,
                                                &flush_tid, &oldest_flush_tid);
index 9dd6b836ac9e5b91d60a439995695d72c06c97e5..f23c820daaedc6e460982d80d55a653a2379debf 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/sched.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
-#include <linux/utsname.h>
 #include <linux/ratelimit.h>
 
 #include "super.h"
@@ -735,12 +734,13 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                        inode = req->r_inode;
                        ihold(inode);
                } else {
-                       /* req->r_dentry is non-null for LSSNAP request.
-                        * fall-thru */
-                       WARN_ON_ONCE(!req->r_dentry);
+                       /* req->r_dentry is non-null for LSSNAP request */
+                       rcu_read_lock();
+                       inode = get_nonsnap_parent(req->r_dentry);
+                       rcu_read_unlock();
+                       dout("__choose_mds using snapdir's parent %p\n", inode);
                }
-       }
-       if (!inode && req->r_dentry) {
+       } else if (req->r_dentry) {
                /* ignore race with rename; old or new d_parent is okay */
                struct dentry *parent;
                struct inode *dir;
@@ -884,8 +884,8 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
        void *p;
 
        const char* metadata[][2] = {
-               {"hostname", utsname()->nodename},
-               {"kernel_version", utsname()->release},
+               {"hostname", mdsc->nodename},
+               {"kernel_version", init_utsname()->release},
                {"entity_id", opt->name ? : ""},
                {"root", fsopt->server_path ? : "/"},
                {NULL, NULL}
@@ -3539,6 +3539,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
        init_rwsem(&mdsc->pool_perm_rwsem);
        mdsc->pool_perm_tree = RB_ROOT;
 
+       strncpy(mdsc->nodename, utsname()->nodename,
+               sizeof(mdsc->nodename) - 1);
        return 0;
 }
 
index db57ae98ed345e280358a2ac65e75ac6c71e7c2b..636d6b2ec49cbab44954dc8e3e7fb93616e6756d 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/refcount.h>
+#include <linux/utsname.h>
 
 #include <linux/ceph/types.h>
 #include <linux/ceph/messenger.h>
@@ -368,6 +369,8 @@ struct ceph_mds_client {
 
        struct rw_semaphore     pool_perm_rwsem;
        struct rb_root          pool_perm_tree;
+
+       char nodename[__NEW_UTS_LEN + 1];
 };
 
 extern const char *ceph_mds_op_name(int op);
index 1ffc8b426c1c4c9f444c5eda57cc803ca5016783..7fc0b850c35279646a6eb48a595870a2ce9d6cfd 100644 (file)
@@ -374,12 +374,10 @@ static int build_snap_context(struct ceph_snap_realm *realm,
             realm->ino, realm, snapc, snapc->seq,
             (unsigned int) snapc->num_snaps);
 
-       if (realm->cached_context) {
-               ceph_put_snap_context(realm->cached_context);
-               /* queue realm for cap_snap creation */
-               list_add_tail(&realm->dirty_item, dirty_realms);
-       }
+       ceph_put_snap_context(realm->cached_context);
        realm->cached_context = snapc;
+       /* queue realm for cap_snap creation */
+       list_add_tail(&realm->dirty_item, dirty_realms);
        return 0;
 
 fail:
index f7243617316c072b71817fe951532c758b8de064..d5b2e12b5d02212046f0a7b816550737cc3b45af 100644 (file)
@@ -5,9 +5,14 @@ config CIFS
        select CRYPTO
        select CRYPTO_MD4
        select CRYPTO_MD5
+       select CRYPTO_SHA256
+       select CRYPTO_CMAC
        select CRYPTO_HMAC
        select CRYPTO_ARC4
+       select CRYPTO_AEAD2
+       select CRYPTO_CCM
        select CRYPTO_ECB
+       select CRYPTO_AES
        select CRYPTO_DES
        help
          This is the client VFS module for the SMB3 family of NAS protocols,
index 9727e1dcacd558a89e5fa0c206e82b6f0941a99c..cbb9534b89b40bc3f3cc3be31254485b976d2bcb 100644 (file)
@@ -160,8 +160,13 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
                        if ((ses->serverDomain == NULL) ||
                                (ses->serverOS == NULL) ||
                                (ses->serverNOS == NULL)) {
-                               seq_printf(m, "\n%d) entry for %s not fully "
-                                          "displayed\n\t", i, ses->serverName);
+                               seq_printf(m, "\n%d) Name: %s Uses: %d Capability: 0x%x\tSession Status: %d\t",
+                                       i, ses->serverName, ses->ses_count,
+                                       ses->capabilities, ses->status);
+                               if (ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
+                                       seq_printf(m, "Guest\t");
+                               else if (ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
+                                       seq_printf(m, "Anonymous\t");
                        } else {
                                seq_printf(m,
                                    "\n%d) Name: %s  Domain: %s Uses: %d OS:"
index 180b3356ff8612dfc7f3cd702e350bb284ca1a0d..8c8b75d33f310ce5e258042ff489f942379cdd27 100644 (file)
@@ -461,6 +461,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                seq_puts(s, ",nocase");
        if (tcon->retry)
                seq_puts(s, ",hard");
+       else
+               seq_puts(s, ",soft");
        if (tcon->use_persistent)
                seq_puts(s, ",persistenthandles");
        else if (tcon->use_resilient)
@@ -1447,7 +1449,7 @@ exit_cifs(void)
        exit_cifs_idmap();
 #endif
 #ifdef CONFIG_CIFS_UPCALL
-       unregister_key_type(&cifs_spnego_key_type);
+       exit_cifs_spnego();
 #endif
        cifs_destroy_request_bufs();
        cifs_destroy_mids();
index 30bf89b1fd9a789ec02070f534d9d9f68b593650..5a10e566f0e610fcaadf0d949e307087f01ecc90 100644 (file)
@@ -149,5 +149,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.09"
+#define CIFS_VERSION   "2.10"
 #endif                         /* _CIFSFS_H */
index 808486c29f0dcb40dbda7a86e626d77908d82001..e185b2853eab7b1116dafc7ca8aeeb6d09b10687 100644 (file)
@@ -188,6 +188,8 @@ enum smb_version {
 #ifdef CONFIG_CIFS_SMB311
        Smb_311,
 #endif /* SMB311 */
+       Smb_3any,
+       Smb_default,
        Smb_version_err
 };
 
@@ -659,7 +661,9 @@ struct TCP_Server_Info {
 #endif
        unsigned int    max_read;
        unsigned int    max_write;
-       __u8            preauth_hash[512];
+#ifdef CONFIG_CIFS_SMB311
+       __u8    preauth_sha_hash[64]; /* save initital negprot hash */
+#endif /* 3.1.1 */
        struct delayed_work reconnect; /* reconnect workqueue job */
        struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
        unsigned long echo_interval;
@@ -847,7 +851,9 @@ struct cifs_ses {
        __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
        __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
        __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
-       __u8 preauth_hash[512];
+#ifdef CONFIG_CIFS_SMB311
+       __u8 preauth_sha_hash[64];
+#endif /* 3.1.1 */
 };
 
 static inline bool
@@ -1701,6 +1707,10 @@ extern struct smb_version_values smb20_values;
 #define SMB21_VERSION_STRING   "2.1"
 extern struct smb_version_operations smb21_operations;
 extern struct smb_version_values smb21_values;
+#define SMBDEFAULT_VERSION_STRING "default"
+extern struct smb_version_values smbdefault_values;
+#define SMB3ANY_VERSION_STRING "3"
+extern struct smb_version_values smb3any_values;
 #define SMB30_VERSION_STRING   "3.0"
 extern struct smb_version_operations smb30_operations;
 extern struct smb_version_values smb30_values;
index 5aa2d278ca841f7aa7d4de0bb892dbdfc7a48645..0bfc2280436df86c7414288b5cb4afefb401127d 100644 (file)
@@ -301,6 +301,8 @@ static const match_table_t cifs_smb_version_tokens = {
        { Smb_311, SMB311_VERSION_STRING },
        { Smb_311, ALT_SMB311_VERSION_STRING },
 #endif /* SMB311 */
+       { Smb_3any, SMB3ANY_VERSION_STRING },
+       { Smb_default, SMBDEFAULT_VERSION_STRING },
        { Smb_version_err, NULL }
 };
 
@@ -1148,6 +1150,14 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol)
                vol->vals = &smb311_values;
                break;
 #endif /* SMB311 */
+       case Smb_3any:
+               vol->ops = &smb30_operations; /* currently identical with 3.0 */
+               vol->vals = &smb3any_values;
+               break;
+       case Smb_default:
+               vol->ops = &smb30_operations; /* currently identical with 3.0 */
+               vol->vals = &smbdefault_values;
+               break;
        default:
                cifs_dbg(VFS, "Unknown vers= option specified: %s\n", value);
                return 1;
@@ -1274,9 +1284,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
 
        vol->actimeo = CIFS_DEF_ACTIMEO;
 
-       /* FIXME: add autonegotiation for SMB3 or later rather than just SMB3 */
-       vol->ops = &smb30_operations; /* both secure and accepted widely */
-       vol->vals = &smb30_values;
+       /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
+       vol->ops = &smb30_operations;
+       vol->vals = &smbdefault_values;
 
        vol->echo_interval = SMB_ECHO_INTERVAL_DEFAULT;
 
@@ -1988,11 +1998,10 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
 
        if (got_version == false)
                pr_warn("No dialect specified on mount. Default has changed to "
-                       "a more secure dialect, SMB3 (vers=3.0), from CIFS "
+                       "a more secure dialect, SMB2.1 or later (e.g. SMB3), from CIFS "
                        "(SMB1). To use the less secure SMB1 dialect to access "
-                       "old servers which do not support SMB3 specify vers=1.0"
-                       " on mount. For somewhat newer servers such as Windows "
-                       "7 try vers=2.1.\n");
+                       "old servers which do not support SMB3 (or SMB2.1) specify vers=1.0"
+                       " on mount.\n");
 
        kfree(mountdata_copy);
        return 0;
@@ -2133,6 +2142,7 @@ static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol)
        if (vol->nosharesock)
                return 0;
 
+       /* BB update this for smb3any and default case */
        if ((server->vals != vol->vals) || (server->ops != vol->ops))
                return 0;
 
@@ -4144,6 +4154,14 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
        cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
                 server->sec_mode, server->capabilities, server->timeAdj);
 
+       if (ses->auth_key.response) {
+               cifs_dbg(VFS, "Free previous auth_key.response = %p\n",
+                        ses->auth_key.response);
+               kfree(ses->auth_key.response);
+               ses->auth_key.response = NULL;
+               ses->auth_key.len = 0;
+       }
+
        if (server->ops->sess_setup)
                rc = server->ops->sess_setup(xid, ses, nls_info);
 
index 0786f19d288f8e5e5defe2c7a97a26a88c646e87..92fdf9c35de2153fd6f008c4d1306ead6d806c89 100644 (file)
@@ -224,6 +224,13 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;
 
+       /* O_SYNC also has bit for O_DSYNC so following check picks up either */
+       if (f_flags & O_SYNC)
+               create_options |= CREATE_WRITE_THROUGH;
+
+       if (f_flags & O_DIRECT)
+               create_options |= CREATE_NO_BUFFER;
+
        oparms.tcon = tcon;
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = desired_access;
@@ -1102,8 +1109,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
        struct cifs_tcon *tcon;
        unsigned int num, max_num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
-       int types[] = {LOCKING_ANDX_LARGE_FILES,
-                      LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
+       static const int types[] = {
+               LOCKING_ANDX_LARGE_FILES,
+               LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
+       };
        int i;
 
        xid = get_xid();
@@ -1434,8 +1443,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
                  unsigned int xid)
 {
        int rc = 0, stored_rc;
-       int types[] = {LOCKING_ANDX_LARGE_FILES,
-                      LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
+       static const int types[] = {
+               LOCKING_ANDX_LARGE_FILES,
+               LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
+       };
        unsigned int i;
        unsigned int max_num, num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
index a8693632235f02b349db28e8d77aa45e30d4c783..7c732cb4416411e597f2e1a4af96fd8bf7e49beb 100644 (file)
@@ -234,6 +234,8 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
        fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
        fattr->cf_mtime = cifs_NTtimeToUnix(info->LastModificationTime);
        fattr->cf_ctime = cifs_NTtimeToUnix(info->LastStatusChange);
+       /* old POSIX extensions don't get create time */
+
        fattr->cf_mode = le64_to_cpu(info->Permissions);
 
        /*
@@ -2024,6 +2026,19 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
        stat->blksize = CIFS_MAX_MSGSIZE;
        stat->ino = CIFS_I(inode)->uniqueid;
 
+       /* old CIFS Unix Extensions doesn't return create time */
+       if (CIFS_I(inode)->createtime) {
+               stat->result_mask |= STATX_BTIME;
+               stat->btime =
+                     cifs_NTtimeToUnix(cpu_to_le64(CIFS_I(inode)->createtime));
+       }
+
+       stat->attributes_mask |= (STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED);
+       if (CIFS_I(inode)->cifsAttrs & FILE_ATTRIBUTE_COMPRESSED)
+               stat->attributes |= STATX_ATTR_COMPRESSED;
+       if (CIFS_I(inode)->cifsAttrs & FILE_ATTRIBUTE_ENCRYPTED)
+               stat->attributes |= STATX_ATTR_ENCRYPTED;
+
        /*
         * If on a multiuser mount without unix extensions or cifsacl being
         * enabled, and the admin hasn't overridden them, set the ownership
index 7ca9808a0daa01bfb149690964bbd165d9c6af20..62c88dfed57b3f66fff931f4abbcc8126ca4cf11 100644 (file)
@@ -214,7 +214,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
        {STATUS_DATATYPE_MISALIGNMENT, -EIO, "STATUS_DATATYPE_MISALIGNMENT"},
        {STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"},
        {STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"},
-       {STATUS_BUFFER_OVERFLOW, -EIO, "STATUS_BUFFER_OVERFLOW"},
+       {STATUS_BUFFER_OVERFLOW, -E2BIG, "STATUS_BUFFER_OVERFLOW"},
        {STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"},
        {STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"},
        {STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"},
index fb2934b9b97cf56f16359702a7bcb81cd6624252..bdb963d0ba32069035bdd23c9046985b41feb2bb 100644 (file)
@@ -426,6 +426,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
        return rc;
 }
 
+#ifdef CONFIG_CIFS_XATTR
 static ssize_t
 move_smb2_ea_to_cifs(char *dst, size_t dst_size,
                     struct smb2_file_full_ea_info *src, size_t src_size,
@@ -521,6 +522,7 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
        struct cifs_open_parms oparms;
        struct cifs_fid fid;
        struct smb2_file_full_ea_info *smb2_data;
+       int ea_buf_size = SMB2_MIN_EA_BUF;
 
        utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
        if (!utf16_path)
@@ -540,14 +542,32 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
                return rc;
        }
 
-       smb2_data = kzalloc(SMB2_MAX_EA_BUF, GFP_KERNEL);
-       if (smb2_data == NULL) {
-               SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
-               return -ENOMEM;
+       while (1) {
+               smb2_data = kzalloc(ea_buf_size, GFP_KERNEL);
+               if (smb2_data == NULL) {
+                       SMB2_close(xid, tcon, fid.persistent_fid,
+                                  fid.volatile_fid);
+                       return -ENOMEM;
+               }
+
+               rc = SMB2_query_eas(xid, tcon, fid.persistent_fid,
+                                   fid.volatile_fid,
+                                   ea_buf_size, smb2_data);
+
+               if (rc != -E2BIG)
+                       break;
+
+               kfree(smb2_data);
+               ea_buf_size <<= 1;
+
+               if (ea_buf_size > SMB2_MAX_EA_BUF) {
+                       cifs_dbg(VFS, "EA size is too large\n");
+                       SMB2_close(xid, tcon, fid.persistent_fid,
+                                  fid.volatile_fid);
+                       return -ENOMEM;
+               }
        }
 
-       rc = SMB2_query_eas(xid, tcon, fid.persistent_fid, fid.volatile_fid,
-                           smb2_data);
        SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
 
        if (!rc)
@@ -613,6 +633,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
 
        return rc;
 }
+#endif
 
 static bool
 smb2_can_echo(struct TCP_Server_Info *server)
@@ -3110,6 +3131,46 @@ struct smb_version_values smb21_values = {
        .create_lease_size = sizeof(struct create_lease),
 };
 
+struct smb_version_values smb3any_values = {
+       .version_string = SMB3ANY_VERSION_STRING,
+       .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .large_lock_type = 0,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+       .header_size = sizeof(struct smb2_hdr),
+       .max_header_size = MAX_SMB2_HDR_SIZE,
+       .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+       .lock_cmd = SMB2_LOCK,
+       .cap_unix = 0,
+       .cap_nt_find = SMB2_NT_FIND,
+       .cap_large_files = SMB2_LARGE_FILES,
+       .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
+       .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
+       .create_lease_size = sizeof(struct create_lease_v2),
+};
+
+struct smb_version_values smbdefault_values = {
+       .version_string = SMBDEFAULT_VERSION_STRING,
+       .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .large_lock_type = 0,
+       .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
+       .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
+       .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+       .header_size = sizeof(struct smb2_hdr),
+       .max_header_size = MAX_SMB2_HDR_SIZE,
+       .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+       .lock_cmd = SMB2_LOCK,
+       .cap_unix = 0,
+       .cap_nt_find = SMB2_NT_FIND,
+       .cap_large_files = SMB2_LARGE_FILES,
+       .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
+       .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
+       .create_lease_size = sizeof(struct create_lease_v2),
+};
+
 struct smb_version_values smb30_values = {
        .version_string = SMB30_VERSION_STRING,
        .protocol_id = SMB30_PROT_ID,
index 5531e7ee1210eba48915a6a0d4d01b348b386cde..5331631386a23bd4a7458ecb5fb96efe1773cf71 100644 (file)
@@ -439,7 +439,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req)
        build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
        req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
        req->NegotiateContextCount = cpu_to_le16(2);
-       inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2
+       inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context)
                        + sizeof(struct smb2_encryption_neg_context)); /* calculate hash */
 }
 #else
@@ -491,10 +491,25 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
 
        req->hdr.sync_hdr.SessionId = 0;
 
-       req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
-
-       req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
-       inc_rfc1001_len(req, 2);
+       if (strcmp(ses->server->vals->version_string,
+                  SMB3ANY_VERSION_STRING) == 0) {
+               req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
+               req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
+               req->DialectCount = cpu_to_le16(2);
+               inc_rfc1001_len(req, 4);
+       } else if (strcmp(ses->server->vals->version_string,
+                  SMBDEFAULT_VERSION_STRING) == 0) {
+               req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
+               req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
+               req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
+               req->DialectCount = cpu_to_le16(3);
+               inc_rfc1001_len(req, 6);
+       } else {
+               /* otherwise send specific dialect */
+               req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
+               req->DialectCount = cpu_to_le16(1);
+               inc_rfc1001_len(req, 2);
+       }
 
        /* only one of SMB2 signing flags may be set in SMB2 request */
        if (ses->sign)
@@ -528,16 +543,43 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
         */
        if (rc == -EOPNOTSUPP) {
                cifs_dbg(VFS, "Dialect not supported by server. Consider "
-                       "specifying vers=1.0 or vers=2.1 on mount for accessing"
+                       "specifying vers=1.0 or vers=2.0 on mount for accessing"
                        " older servers\n");
                goto neg_exit;
        } else if (rc != 0)
                goto neg_exit;
 
+       if (strcmp(ses->server->vals->version_string,
+                  SMB3ANY_VERSION_STRING) == 0) {
+               if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
+                       cifs_dbg(VFS,
+                               "SMB2 dialect returned but not requested\n");
+                       return -EIO;
+               } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
+                       cifs_dbg(VFS,
+                               "SMB2.1 dialect returned but not requested\n");
+                       return -EIO;
+               }
+       } else if (strcmp(ses->server->vals->version_string,
+                  SMBDEFAULT_VERSION_STRING) == 0) {
+               if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
+                       cifs_dbg(VFS,
+                               "SMB2 dialect returned but not requested\n");
+                       return -EIO;
+               } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
+                       /* ops set to 3.0 by default for default so update */
+                       ses->server->ops = &smb21_operations;
+               }
+       } else if (le16_to_cpu(rsp->DialectRevision) !=
+                               ses->server->vals->protocol_id) {
+               /* if requested single dialect ensure returned dialect matched */
+               cifs_dbg(VFS, "Illegal 0x%x dialect returned: not requested\n",
+                       le16_to_cpu(rsp->DialectRevision));
+               return -EIO;
+       }
+
        cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
 
-       /* BB we may eventually want to match the negotiated vs. requested
-          dialect, even though we are only requesting one at a time */
        if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
                cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
        else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
@@ -558,6 +600,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
        }
        server->dialect = le16_to_cpu(rsp->DialectRevision);
 
+       /* BB: add check that dialect was valid given dialect(s) we asked for */
+
        /* SMB2 only has an extended negflavor */
        server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
        /* set it to the maximum buffer size value we can send with 1 credit */
@@ -604,22 +648,30 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
 {
        int rc = 0;
        struct validate_negotiate_info_req vneg_inbuf;
-       struct validate_negotiate_info_rsp *pneg_rsp;
+       struct validate_negotiate_info_rsp *pneg_rsp = NULL;
        u32 rsplen;
+       u32 inbuflen; /* max of 4 dialects */
 
        cifs_dbg(FYI, "validate negotiate\n");
 
        /*
         * validation ioctl must be signed, so no point sending this if we
-        * can not sign it.  We could eventually change this to selectively
+        * can not sign it (ie are not known user).  Even if signing is not
+        * required (enabled but not negotiated), in those cases we selectively
         * sign just this, the first and only signed request on a connection.
-        * This is good enough for now since a user who wants better security
-        * would also enable signing on the mount. Having validation of
-        * negotiate info for signed connections helps reduce attack vectors
+        * Having validation of negotiate info  helps reduce attack vectors.
         */
-       if (tcon->ses->server->sign == false)
+       if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
                return 0; /* validation requires signing */
 
+       if (tcon->ses->user_name == NULL) {
+               cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
+               return 0; /* validation requires signing */
+       }
+
+       if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
+               cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
+
        vneg_inbuf.Capabilities =
                        cpu_to_le32(tcon->ses->server->vals->req_capabilities);
        memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
@@ -634,9 +686,30 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
        else
                vneg_inbuf.SecurityMode = 0;
 
-       vneg_inbuf.DialectCount = cpu_to_le16(1);
-       vneg_inbuf.Dialects[0] =
-               cpu_to_le16(tcon->ses->server->vals->protocol_id);
+
+       if (strcmp(tcon->ses->server->vals->version_string,
+               SMB3ANY_VERSION_STRING) == 0) {
+               vneg_inbuf.Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
+               vneg_inbuf.Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
+               vneg_inbuf.DialectCount = cpu_to_le16(2);
+               /* structure is big enough for 3 dialects, sending only 2 */
+               inbuflen = sizeof(struct validate_negotiate_info_req) - 2;
+       } else if (strcmp(tcon->ses->server->vals->version_string,
+               SMBDEFAULT_VERSION_STRING) == 0) {
+               vneg_inbuf.Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
+               vneg_inbuf.Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
+               vneg_inbuf.Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
+               vneg_inbuf.DialectCount = cpu_to_le16(3);
+               /* structure is big enough for 3 dialects */
+               inbuflen = sizeof(struct validate_negotiate_info_req);
+       } else {
+               /* otherwise specific dialect was requested */
+               vneg_inbuf.Dialects[0] =
+                       cpu_to_le16(tcon->ses->server->vals->protocol_id);
+               vneg_inbuf.DialectCount = cpu_to_le16(1);
+               /* structure is big enough for 3 dialects, sending only 1 */
+               inbuflen = sizeof(struct validate_negotiate_info_req) - 4;
+       }
 
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
@@ -654,8 +727,9 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
                         rsplen);
 
                /* relax check since Mac returns max bufsize allowed on ioctl */
-               if (rsplen > CIFSMaxBufSize)
-                       return -EIO;
+               if ((rsplen > CIFSMaxBufSize)
+                    || (rsplen < sizeof(struct validate_negotiate_info_rsp)))
+                       goto err_rsp_free;
        }
 
        /* check validate negotiate info response matches what we got earlier */
@@ -674,10 +748,13 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
 
        /* validate negotiate successful */
        cifs_dbg(FYI, "validate negotiate info successful\n");
+       kfree(pneg_rsp);
        return 0;
 
 vneg_out:
        cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
+err_rsp_free:
+       kfree(pneg_rsp);
        return -EIO;
 }
 
@@ -1110,6 +1187,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
        while (sess_data->func)
                sess_data->func(sess_data);
 
+       if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
+               cifs_dbg(VFS, "signing requested but authenticated as guest\n");
        rc = sess_data->result;
 out:
        kfree(sess_data);
@@ -1180,7 +1259,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
        struct smb2_tree_connect_req *req;
        struct smb2_tree_connect_rsp *rsp = NULL;
        struct kvec iov[2];
-       struct kvec rsp_iov;
+       struct kvec rsp_iov = { NULL, 0 };
        int rc = 0;
        int resp_buftype;
        int unc_path_len;
@@ -1297,7 +1376,7 @@ tcon_exit:
        return rc;
 
 tcon_error_exit:
-       if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
+       if (rsp && rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
                cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
        }
        goto tcon_exit;
@@ -1634,7 +1713,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        struct cifs_tcon *tcon = oparms->tcon;
        struct cifs_ses *ses = tcon->ses;
        struct kvec iov[4];
-       struct kvec rsp_iov;
+       struct kvec rsp_iov = {NULL, 0};
        int resp_buftype;
        int uni_path_len;
        __le16 *copy_path = NULL;
@@ -1763,7 +1842,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
 
        if (rc != 0) {
                cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
-               if (err_buf)
+               if (err_buf && rsp)
                        *err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4,
                                           GFP_KERNEL);
                goto creat_exit;
@@ -1900,6 +1979,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        } else
                iov[0].iov_len = get_rfc1002_length(req) + 4;
 
+       /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
+       if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
+               req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
 
        rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
@@ -2116,9 +2198,13 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
        req->PersistentFileId = persistent_fid;
        req->VolatileFileId = volatile_fid;
        req->AdditionalInformation = cpu_to_le32(additional_info);
-       /* 4 for rfc1002 length field and 1 for Buffer */
-       req->InputBufferOffset =
-               cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
+
+       /*
+        * We do not use the input buffer (do not send extra byte)
+        */
+       req->InputBufferOffset = 0;
+       inc_rfc1001_len(req, -1);
+
        req->OutputBufferLength = cpu_to_le32(output_len);
 
        iov[0].iov_base = (char *)req;
@@ -2158,12 +2244,12 @@ qinf_exit:
 }
 
 int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
-       u64 persistent_fid, u64 volatile_fid,
-       struct smb2_file_full_ea_info *data)
+                  u64 persistent_fid, u64 volatile_fid,
+                  int ea_buf_size, struct smb2_file_full_ea_info *data)
 {
        return query_info(xid, tcon, persistent_fid, volatile_fid,
                          FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0,
-                         SMB2_MAX_EA_BUF,
+                         ea_buf_size,
                          sizeof(struct smb2_file_full_ea_info),
                          (void **)&data,
                          NULL);
index 393ed5f4e1b6516b40f30f64890a385a43b44c9a..c2ec934be96891a645537140df279b0fb3e2fcbe 100644 (file)
@@ -716,7 +716,7 @@ struct validate_negotiate_info_req {
        __u8   Guid[SMB2_CLIENT_GUID_SIZE];
        __le16 SecurityMode;
        __le16 DialectCount;
-       __le16 Dialects[1]; /* dialect (someday maybe list) client asked for */
+       __le16 Dialects[3]; /* BB expand this if autonegotiate > 3 dialects */
 } __packed;
 
 struct validate_negotiate_info_rsp {
@@ -832,7 +832,7 @@ struct smb2_flush_rsp {
 /* Channel field for read and write: exactly one of following flags can be set*/
 #define SMB2_CHANNEL_NONE              0x00000000
 #define SMB2_CHANNEL_RDMA_V1           0x00000001 /* SMB3 or later */
-#define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000001 /* SMB3.02 or later */
+#define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000002 /* SMB3.02 or later */
 
 /* SMB2 read request without RFC1001 length at the beginning */
 struct smb2_read_plain_req {
@@ -1178,7 +1178,8 @@ struct smb2_file_link_info { /* encoding of request for level 11 */
        char   FileName[0];     /* Name to be assigned to new link */
 } __packed; /* level 11 Set */
 
-#define SMB2_MAX_EA_BUF 2048
+#define SMB2_MIN_EA_BUF  2048
+#define SMB2_MAX_EA_BUF 65536
 
 struct smb2_file_full_ea_info { /* encoding of response for level 15 */
        __le32 next_entry_offset;
index 003217099ef3e6831a36ed81f7c13f1f611efb9d..e9ab5227e7a8ac3d69e69afe678cee78566056a8 100644 (file)
@@ -134,6 +134,7 @@ extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon,
                      u64 persistent_file_id, u64 volatile_file_id);
 extern int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
                          u64 persistent_file_id, u64 volatile_file_id,
+                         int ea_buf_size,
                          struct smb2_file_full_ea_info *data);
 extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
                           u64 persistent_file_id, u64 volatile_file_id,
index 67367cf1f8cd2391359ff041836f0ae34a2091f4..99493946e2f9abcf02673bf685238b0313da4180 100644 (file)
@@ -390,6 +390,7 @@ generate_smb30signingkey(struct cifs_ses *ses)
        return generate_smb3signingkey(ses, &triplet);
 }
 
+#ifdef CONFIG_CIFS_SMB311
 int
 generate_smb311signingkey(struct cifs_ses *ses)
 
@@ -398,25 +399,26 @@ generate_smb311signingkey(struct cifs_ses *ses)
        struct derivation *d;
 
        d = &triplet.signing;
-       d->label.iov_base = "SMB2AESCMAC";
-       d->label.iov_len = 12;
-       d->context.iov_base = "SmbSign";
-       d->context.iov_len = 8;
+       d->label.iov_base = "SMBSigningKey";
+       d->label.iov_len = 14;
+       d->context.iov_base = ses->preauth_sha_hash;
+       d->context.iov_len = 64;
 
        d = &triplet.encryption;
-       d->label.iov_base = "SMB2AESCCM";
-       d->label.iov_len = 11;
-       d->context.iov_base = "ServerIn ";
-       d->context.iov_len = 10;
+       d->label.iov_base = "SMBC2SCipherKey";
+       d->label.iov_len = 16;
+       d->context.iov_base = ses->preauth_sha_hash;
+       d->context.iov_len = 64;
 
        d = &triplet.decryption;
-       d->label.iov_base = "SMB2AESCCM";
-       d->label.iov_len = 11;
-       d->context.iov_base = "ServerOut";
-       d->context.iov_len = 10;
+       d->label.iov_base = "SMBS2CCipherKey";
+       d->label.iov_len = 16;
+       d->context.iov_base = ses->preauth_sha_hash;
+       d->context.iov_len = 64;
 
        return generate_smb3signingkey(ses, &triplet);
 }
+#endif /* 311 */
 
 int
 smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
index 018c588c7ac3b7ac8fd78b4092332f771c6411c0..8e704d12a1cf2781087ec14d6ef6561ea9dd3236 100644 (file)
@@ -109,6 +109,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
                goto out;
        }
        ukp = user_key_payload_locked(keyring_key);
+       if (!ukp) {
+               /* key was revoked before we acquired its semaphore */
+               res = -EKEYREVOKED;
+               goto out;
+       }
        if (ukp->datalen != sizeof(struct fscrypt_key)) {
                res = -EINVAL;
                goto out;
index 5fa2211e49aee2186546d8db7a70c5295c7591f0..b53e66d9abd7030f6b05a6dac4847928c24bf1a0 100644 (file)
  */
 #define DIO_PAGES      64
 
+/*
+ * Flags for dio_complete()
+ */
+#define DIO_COMPLETE_ASYNC             0x01    /* This is async IO */
+#define DIO_COMPLETE_INVALIDATE                0x02    /* Can invalidate pages */
+
 /*
  * This code generally works in units of "dio_blocks".  A dio_block is
  * somewhere between the hard sector size and the filesystem block size.  it
@@ -225,10 +231,11 @@ static inline struct page *dio_get_page(struct dio *dio,
  * filesystems can use it to hold additional state between get_block calls and
  * dio_complete.
  */
-static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
+static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
 {
        loff_t offset = dio->iocb->ki_pos;
        ssize_t transferred = 0;
+       int err;
 
        /*
         * AIO submission can race with bio completion to get here while
@@ -259,18 +266,37 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
                ret = transferred;
 
        if (dio->end_io) {
-               int err;
-
                // XXX: ki_pos??
                err = dio->end_io(dio->iocb, offset, ret, dio->private);
                if (err)
                        ret = err;
        }
 
+       /*
+        * Try again to invalidate clean pages which might have been cached by
+        * non-direct readahead, or faulted in by get_user_pages() if the source
+        * of the write was an mmap'ed region of the file we're writing.  Either
+        * one is a pretty crazy thing to do, so we don't support it 100%.  If
+        * this invalidation fails, tough, the write still worked...
+        *
+        * And this page cache invalidation has to be after dio->end_io(), as
+        * some filesystems convert unwritten extents to real allocations in
+        * end_io() when necessary, otherwise a racing buffer read would cache
+        * zeros from unwritten extents.
+        */
+       if (flags & DIO_COMPLETE_INVALIDATE &&
+           ret > 0 && dio->op == REQ_OP_WRITE &&
+           dio->inode->i_mapping->nrpages) {
+               err = invalidate_inode_pages2_range(dio->inode->i_mapping,
+                                       offset >> PAGE_SHIFT,
+                                       (offset + ret - 1) >> PAGE_SHIFT);
+               WARN_ON_ONCE(err);
+       }
+
        if (!(dio->flags & DIO_SKIP_DIO_COUNT))
                inode_dio_end(dio->inode);
 
-       if (is_async) {
+       if (flags & DIO_COMPLETE_ASYNC) {
                /*
                 * generic_write_sync expects ki_pos to have been updated
                 * already, but the submission path only does this for
@@ -291,7 +317,7 @@ static void dio_aio_complete_work(struct work_struct *work)
 {
        struct dio *dio = container_of(work, struct dio, complete_work);
 
-       dio_complete(dio, 0, true);
+       dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
 }
 
 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
@@ -304,6 +330,7 @@ static void dio_bio_end_aio(struct bio *bio)
        struct dio *dio = bio->bi_private;
        unsigned long remaining;
        unsigned long flags;
+       bool defer_completion = false;
 
        /* cleanup the bio */
        dio_bio_complete(dio, bio);
@@ -315,12 +342,24 @@ static void dio_bio_end_aio(struct bio *bio)
        spin_unlock_irqrestore(&dio->bio_lock, flags);
 
        if (remaining == 0) {
-               if (dio->result && dio->defer_completion) {
+               /*
+                * Defer completion when defer_completion is set or
+                * when the inode has pages mapped and this is AIO write.
+                * We need to invalidate those pages because there is a
+                * chance they contain stale data in the case buffered IO
+                * went in between AIO submission and completion into the
+                * same region.
+                */
+               if (dio->result)
+                       defer_completion = dio->defer_completion ||
+                                          (dio->op == REQ_OP_WRITE &&
+                                           dio->inode->i_mapping->nrpages);
+               if (defer_completion) {
                        INIT_WORK(&dio->complete_work, dio_aio_complete_work);
                        queue_work(dio->inode->i_sb->s_dio_done_wq,
                                   &dio->complete_work);
                } else {
-                       dio_complete(dio, 0, true);
+                       dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
                }
        }
 }
@@ -838,7 +877,8 @@ out:
         */
        if (sdio->boundary) {
                ret = dio_send_cur_page(dio, sdio, map_bh);
-               dio_bio_submit(dio, sdio);
+               if (sdio->bio)
+                       dio_bio_submit(dio, sdio);
                put_page(sdio->cur_page);
                sdio->cur_page = NULL;
        }
@@ -1210,10 +1250,19 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
         * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
         * so that we can call ->fsync.
         */
-       if (dio->is_async && iov_iter_rw(iter) == WRITE &&
-           ((iocb->ki_filp->f_flags & O_DSYNC) ||
-            IS_SYNC(iocb->ki_filp->f_mapping->host))) {
-               retval = dio_set_defer_completion(dio);
+       if (dio->is_async && iov_iter_rw(iter) == WRITE) {
+               retval = 0;
+               if ((iocb->ki_filp->f_flags & O_DSYNC) ||
+                   IS_SYNC(iocb->ki_filp->f_mapping->host))
+                       retval = dio_set_defer_completion(dio);
+               else if (!dio->inode->i_sb->s_dio_done_wq) {
+                       /*
+                        * In case of AIO write racing with buffered read we
+                        * need to defer completion. We can't decide this now,
+                        * however the workqueue needs to be initialized here.
+                        */
+                       retval = sb_init_dio_done_wq(dio->inode->i_sb);
+               }
                if (retval) {
                        /*
                         * We grab i_mutex only for reads so we don't have
@@ -1322,7 +1371,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
                dio_await_completion(dio);
 
        if (drop_refcount(dio) == 0) {
-               retval = dio_complete(dio, retval, false);
+               retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
        } else
                BUG_ON(retval != -EIOCBQUEUED);
 
index 9c351bf757b20e037f39aeadf0fa0ed12f963db6..3fbc0ff79699dde5cca295262591b2cb5aafcced 100644 (file)
@@ -84,11 +84,16 @@ struct ecryptfs_page_crypt_context {
 static inline struct ecryptfs_auth_tok *
 ecryptfs_get_encrypted_key_payload_data(struct key *key)
 {
-       if (key->type == &key_type_encrypted)
-               return (struct ecryptfs_auth_tok *)
-                       (&((struct encrypted_key_payload *)key->payload.data[0])->payload_data);
-       else
+       struct encrypted_key_payload *payload;
+
+       if (key->type != &key_type_encrypted)
                return NULL;
+
+       payload = key->payload.data[0];
+       if (!payload)
+               return ERR_PTR(-EKEYREVOKED);
+
+       return (struct ecryptfs_auth_tok *)payload->payload_data;
 }
 
 static inline struct key *ecryptfs_get_encrypted_key(char *sig)
@@ -114,12 +119,17 @@ static inline struct ecryptfs_auth_tok *
 ecryptfs_get_key_payload_data(struct key *key)
 {
        struct ecryptfs_auth_tok *auth_tok;
+       struct user_key_payload *ukp;
 
        auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
-       if (!auth_tok)
-               return (struct ecryptfs_auth_tok *)user_key_payload_locked(key)->data;
-       else
+       if (auth_tok)
                return auth_tok;
+
+       ukp = user_key_payload_locked(key);
+       if (!ukp)
+               return ERR_PTR(-EKEYREVOKED);
+
+       return (struct ecryptfs_auth_tok *)ukp->data;
 }
 
 #define ECRYPTFS_MAX_KEYSET_SIZE 1024
index 3cf1546dca8257677572db866f86e3f2190a918e..fa218cd64f746d2c924a786edc1c6052911c1732 100644 (file)
@@ -459,7 +459,8 @@ out:
  * @auth_tok_key: key containing the authentication token
  * @auth_tok: authentication token
  *
- * Returns zero on valid auth tok; -EINVAL otherwise
+ * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
+ * -EKEYREVOKED if the key was revoked before we acquired its semaphore.
  */
 static int
 ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
@@ -468,6 +469,12 @@ ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
        int rc = 0;
 
        (*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
+       if (IS_ERR(*auth_tok)) {
+               rc = PTR_ERR(*auth_tok);
+               *auth_tok = NULL;
+               goto out;
+       }
+
        if (ecryptfs_verify_version((*auth_tok)->version)) {
                printk(KERN_ERR "Data structure version mismatch. Userspace "
                       "tools must match eCryptfs kernel module with major "
index ac34d972468489cfc9e5e0b70f863296f82ed246..3e14ba25f678bf8869e005a34dc9742e117e90df 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1410,7 +1410,7 @@ static void free_bprm(struct linux_binprm *bprm)
        kfree(bprm);
 }
 
-int bprm_change_interp(char *interp, struct linux_binprm *bprm)
+int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
 {
        /* If a binfmt changed the interp, free it first. */
        if (bprm->interp != bprm->filename)
@@ -1802,6 +1802,7 @@ static int do_execveat_common(int fd, struct filename *filename,
        /* execve succeeded */
        current->fs->in_exec = 0;
        current->in_execve = 0;
+       membarrier_execve(current);
        acct_update_integrals(current);
        task_numa_free(current);
        free_bprm(bprm);
index b104096fce9eeec4e6079d3d1b05dd8b83fe796b..b0915b734a3817a811b62b41f677f7a1a6228c9f 100644 (file)
@@ -1677,7 +1677,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
                return 1;
        case Opt_i_version:
-               sb->s_flags |= MS_I_VERSION;
+               sb->s_flags |= SB_I_VERSION;
                return 1;
        case Opt_lazytime:
                sb->s_flags |= MS_LAZYTIME;
@@ -2060,7 +2060,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
                SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
        if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
                SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
-       if (sb->s_flags & MS_I_VERSION)
+       if (sb->s_flags & SB_I_VERSION)
                SEQ_OPTS_PUTS("i_version");
        if (nodefs || sbi->s_stripe)
                SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
index 9a7c903869477835afe192e15b064c6832297520..4b4a72f392be4be76575d9d485cce0ced2adfdfe 100644 (file)
@@ -2525,7 +2525,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
 bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
 void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new);
 void stop_discard_thread(struct f2fs_sb_info *sbi);
-void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount);
 void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
 void release_discard_addrs(struct f2fs_sb_info *sbi);
 int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
index 621b9b3d320bba293f68b5eaf74b91aa364dd567..c695ff462ee6990f1abd4d27458b9ad4bf89c025 100644 (file)
@@ -1210,11 +1210,11 @@ void stop_discard_thread(struct f2fs_sb_info *sbi)
 }
 
 /* This comes from f2fs_put_super and f2fs_trim_fs */
-void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount)
 {
        __issue_discard_cmd(sbi, false);
        __drop_discard_cmd(sbi);
-       __wait_discard_cmd(sbi, false);
+       __wait_discard_cmd(sbi, !umount);
 }
 
 static void mark_discard_range_all(struct f2fs_sb_info *sbi)
@@ -2244,7 +2244,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
        }
        /* It's time to issue all the filed discards */
        mark_discard_range_all(sbi);
-       f2fs_wait_discard_bios(sbi);
+       f2fs_wait_discard_bios(sbi, false);
 out:
        range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
        return err;
index 89f61eb3d1671c3c5686a002adca8c0c427abd1c..933c3d529e6531c827c2237c97f816103c6d4ef0 100644 (file)
@@ -801,7 +801,7 @@ static void f2fs_put_super(struct super_block *sb)
        }
 
        /* be sure to wait for any on-going discard commands */
-       f2fs_wait_discard_bios(sbi);
+       f2fs_wait_discard_bios(sbi, true);
 
        if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
                struct cp_control cpc = {
index 0491da3b28c36bbf786f1f1303d3bb44f6bfdb98..448a1119f0becff3d871a98d8e6cee9b673b2652 100644 (file)
@@ -749,7 +749,7 @@ static void send_sigio_to_task(struct task_struct *p,
                         * specific si_codes.  In that case use SI_SIGIO instead
                         * to remove the ambiguity.
                         */
-                       if (sig_specific_sicodes(signum))
+                       if ((signum != SIGPOLL) && sig_specific_sicodes(signum))
                                si.si_code = SI_SIGIO;
 
                        /* Make sure we are called with one of the POLL_*
index b5ab06fabc60a3bd0a7a308f45e4cb5b850398c1..0438d4cd91ef74ee98f8990936d07c3bf8a67b30 100644 (file)
@@ -331,6 +331,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
        rcu_read_lock();
 
        confkey = user_key_payload_rcu(key);
+       if (!confkey) {
+               /* key was revoked */
+               rcu_read_unlock();
+               key_put(key);
+               goto no_config;
+       }
+
        buf = confkey->data;
 
        for (len = confkey->datalen - 1; len >= 0; len--) {
index 622081b97426d5b439bb1db5ee57dc4c3eead253..24967382a7b15271ae0646c3cb5522b3e58c37d0 100644 (file)
@@ -1308,7 +1308,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
                        */
                        over = !dir_emit(ctx, dirent->name, dirent->namelen,
                                       dirent->ino, dirent->type);
-                       ctx->pos = dirent->off;
+                       if (!over)
+                               ctx->pos = dirent->off;
                }
 
                buf += reclen;
index 65c88379a3a14311cca68b8750d6bb0b9f107444..94a745acaef842eed799e45731bd12dd2ac0f7f7 100644 (file)
@@ -1059,7 +1059,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
        if (sb->s_flags & MS_MANDLOCK)
                goto err;
 
-       sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
+       sb->s_flags &= ~(MS_NOSEC | SB_I_VERSION);
 
        if (!parse_fuse_opt(data, &d, is_bdev))
                goto err;
index 98e845b7841bda81b4bd9000f4ba027639861d27..11066d8647d29320dcb30d063536aa4f218cf5ec 100644 (file)
@@ -1945,13 +1945,9 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
 {
        struct gfs2_glock_iter *gi = seq->private;
        loff_t n = *pos;
-       int ret;
-
-       if (gi->last_pos <= *pos)
-               n = (*pos - gi->last_pos);
 
-       ret = rhashtable_walk_start(&gi->hti);
-       if (ret)
+       rhashtable_walk_enter(&gl_hash_table, &gi->hti);
+       if (rhashtable_walk_start(&gi->hti) != 0)
                return NULL;
 
        do {
@@ -1959,6 +1955,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
        } while (gi->gl && n--);
 
        gi->last_pos = *pos;
+
        return gi->gl;
 }
 
@@ -1970,6 +1967,7 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
        (*pos)++;
        gi->last_pos = *pos;
        gfs2_glock_iter_next(gi);
+
        return gi->gl;
 }
 
@@ -1980,6 +1978,7 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
 
        gi->gl = NULL;
        rhashtable_walk_stop(&gi->hti);
+       rhashtable_walk_exit(&gi->hti);
 }
 
 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
@@ -2042,12 +2041,10 @@ static int __gfs2_glocks_open(struct inode *inode, struct file *file,
                struct gfs2_glock_iter *gi = seq->private;
 
                gi->sdp = inode->i_private;
-               gi->last_pos = 0;
                seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
                if (seq->buf)
                        seq->size = GFS2_SEQ_GOODSIZE;
                gi->gl = NULL;
-               rhashtable_walk_enter(&gl_hash_table, &gi->hti);
        }
        return ret;
 }
@@ -2063,7 +2060,6 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file)
        struct gfs2_glock_iter *gi = seq->private;
 
        gi->gl = NULL;
-       rhashtable_walk_exit(&gi->hti);
        return seq_release_private(inode, file);
 }
 
index 269b24a01f3218ff9cdb9c1ef5254f3e5a64c3b1..d4801f8dd4fd55a111e647a810c3e0788829897d 100644 (file)
@@ -713,6 +713,8 @@ struct iomap_dio {
 static ssize_t iomap_dio_complete(struct iomap_dio *dio)
 {
        struct kiocb *iocb = dio->iocb;
+       struct inode *inode = file_inode(iocb->ki_filp);
+       loff_t offset = iocb->ki_pos;
        ssize_t ret;
 
        if (dio->end_io) {
@@ -726,12 +728,33 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
        if (likely(!ret)) {
                ret = dio->size;
                /* check for short read */
-               if (iocb->ki_pos + ret > dio->i_size &&
+               if (offset + ret > dio->i_size &&
                    !(dio->flags & IOMAP_DIO_WRITE))
-                       ret = dio->i_size - iocb->ki_pos;
+                       ret = dio->i_size - offset;
                iocb->ki_pos += ret;
        }
 
+       /*
+        * Try again to invalidate clean pages which might have been cached by
+        * non-direct readahead, or faulted in by get_user_pages() if the source
+        * of the write was an mmap'ed region of the file we're writing.  Either
+        * one is a pretty crazy thing to do, so we don't support it 100%.  If
+        * this invalidation fails, tough, the write still worked...
+        *
+        * And this page cache invalidation has to be after dio->end_io(), as
+        * some filesystems convert unwritten extents to real allocations in
+        * end_io() when necessary, otherwise a racing buffer read would cache
+        * zeros from unwritten extents.
+        */
+       if (!dio->error &&
+           (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
+               int err;
+               err = invalidate_inode_pages2_range(inode->i_mapping,
+                               offset >> PAGE_SHIFT,
+                               (offset + dio->size - 1) >> PAGE_SHIFT);
+               WARN_ON_ONCE(err);
+       }
+
        inode_dio_end(file_inode(iocb->ki_filp));
        kfree(dio);
 
@@ -993,6 +1016,13 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        WARN_ON_ONCE(ret);
        ret = 0;
 
+       if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
+           !inode->i_sb->s_dio_done_wq) {
+               ret = sb_init_dio_done_wq(inode->i_sb);
+               if (ret < 0)
+                       goto out_free_dio;
+       }
+
        inode_dio_begin(inode);
 
        blk_start_plug(&plug);
@@ -1015,13 +1045,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        if (ret < 0)
                iomap_dio_set_error(dio, ret);
 
-       if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
-                       !inode->i_sb->s_dio_done_wq) {
-               ret = sb_init_dio_done_wq(inode->i_sb);
-               if (ret < 0)
-                       iomap_dio_set_error(dio, ret);
-       }
-
        if (!atomic_dec_and_test(&dio->ref)) {
                if (!is_sync_kiocb(iocb))
                        return -EIOCBQUEUED;
@@ -1042,19 +1065,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 
        ret = iomap_dio_complete(dio);
 
-       /*
-        * Try again to invalidate clean pages which might have been cached by
-        * non-direct readahead, or faulted in by get_user_pages() if the source
-        * of the write was an mmap'ed region of the file we're writing.  Either
-        * one is a pretty crazy thing to do, so we don't support it 100%.  If
-        * this invalidation fails, tough, the write still worked...
-        */
-       if (iov_iter_rw(iter) == WRITE) {
-               int err = invalidate_inode_pages2_range(mapping,
-                               start >> PAGE_SHIFT, end >> PAGE_SHIFT);
-               WARN_ON_ONCE(err);
-       }
-
        return ret;
 
 out_free_dio:
index db692f554158854d4fa683d116827594d6ffd978..447a24d77b894ef733412ba201cadcaa9a226f7e 100644 (file)
@@ -514,9 +514,11 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
        if (sbi->s_fmode != ISOFS_INVALID_MODE)
                seq_printf(m, ",fmode=%o", sbi->s_fmode);
 
+#ifdef CONFIG_JOLIET
        if (sbi->s_nls_iocharset &&
            strcmp(sbi->s_nls_iocharset->charset, CONFIG_NLS_DEFAULT) != 0)
                seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset);
+#endif
        return 0;
 }
 
index 37bb77c1302c354adf0386a32e3b678fdc93f54c..c991faec70b928686714bd2679b8715985e4dc9a 100644 (file)
@@ -468,6 +468,16 @@ static void clean_buffers(struct page *page, unsigned first_unmapped)
                try_to_free_buffers(page);
 }
 
+/*
+ * For situations where we want to clean all buffers attached to a page.
+ * We don't need to calculate how many buffers are attached to the page,
+ * we just need to specify a number larger than the maximum number of buffers.
+ */
+void clean_page_buffers(struct page *page)
+{
+       clean_buffers(page, ~0U);
+}
+
 static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
                      void *data)
 {
@@ -605,10 +615,8 @@ alloc_new:
        if (bio == NULL) {
                if (first_unmapped == blocks_per_page) {
                        if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
-                                                               page, wbc)) {
-                               clean_buffers(page, first_unmapped);
+                                                               page, wbc))
                                goto out;
-                       }
                }
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
                                BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
index 54059b142d6ba1153b8adab7114cd7587e0c3aad..d18deb4c410b24ed276c9b60c869f4c06b6ec20f 100644 (file)
@@ -468,7 +468,9 @@ static inline int may_write_real(struct file *file)
 
        /* File refers to upper, writable layer? */
        upperdentry = d_real(dentry, NULL, 0, D_REAL_UPPER);
-       if (upperdentry && file_inode(file) == d_inode(upperdentry))
+       if (upperdentry &&
+           (file_inode(file) == d_inode(upperdentry) ||
+            file_inode(file) == d_inode(dentry)))
                return 0;
 
        /* Lower layer: can't write to real file, sorry... */
@@ -2823,7 +2825,8 @@ long do_mount(const char *dev_name, const char __user *dir_name,
                            SB_MANDLOCK |
                            SB_DIRSYNC |
                            SB_SILENT |
-                           SB_POSIXACL);
+                           SB_POSIXACL |
+                           SB_I_VERSION);
 
        if (flags & MS_REMOUNT)
                retval = do_remount(&path, flags, sb_flags, mnt_flags,
index efebe6cf4378e32a13d109eebd0b694dd629c2a5..22880ef6d8dd74f6b44d7d755bdb9c8e8de59917 100644 (file)
@@ -218,7 +218,6 @@ static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
 static void pnfs_init_server(struct nfs_server *server)
 {
        rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
-       rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
 }
 
 #else
@@ -888,6 +887,7 @@ struct nfs_server *nfs_alloc_server(void)
        ida_init(&server->openowner_id);
        ida_init(&server->lockowner_id);
        pnfs_init_server(server);
+       rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
 
        return server;
 }
index 44c638b7876cfd5824d2d6287731e4411ea02052..508126eb49f9577eeec6f9e1b97abf6286cc82eb 100644 (file)
@@ -745,7 +745,8 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
        struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
 
        dprintk("--> %s\n", __func__);
-       nfs4_fl_put_deviceid(fl->dsaddr);
+       if (fl->dsaddr != NULL)
+               nfs4_fl_put_deviceid(fl->dsaddr);
        /* This assumes a single RW lseg */
        if (lseg->pls_range.iomode == IOMODE_RW) {
                struct nfs4_filelayout *flo;
index dd5d27da8c0cc5f19d96c33360400778a6a60b5c..30426c1a1bbda245b831bfbac5ff8f1aa570f71f 100644 (file)
@@ -274,7 +274,7 @@ static struct key *nfs_idmap_request_key(const char *name, size_t namelen,
        ssize_t ret;
 
        ret = nfs_idmap_get_desc(name, namelen, type, strlen(type), &desc);
-       if (ret <= 0)
+       if (ret < 0)
                return ERR_PTR(ret);
 
        rkey = request_key(&key_type_id_resolver, desc, "");
index 6c61e2b996351cde05b1c922674cd3a2670913ea..f90090e8c959b14346baf76ac7eaefed7ae30f82 100644 (file)
@@ -8399,8 +8399,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
                lo = NFS_I(inode)->layout;
                /* If the open stateid was bad, then recover it. */
                if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
-                   nfs4_stateid_match_other(&lgp->args.stateid,
-                                       &lgp->args.ctx->state->stateid)) {
+                   !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
                        spin_unlock(&inode->i_lock);
                        exception->state = lgp->args.ctx->state;
                        exception->stateid = &lgp->args.stateid;
index 37c8af00327588d772610cac487fcf0651cf8fbf..14ed9791ec9cf156905a3e686042c325173582c0 100644 (file)
@@ -1842,8 +1842,8 @@ static void encode_create_session(struct xdr_stream *xdr,
         * Assumes OPEN is the biggest non-idempotent compound.
         * 2 is the verifier.
         */
-       max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE +
-                             RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT;
+       max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + 2)
+                               * XDR_UNIT + RPC_MAX_AUTH_SIZE;
 
        encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr);
        p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12);
index 3c69db7d4905e379487ebdec7f31c2b4670412d6..8487486ec4963efb72477e7cf2f19616108f12f2 100644 (file)
@@ -927,6 +927,13 @@ nfsd4_secinfo_release(union nfsd4_op_u *u)
                exp_put(u->secinfo.si_exp);
 }
 
+static void
+nfsd4_secinfo_no_name_release(union nfsd4_op_u *u)
+{
+       if (u->secinfo_no_name.sin_exp)
+               exp_put(u->secinfo_no_name.sin_exp);
+}
+
 static __be32
 nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
              union nfsd4_op_u *u)
@@ -2375,7 +2382,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
        },
        [OP_SECINFO_NO_NAME] = {
                .op_func = nfsd4_secinfo_no_name,
-               .op_release = nfsd4_secinfo_release,
+               .op_release = nfsd4_secinfo_no_name_release,
                .op_flags = OP_HANDLES_WRONGSEC,
                .op_name = "OP_SECINFO_NO_NAME",
                .op_rsize_bop = nfsd4_secinfo_rsize,
index aad97b30d5e657199f741f02700208c41e5424ae..c441f9387a1ba0f2ddf1f1426a114a5533fb6e30 100644 (file)
@@ -561,10 +561,8 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
                c->tmpfile = true;
                err = ovl_copy_up_locked(c);
        } else {
-               err = -EIO;
-               if (lock_rename(c->workdir, c->destdir) != NULL) {
-                       pr_err("overlayfs: failed to lock workdir+upperdir\n");
-               } else {
+               err = ovl_lock_rename_workdir(c->workdir, c->destdir);
+               if (!err) {
                        err = ovl_copy_up_locked(c);
                        unlock_rename(c->workdir, c->destdir);
                }
index 3309b1912241769bba93e2caba483a4e2f57035a..cc961a3bd3bdec34fcace34553f9e8cfb319db25 100644 (file)
@@ -216,26 +216,6 @@ out_unlock:
        return err;
 }
 
-static int ovl_lock_rename_workdir(struct dentry *workdir,
-                                  struct dentry *upperdir)
-{
-       /* Workdir should not be the same as upperdir */
-       if (workdir == upperdir)
-               goto err;
-
-       /* Workdir should not be subdir of upperdir and vice versa */
-       if (lock_rename(workdir, upperdir) != NULL)
-               goto err_unlock;
-
-       return 0;
-
-err_unlock:
-       unlock_rename(workdir, upperdir);
-err:
-       pr_err("overlayfs: failed to lock workdir+upperdir\n");
-       return -EIO;
-}
-
 static struct dentry *ovl_clear_empty(struct dentry *dentry,
                                      struct list_head *list)
 {
index a619addecafcf05713b3a41e7e05a9e0866847a1..321511ed8c4235e102adfe84b98ac17c247eeadb 100644 (file)
@@ -598,18 +598,30 @@ static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
        return true;
 }
 
-struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry)
+struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
+                           struct dentry *index)
 {
        struct dentry *lowerdentry = ovl_dentry_lower(dentry);
        struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
        struct inode *inode;
+       /* Already indexed or could be indexed on copy up? */
+       bool indexed = (index || (ovl_indexdir(dentry->d_sb) && !upperdentry));
+
+       if (WARN_ON(upperdentry && indexed && !lowerdentry))
+               return ERR_PTR(-EIO);
 
        if (!realinode)
                realinode = d_inode(lowerdentry);
 
-       if (!S_ISDIR(realinode->i_mode) &&
-           (upperdentry || (lowerdentry && ovl_indexdir(dentry->d_sb)))) {
-               struct inode *key = d_inode(lowerdentry ?: upperdentry);
+       /*
+        * Copy up origin (lower) may exist for non-indexed upper, but we must
+        * not use lower as hash key in that case.
+        * Hash inodes that are or could be indexed by origin inode and
+        * non-indexed upper inodes that could be hard linked by upper inode.
+        */
+       if (!S_ISDIR(realinode->i_mode) && (upperdentry || indexed)) {
+               struct inode *key = d_inode(indexed ? lowerdentry :
+                                                     upperdentry);
                unsigned int nlink;
 
                inode = iget5_locked(dentry->d_sb, (unsigned long) key,
index c3addd1114f1f562464370e6d640abd22cd3a3e4..a12dc10bf726351a4a47a75f92b66d3b87043174 100644 (file)
@@ -405,14 +405,13 @@ int ovl_verify_index(struct dentry *index, struct path *lowerstack,
         * be treated as stale (i.e. after unlink of the overlay inode).
         * We don't know the verification rules for directory and whiteout
         * index entries, because they have not been implemented yet, so return
-        * EROFS if those entries are found to avoid corrupting an index that
-        * was created by a newer kernel.
+        * EINVAL if those entries are found to abort the mount to avoid
+        * corrupting an index that was created by a newer kernel.
         */
-       err = -EROFS;
+       err = -EINVAL;
        if (d_is_dir(index) || ovl_is_whiteout(index))
                goto fail;
 
-       err = -EINVAL;
        if (index->d_name.len < sizeof(struct ovl_fh)*2)
                goto fail;
 
@@ -506,6 +505,11 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
 
        index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len);
        if (IS_ERR(index)) {
+               err = PTR_ERR(index);
+               if (err == -ENOENT) {
+                       index = NULL;
+                       goto out;
+               }
                pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
                                    "overlayfs: mount with '-o index=off' to disable inodes index.\n",
                                    d_inode(origin)->i_ino, name.len, name.name,
@@ -515,18 +519,9 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
 
        inode = d_inode(index);
        if (d_is_negative(index)) {
-               if (upper && d_inode(origin)->i_nlink > 1) {
-                       pr_warn_ratelimited("overlayfs: hard link with origin but no index (ino=%lu).\n",
-                                           d_inode(origin)->i_ino);
-                       goto fail;
-               }
-
-               dput(index);
-               index = NULL;
+               goto out_dput;
        } else if (upper && d_inode(upper) != inode) {
-               pr_warn_ratelimited("overlayfs: wrong index found (index=%pd2, ino=%lu, upper ino=%lu).\n",
-                                   index, inode->i_ino, d_inode(upper)->i_ino);
-               goto fail;
+               goto out_dput;
        } else if (ovl_dentry_weird(index) || ovl_is_whiteout(index) ||
                   ((inode->i_mode ^ d_inode(origin)->i_mode) & S_IFMT)) {
                /*
@@ -546,6 +541,11 @@ out:
        kfree(name.name);
        return index;
 
+out_dput:
+       dput(index);
+       index = NULL;
+       goto out;
+
 fail:
        dput(index);
        index = ERR_PTR(-EIO);
@@ -634,6 +634,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                }
 
                if (d.redirect) {
+                       err = -ENOMEM;
                        upperredirect = kstrdup(d.redirect, GFP_KERNEL);
                        if (!upperredirect)
                                goto out_put_upper;
@@ -708,7 +709,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                upperdentry = dget(index);
 
        if (upperdentry || ctr) {
-               inode = ovl_get_inode(dentry, upperdentry);
+               inode = ovl_get_inode(dentry, upperdentry, index);
                err = PTR_ERR(inode);
                if (IS_ERR(inode))
                        goto out_free_oe;
index d4e8c1a08fb0f5d7b4c79de7aa88a5211a4996a4..d9a0edd4e57e40c6157613a652d0dffe86335c07 100644 (file)
@@ -235,6 +235,7 @@ bool ovl_inuse_trylock(struct dentry *dentry);
 void ovl_inuse_unlock(struct dentry *dentry);
 int ovl_nlink_start(struct dentry *dentry, bool *locked);
 void ovl_nlink_end(struct dentry *dentry, bool locked);
+int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
 
 static inline bool ovl_is_impuredir(struct dentry *dentry)
 {
@@ -285,7 +286,8 @@ int ovl_update_time(struct inode *inode, struct timespec *ts, int flags);
 bool ovl_is_private_xattr(const char *name);
 
 struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev);
-struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry);
+struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
+                           struct dentry *index);
 static inline void ovl_copyattr(struct inode *from, struct inode *to)
 {
        to->i_uid = from->i_uid;
index 878a750986dd799ad17d9acfc3818e93dcf288fa..25d9b5adcd429071537c5edf3185b58a6ab9cd60 100644 (file)
@@ -37,6 +37,9 @@ struct ovl_fs {
        bool noxattr;
        /* sb common to all layers */
        struct super_block *same_sb;
+       /* Did we take the inuse lock? */
+       bool upperdir_locked;
+       bool workdir_locked;
 };
 
 /* private information held for every overlayfs dentry */
index 62e9b22a2077ac2ac9acb382dd5617a34e9e7636..698b74dd750ee6a9fb2586d0f8d42853111e6bd1 100644 (file)
@@ -988,6 +988,7 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
                         struct path *lowerstack, unsigned int numlower)
 {
        int err;
+       struct dentry *index = NULL;
        struct inode *dir = dentry->d_inode;
        struct path path = { .mnt = mnt, .dentry = dentry };
        LIST_HEAD(list);
@@ -1007,8 +1008,6 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
 
        inode_lock_nested(dir, I_MUTEX_PARENT);
        list_for_each_entry(p, &list, l_node) {
-               struct dentry *index;
-
                if (p->name[0] == '.') {
                        if (p->len == 1)
                                continue;
@@ -1018,18 +1017,20 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
                index = lookup_one_len(p->name, dentry, p->len);
                if (IS_ERR(index)) {
                        err = PTR_ERR(index);
+                       index = NULL;
                        break;
                }
                err = ovl_verify_index(index, lowerstack, numlower);
-               if (err) {
-                       if (err == -EROFS)
-                               break;
+               /* Cleanup stale and orphan index entries */
+               if (err && (err == -ESTALE || err == -ENOENT))
                        err = ovl_cleanup(dir, index);
-                       if (err)
-                               break;
-               }
+               if (err)
+                       break;
+
                dput(index);
+               index = NULL;
        }
+       dput(index);
        inode_unlock(dir);
 out:
        ovl_cache_free(&list);
index fd5ea4facc622f80861f1495d93b148ecde2a003..f5738e96a052fe06c892923b69cd4708d8b24f70 100644 (file)
@@ -174,6 +174,9 @@ static struct inode *ovl_alloc_inode(struct super_block *sb)
 {
        struct ovl_inode *oi = kmem_cache_alloc(ovl_inode_cachep, GFP_KERNEL);
 
+       if (!oi)
+               return NULL;
+
        oi->cache = NULL;
        oi->redirect = NULL;
        oi->version = 0;
@@ -211,9 +214,10 @@ static void ovl_put_super(struct super_block *sb)
 
        dput(ufs->indexdir);
        dput(ufs->workdir);
-       ovl_inuse_unlock(ufs->workbasedir);
+       if (ufs->workdir_locked)
+               ovl_inuse_unlock(ufs->workbasedir);
        dput(ufs->workbasedir);
-       if (ufs->upper_mnt)
+       if (ufs->upper_mnt && ufs->upperdir_locked)
                ovl_inuse_unlock(ufs->upper_mnt->mnt_root);
        mntput(ufs->upper_mnt);
        for (i = 0; i < ufs->numlower; i++)
@@ -881,9 +885,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                        goto out_put_upperpath;
 
                err = -EBUSY;
-               if (!ovl_inuse_trylock(upperpath.dentry)) {
-                       pr_err("overlayfs: upperdir is in-use by another mount\n");
+               if (ovl_inuse_trylock(upperpath.dentry)) {
+                       ufs->upperdir_locked = true;
+               } else if (ufs->config.index) {
+                       pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
                        goto out_put_upperpath;
+               } else {
+                       pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
                }
 
                err = ovl_mount_dir(ufs->config.workdir, &workpath);
@@ -901,9 +909,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                }
 
                err = -EBUSY;
-               if (!ovl_inuse_trylock(workpath.dentry)) {
-                       pr_err("overlayfs: workdir is in-use by another mount\n");
+               if (ovl_inuse_trylock(workpath.dentry)) {
+                       ufs->workdir_locked = true;
+               } else if (ufs->config.index) {
+                       pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
                        goto out_put_workpath;
+               } else {
+                       pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
                }
 
                ufs->workbasedir = workpath.dentry;
@@ -1156,11 +1168,13 @@ out_put_lowerpath:
 out_free_lowertmp:
        kfree(lowertmp);
 out_unlock_workdentry:
-       ovl_inuse_unlock(workpath.dentry);
+       if (ufs->workdir_locked)
+               ovl_inuse_unlock(workpath.dentry);
 out_put_workpath:
        path_put(&workpath);
 out_unlock_upperdentry:
-       ovl_inuse_unlock(upperpath.dentry);
+       if (ufs->upperdir_locked)
+               ovl_inuse_unlock(upperpath.dentry);
 out_put_upperpath:
        path_put(&upperpath);
 out_free_config:
index 117794582f9fa597d34d8843aa8bdfb7a99cbe76..b9b239fa5cfd28d18b120696d05e817330e44241 100644 (file)
@@ -430,7 +430,7 @@ void ovl_inuse_unlock(struct dentry *dentry)
        }
 }
 
-/* Called must hold OVL_I(inode)->oi_lock */
+/* Caller must hold OVL_I(inode)->lock */
 static void ovl_cleanup_index(struct dentry *dentry)
 {
        struct inode *dir = ovl_indexdir(dentry->d_sb)->d_inode;
@@ -469,6 +469,9 @@ static void ovl_cleanup_index(struct dentry *dentry)
        err = PTR_ERR(index);
        if (!IS_ERR(index))
                err = ovl_cleanup(dir, index);
+       else
+               index = NULL;
+
        inode_unlock(dir);
        if (err)
                goto fail;
@@ -557,3 +560,22 @@ void ovl_nlink_end(struct dentry *dentry, bool locked)
                mutex_unlock(&OVL_I(d_inode(dentry))->lock);
        }
 }
+
+int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir)
+{
+       /* Workdir should not be the same as upperdir */
+       if (workdir == upperdir)
+               goto err;
+
+       /* Workdir should not be subdir of upperdir and vice versa */
+       if (lock_rename(workdir, upperdir) != NULL)
+               goto err_unlock;
+
+       return 0;
+
+err_unlock:
+       unlock_rename(workdir, upperdir);
+err:
+       pr_err("overlayfs: failed to lock workdir+upperdir\n");
+       return -EIO;
+}
index 88c355574aa0afe3b1a62b371b048af8d948070e..77a8eacbe032f22d5bd9514738341a55dcc46b34 100644 (file)
@@ -62,6 +62,7 @@
 #include <linux/mman.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/numa_balancing.h>
+#include <linux/sched/task_stack.h>
 #include <linux/sched/task.h>
 #include <linux/sched/cputime.h>
 #include <linux/proc_fs.h>
@@ -118,30 +119,25 @@ static inline void task_name(struct seq_file *m, struct task_struct *p)
  * simple bit tests.
  */
 static const char * const task_state_array[] = {
-       "R (running)",          /*   0 */
-       "S (sleeping)",         /*   1 */
-       "D (disk sleep)",       /*   2 */
-       "T (stopped)",          /*   4 */
-       "t (tracing stop)",     /*   8 */
-       "X (dead)",             /*  16 */
-       "Z (zombie)",           /*  32 */
+
+       /* states in TASK_REPORT: */
+       "R (running)",          /* 0x00 */
+       "S (sleeping)",         /* 0x01 */
+       "D (disk sleep)",       /* 0x02 */
+       "T (stopped)",          /* 0x04 */
+       "t (tracing stop)",     /* 0x08 */
+       "X (dead)",             /* 0x10 */
+       "Z (zombie)",           /* 0x20 */
+       "P (parked)",           /* 0x40 */
+
+       /* states beyond TASK_REPORT: */
+       "I (idle)",             /* 0x80 */
 };
 
 static inline const char *get_task_state(struct task_struct *tsk)
 {
-       unsigned int state = (tsk->state | tsk->exit_state) & TASK_REPORT;
-
-       /*
-        * Parked tasks do not run; they sit in __kthread_parkme().
-        * Without this check, we would report them as running, which is
-        * clearly wrong, so we report them as sleeping instead.
-        */
-       if (tsk->state == TASK_PARKED)
-               state = TASK_INTERRUPTIBLE;
-
-       BUILD_BUG_ON(1 + ilog2(TASK_REPORT) != ARRAY_SIZE(task_state_array)-1);
-
-       return task_state_array[fls(state)];
+       BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != ARRAY_SIZE(task_state_array));
+       return task_state_array[__get_task_state(tsk)];
 }
 
 static inline int get_task_umask(struct task_struct *tsk)
@@ -421,7 +417,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                 * esp and eip are intentionally zeroed out.  There is no
                 * non-racy way to read them without freezing the task.
                 * Programs that need reliable values can use ptrace(2).
+                *
+                * The only exception is if the task is core dumping because
+                * a program is not able to use ptrace(2) in that case. It is
+                * safe because the task has stopped executing permanently.
                 */
+               if (permitted && (task->flags & PF_DUMPCORE)) {
+                       eip = KSTK_EIP(task);
+                       esp = KSTK_ESP(task);
+               }
        }
 
        get_task_comm(tcomm, task);
index 8381db9db6d9bcc3acfa645bf9f73cbb5933c564..52ad15192e724c00db1a6a3e2d49d0842b62ce6a 100644 (file)
@@ -1297,21 +1297,18 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
        spin_lock(&dquot->dq_dqb_lock);
        if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
            test_bit(DQ_FAKE_B, &dquot->dq_flags))
-               goto add;
+               goto finish;
 
        tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
                + space + rsv_space;
 
-       if (flags & DQUOT_SPACE_NOFAIL)
-               goto add;
-
        if (dquot->dq_dqb.dqb_bhardlimit &&
            tspace > dquot->dq_dqb.dqb_bhardlimit &&
             !ignore_hardlimit(dquot)) {
                if (flags & DQUOT_SPACE_WARN)
                        prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
                ret = -EDQUOT;
-               goto out;
+               goto finish;
        }
 
        if (dquot->dq_dqb.dqb_bsoftlimit &&
@@ -1322,7 +1319,7 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
                if (flags & DQUOT_SPACE_WARN)
                        prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
                ret = -EDQUOT;
-               goto out;
+               goto finish;
        }
 
        if (dquot->dq_dqb.dqb_bsoftlimit &&
@@ -1338,13 +1335,21 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
                         * be always printed
                         */
                        ret = -EDQUOT;
-                       goto out;
+                       goto finish;
                }
        }
-add:
-       dquot->dq_dqb.dqb_rsvspace += rsv_space;
-       dquot->dq_dqb.dqb_curspace += space;
-out:
+finish:
+       /*
+        * We have to be careful and go through warning generation & grace time
+        * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
+        * only here...
+        */
+       if (flags & DQUOT_SPACE_NOFAIL)
+               ret = 0;
+       if (!ret) {
+               dquot->dq_dqb.dqb_rsvspace += rsv_space;
+               dquot->dq_dqb.dqb_curspace += space;
+       }
        spin_unlock(&dquot->dq_dqb_lock);
        return ret;
 }
@@ -1980,7 +1985,9 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
                ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space, 0,
                                      &warn_to[cnt]);
                if (ret) {
+                       spin_lock(&transfer_to[cnt]->dq_dqb_lock);
                        dquot_decr_inodes(transfer_to[cnt], inode_usage);
+                       spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
                        goto over_quota;
                }
        }
index c0187cda2c1ed3ff65b449a74aa71c05ca29eb58..a73e5b34db4181272bc943c4e0ea406797ff0311 100644 (file)
@@ -328,12 +328,16 @@ static int v2_write_dquot(struct dquot *dquot)
        if (!dquot->dq_off) {
                alloc = true;
                down_write(&dqopt->dqio_sem);
+       } else {
+               down_read(&dqopt->dqio_sem);
        }
        ret = qtree_write_dquot(
                        sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv,
                        dquot);
        if (alloc)
                up_write(&dqopt->dqio_sem);
+       else
+               up_read(&dqopt->dqio_sem);
        return ret;
 }
 
index a2b9a47235c5ba48b4db2bf81d03c1579b562f79..f0d4b16873e89aa23522c9ece34b8f1900638176 100644 (file)
@@ -112,7 +112,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
                 * In the generic case the entire file is data, so as long as
                 * offset isn't at the end of the file then the offset is data.
                 */
-               if (offset >= eof)
+               if ((unsigned long long)offset >= eof)
                        return -ENXIO;
                break;
        case SEEK_HOLE:
@@ -120,7 +120,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
                 * There is a virtual hole at the end of the file, so as long as
                 * offset isn't i_size or larger, return i_size.
                 */
-               if (offset >= eof)
+               if ((unsigned long long)offset >= eof)
                        return -ENXIO;
                offset = eof;
                break;
index ef4b48d1ea4270f59f46e29ae898a48cdb0c8aa6..1c713fd5b3e67966c3d998979d2c30eb8e14ba07 100644 (file)
@@ -588,6 +588,12 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                        break;
                if (ACCESS_ONCE(ctx->released) ||
                    fatal_signal_pending(current)) {
+                       /*
+                        * &ewq->wq may be queued in fork_event, but
+                        * __remove_wait_queue ignores the head
+                        * parameter. It would be a problem if it
+                        * didn't.
+                        */
                        __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
                        if (ewq->msg.event == UFFD_EVENT_FORK) {
                                struct userfaultfd_ctx *new;
@@ -1061,6 +1067,12 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
                                        (unsigned long)
                                        uwq->msg.arg.reserved.reserved1;
                                list_move(&uwq->wq.entry, &fork_event);
+                               /*
+                                * fork_nctx can be freed as soon as
+                                * we drop the lock, unless we take a
+                                * reference on it.
+                                */
+                               userfaultfd_ctx_get(fork_nctx);
                                spin_unlock(&ctx->event_wqh.lock);
                                ret = 0;
                                break;
@@ -1091,19 +1103,53 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
 
        if (!ret && msg->event == UFFD_EVENT_FORK) {
                ret = resolve_userfault_fork(ctx, fork_nctx, msg);
+               spin_lock(&ctx->event_wqh.lock);
+               if (!list_empty(&fork_event)) {
+                       /*
+                        * The fork thread didn't abort, so we can
+                        * drop the temporary refcount.
+                        */
+                       userfaultfd_ctx_put(fork_nctx);
+
+                       uwq = list_first_entry(&fork_event,
+                                              typeof(*uwq),
+                                              wq.entry);
+                       /*
+                        * If fork_event list wasn't empty and in turn
+                        * the event wasn't already released by fork
+                        * (the event is allocated on fork kernel
+                        * stack), put the event back to its place in
+                        * the event_wq. fork_event head will be freed
+                        * as soon as we return so the event cannot
+                        * stay queued there no matter the current
+                        * "ret" value.
+                        */
+                       list_del(&uwq->wq.entry);
+                       __add_wait_queue(&ctx->event_wqh, &uwq->wq);
 
-               if (!ret) {
-                       spin_lock(&ctx->event_wqh.lock);
-                       if (!list_empty(&fork_event)) {
-                               uwq = list_first_entry(&fork_event,
-                                                      typeof(*uwq),
-                                                      wq.entry);
-                               list_del(&uwq->wq.entry);
-                               __add_wait_queue(&ctx->event_wqh, &uwq->wq);
+                       /*
+                        * Leave the event in the waitqueue and report
+                        * error to userland if we failed to resolve
+                        * the userfault fork.
+                        */
+                       if (likely(!ret))
                                userfaultfd_event_complete(ctx, uwq);
-                       }
-                       spin_unlock(&ctx->event_wqh.lock);
+               } else {
+                       /*
+                        * Here the fork thread aborted and the
+                        * refcount from the fork thread on fork_nctx
+                        * has already been released. We still hold
+                        * the reference we took before releasing the
+                        * lock above. If resolve_userfault_fork
+                        * failed we've to drop it because the
+                        * fork_nctx has to be freed in such case. If
+                        * it succeeded we'll hold it because the new
+                        * uffd references it.
+                        */
+                       if (ret)
+                               userfaultfd_ctx_put(fork_nctx);
                }
+               spin_unlock(&ctx->event_wqh.lock);
        }
 
        return ret;
index 4424f7fecf14549b65c62d0cac4b8b692718f426..61cd28ba25f364df5af103277924befb6d4a39a0 100644 (file)
@@ -250,7 +250,7 @@ xattr_getsecurity(struct inode *inode, const char *name, void *value,
        }
        memcpy(value, buffer, len);
 out:
-       security_release_secctx(buffer, len);
+       kfree(buffer);
 out_noalloc:
        return len;
 }
index b008ff3250eba4f3884e18841c4c316b7e7cbf50..df3e600835e8de81e69ac295b06fae6fecefd7a4 100644 (file)
@@ -156,7 +156,8 @@ __xfs_ag_resv_free(
        trace_xfs_ag_resv_free(pag, type, 0);
 
        resv = xfs_perag_resv(pag, type);
-       pag->pag_mount->m_ag_max_usable += resv->ar_asked;
+       if (pag->pag_agno == 0)
+               pag->pag_mount->m_ag_max_usable += resv->ar_asked;
        /*
         * AGFL blocks are always considered "free", so whatever
         * was reserved at mount time must be given back at umount.
@@ -216,7 +217,14 @@ __xfs_ag_resv_init(
                return error;
        }
 
-       mp->m_ag_max_usable -= ask;
+       /*
+        * Reduce the maximum per-AG allocation length by however much we're
+        * trying to reserve for an AG.  Since this is a filesystem-wide
+        * counter, we only make the adjustment for AG 0.  This assumes that
+        * there aren't any AGs hungrier for per-AG reservation than AG 0.
+        */
+       if (pag->pag_agno == 0)
+               mp->m_ag_max_usable -= ask;
 
        resv = xfs_perag_resv(pag, type);
        resv->ar_asked = ask;
index 744dcaec34cca98b2de73e4d900894c57b0807a8..f965ce832bc0561acb50a67ce28373fc7fb0b091 100644 (file)
@@ -1584,6 +1584,10 @@ xfs_alloc_ag_vextent_small(
 
                                bp = xfs_btree_get_bufs(args->mp, args->tp,
                                        args->agno, fbno, 0);
+                               if (!bp) {
+                                       error = -EFSCORRUPTED;
+                                       goto error0;
+                               }
                                xfs_trans_binval(args->tp, bp);
                        }
                        args->len = 1;
@@ -2141,6 +2145,10 @@ xfs_alloc_fix_freelist(
                if (error)
                        goto out_agbp_relse;
                bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
+               if (!bp) {
+                       error = -EFSCORRUPTED;
+                       goto out_agbp_relse;
+               }
                xfs_trans_binval(tp, bp);
        }
 
index 459f4b4f08fe5a1eba6fd725cb3a8866a5876a11..89263797cf3253d905e1e1010625243a0ee9bf23 100644 (file)
@@ -49,7 +49,6 @@
 #include "xfs_rmap.h"
 #include "xfs_ag_resv.h"
 #include "xfs_refcount.h"
-#include "xfs_rmap_btree.h"
 #include "xfs_icache.h"
 
 
@@ -192,12 +191,8 @@ xfs_bmap_worst_indlen(
        int             maxrecs;        /* maximum record count at this level */
        xfs_mount_t     *mp;            /* mount structure */
        xfs_filblks_t   rval;           /* return value */
-       xfs_filblks_t   orig_len;
 
        mp = ip->i_mount;
-
-       /* Calculate the worst-case size of the bmbt. */
-       orig_len = len;
        maxrecs = mp->m_bmap_dmxr[0];
        for (level = 0, rval = 0;
             level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
@@ -205,20 +200,12 @@ xfs_bmap_worst_indlen(
                len += maxrecs - 1;
                do_div(len, maxrecs);
                rval += len;
-               if (len == 1) {
-                       rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
+               if (len == 1)
+                       return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
                                level - 1;
-                       break;
-               }
                if (level == 0)
                        maxrecs = mp->m_bmap_dmxr[1];
        }
-
-       /* Calculate the worst-case size of the rmapbt. */
-       if (xfs_sb_version_hasrmapbt(&mp->m_sb))
-               rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) +
-                               mp->m_rmap_maxlevels;
-
        return rval;
 }
 
@@ -1490,14 +1477,14 @@ xfs_bmap_isaeof(
        int                     is_empty;
        int                     error;
 
-       bma->aeof = 0;
+       bma->aeof = false;
        error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
                                     &is_empty);
        if (error)
                return error;
 
        if (is_empty) {
-               bma->aeof = 1;
+               bma->aeof = true;
                return 0;
        }
 
@@ -3865,6 +3852,17 @@ xfs_trim_extent(
        }
 }
 
+/* trim extent to within eof */
+void
+xfs_trim_extent_eof(
+       struct xfs_bmbt_irec    *irec,
+       struct xfs_inode        *ip)
+
+{
+       xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
+                                             i_size_read(VFS_I(ip))));
+}
+
 /*
  * Trim the returned map to the required bounds
  */
index 851982a5dfbc54b347d5836898264f56b3b4f957..502e0d8fb4ff63e125328d31434e3ee1eaa39cbd 100644 (file)
@@ -208,6 +208,7 @@ void        xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
 
 void   xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
                xfs_filblks_t len);
+void   xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
 int    xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
 void   xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
 void   xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
index 988bb3f3144664216bdf6880fd79838785a7b8db..dfd643909f8512be75322a0ebff86ff3b039cb52 100644 (file)
@@ -1962,7 +1962,7 @@ xfs_difree_inobt(
        if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
            rec.ir_free == XFS_INOBT_ALL_FREE &&
            mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
-               xic->deleted = 1;
+               xic->deleted = true;
                xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
                xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
 
@@ -1989,7 +1989,7 @@ xfs_difree_inobt(
 
                xfs_difree_inode_chunk(mp, agno, &rec, dfops);
        } else {
-               xic->deleted = 0;
+               xic->deleted = false;
 
                error = xfs_inobt_update(cur, &rec);
                if (error) {
index 8372e9bcd7b6ba4b8fcc7b9133d5acecef4842e2..71de185735e06c1d44e13e55e30ff1d38b5a62d8 100644 (file)
@@ -270,6 +270,7 @@ typedef struct xfs_inode_log_format {
        uint32_t                ilf_fields;     /* flags for fields logged */
        uint16_t                ilf_asize;      /* size of attr d/ext/root */
        uint16_t                ilf_dsize;      /* size of data/ext/root */
+       uint32_t                ilf_pad;        /* pad for 64 bit boundary */
        uint64_t                ilf_ino;        /* inode number */
        union {
                uint32_t        ilfu_rdev;      /* rdev value for dev inode*/
@@ -280,29 +281,17 @@ typedef struct xfs_inode_log_format {
        int32_t                 ilf_boffset;    /* off of inode in buffer */
 } xfs_inode_log_format_t;
 
-typedef struct xfs_inode_log_format_32 {
-       uint16_t                ilf_type;       /* inode log item type */
-       uint16_t                ilf_size;       /* size of this item */
-       uint32_t                ilf_fields;     /* flags for fields logged */
-       uint16_t                ilf_asize;      /* size of attr d/ext/root */
-       uint16_t                ilf_dsize;      /* size of data/ext/root */
-       uint64_t                ilf_ino;        /* inode number */
-       union {
-               uint32_t        ilfu_rdev;      /* rdev value for dev inode*/
-               uuid_t          ilfu_uuid;      /* mount point value */
-       } ilf_u;
-       int64_t                 ilf_blkno;      /* blkno of inode buffer */
-       int32_t                 ilf_len;        /* len of inode buffer */
-       int32_t                 ilf_boffset;    /* off of inode in buffer */
-} __attribute__((packed)) xfs_inode_log_format_32_t;
-
-typedef struct xfs_inode_log_format_64 {
+/*
+ * Old 32 bit systems will log in this format without the 64 bit
+ * alignment padding. Recovery will detect this and convert it to the
+ * correct format.
+ */
+struct xfs_inode_log_format_32 {
        uint16_t                ilf_type;       /* inode log item type */
        uint16_t                ilf_size;       /* size of this item */
        uint32_t                ilf_fields;     /* flags for fields logged */
        uint16_t                ilf_asize;      /* size of attr d/ext/root */
        uint16_t                ilf_dsize;      /* size of data/ext/root */
-       uint32_t                ilf_pad;        /* pad for 64 bit boundary */
        uint64_t                ilf_ino;        /* inode number */
        union {
                uint32_t        ilfu_rdev;      /* rdev value for dev inode*/
@@ -311,7 +300,7 @@ typedef struct xfs_inode_log_format_64 {
        int64_t                 ilf_blkno;      /* blkno of inode buffer */
        int32_t                 ilf_len;        /* len of inode buffer */
        int32_t                 ilf_boffset;    /* off of inode in buffer */
-} xfs_inode_log_format_64_t;
+} __attribute__((packed));
 
 
 /*
index 7034e17535dee2fbce449e72a4505341234a718e..3354140de07eb8aa2566c6c9254ad85e24a2a4bf 100644 (file)
@@ -247,6 +247,8 @@ xfs_set_mode(struct inode *inode, umode_t mode)
 int
 xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
+       umode_t mode;
+       bool set_mode = false;
        int error = 0;
 
        if (!acl)
@@ -257,16 +259,24 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
                return error;
 
        if (type == ACL_TYPE_ACCESS) {
-               umode_t mode;
-
                error = posix_acl_update_mode(inode, &mode, &acl);
                if (error)
                        return error;
-               error = xfs_set_mode(inode, mode);
-               if (error)
-                       return error;
+               set_mode = true;
        }
 
  set_acl:
-       return __xfs_set_acl(inode, acl, type);
+       error =  __xfs_set_acl(inode, acl, type);
+       if (error)
+               return error;
+
+       /*
+        * We set the mode after successfully updating the ACL xattr because the
+        * xattr update can fail at ENOSPC and we don't want to change the mode
+        * if the ACL update hasn't been applied.
+        */
+       if (set_mode)
+               error = xfs_set_mode(inode, mode);
+
+       return error;
 }
index 29172609f2a31b756cd40da7b42f288fe8b0915b..a3eeaba156c5ab8d7d34e6b4f217423c452b73dc 100644 (file)
@@ -343,7 +343,8 @@ xfs_end_io(
                error = xfs_reflink_end_cow(ip, offset, size);
                break;
        case XFS_IO_UNWRITTEN:
-               error = xfs_iomap_write_unwritten(ip, offset, size);
+               /* writeback should never update isize */
+               error = xfs_iomap_write_unwritten(ip, offset, size, false);
                break;
        default:
                ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
@@ -445,6 +446,19 @@ xfs_imap_valid(
 {
        offset >>= inode->i_blkbits;
 
+       /*
+        * We have to make sure the cached mapping is within EOF to protect
+        * against eofblocks trimming on file release leaving us with a stale
+        * mapping. Otherwise, a page for a subsequent file extending buffered
+        * write could get picked up by this writeback cycle and written to the
+        * wrong blocks.
+        *
+        * Note that what we really want here is a generic mapping invalidation
+        * mechanism to protect us from arbitrary extent modifying contexts, not
+        * just eofblocks.
+        */
+       xfs_trim_extent_eof(imap, XFS_I(inode));
+
        return offset >= imap->br_startoff &&
                offset < imap->br_startoff + imap->br_blockcount;
 }
@@ -734,6 +748,14 @@ xfs_vm_invalidatepage(
 {
        trace_xfs_invalidatepage(page->mapping->host, page, offset,
                                 length);
+
+       /*
+        * If we are invalidating the entire page, clear the dirty state from it
+        * so that we can check for attempts to release dirty cached pages in
+        * xfs_vm_releasepage().
+        */
+       if (offset == 0 && length >= PAGE_SIZE)
+               cancel_dirty_page(page);
        block_invalidatepage(page, offset, length);
 }
 
@@ -1189,25 +1211,27 @@ xfs_vm_releasepage(
         * mm accommodates an old ext3 case where clean pages might not have had
         * the dirty bit cleared. Thus, it can send actual dirty pages to
         * ->releasepage() via shrink_active_list(). Conversely,
-        * block_invalidatepage() can send pages that are still marked dirty
-        * but otherwise have invalidated buffers.
+        * block_invalidatepage() can send pages that are still marked dirty but
+        * otherwise have invalidated buffers.
         *
         * We want to release the latter to avoid unnecessary buildup of the
-        * LRU, skip the former and warn if we've left any lingering
-        * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
-        * or unwritten buffers and warn if the page is not dirty. Otherwise
-        * try to release the buffers.
+        * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
+        * that are entirely invalidated and need to be released.  Hence the
+        * only time we should get dirty pages here is through
+        * shrink_active_list() and so we can simply skip those now.
+        *
+        * warn if we've left any lingering delalloc/unwritten buffers on clean
+        * or invalidated pages we are about to release.
         */
+       if (PageDirty(page))
+               return 0;
+
        xfs_count_page_state(page, &delalloc, &unwritten);
 
-       if (delalloc) {
-               WARN_ON_ONCE(!PageDirty(page));
+       if (WARN_ON_ONCE(delalloc))
                return 0;
-       }
-       if (unwritten) {
-               WARN_ON_ONCE(!PageDirty(page));
+       if (WARN_ON_ONCE(unwritten))
                return 0;
-       }
 
        return try_to_free_buffers(page);
 }
index ebd66b19fbfc365982500e67aa654c3838b57886..e3a950ed35a81088f121eccb1a8c2ef760ab5f31 100644 (file)
@@ -302,6 +302,8 @@ xfs_attr3_node_inactive(
                                                 &bp, XFS_ATTR_FORK);
                        if (error)
                                return error;
+                       node = bp->b_addr;
+                       btree = dp->d_ops->node_tree_p(node);
                        child_fsb = be32_to_cpu(btree[i + 1].before);
                        xfs_trans_brelse(*trans, bp);
                }
index cd9a5400ba4fef2a75d5c24f3f626f5ad0211fae..6503cfa442620efb7abf22458eed86eed9628e90 100644 (file)
@@ -84,6 +84,7 @@ xfs_zero_extent(
                GFP_NOFS, 0);
 }
 
+#ifdef CONFIG_XFS_RT
 int
 xfs_bmap_rtalloc(
        struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
@@ -190,6 +191,7 @@ xfs_bmap_rtalloc(
        }
        return 0;
 }
+#endif /* CONFIG_XFS_RT */
 
 /*
  * Check if the endoff is outside the last extent. If so the caller will grow
@@ -1459,7 +1461,19 @@ xfs_shift_file_space(
                return error;
 
        /*
-        * The extent shiting code works on extent granularity. So, if
+        * Clean out anything hanging around in the cow fork now that
+        * we've flushed all the dirty data out to disk to avoid having
+        * CoW extents at the wrong offsets.
+        */
+       if (xfs_is_reflink_inode(ip)) {
+               error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
+                               true);
+               if (error)
+                       return error;
+       }
+
+       /*
+        * The extent shifting code works on extent granularity. So, if
         * stop_fsb is not the starting block of extent, we need to split
         * the extent at stop_fsb.
         */
@@ -2110,11 +2124,31 @@ xfs_swap_extents(
                ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
                tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
                tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
+       }
+
+       /* Swap the cow forks. */
+       if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+               xfs_extnum_t    extnum;
+
+               ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
+               ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
+
+               extnum = ip->i_cnextents;
+               ip->i_cnextents = tip->i_cnextents;
+               tip->i_cnextents = extnum;
+
                cowfp = ip->i_cowfp;
                ip->i_cowfp = tip->i_cowfp;
                tip->i_cowfp = cowfp;
-               xfs_inode_set_cowblocks_tag(ip);
-               xfs_inode_set_cowblocks_tag(tip);
+
+               if (ip->i_cowfp && ip->i_cnextents)
+                       xfs_inode_set_cowblocks_tag(ip);
+               else
+                       xfs_inode_clear_cowblocks_tag(ip);
+               if (tip->i_cowfp && tip->i_cnextents)
+                       xfs_inode_set_cowblocks_tag(tip);
+               else
+                       xfs_inode_clear_cowblocks_tag(tip);
        }
 
        xfs_trans_log_inode(tp, ip,  src_log_flags);
index 0eaa81dc49be674137cda51a3364b54721c10d58..7d330b3c77c3576fb6d58974e7af97c4b0ecc8e1 100644 (file)
@@ -28,7 +28,20 @@ struct xfs_mount;
 struct xfs_trans;
 struct xfs_bmalloca;
 
+#ifdef CONFIG_XFS_RT
 int    xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
+#else /* !CONFIG_XFS_RT */
+/*
+ * Attempts to allocate RT extents when RT is disable indicates corruption and
+ * should trigger a shutdown.
+ */
+static inline int
+xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
+{
+       return -EFSCORRUPTED;
+}
+#endif /* CONFIG_XFS_RT */
+
 int    xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
                     int whichfork, int *eof);
 int    xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
index da14658da3103475940555600581f0bf12217d55..2f97c12ca75e4d5554b94b7a195b4dca53f90174 100644 (file)
@@ -1258,8 +1258,6 @@ xfs_buf_ioapply_map(
        int             size;
        int             offset;
 
-       total_nr_pages = bp->b_page_count;
-
        /* skip the pages in the buffer before the start offset */
        page_index = 0;
        offset = *buf_offset;
index bd786a9ac2c38879cf6e94ad660cd1bda19873c5..eaf86f55b7f2156c351aa26fd0190871ef1772bd 100644 (file)
@@ -347,7 +347,7 @@ xfs_verifier_error(
 {
        struct xfs_mount *mp = bp->b_target->bt_mount;
 
-       xfs_alert(mp, "Metadata %s detected at %pF, %s block 0x%llx",
+       xfs_alert(mp, "Metadata %s detected at %pS, %s block 0x%llx",
                  bp->b_error == -EFSBADCRC ? "CRC error" : "corruption",
                  __return_address, bp->b_ops->name, bp->b_bn);
 
index ebdd0bd2b2616a7e9052b4da8378721b144dc0af..6526ef0e2a234965e7895e823b544f24faedb81a 100644 (file)
@@ -58,7 +58,7 @@ xfs_zero_range(
        xfs_off_t               count,
        bool                    *did_zero)
 {
-       return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops);
+       return iomap_zero_range(VFS_I(ip), pos, count, did_zero, &xfs_iomap_ops);
 }
 
 int
@@ -237,11 +237,13 @@ xfs_file_dax_read(
        if (!count)
                return 0; /* skip atime */
 
-       if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
-               if (iocb->ki_flags & IOCB_NOWAIT)
+       if (iocb->ki_flags & IOCB_NOWAIT) {
+               if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
                        return -EAGAIN;
+       } else {
                xfs_ilock(ip, XFS_IOLOCK_SHARED);
        }
+
        ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
@@ -259,9 +261,10 @@ xfs_file_buffered_aio_read(
 
        trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
 
-       if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
-               if (iocb->ki_flags & IOCB_NOWAIT)
+       if (iocb->ki_flags & IOCB_NOWAIT) {
+               if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
                        return -EAGAIN;
+       } else {
                xfs_ilock(ip, XFS_IOLOCK_SHARED);
        }
        ret = generic_file_read_iter(iocb, to);
@@ -377,8 +380,6 @@ restart:
         */
        spin_lock(&ip->i_flags_lock);
        if (iocb->ki_pos > i_size_read(inode)) {
-               bool    zero = false;
-
                spin_unlock(&ip->i_flags_lock);
                if (!drained_dio) {
                        if (*iolock == XFS_IOLOCK_SHARED) {
@@ -399,7 +400,7 @@ restart:
                        drained_dio = true;
                        goto restart;
                }
-               error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
+               error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), NULL);
                if (error)
                        return error;
        } else
@@ -436,7 +437,6 @@ xfs_dio_write_end_io(
        struct inode            *inode = file_inode(iocb->ki_filp);
        struct xfs_inode        *ip = XFS_I(inode);
        loff_t                  offset = iocb->ki_pos;
-       bool                    update_size = false;
        int                     error = 0;
 
        trace_xfs_end_io_direct_write(ip, offset, size);
@@ -447,6 +447,21 @@ xfs_dio_write_end_io(
        if (size <= 0)
                return size;
 
+       if (flags & IOMAP_DIO_COW) {
+               error = xfs_reflink_end_cow(ip, offset, size);
+               if (error)
+                       return error;
+       }
+
+       /*
+        * Unwritten conversion updates the in-core isize after extent
+        * conversion but before updating the on-disk size. Updating isize any
+        * earlier allows a racing dio read to find unwritten extents before
+        * they are converted.
+        */
+       if (flags & IOMAP_DIO_UNWRITTEN)
+               return xfs_iomap_write_unwritten(ip, offset, size, true);
+
        /*
         * We need to update the in-core inode size here so that we don't end up
         * with the on-disk inode size being outside the in-core inode size. We
@@ -461,20 +476,11 @@ xfs_dio_write_end_io(
        spin_lock(&ip->i_flags_lock);
        if (offset + size > i_size_read(inode)) {
                i_size_write(inode, offset + size);
-               update_size = true;
-       }
-       spin_unlock(&ip->i_flags_lock);
-
-       if (flags & IOMAP_DIO_COW) {
-               error = xfs_reflink_end_cow(ip, offset, size);
-               if (error)
-                       return error;
-       }
-
-       if (flags & IOMAP_DIO_UNWRITTEN)
-               error = xfs_iomap_write_unwritten(ip, offset, size);
-       else if (update_size)
+               spin_unlock(&ip->i_flags_lock);
                error = xfs_setfilesize(ip, offset, size);
+       } else {
+               spin_unlock(&ip->i_flags_lock);
+       }
 
        return error;
 }
@@ -549,9 +555,10 @@ xfs_file_dio_aio_write(
                iolock = XFS_IOLOCK_SHARED;
        }
 
-       if (!xfs_ilock_nowait(ip, iolock)) {
-               if (iocb->ki_flags & IOCB_NOWAIT)
+       if (iocb->ki_flags & IOCB_NOWAIT) {
+               if (!xfs_ilock_nowait(ip, iolock))
                        return -EAGAIN;
+       } else {
                xfs_ilock(ip, iolock);
        }
 
@@ -603,9 +610,10 @@ xfs_file_dax_write(
        size_t                  count;
        loff_t                  pos;
 
-       if (!xfs_ilock_nowait(ip, iolock)) {
-               if (iocb->ki_flags & IOCB_NOWAIT)
+       if (iocb->ki_flags & IOCB_NOWAIT) {
+               if (!xfs_ilock_nowait(ip, iolock))
                        return -EAGAIN;
+       } else {
                xfs_ilock(ip, iolock);
        }
 
@@ -761,7 +769,7 @@ xfs_file_fallocate(
        enum xfs_prealloc_flags flags = 0;
        uint                    iolock = XFS_IOLOCK_EXCL;
        loff_t                  new_size = 0;
-       bool                    do_file_insert = 0;
+       bool                    do_file_insert = false;
 
        if (!S_ISREG(inode->i_mode))
                return -EINVAL;
@@ -822,7 +830,7 @@ xfs_file_fallocate(
                        error = -EINVAL;
                        goto out_unlock;
                }
-               do_file_insert = 1;
+               do_file_insert = true;
        } else {
                flags |= XFS_PREALLOC_SET;
 
index 814ed729881d9a4305c3dd5646d75ef0f112b87b..43cfc07996a43ed1779e7ce6249df2113d87dc28 100644 (file)
@@ -367,29 +367,6 @@ xfs_getfsmap_datadev_helper(
        return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
 }
 
-/* Transform a rtbitmap "record" into a fsmap */
-STATIC int
-xfs_getfsmap_rtdev_rtbitmap_helper(
-       struct xfs_trans                *tp,
-       struct xfs_rtalloc_rec          *rec,
-       void                            *priv)
-{
-       struct xfs_mount                *mp = tp->t_mountp;
-       struct xfs_getfsmap_info        *info = priv;
-       struct xfs_rmap_irec            irec;
-       xfs_daddr_t                     rec_daddr;
-
-       rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
-
-       irec.rm_startblock = rec->ar_startblock;
-       irec.rm_blockcount = rec->ar_blockcount;
-       irec.rm_owner = XFS_RMAP_OWN_NULL;      /* "free" */
-       irec.rm_offset = 0;
-       irec.rm_flags = 0;
-
-       return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
-}
-
 /* Transform a bnobt irec into a fsmap */
 STATIC int
 xfs_getfsmap_datadev_bnobt_helper(
@@ -475,6 +452,30 @@ xfs_getfsmap_logdev(
        return xfs_getfsmap_helper(tp, info, &rmap, 0);
 }
 
+#ifdef CONFIG_XFS_RT
+/* Transform a rtbitmap "record" into a fsmap */
+STATIC int
+xfs_getfsmap_rtdev_rtbitmap_helper(
+       struct xfs_trans                *tp,
+       struct xfs_rtalloc_rec          *rec,
+       void                            *priv)
+{
+       struct xfs_mount                *mp = tp->t_mountp;
+       struct xfs_getfsmap_info        *info = priv;
+       struct xfs_rmap_irec            irec;
+       xfs_daddr_t                     rec_daddr;
+
+       rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
+
+       irec.rm_startblock = rec->ar_startblock;
+       irec.rm_blockcount = rec->ar_blockcount;
+       irec.rm_owner = XFS_RMAP_OWN_NULL;      /* "free" */
+       irec.rm_offset = 0;
+       irec.rm_flags = 0;
+
+       return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
+}
+
 /* Execute a getfsmap query against the realtime device. */
 STATIC int
 __xfs_getfsmap_rtdev(
@@ -561,6 +562,7 @@ xfs_getfsmap_rtdev_rtbitmap(
        return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query,
                        info);
 }
+#endif /* CONFIG_XFS_RT */
 
 /* Execute a getfsmap query against the regular data device. */
 STATIC int
@@ -795,7 +797,15 @@ xfs_getfsmap_check_keys(
        return false;
 }
 
+/*
+ * There are only two devices if we didn't configure RT devices at build time.
+ */
+#ifdef CONFIG_XFS_RT
 #define XFS_GETFSMAP_DEVS      3
+#else
+#define XFS_GETFSMAP_DEVS      2
+#endif /* CONFIG_XFS_RT */
+
 /*
  * Get filesystem's extents as described in head, and format for
  * output.  Calls formatter to fill the user's buffer until all
@@ -853,10 +863,12 @@ xfs_getfsmap(
                handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev);
                handlers[1].fn = xfs_getfsmap_logdev;
        }
+#ifdef CONFIG_XFS_RT
        if (mp->m_rtdev_targp) {
                handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev);
                handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap;
        }
+#endif /* CONFIG_XFS_RT */
 
        xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev),
                        xfs_getfsmap_dev_compare);
index 5599dda4727af6736fa1ae15429456b2e5fc6a04..4ec5b7f4540137622856883ab5dafb73d95f89ab 100644 (file)
@@ -1624,10 +1624,12 @@ xfs_itruncate_extents(
                goto out;
 
        /*
-        * Clear the reflink flag if we truncated everything.
+        * Clear the reflink flag if there are no data fork blocks and
+        * there are no extents staged in the cow fork.
         */
-       if (ip->i_d.di_nblocks == 0 && xfs_is_reflink_inode(ip)) {
-               ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+       if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) {
+               if (ip->i_d.di_nblocks == 0)
+                       ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
                xfs_inode_clear_cowblocks_tag(ip);
        }
 
index 6d0f74ec31e89c35769b55f3402003c2548ae980..9bbc2d7cc8cbb2c7cf42ee335061acfeff68617c 100644 (file)
@@ -364,6 +364,9 @@ xfs_inode_to_log_dinode(
        to->di_dmstate = from->di_dmstate;
        to->di_flags = from->di_flags;
 
+       /* log a dummy value to ensure log structure is fully initialised */
+       to->di_next_unlinked = NULLAGINO;
+
        if (from->di_version == 3) {
                to->di_changecount = inode->i_version;
                to->di_crtime.t_sec = from->di_crtime.t_sec;
@@ -404,6 +407,11 @@ xfs_inode_item_format_core(
  * the second with the on-disk inode structure, and a possible third and/or
  * fourth with the inode data/extents/b-tree root and inode attributes
  * data/extents/b-tree root.
+ *
+ * Note: Always use the 64 bit inode log format structure so we don't
+ * leave an uninitialised hole in the format item on 64 bit systems. Log
+ * recovery on 32 bit systems handles this just fine, so there's no reason
+ * for not using an initialising the properly padded structure all the time.
  */
 STATIC void
 xfs_inode_item_format(
@@ -412,8 +420,8 @@ xfs_inode_item_format(
 {
        struct xfs_inode_log_item *iip = INODE_ITEM(lip);
        struct xfs_inode        *ip = iip->ili_inode;
-       struct xfs_inode_log_format *ilf;
        struct xfs_log_iovec    *vecp = NULL;
+       struct xfs_inode_log_format *ilf;
 
        ASSERT(ip->i_d.di_version > 1);
 
@@ -425,7 +433,17 @@ xfs_inode_item_format(
        ilf->ilf_boffset = ip->i_imap.im_boffset;
        ilf->ilf_fields = XFS_ILOG_CORE;
        ilf->ilf_size = 2; /* format + core */
-       xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format));
+
+       /*
+        * make sure we don't leak uninitialised data into the log in the case
+        * when we don't log every field in the inode.
+        */
+       ilf->ilf_dsize = 0;
+       ilf->ilf_asize = 0;
+       ilf->ilf_pad = 0;
+       uuid_copy(&ilf->ilf_u.ilfu_uuid, &uuid_null);
+
+       xlog_finish_iovec(lv, vecp, sizeof(*ilf));
 
        xfs_inode_item_format_core(ip, lv, &vecp);
        xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
@@ -745,7 +763,7 @@ xfs_iflush_done(
                 */
                iip = INODE_ITEM(blip);
                if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) ||
-                   lip->li_flags & XFS_LI_FAILED)
+                   (blip->li_flags & XFS_LI_FAILED))
                        need_ail++;
 
                blip = next;
@@ -855,44 +873,29 @@ xfs_istale_done(
 }
 
 /*
- * convert an xfs_inode_log_format struct from either 32 or 64 bit versions
- * (which can have different field alignments) to the native version
+ * convert an xfs_inode_log_format struct from the old 32 bit version
+ * (which can have different field alignments) to the native 64 bit version
  */
 int
 xfs_inode_item_format_convert(
-       xfs_log_iovec_t         *buf,
-       xfs_inode_log_format_t  *in_f)
+       struct xfs_log_iovec            *buf,
+       struct xfs_inode_log_format     *in_f)
 {
-       if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) {
-               xfs_inode_log_format_32_t *in_f32 = buf->i_addr;
-
-               in_f->ilf_type = in_f32->ilf_type;
-               in_f->ilf_size = in_f32->ilf_size;
-               in_f->ilf_fields = in_f32->ilf_fields;
-               in_f->ilf_asize = in_f32->ilf_asize;
-               in_f->ilf_dsize = in_f32->ilf_dsize;
-               in_f->ilf_ino = in_f32->ilf_ino;
-               /* copy biggest field of ilf_u */
-               uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
-               in_f->ilf_blkno = in_f32->ilf_blkno;
-               in_f->ilf_len = in_f32->ilf_len;
-               in_f->ilf_boffset = in_f32->ilf_boffset;
-               return 0;
-       } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){
-               xfs_inode_log_format_64_t *in_f64 = buf->i_addr;
-
-               in_f->ilf_type = in_f64->ilf_type;
-               in_f->ilf_size = in_f64->ilf_size;
-               in_f->ilf_fields = in_f64->ilf_fields;
-               in_f->ilf_asize = in_f64->ilf_asize;
-               in_f->ilf_dsize = in_f64->ilf_dsize;
-               in_f->ilf_ino = in_f64->ilf_ino;
-               /* copy biggest field of ilf_u */
-               uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f64->ilf_u.ilfu_uuid);
-               in_f->ilf_blkno = in_f64->ilf_blkno;
-               in_f->ilf_len = in_f64->ilf_len;
-               in_f->ilf_boffset = in_f64->ilf_boffset;
-               return 0;
-       }
-       return -EFSCORRUPTED;
+       struct xfs_inode_log_format_32  *in_f32 = buf->i_addr;
+
+       if (buf->i_len != sizeof(*in_f32))
+               return -EFSCORRUPTED;
+
+       in_f->ilf_type = in_f32->ilf_type;
+       in_f->ilf_size = in_f32->ilf_size;
+       in_f->ilf_fields = in_f32->ilf_fields;
+       in_f->ilf_asize = in_f32->ilf_asize;
+       in_f->ilf_dsize = in_f32->ilf_dsize;
+       in_f->ilf_ino = in_f32->ilf_ino;
+       /* copy biggest field of ilf_u */
+       uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
+       in_f->ilf_blkno = in_f32->ilf_blkno;
+       in_f->ilf_len = in_f32->ilf_len;
+       in_f->ilf_boffset = in_f32->ilf_boffset;
+       return 0;
 }
index 5049e8ab6e302e05b4c0d66fcf3eaa1ba9e2721b..aa75389be8cfa81d06695f80701c2093b65289b0 100644 (file)
@@ -1088,6 +1088,7 @@ xfs_ioctl_setattr_dax_invalidate(
        int                     *join_flags)
 {
        struct inode            *inode = VFS_I(ip);
+       struct super_block      *sb = inode->i_sb;
        int                     error;
 
        *join_flags = 0;
@@ -1100,7 +1101,7 @@ xfs_ioctl_setattr_dax_invalidate(
        if (fa->fsx_xflags & FS_XFLAG_DAX) {
                if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
                        return -EINVAL;
-               if (ip->i_mount->m_sb.sb_blocksize != PAGE_SIZE)
+               if (bdev_dax_supported(sb, sb->s_blocksize) < 0)
                        return -EINVAL;
        }
 
index a1909bc064e9e70c90ef6ced3017935951aefc2e..f179bdf1644dc346b2f7f064543eca0c3f808c6a 100644 (file)
@@ -829,7 +829,8 @@ int
 xfs_iomap_write_unwritten(
        xfs_inode_t     *ip,
        xfs_off_t       offset,
-       xfs_off_t       count)
+       xfs_off_t       count,
+       bool            update_isize)
 {
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   offset_fsb;
@@ -840,6 +841,7 @@ xfs_iomap_write_unwritten(
        xfs_trans_t     *tp;
        xfs_bmbt_irec_t imap;
        struct xfs_defer_ops dfops;
+       struct inode    *inode = VFS_I(ip);
        xfs_fsize_t     i_size;
        uint            resblks;
        int             error;
@@ -899,7 +901,8 @@ xfs_iomap_write_unwritten(
                i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
                if (i_size > offset + count)
                        i_size = offset + count;
-
+               if (update_isize && i_size > i_size_read(inode))
+                       i_size_write(inode, i_size);
                i_size = xfs_new_eof(ip, i_size);
                if (i_size) {
                        ip->i_d.di_size = i_size;
index 00db3ecea0840c2587f6c0de2f2828bcaaab6fbe..ee535065c5d0e3795158e9cf1ce7dafad5f3a729 100644 (file)
@@ -27,7 +27,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
                        struct xfs_bmbt_irec *, int);
 int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t,
                        struct xfs_bmbt_irec *);
-int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t);
+int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
 
 void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
                struct xfs_bmbt_irec *);
index c5107c7bc4bf8cfaad37050babd60e9a4f17e109..dc95a49d62e72f1808a6a8fa31aa82651bb8a643 100644 (file)
@@ -2515,7 +2515,7 @@ next_lv:
                                if (lv)
                                        vecp = lv->lv_iovecp;
                        }
-                       if (record_cnt == 0 && ordered == false) {
+                       if (record_cnt == 0 && !ordered) {
                                if (!lv)
                                        return 0;
                                break;
index ea7d4b4e50d0ca3eedee85ffd840542e0bb105db..e9727d0a541a109329c2c9072b24c7c0d3b6ed23 100644 (file)
@@ -704,7 +704,7 @@ xfs_mountfs(
        xfs_set_maxicount(mp);
 
        /* enable fail_at_unmount as default */
-       mp->m_fail_unmount = 1;
+       mp->m_fail_unmount = true;
 
        error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
        if (error)
index 0c381d71b242ec8553be6e01b8400977c4403365..0492436a053fcf0875764b341cbb150a2dde3baa 100644 (file)
@@ -134,7 +134,7 @@ xfs_check_ondisk_structs(void)
        XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log,           28);
        XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp,           8);
        XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32,   52);
-       XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_64,   56);
+       XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format,      56);
        XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat,        20);
        XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header,          16);
 }
index 2f2dc3c09ad008a75fa0ffba6fd5d7aed392c162..4246876df7b759cdd63ddd43b5eb4612af0b0c93 100644 (file)
@@ -274,7 +274,7 @@ xfs_fs_commit_blocks(
                                        (end - 1) >> PAGE_SHIFT);
                WARN_ON_ONCE(error);
 
-               error = xfs_iomap_write_unwritten(ip, start, length);
+               error = xfs_iomap_write_unwritten(ip, start, length, false);
                if (error)
                        goto out_drop_iolock;
        }
index 3246815c24d659d83da380c5cce04dd8ee40a101..37e603bf159137dfd38a1210b5d0bb31e5f4829f 100644 (file)
@@ -736,7 +736,13 @@ xfs_reflink_end_cow(
        /* If there is a hole at end_fsb - 1 go to the previous extent */
        if (!xfs_iext_lookup_extent(ip, ifp, end_fsb - 1, &idx, &got) ||
            got.br_startoff > end_fsb) {
-               ASSERT(idx > 0);
+               /*
+                * In case of racing, overlapping AIO writes no COW extents
+                * might be left by the time I/O completes for the loser of
+                * the race.  In that case we are done.
+                */
+               if (idx <= 0)
+                       goto out_cancel;
                xfs_iext_get_extent(ifp, --idx, &got);
        }
 
@@ -809,6 +815,7 @@ next_extent:
 
 out_defer:
        xfs_defer_cancel(&dfops);
+out_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out:
index c996f4ae4a5f293199ede0d0137329cc0919418b..f663022353c0d98b681e51fe8578096d0fbf57bf 100644 (file)
@@ -1637,7 +1637,7 @@ xfs_fs_fill_super(
 
        /* version 5 superblocks support inode version counters. */
        if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
-               sb->s_flags |= MS_I_VERSION;
+               sb->s_flags |= SB_I_VERSION;
 
        if (mp->m_flags & XFS_MOUNT_DAX) {
                xfs_warn(mp,
@@ -1654,6 +1654,16 @@ xfs_fs_fill_super(
                "DAX and reflink have not been tested together!");
        }
 
+       if (mp->m_flags & XFS_MOUNT_DISCARD) {
+               struct request_queue *q = bdev_get_queue(sb->s_bdev);
+
+               if (!blk_queue_discard(q)) {
+                       xfs_warn(mp, "mounting with \"discard\" option, but "
+                                       "the device does not support discard");
+                       mp->m_flags &= ~XFS_MOUNT_DISCARD;
+               }
+       }
+
        if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
                if (mp->m_sb.sb_rblocks) {
                        xfs_alert(mp,
index dedf9d789166184fbe859faa12a9415d55804dc8..fa1505292f6cda4c4b2434d264d4116bcefe640c 100644 (file)
@@ -399,17 +399,12 @@ extern const struct fwnode_operations acpi_device_fwnode_ops;
 extern const struct fwnode_operations acpi_data_fwnode_ops;
 extern const struct fwnode_operations acpi_static_fwnode_ops;
 
+bool is_acpi_device_node(const struct fwnode_handle *fwnode);
+bool is_acpi_data_node(const struct fwnode_handle *fwnode);
+
 static inline bool is_acpi_node(const struct fwnode_handle *fwnode)
 {
-       return !IS_ERR_OR_NULL(fwnode) &&
-               (fwnode->ops == &acpi_device_fwnode_ops
-                || fwnode->ops == &acpi_data_fwnode_ops);
-}
-
-static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode)
-{
-       return !IS_ERR_OR_NULL(fwnode) &&
-               fwnode->ops == &acpi_device_fwnode_ops;
+       return (is_acpi_device_node(fwnode) || is_acpi_data_node(fwnode));
 }
 
 #define to_acpi_device_node(__fwnode)                                  \
@@ -422,11 +417,6 @@ static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode)
                        NULL;                                           \
        })
 
-static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode)
-{
-       return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &acpi_data_fwnode_ops;
-}
-
 #define to_acpi_data_node(__fwnode)                                    \
        ({                                                              \
                typeof(__fwnode) __to_acpi_data_node_fwnode = __fwnode; \
index 0504ef8f3aa31d5e7a9f0d86d1cb3fc130fee9e0..976f8ac26665b3f4f03cbbfe5222277d9827d847 100644 (file)
@@ -115,15 +115,35 @@ do {                                                                      \
        (__ret);                                                        \
 })
 
-#define this_cpu_generic_read(pcp)                                     \
+#define __this_cpu_generic_read_nopreempt(pcp)                         \
 ({                                                                     \
        typeof(pcp) __ret;                                              \
        preempt_disable_notrace();                                      \
-       __ret = raw_cpu_generic_read(pcp);                              \
+       __ret = READ_ONCE(*raw_cpu_ptr(&(pcp)));                        \
        preempt_enable_notrace();                                       \
        __ret;                                                          \
 })
 
+#define __this_cpu_generic_read_noirq(pcp)                             \
+({                                                                     \
+       typeof(pcp) __ret;                                              \
+       unsigned long __flags;                                          \
+       raw_local_irq_save(__flags);                                    \
+       __ret = raw_cpu_generic_read(pcp);                              \
+       raw_local_irq_restore(__flags);                                 \
+       __ret;                                                          \
+})
+
+#define this_cpu_generic_read(pcp)                                     \
+({                                                                     \
+       typeof(pcp) __ret;                                              \
+       if (__native_word(pcp))                                         \
+               __ret = __this_cpu_generic_read_nopreempt(pcp);         \
+       else                                                            \
+               __ret = __this_cpu_generic_read_noirq(pcp);             \
+       __ret;                                                          \
+})
+
 #define this_cpu_generic_to_op(pcp, val, op)                           \
 do {                                                                   \
        unsigned long __flags;                                          \
diff --git a/include/dt-bindings/reset/snps,hsdk-reset.h b/include/dt-bindings/reset/snps,hsdk-reset.h
new file mode 100644 (file)
index 0000000..e1a643e
--- /dev/null
@@ -0,0 +1,17 @@
+/**
+ * This header provides index for the HSDK reset controller.
+ */
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK
+#define _DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK
+
+#define HSDK_APB_RESET 0
+#define HSDK_AXI_RESET 1
+#define HSDK_ETH_RESET 2
+#define HSDK_USB_RESET 3
+#define HSDK_SDIO_RESET        4
+#define HSDK_HDMI_RESET        5
+#define HSDK_GFX_RESET 6
+#define HSDK_DMAC_RESET        7
+#define HSDK_EBI_RESET 8
+
+#endif /*_DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK*/
diff --git a/include/dt-bindings/reset/snps,hsdk-v1-reset.h b/include/dt-bindings/reset/snps,hsdk-v1-reset.h
deleted file mode 100644 (file)
index d898c89..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/**
- * This header provides index for the HSDK v1 reset controller.
- */
-#ifndef _DT_BINDINGS_RESET_CONTROLLER_HSDK_V1
-#define _DT_BINDINGS_RESET_CONTROLLER_HSDK_V1
-
-#define HSDK_V1_APB_RESET      0
-#define HSDK_V1_AXI_RESET      1
-#define HSDK_V1_ETH_RESET      2
-#define HSDK_V1_USB_RESET      3
-#define HSDK_V1_SDIO_RESET     4
-#define HSDK_V1_HDMI_RESET     5
-#define HSDK_V1_GFX_RESET      6
-#define HSDK_V1_DMAC_RESET     7
-#define HSDK_V1_EBI_RESET      8
-
-#endif /*_DT_BINDINGS_RESET_CONTROLLER_HSDK_V1*/
index 74d4d4e8e3db9f173611dc169a20955a7c89dc45..cb708eb8accc59d3dafee57c7c696647e80be471 100644 (file)
@@ -314,11 +314,7 @@ void audit_core_dumps(long signr);
 
 static inline void audit_seccomp(unsigned long syscall, long signr, int code)
 {
-       if (!audit_enabled)
-               return;
-
-       /* Force a record to be reported if a signal was delivered. */
-       if (signr || unlikely(!audit_dummy_context()))
+       if (audit_enabled && unlikely(!audit_dummy_context()))
                __audit_seccomp(syscall, signr, code);
 }
 
index fb44d6180ca0e74960abd1cd5dbefb8cd9461273..18d05b5491f3442a886630439b4e2ca592898c76 100644 (file)
@@ -131,7 +131,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
                           int executable_stack);
 extern int transfer_args_to_stack(struct linux_binprm *bprm,
                                  unsigned long *sp_location);
-extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
+extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm);
 extern int copy_strings_kernel(int argc, const char *const *argv,
                               struct linux_binprm *bprm);
 extern int prepare_bprm_creds(struct linux_binprm *bprm);
index 8b9d6fff002db113cb2233a2d80b3186ff041ef0..f2deb71958b2dbee591c4c9bf95ee6f1e70f41c4 100644 (file)
@@ -92,7 +92,7 @@
 /**
  * FIELD_GET() - extract a bitfield element
  * @_mask: shifted mask defining the field's length and position
- * @_reg:  32bit value of entire bitfield
+ * @_reg:  value of entire bitfield
  *
  * FIELD_GET() extracts the field specified by @_mask from the
  * bitfield passed in as @_reg by masking and shifting it down.
index 460294bb0fa52560b44e4764ce34cf7bce9cd2ce..02fa42d24b52f5632500e2dd530a3b16be1f38d8 100644 (file)
@@ -551,6 +551,7 @@ struct request_queue {
        int                     node;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
        struct blk_trace        *blk_trace;
+       struct mutex            blk_trace_mutex;
 #endif
        /*
         * for flush operations
index 8390859e79e70b9a07f0948e1efaa5adbfb0c99c..f1af7d63d6786673ac087dbfca66310a1236126f 100644 (file)
@@ -368,6 +368,11 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
 {
 }
 
+static inline int bpf_obj_get_user(const char __user *pathname)
+{
+       return -EOPNOTSUPP;
+}
+
 static inline struct net_device  *__dev_map_lookup_elem(struct bpf_map *map,
                                                       u32 key)
 {
index c8dae555eccf9f30e50213eef55d2e6bee648d06..446b24cac67da00e153d3196d8e2a0eb30675222 100644 (file)
@@ -232,6 +232,7 @@ int generic_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
+void clean_page_buffers(struct page *page);
 int cont_write_begin(struct file *, struct address_space *, loff_t,
                        unsigned, unsigned, struct page **, void **,
                        get_block_t *, loff_t *);
index f24bfb2b9a2dcca0ac61034e86fe3bc489da7947..6d508767e14455d52470fe806f5f923637561edc 100644 (file)
@@ -3,8 +3,27 @@
 
 #include <linux/types.h>
 
+/*
+ * CPU-up                      CPU-down
+ *
+ * BP          AP              BP              AP
+ *
+ * OFFLINE                     OFFLINE
+ *   |                           ^
+ *   v                           |
+ * BRINGUP_CPU->AP_OFFLINE     BRINGUP_CPU  <- AP_IDLE_DEAD (idle thread/play_dead)
+ *               |                             AP_OFFLINE
+ *               v (IRQ-off)     ,---------------^
+ *             AP_ONLNE          | (stop_machine)
+ *               |             TEARDOWN_CPU <- AP_ONLINE_IDLE
+ *               |                               ^
+ *               v                               |
+ *              AP_ACTIVE                      AP_ACTIVE
+ */
+
 enum cpuhp_state {
-       CPUHP_OFFLINE,
+       CPUHP_INVALID = -1,
+       CPUHP_OFFLINE = 0,
        CPUHP_CREATE_THREADS,
        CPUHP_PERF_PREPARE,
        CPUHP_PERF_X86_PREPARE,
index c6f27207dbe87392502dba2082e73617bd6d02ac..66fe271c2544d5cf861e1e5c58e23a0793ba1ed6 100644 (file)
@@ -307,8 +307,6 @@ struct driver_attribute {
                         size_t count);
 };
 
-#define DRIVER_ATTR(_name, _mode, _show, _store) \
-       struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
 #define DRIVER_ATTR_RW(_name) \
        struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
 #define DRIVER_ATTR_RO(_name) \
@@ -838,7 +836,7 @@ struct dev_links_info {
  * @driver_data: Private pointer for driver specific info.
  * @links:     Links to suppliers and consumers of this device.
  * @power:     For device power management.
- *             See Documentation/power/admin-guide/devices.rst for details.
+ *             See Documentation/driver-api/pm/devices.rst for details.
  * @pm_domain: Provide callbacks that are executed during system suspend,
  *             hibernation, system resume and during runtime PM transitions
  *             along with subsystem-level and driver-level callbacks.
index d29e58fde364f01168c059f85faac389622a3fc3..818a0b26249ea2a836638204226cd73b106e7ccb 100644 (file)
@@ -728,7 +728,7 @@ void xdp_do_flush_map(void);
 void bpf_warn_invalid_xdp_action(u32 act);
 void bpf_warn_invalid_xdp_redirect(u32 ifindex);
 
-struct sock *do_sk_redirect_map(void);
+struct sock *do_sk_redirect_map(struct sk_buff *skb);
 
 #ifdef CONFIG_BPF_JIT
 extern int bpf_jit_enable;
index 339e73742e736cfcccdfedf323e6ab8bc651138b..13dab191a23e61654d29a4bff297919105bb20a6 100644 (file)
@@ -403,7 +403,7 @@ struct address_space {
        unsigned long           flags;          /* error bits */
        spinlock_t              private_lock;   /* for use by the address_space */
        gfp_t                   gfp_mask;       /* implicit gfp mask for allocations */
-       struct list_head        private_list;   /* ditto */
+       struct list_head        private_list;   /* for use by the address_space */
        void                    *private_data;  /* ditto */
        errseq_t                wb_err;
 } __attribute__((aligned(sizeof(long)))) __randomize_layout;
index c458d7b7ad197688469772fb0c842180c668864c..6431087816ba5b06d9d5f7dae08af53ed57e11ca 100644 (file)
@@ -1403,7 +1403,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
                                const int *srv_version, int srv_vercnt,
                                int *nego_fw_version, int *nego_srv_version);
 
-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
+void hv_process_channel_removal(u32 relid);
 
 void vmbus_setevent(struct vmbus_channel *channel);
 /*
index 4837157da0dc04c4cc2c1a7c02da71e41c787d63..9ae41cdd0d4cc57ca54b56786d9629a81354506f 100644 (file)
@@ -73,8 +73,8 @@ void tap_del_queues(struct tap_dev *tap);
 int tap_get_minor(dev_t major, struct tap_dev *tap);
 void tap_free_minor(dev_t major, struct tap_dev *tap);
 int tap_queue_resize(struct tap_dev *tap);
-int tap_create_cdev(struct cdev *tap_cdev,
-                   dev_t *tap_major, const char *device_name);
+int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
+                   const char *device_name, struct module *module);
 void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
 
 #endif /*_LINUX_IF_TAP_H_*/
index 5ba430cc9a87352c305b7e6319f3a53ec4a94cc6..1fc7abd28b0b064b54a73e9c5717c2954e25e1cb 100644 (file)
@@ -111,6 +111,9 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
 int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
        unsigned int size, unsigned int *val);
 
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
+       unsigned int reset_length);
+
 int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
        const struct iio_chan_spec *chan, int *val);
 int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
index a65e3b24fb1838e43cffc24e2ecdc8a8f92eba12..7c7516eb7d76c64071e51c199fdb0fd0f4e3c934 100644 (file)
@@ -234,6 +234,10 @@ struct input_dev {
 #error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match"
 #endif
 
+#if INPUT_PROP_MAX != INPUT_DEVICE_ID_PROP_MAX
+#error "INPUT_PROP_MAX and INPUT_DEVICE_ID_PROP_MAX do not match"
+#endif
+
 #define INPUT_DEVICE_ID_MATCH_DEVICE \
        (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT)
 #define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \
@@ -469,6 +473,9 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke);
 int input_set_keycode(struct input_dev *dev,
                      const struct input_keymap_entry *ke);
 
+bool input_match_device_id(const struct input_dev *dev,
+                          const struct input_device_id *id);
+
 void input_enable_softrepeat(struct input_dev *dev, int delay, int period);
 
 extern struct class input_class;
@@ -529,6 +536,7 @@ int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code,
 
 int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file);
 int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file);
+int input_ff_flush(struct input_dev *dev, struct file *file);
 
 int input_ff_create_memless(struct input_dev *dev, void *data,
                int (*play_effect)(struct input_dev *, void *, struct ff_effect *));
index a7f2ac689d2917e2902e78604e8b6b33df1ee333..41b8c575785916f7691a7686a5c5d4e7d99003fa 100644 (file)
@@ -167,11 +167,11 @@ struct iommu_resv_region {
  * @map: map a physically contiguous memory region to an iommu domain
  * @unmap: unmap a physically contiguous memory region from an iommu domain
  * @map_sg: map a scatter-gather list of physically contiguous memory chunks
+ *          to an iommu domain
  * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain
  * @tlb_range_add: Add a given iova range to the flush queue for this domain
  * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
  *            queue
- * to an iommu domain
  * @iova_to_phys: translate iova to physical address
  * @add_device: add device to iommu grouping
  * @remove_device: remove device from iommu grouping
index b99a784635ff5801c20fce3a187a12cc4358660a..5ad10948ea95241ed1ce4afe1d1da89b73ea2a39 100644 (file)
@@ -783,10 +783,7 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
 static inline
 struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
 {
-       if (!cpumask_empty(d->common->effective_affinity))
-               return d->common->effective_affinity;
-
-       return d->common->affinity;
+       return d->common->effective_affinity;
 }
 static inline void irq_data_update_effective_affinity(struct irq_data *d,
                                                      const struct cpumask *m)
@@ -1012,7 +1009,7 @@ void irq_gc_mask_clr_bit(struct irq_data *d);
 void irq_gc_unmask_enable_reg(struct irq_data *d);
 void irq_gc_ack_set_bit(struct irq_data *d);
 void irq_gc_ack_clr_bit(struct irq_data *d);
-void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
+void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
 void irq_gc_eoi(struct irq_data *d);
 int irq_gc_set_wake(struct irq_data *d, unsigned int on);
 
index 1ea576c8126f8b8c467f1ea3ab2623826efde0af..14b74f22d43c147f2bc8eec9ef82e2a1461a6643 100644 (file)
 #define GITS_BASER_ENTRY_SIZE_SHIFT            (48)
 #define GITS_BASER_ENTRY_SIZE(r)       ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
 #define GITS_BASER_ENTRY_SIZE_MASK     GENMASK_ULL(52, 48)
+#define GITS_BASER_PHYS_52_to_48(phys)                                 \
+       (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
 #define GITS_BASER_SHAREABILITY_SHIFT  (10)
 #define GITS_BASER_InnerShareable                                      \
        GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
index 0ad4c3044cf9333fa9664c2be0850e9596ab2206..91189bb0c8184f9e66d7ede8b377cd2e86b8074b 100644 (file)
 
 #define STACK_MAGIC    0xdeadbeef
 
+/**
+ * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
+ * @x: value to repeat
+ *
+ * NOTE: @x is not checked for > 0xff; larger values produce odd results.
+ */
 #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
 
 /* @a is a power of 2 value */
 #define READ                   0
 #define WRITE                  1
 
+/**
+ * ARRAY_SIZE - get the number of elements in array @arr
+ * @arr: array to be sized
+ */
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 
 #define u64_to_user_ptr(x) (           \
 #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
 #define round_down(x, y) ((x) & ~__round_mask(x, y))
 
+/**
+ * FIELD_SIZEOF - get the size of a struct's field
+ * @t: the target struct
+ * @f: the target struct's field
+ * Return: the size of @f in the struct definition without having a
+ * declared instance of @t.
+ */
 #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+
 #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
 
 #define DIV_ROUND_DOWN_ULL(ll, d) \
 /*
  * Divide positive or negative dividend by positive or negative divisor
  * and round to closest integer. Result is undefined for negative
- * divisors if he dividend variable type is unsigned and for negative
+ * divisors if the dividend variable type is unsigned and for negative
  * dividends if the divisor variable type is unsigned.
  */
 #define DIV_ROUND_CLOSEST(x, divisor)(                 \
@@ -247,13 +265,13 @@ extern int _cond_resched(void);
  * @ep_ro: right open interval endpoint
  *
  * Perform a "reciprocal multiplication" in order to "scale" a value into
- * range [0, ep_ro), where the upper interval endpoint is right-open.
+ * range [0, @ep_ro), where the upper interval endpoint is right-open.
  * This is useful, e.g. for accessing a index of an array containing
- * ep_ro elements, for example. Think of it as sort of modulus, only that
+ * @ep_ro elements, for example. Think of it as sort of modulus, only that
  * the result isn't that of modulo. ;) Note that if initial input is a
  * small value, then result will return 0.
  *
- * Return: a result based on val in interval [0, ep_ro).
+ * Return: a result based on @val in interval [0, @ep_ro).
  */
 static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
 {
@@ -618,8 +636,8 @@ do {                                                                        \
  * trace_printk - printf formatting in the ftrace buffer
  * @fmt: the printf format for printing
  *
- * Note: __trace_printk is an internal function for trace_printk and
- *       the @ip is passed in via the trace_printk macro.
+ * Note: __trace_printk is an internal function for trace_printk() and
+ *       the @ip is passed in via the trace_printk() macro.
  *
  * This function allows a kernel developer to debug fast path sections
  * that printk is not appropriate for. By scattering in various
@@ -629,7 +647,7 @@ do {                                                                        \
  * This is intended as a debugging tool for the developer only.
  * Please refrain from leaving trace_printks scattered around in
  * your code. (Extra memory is used for special buffers that are
- * allocated when trace_printk() is used)
+ * allocated when trace_printk() is used.)
  *
  * A little optization trick is done here. If there's only one
  * argument, there's no need to scan the string for printf formats.
@@ -681,7 +699,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
  *       the @ip is passed in via the trace_puts macro.
  *
  * This is similar to trace_printk() but is made for those really fast
- * paths that a developer wants the least amount of "Heisenbug" affects,
+ * paths that a developer wants the least amount of "Heisenbug" effects,
  * where the processing of the print format is still too much.
  *
  * This function allows a kernel developer to debug fast path sections
@@ -692,7 +710,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
  * This is intended as a debugging tool for the developer only.
  * Please refrain from leaving trace_puts scattered around in
  * your code. (Extra memory is used for special buffers that are
- * allocated when trace_puts() is used)
+ * allocated when trace_puts() is used.)
  *
  * Returns: 0 if nothing was written, positive # if string was.
  *  (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
@@ -771,6 +789,12 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
        t2 min2 = (y);                                  \
        (void) (&min1 == &min2);                        \
        min1 < min2 ? min1 : min2; })
+
+/**
+ * min - return minimum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
 #define min(x, y)                                      \
        __min(typeof(x), typeof(y),                     \
              __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),   \
@@ -781,12 +805,31 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
        t2 max2 = (y);                                  \
        (void) (&max1 == &max2);                        \
        max1 > max2 ? max1 : max2; })
+
+/**
+ * max - return maximum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
 #define max(x, y)                                      \
        __max(typeof(x), typeof(y),                     \
              __UNIQUE_ID(max1_), __UNIQUE_ID(max2_),   \
              x, y)
 
+/**
+ * min3 - return minimum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
 #define min3(x, y, z) min((typeof(x))min(x, y), z)
+
+/**
+ * max3 - return maximum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
 #define max3(x, y, z) max((typeof(x))max(x, y), z)
 
 /**
@@ -805,8 +848,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
  * @lo: lowest allowable value
  * @hi: highest allowable value
  *
- * This macro does strict typechecking of lo/hi to make sure they are of the
- * same type as val.  See the unnecessary pointer comparisons.
+ * This macro does strict typechecking of @lo/@hi to make sure they are of the
+ * same type as @val.  See the unnecessary pointer comparisons.
  */
 #define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
 
@@ -816,11 +859,24 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
  *
  * Or not use min/max/clamp at all, of course.
  */
+
+/**
+ * min_t - return minimum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
 #define min_t(type, x, y)                              \
        __min(type, type,                               \
              __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),   \
              x, y)
 
+/**
+ * max_t - return maximum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
 #define max_t(type, x, y)                              \
        __max(type, type,                               \
              __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),   \
@@ -834,7 +890,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
  * @hi: maximum allowable value
  *
  * This macro does no typechecking and uses temporary variables of type
- * 'type' to make all the comparisons.
+ * @type to make all the comparisons.
  */
 #define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
 
@@ -845,15 +901,17 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
  * @hi: maximum allowable value
  *
  * This macro does no typechecking and uses temporary variables of whatever
- * type the input argument 'val' is.  This is useful when val is an unsigned
- * type and min and max are literals that will otherwise be assigned a signed
+ * type the input argument @val is.  This is useful when @val is an unsigned
+ * type and @lo and @hi are literals that will otherwise be assigned a signed
  * integer type.
  */
 #define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
 
 
-/*
- * swap - swap value of @a and @b
+/**
+ * swap - swap values of @a and @b
+ * @a: first value
+ * @b: second value
  */
 #define swap(a, b) \
        do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
index 044114185120633b8ca5c678b22570c3c2f8e264..8a15cabe928d0ee282742f0c44be26e7af01851d 100644 (file)
@@ -138,6 +138,11 @@ struct key_restriction {
        struct key_type *keytype;
 };
 
+enum key_state {
+       KEY_IS_UNINSTANTIATED,
+       KEY_IS_POSITIVE,                /* Positively instantiated */
+};
+
 /*****************************************************************************/
 /*
  * authentication token / access credential / keyring
@@ -169,6 +174,7 @@ struct key {
                                                 * - may not match RCU dereferenced payload
                                                 * - payload should contain own length
                                                 */
+       short                   state;          /* Key state (+) or rejection error (-) */
 
 #ifdef KEY_DEBUGGING
        unsigned                magic;
@@ -176,17 +182,16 @@ struct key {
 #endif
 
        unsigned long           flags;          /* status flags (change with bitops) */
-#define KEY_FLAG_INSTANTIATED  0       /* set if key has been instantiated */
-#define KEY_FLAG_DEAD          1       /* set if key type has been deleted */
-#define KEY_FLAG_REVOKED       2       /* set if key had been revoked */
-#define KEY_FLAG_IN_QUOTA      3       /* set if key consumes quota */
-#define KEY_FLAG_USER_CONSTRUCT        4       /* set if key is being constructed in userspace */
-#define KEY_FLAG_NEGATIVE      5       /* set if key is negative */
-#define KEY_FLAG_ROOT_CAN_CLEAR        6       /* set if key can be cleared by root without permission */
-#define KEY_FLAG_INVALIDATED   7       /* set if key has been invalidated */
-#define KEY_FLAG_BUILTIN       8       /* set if key is built in to the kernel */
-#define KEY_FLAG_ROOT_CAN_INVAL        9       /* set if key can be invalidated by root without permission */
-#define KEY_FLAG_KEEP          10      /* set if key should not be removed */
+#define KEY_FLAG_DEAD          0       /* set if key type has been deleted */
+#define KEY_FLAG_REVOKED       1       /* set if key had been revoked */
+#define KEY_FLAG_IN_QUOTA      2       /* set if key consumes quota */
+#define KEY_FLAG_USER_CONSTRUCT        3       /* set if key is being constructed in userspace */
+#define KEY_FLAG_ROOT_CAN_CLEAR        4       /* set if key can be cleared by root without permission */
+#define KEY_FLAG_INVALIDATED   5       /* set if key has been invalidated */
+#define KEY_FLAG_BUILTIN       6       /* set if key is built in to the kernel */
+#define KEY_FLAG_ROOT_CAN_INVAL        7       /* set if key can be invalidated by root without permission */
+#define KEY_FLAG_KEEP          8       /* set if key should not be removed */
+#define KEY_FLAG_UID_KEYRING   9       /* set if key is a user or user session keyring */
 
        /* the key type and key description string
         * - the desc is used to match a key against search criteria
@@ -212,7 +217,6 @@ struct key {
                        struct list_head name_link;
                        struct assoc_array keys;
                };
-               int reject_error;
        };
 
        /* This is set on a keyring to restrict the addition of a link to a key
@@ -243,6 +247,7 @@ extern struct key *key_alloc(struct key_type *type,
 #define KEY_ALLOC_NOT_IN_QUOTA         0x0002  /* not in quota */
 #define KEY_ALLOC_BUILT_IN             0x0004  /* Key is built into kernel */
 #define KEY_ALLOC_BYPASS_RESTRICTION   0x0008  /* Override the check on restricted keyrings */
+#define KEY_ALLOC_UID_KEYRING          0x0010  /* allocating a user or user session keyring */
 
 extern void key_revoke(struct key *key);
 extern void key_invalidate(struct key *key);
@@ -351,17 +356,27 @@ extern void key_set_timeout(struct key *, unsigned);
 #define        KEY_NEED_SETATTR 0x20   /* Require permission to change attributes */
 #define        KEY_NEED_ALL    0x3f    /* All the above permissions */
 
+static inline short key_read_state(const struct key *key)
+{
+       /* Barrier versus mark_key_instantiated(). */
+       return smp_load_acquire(&key->state);
+}
+
 /**
- * key_is_instantiated - Determine if a key has been positively instantiated
+ * key_is_positive - Determine if a key has been positively instantiated
  * @key: The key to check.
  *
  * Return true if the specified key has been positively instantiated, false
  * otherwise.
  */
-static inline bool key_is_instantiated(const struct key *key)
+static inline bool key_is_positive(const struct key *key)
+{
+       return key_read_state(key) == KEY_IS_POSITIVE;
+}
+
+static inline bool key_is_negative(const struct key *key)
 {
-       return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
-               !test_bit(KEY_FLAG_NEGATIVE, &key->flags);
+       return key_read_state(key) < 0;
 }
 
 #define dereference_key_rcu(KEY)                                       \
index 0d3f14fd26217fcb2a472c411c2e9ee73a6c6c85..4773145246ed264f47e2a1c885dc7bd36654ddd3 100644 (file)
@@ -31,8 +31,8 @@ struct mbus_dram_target_info
        struct mbus_dram_window {
                u8      cs_index;
                u8      mbus_attr;
-               u32     base;
-               u32     size;
+               u64     base;
+               u64     size;
        } cs[4];
 };
 
index eaf4ad209c8fa9bb8859d6c7e0c4392f8f8c23aa..e32dbc4934dbbf7e743bd33dd1b3ed8a4c9cee88 100644 (file)
@@ -980,7 +980,6 @@ enum mlx5_cap_type {
        MLX5_CAP_RESERVED,
        MLX5_CAP_VECTOR_CALC,
        MLX5_CAP_QOS,
-       MLX5_CAP_FPGA,
        /* NUM OF CAP Types */
        MLX5_CAP_NUM
 };
@@ -1110,10 +1109,10 @@ enum mlx5_mcam_feature_groups {
        MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
 
 #define MLX5_CAP_FPGA(mdev, cap) \
-       MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+       MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
 
 #define MLX5_CAP64_FPGA(mdev, cap) \
-       MLX5_GET64(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+       MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
 
 enum {
        MLX5_CMD_STAT_OK                        = 0x0,
index 02ff700e4f30cb66f70773caf6d467a5000d580d..401c8972cc3a869d77bcf1245dfc5095bcb6bbaf 100644 (file)
@@ -774,6 +774,7 @@ struct mlx5_core_dev {
                u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
                u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
                u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+               u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
        } caps;
        phys_addr_t             iseg_base;
        struct mlx5_init_seg __iomem *iseg;
index a528b35a022e0bd1ad840392c7d32a0c44aa3a6f..69772347f866680d1a19c08d4e5e5e48b5c233f8 100644 (file)
@@ -327,7 +327,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
        u8         reserved_at_80[0x18];
        u8         log_max_destination[0x8];
 
-       u8         reserved_at_a0[0x18];
+       u8         log_max_flow_counter[0x8];
+       u8         reserved_at_a8[0x10];
        u8         log_max_flow[0x8];
 
        u8         reserved_at_c0[0x40];
index c57d4b7de3a80203719f36319d17fb4794dd40b7..c59af8ab753a4f9eb35cb5da124fd7a0990339d0 100644 (file)
@@ -157,6 +157,8 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
 int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
                            u8 prio, u8 *tc);
 int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+                            u8 tc, u8 *tc_group);
 int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
 int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
                                u8 tc, u8 *bw_pct);
index f8c10d336e42ea5b43e6f72e593eaf90027a6cc5..065d99deb847e17fd54193035d0f0512b45df79d 100644 (file)
@@ -240,7 +240,7 @@ extern unsigned int kobjsize(const void *objp);
 
 #if defined(CONFIG_X86_INTEL_MPX)
 /* MPX specific bounds table or bounds directory */
-# define VM_MPX                VM_HIGH_ARCH_BIT_4
+# define VM_MPX                VM_HIGH_ARCH_4
 #else
 # define VM_MPX                VM_NONE
 #endif
index 46f4ecf5479adbb2829c26e63d5e56b20e779312..1861ea8dba775989dcf298cb77d92ef9429fa221 100644 (file)
@@ -445,6 +445,9 @@ struct mm_struct {
        unsigned long flags; /* Must use atomic bitops to access the bits */
 
        struct core_state *core_state; /* coredumping support */
+#ifdef CONFIG_MEMBARRIER
+       atomic_t membarrier_state;
+#endif
 #ifdef CONFIG_AIO
        spinlock_t                      ioctx_lock;
        struct kioctx_table __rcu       *ioctx_table;
index f3f2d07feb2a627d9b65409b7e5a0c0e53fe54c2..9a43763a68adb3e998ec942300688f99e065489b 100644 (file)
@@ -316,7 +316,7 @@ struct mmc_host {
 #define MMC_CAP_UHS_SDR50      (1 << 18)       /* Host supports UHS SDR50 mode */
 #define MMC_CAP_UHS_SDR104     (1 << 19)       /* Host supports UHS SDR104 mode */
 #define MMC_CAP_UHS_DDR50      (1 << 20)       /* Host supports UHS DDR50 mode */
-#define MMC_CAP_NO_BOUNCE_BUFF (1 << 21)       /* Disable bounce buffers on host */
+/* (1 << 21) is free for reuse */
 #define MMC_CAP_DRIVER_TYPE_A  (1 << 23)       /* Host supports Driver Type A */
 #define MMC_CAP_DRIVER_TYPE_C  (1 << 24)       /* Host supports Driver Type C */
 #define MMC_CAP_DRIVER_TYPE_D  (1 << 25)       /* Host supports Driver Type D */
index 7b2e31b1745aaf3a7d3e8af0ca915c6c2dc04516..6866e8126982e3ebc328725f4a84699f010ff001 100644 (file)
@@ -400,6 +400,11 @@ extern void mmu_notifier_synchronize(void);
 
 #else /* CONFIG_MMU_NOTIFIER */
 
+static inline int mm_has_notifiers(struct mm_struct *mm)
+{
+       return 0;
+}
+
 static inline void mmu_notifier_release(struct mm_struct *mm)
 {
 }
index 356a814e7c8eb8b56179dd72bdbcf76703fa56b1..c8f89417740bb5819bc9cb2b81549e31a4af4fc0 100644 (file)
@@ -1094,8 +1094,14 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 #error Allocator MAX_ORDER exceeds SECTION_SIZE
 #endif
 
-#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
-#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
+static inline unsigned long pfn_to_section_nr(unsigned long pfn)
+{
+       return pfn >> PFN_SECTION_SHIFT;
+}
+static inline unsigned long section_nr_to_pfn(unsigned long sec)
+{
+       return sec << PFN_SECTION_SHIFT;
+}
 
 #define SECTION_ALIGN_UP(pfn)  (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
 #define SECTION_ALIGN_DOWN(pfn)        ((pfn) & PAGE_SECTION_MASK)
index 694cebb50f72c7d1cb404e26416e5400545c114c..2657f9f51536c369188fb13e6c7943234bd272f3 100644 (file)
@@ -293,6 +293,7 @@ struct pcmcia_device_id {
 #define INPUT_DEVICE_ID_SND_MAX                0x07
 #define INPUT_DEVICE_ID_FF_MAX         0x7f
 #define INPUT_DEVICE_ID_SW_MAX         0x0f
+#define INPUT_DEVICE_ID_PROP_MAX       0x1f
 
 #define INPUT_DEVICE_ID_MATCH_BUS      1
 #define INPUT_DEVICE_ID_MATCH_VENDOR   2
@@ -308,6 +309,7 @@ struct pcmcia_device_id {
 #define INPUT_DEVICE_ID_MATCH_SNDBIT   0x0400
 #define INPUT_DEVICE_ID_MATCH_FFBIT    0x0800
 #define INPUT_DEVICE_ID_MATCH_SWBIT    0x1000
+#define INPUT_DEVICE_ID_MATCH_PROPBIT  0x2000
 
 struct input_device_id {
 
@@ -327,6 +329,7 @@ struct input_device_id {
        kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1];
        kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1];
        kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1];
+       kernel_ulong_t propbit[INPUT_DEVICE_ID_PROP_MAX / BITS_PER_LONG + 1];
 
        kernel_ulong_t driver_info;
 };
index f535779d9dc1dfe36934c2abba4e43d053ac5d6f..2eaac7d75af4f1bbdaf876acc55b4bd0d37a7f36 100644 (file)
@@ -3694,6 +3694,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
                                    unsigned char name_assign_type,
                                    void (*setup)(struct net_device *),
                                    unsigned int txqs, unsigned int rxqs);
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+                      const char *name);
+
 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
        alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
 
index 2c2a5514b0df98a0fd92294aad15b00855c9256a..528b24c78308e7fe128027f1717863d68e081cbc 100644 (file)
@@ -108,9 +108,10 @@ struct ebt_table {
 
 #define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
                     ~(__alignof__(struct _xt_align)-1))
-extern struct ebt_table *ebt_register_table(struct net *net,
-                                           const struct ebt_table *table,
-                                           const struct nf_hook_ops *);
+extern int ebt_register_table(struct net *net,
+                             const struct ebt_table *table,
+                             const struct nf_hook_ops *ops,
+                             struct ebt_table **res);
 extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
                                 const struct nf_hook_ops *);
 extern unsigned int ebt_do_table(struct sk_buff *skb,
index a36abe2da13e1a23edc8aa152205af143f658990..27e249ed7c5c8dfcbd969e347ff844a56899edc7 100644 (file)
 
 #ifdef CONFIG_LOCKUP_DETECTOR
 void lockup_detector_init(void);
+void lockup_detector_soft_poweroff(void);
+void lockup_detector_cleanup(void);
+bool is_hardlockup(void);
+
+extern int watchdog_user_enabled;
+extern int nmi_watchdog_user_enabled;
+extern int soft_watchdog_user_enabled;
+extern int watchdog_thresh;
+extern unsigned long watchdog_enabled;
+
+extern struct cpumask watchdog_cpumask;
+extern unsigned long *watchdog_cpumask_bits;
+#ifdef CONFIG_SMP
+extern int sysctl_softlockup_all_cpu_backtrace;
+extern int sysctl_hardlockup_all_cpu_backtrace;
 #else
-static inline void lockup_detector_init(void)
-{
-}
-#endif
+#define sysctl_softlockup_all_cpu_backtrace 0
+#define sysctl_hardlockup_all_cpu_backtrace 0
+#endif /* !CONFIG_SMP */
+
+#else /* CONFIG_LOCKUP_DETECTOR */
+static inline void lockup_detector_init(void) { }
+static inline void lockup_detector_soft_poweroff(void) { }
+static inline void lockup_detector_cleanup(void) { }
+#endif /* !CONFIG_LOCKUP_DETECTOR */
 
 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
 extern void touch_softlockup_watchdog_sched(void);
@@ -24,29 +44,17 @@ extern void touch_softlockup_watchdog(void);
 extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern unsigned int  softlockup_panic;
-extern int soft_watchdog_enabled;
-extern atomic_t watchdog_park_in_progress;
 #else
-static inline void touch_softlockup_watchdog_sched(void)
-{
-}
-static inline void touch_softlockup_watchdog(void)
-{
-}
-static inline void touch_softlockup_watchdog_sync(void)
-{
-}
-static inline void touch_all_softlockup_watchdogs(void)
-{
-}
+static inline void touch_softlockup_watchdog_sched(void) { }
+static inline void touch_softlockup_watchdog(void) { }
+static inline void touch_softlockup_watchdog_sync(void) { }
+static inline void touch_all_softlockup_watchdogs(void) { }
 #endif
 
 #ifdef CONFIG_DETECT_HUNG_TASK
 void reset_hung_task_detector(void);
 #else
-static inline void reset_hung_task_detector(void)
-{
-}
+static inline void reset_hung_task_detector(void) { }
 #endif
 
 /*
@@ -54,12 +62,12 @@ static inline void reset_hung_task_detector(void)
  * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
  * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
  *
- * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
- * are variables that are only used as an 'interface' between the parameters
- * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
- * 'watchdog_thresh' variable is handled differently because its value is not
- * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
- * is equal zero.
+ * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
+ * 'soft_watchdog_user_enabled' are variables that are only used as an
+ * 'interface' between the parameters in /proc/sys/kernel and the internal
+ * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
+ * handled differently because its value is not boolean, and the lockup
+ * detectors are 'suspended' while 'watchdog_thresh' is equal zero.
  */
 #define NMI_WATCHDOG_ENABLED_BIT   0
 #define SOFT_WATCHDOG_ENABLED_BIT  1
@@ -73,17 +81,41 @@ extern unsigned int hardlockup_panic;
 static inline void hardlockup_detector_disable(void) {}
 #endif
 
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+# define NMI_WATCHDOG_SYSCTL_PERM      0644
+#else
+# define NMI_WATCHDOG_SYSCTL_PERM      0444
+#endif
+
 #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
 extern void arch_touch_nmi_watchdog(void);
+extern void hardlockup_detector_perf_stop(void);
+extern void hardlockup_detector_perf_restart(void);
+extern void hardlockup_detector_perf_disable(void);
+extern void hardlockup_detector_perf_enable(void);
+extern void hardlockup_detector_perf_cleanup(void);
+extern int hardlockup_detector_perf_init(void);
 #else
-#if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline void hardlockup_detector_perf_stop(void) { }
+static inline void hardlockup_detector_perf_restart(void) { }
+static inline void hardlockup_detector_perf_disable(void) { }
+static inline void hardlockup_detector_perf_enable(void) { }
+static inline void hardlockup_detector_perf_cleanup(void) { }
+# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
 static inline void arch_touch_nmi_watchdog(void) {}
+# else
+static inline int hardlockup_detector_perf_init(void) { return 0; }
+# endif
 #endif
-#endif
+
+void watchdog_nmi_stop(void);
+void watchdog_nmi_start(void);
+int watchdog_nmi_probe(void);
 
 /**
  * touch_nmi_watchdog - restart NMI watchdog timeout.
- * 
+ *
  * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
  * may be used to reset the timeout - for code which intentionally
  * disables interrupts for a long time. This call is stateless.
@@ -153,22 +185,6 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
 u64 hw_nmi_get_sample_period(int watchdog_thresh);
 #endif
 
-#ifdef CONFIG_LOCKUP_DETECTOR
-extern int nmi_watchdog_enabled;
-extern int watchdog_user_enabled;
-extern int watchdog_thresh;
-extern unsigned long watchdog_enabled;
-extern struct cpumask watchdog_cpumask;
-extern unsigned long *watchdog_cpumask_bits;
-extern int __read_mostly watchdog_suspended;
-#ifdef CONFIG_SMP
-extern int sysctl_softlockup_all_cpu_backtrace;
-extern int sysctl_hardlockup_all_cpu_backtrace;
-#else
-#define sysctl_softlockup_all_cpu_backtrace 0
-#define sysctl_hardlockup_all_cpu_backtrace 0
-#endif
-
 #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
     defined(CONFIG_HARDLOCKUP_DETECTOR)
 void watchdog_update_hrtimer_threshold(u64 period);
@@ -176,7 +192,6 @@ void watchdog_update_hrtimer_threshold(u64 period);
 static inline void watchdog_update_hrtimer_threshold(u64 period) { }
 #endif
 
-extern bool is_hardlockup(void);
 struct ctl_table;
 extern int proc_watchdog(struct ctl_table *, int ,
                         void __user *, size_t *, loff_t *);
@@ -188,18 +203,6 @@ extern int proc_watchdog_thresh(struct ctl_table *, int ,
                                void __user *, size_t *, loff_t *);
 extern int proc_watchdog_cpumask(struct ctl_table *, int,
                                 void __user *, size_t *, loff_t *);
-extern int lockup_detector_suspend(void);
-extern void lockup_detector_resume(void);
-#else
-static inline int lockup_detector_suspend(void)
-{
-       return 0;
-}
-
-static inline void lockup_detector_resume(void)
-{
-}
-#endif
 
 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
 #include <asm/nmi.h>
index 9c5cb44808063d8531cfc800beb1e3f5e0e440a4..a726f96010d59d9d99eda4e6d5d5ea9776ebdd18 100644 (file)
@@ -346,11 +346,6 @@ struct nvme_fc_remote_port {
  *       indicating an FC transport Aborted status.
  *       Entrypoint is Mandatory.
  *
- * @defer_rcv:  Called by the transport to signal the LLLD that it has
- *       begun processing of a previously received NVME CMD IU. The LLDD
- *       is now free to re-use the rcv buffer associated with the
- *       nvmefc_tgt_fcp_req.
- *
  * @max_hw_queues:  indicates the maximum number of hw queues the LLDD
  *       supports for cpu affinitization.
  *       Value is Mandatory. Must be at least 1.
@@ -806,11 +801,19 @@ struct nvmet_fc_target_port {
  *       outstanding operation (if there was one) to complete, then will
  *       call the fcp_req_release() callback to return the command's
  *       exchange context back to the LLDD.
+ *       Entrypoint is Mandatory.
  *
  * @fcp_req_release:  Called by the transport to return a nvmefc_tgt_fcp_req
  *       to the LLDD after all operations on the fcp operation are complete.
  *       This may be due to the command completing or upon completion of
  *       abort cleanup.
+ *       Entrypoint is Mandatory.
+ *
+ * @defer_rcv:  Called by the transport to signal the LLDD that it has
+ *       begun processing of a previously received NVME CMD IU. The LLDD
+ *       is now free to re-use the rcv buffer associated with the
+ *       nvmefc_tgt_fcp_req.
+ *       Entrypoint is Optional.
  *
  * @max_hw_queues:  indicates the maximum number of hw queues the LLDD
  *       supports for cpu affinitization.
index 87723c86f136f0e48c64a4c39fa9f0dcb3ad979d..9310ce77d8e1f3b324e8c046ba414084530826e9 100644 (file)
@@ -471,12 +471,14 @@ enum nvme_opcode {
  *
  * @NVME_SGL_FMT_ADDRESS:     absolute address of the data block
  * @NVME_SGL_FMT_OFFSET:      relative offset of the in-capsule data block
+ * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA
  * @NVME_SGL_FMT_INVALIDATE:  RDMA transport specific remote invalidation
  *                            request subtype
  */
 enum {
        NVME_SGL_FMT_ADDRESS            = 0x00,
        NVME_SGL_FMT_OFFSET             = 0x01,
+       NVME_SGL_FMT_TRANSPORT_A        = 0x0A,
        NVME_SGL_FMT_INVALIDATE         = 0x0f,
 };
 
@@ -490,12 +492,16 @@ enum {
  *
  * For struct nvme_keyed_sgl_desc:
  *   @NVME_KEY_SGL_FMT_DATA_DESC:      keyed data block descriptor
+ *
+ * Transport-specific SGL types:
+ *   @NVME_TRANSPORT_SGL_DATA_DESC:    Transport SGL data dlock descriptor
  */
 enum {
        NVME_SGL_FMT_DATA_DESC          = 0x00,
        NVME_SGL_FMT_SEG_DESC           = 0x02,
        NVME_SGL_FMT_LAST_SEG_DESC      = 0x03,
        NVME_KEY_SGL_FMT_DATA_DESC      = 0x04,
+       NVME_TRANSPORT_SGL_DATA_DESC    = 0x05,
 };
 
 struct nvme_sgl_desc {
@@ -1127,19 +1133,6 @@ enum {
        NVME_SC_UNWRITTEN_BLOCK         = 0x287,
 
        NVME_SC_DNR                     = 0x4000,
-
-
-       /*
-        * FC Transport-specific error status values for NVME commands
-        *
-        * Transport-specific status code values must be in the range 0xB0..0xBF
-        */
-
-       /* Generic FC failure - catchall */
-       NVME_SC_FC_TRANSPORT_ERROR      = 0x00B0,
-
-       /* I/O failure due to FC ABTS'd */
-       NVME_SC_FC_TRANSPORT_ABORTED    = 0x00B1,
 };
 
 struct nvme_completion {
index cfc34117fc9203eefc2702a87271a95f761a2a3a..b240ed69dc9626a266498807e568a5b09c0b888b 100644 (file)
@@ -734,6 +734,16 @@ static inline struct device_node *of_get_cpu_node(int cpu,
        return NULL;
 }
 
+static inline int of_n_addr_cells(struct device_node *np)
+{
+       return 0;
+
+}
+static inline int of_n_size_cells(struct device_node *np)
+{
+       return 0;
+}
+
 static inline int of_property_read_u64(const struct device_node *np,
                                       const char *propname, u64 *out_value)
 {
index e0d1946270f38e5238ddf0a3bb25cf03c3f3ebe4..fb908e598348097712981459b969f45fed4a8d09 100644 (file)
@@ -57,7 +57,14 @@ extern const struct of_device_id of_default_bus_match_table[];
 extern struct platform_device *of_device_alloc(struct device_node *np,
                                         const char *bus_id,
                                         struct device *parent);
+#ifdef CONFIG_OF
 extern struct platform_device *of_find_device_by_node(struct device_node *np);
+#else
+static inline struct platform_device *of_find_device_by_node(struct device_node *np)
+{
+       return NULL;
+}
+#endif
 
 /* Platform devices and busses creation */
 extern struct platform_device *of_platform_device_create(struct device_node *np,
index f68c58a93dd045b9b58de9882088621253ec7758..f4f8ee5a7362e982c0084d619c8ed1996a38f770 100644 (file)
@@ -1685,6 +1685,8 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
 
 #define dev_is_pci(d) (false)
 #define dev_is_pf(d) (false)
+static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
+{ return false; }
 #endif /* CONFIG_PCI */
 
 /* Include architecture-dependent settings and functions */
index 032b559091450ad17dd752cff3bdf58f297aeb18..6737a8c9e8c694c8bbf2e04d67de4da6b961d3e6 100644 (file)
@@ -27,16 +27,17 @@ enum pm_qos_flags_status {
        PM_QOS_FLAGS_ALL,
 };
 
-#define PM_QOS_DEFAULT_VALUE -1
+#define PM_QOS_DEFAULT_VALUE   (-1)
+#define PM_QOS_LATENCY_ANY     S32_MAX
 
 #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE       (2000 * USEC_PER_SEC)
 #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE       (2000 * USEC_PER_SEC)
 #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE        0
 #define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE  0
 #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE    0
+#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT    PM_QOS_LATENCY_ANY
 #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
 #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
-#define PM_QOS_LATENCY_ANY                     ((s32)(~(__u32)0 >> 1))
 
 #define PM_QOS_FLAG_NO_POWER_OFF       (1 << 0)
 #define PM_QOS_FLAG_REMOTE_WAKEUP      (1 << 1)
index b1fd8bf85fdc430eaaa2195cd6dc18417bb64585..2bea1d5e99302bd1b440d595f9df7ab30531e228 100644 (file)
@@ -276,7 +276,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
 #define list_entry_rcu(ptr, type, member) \
        container_of(lockless_dereference(ptr), type, member)
 
-/**
+/*
  * Where are list_empty_rcu() and list_first_entry_rcu()?
  *
  * Implementing those functions following their counterparts list_empty() and
index de50d8a4cf414121a9f83e50df7738a5f530494a..1a9f70d44af954ffe790dcb75872704beab152b2 100644 (file)
@@ -523,7 +523,7 @@ static inline void rcu_preempt_sleep_check(void) { }
  * Return the value of the specified RCU-protected pointer, but omit
  * both the smp_read_barrier_depends() and the READ_ONCE().  This
  * is useful in cases where update-side locks prevent the value of the
- * pointer from changing.  Please note that this primitive does -not-
+ * pointer from changing.  Please note that this primitive does *not*
  * prevent the compiler from repeating this reference or combining it
  * with other references, so it should not be used without protection
  * of appropriate locks.
@@ -568,7 +568,7 @@ static inline void rcu_preempt_sleep_check(void) { }
  * is handed off from RCU to some other synchronization mechanism, for
  * example, reference counting or locking.  In C11, it would map to
  * kill_dependency().  It could be used as follows:
- *
+ * ``
  *     rcu_read_lock();
  *     p = rcu_dereference(gp);
  *     long_lived = is_long_lived(p);
@@ -579,6 +579,7 @@ static inline void rcu_preempt_sleep_check(void) { }
  *                     p = rcu_pointer_handoff(p);
  *     }
  *     rcu_read_unlock();
+ *``
  */
 #define rcu_pointer_handoff(p) (p)
 
@@ -778,18 +779,21 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 
 /**
  * RCU_INIT_POINTER() - initialize an RCU protected pointer
+ * @p: The pointer to be initialized.
+ * @v: The value to initialized the pointer to.
  *
  * Initialize an RCU-protected pointer in special cases where readers
  * do not need ordering constraints on the CPU or the compiler.  These
  * special cases are:
  *
- * 1.  This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
+ * 1.  This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
  * 2.  The caller has taken whatever steps are required to prevent
- *     RCU readers from concurrently accessing this pointer -or-
+ *     RCU readers from concurrently accessing this pointer *or*
  * 3.  The referenced data structure has already been exposed to
- *     readers either at compile time or via rcu_assign_pointer() -and-
- *     a.      You have not made -any- reader-visible changes to
- *             this structure since then -or-
+ *     readers either at compile time or via rcu_assign_pointer() *and*
+ *
+ *     a.      You have not made *any* reader-visible changes to
+ *             this structure since then *or*
  *     b.      It is OK for readers accessing this structure from its
  *             new location to see the old state of the structure.  (For
  *             example, the changes were to statistical counters or to
@@ -805,7 +809,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
  * by a single external-to-structure RCU-protected pointer, then you may
  * use RCU_INIT_POINTER() to initialize the internal RCU-protected
  * pointers, but you must use rcu_assign_pointer() to initialize the
- * external-to-structure pointer -after- you have completely initialized
+ * external-to-structure pointer *after* you have completely initialized
  * the reader-accessible portions of the linked structure.
  *
  * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
@@ -819,6 +823,8 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 
 /**
  * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
+ * @p: The pointer to be initialized.
+ * @v: The value to initialized the pointer to.
  *
  * GCC-style initialization for an RCU-protected pointer in a structure field.
  */
index 92fb8dd5a9e4884bfde2225bb0c6d933ed13c660..26a7df4e558c249c40b979ead73425989f8e8c7f 100644 (file)
@@ -65,25 +65,23 @@ struct task_group;
  */
 
 /* Used in tsk->state: */
-#define TASK_RUNNING                   0
-#define TASK_INTERRUPTIBLE             1
-#define TASK_UNINTERRUPTIBLE           2
-#define __TASK_STOPPED                 4
-#define __TASK_TRACED                  8
+#define TASK_RUNNING                   0x0000
+#define TASK_INTERRUPTIBLE             0x0001
+#define TASK_UNINTERRUPTIBLE           0x0002
+#define __TASK_STOPPED                 0x0004
+#define __TASK_TRACED                  0x0008
 /* Used in tsk->exit_state: */
-#define EXIT_DEAD                      16
-#define EXIT_ZOMBIE                    32
+#define EXIT_DEAD                      0x0010
+#define EXIT_ZOMBIE                    0x0020
 #define EXIT_TRACE                     (EXIT_ZOMBIE | EXIT_DEAD)
 /* Used in tsk->state again: */
-#define TASK_DEAD                      64
-#define TASK_WAKEKILL                  128
-#define TASK_WAKING                    256
-#define TASK_PARKED                    512
-#define TASK_NOLOAD                    1024
-#define TASK_NEW                       2048
-#define TASK_STATE_MAX                 4096
-
-#define TASK_STATE_TO_CHAR_STR         "RSDTtXZxKWPNn"
+#define TASK_PARKED                    0x0040
+#define TASK_DEAD                      0x0080
+#define TASK_WAKEKILL                  0x0100
+#define TASK_WAKING                    0x0200
+#define TASK_NOLOAD                    0x0400
+#define TASK_NEW                       0x0800
+#define TASK_STATE_MAX                 0x1000
 
 /* Convenience macros for the sake of set_current_state: */
 #define TASK_KILLABLE                  (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
@@ -99,7 +97,8 @@ struct task_group;
 /* get_task_state(): */
 #define TASK_REPORT                    (TASK_RUNNING | TASK_INTERRUPTIBLE | \
                                         TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
-                                        __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
+                                        __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
+                                        TASK_PARKED)
 
 #define task_is_traced(task)           ((task->state & __TASK_TRACED) != 0)
 
@@ -1243,17 +1242,34 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
        return task_pgrp_nr_ns(tsk, &init_pid_ns);
 }
 
-static inline char task_state_to_char(struct task_struct *task)
+#define TASK_REPORT_IDLE       (TASK_REPORT + 1)
+#define TASK_REPORT_MAX                (TASK_REPORT_IDLE << 1)
+
+static inline unsigned int __get_task_state(struct task_struct *tsk)
+{
+       unsigned int tsk_state = READ_ONCE(tsk->state);
+       unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
+
+       BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
+
+       if (tsk_state == TASK_IDLE)
+               state = TASK_REPORT_IDLE;
+
+       return fls(state);
+}
+
+static inline char __task_state_to_char(unsigned int state)
 {
-       const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
-       unsigned long state = task->state;
+       static const char state_char[] = "RSDTtXZPI";
 
-       state = state ? __ffs(state) + 1 : 0;
+       BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
 
-       /* Make sure the string lines up properly with the number of task states: */
-       BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1);
+       return state_char[state];
+}
 
-       return state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?';
+static inline char task_state_to_char(struct task_struct *tsk)
+{
+       return __task_state_to_char(__get_task_state(tsk));
 }
 
 /**
index 3a19c253bdb1c52faaed181db73077d6eec6e989..ab9bf7b739545fc3526d94fcd7fa7cd3d6b11054 100644 (file)
@@ -84,6 +84,12 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
 
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
+#ifdef CONFIG_MMU
+/* same as above but performs the slow path from the async context. Can
+ * be called from the atomic context as well
+ */
+void mmput_async(struct mm_struct *);
+#endif
 
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
@@ -205,4 +211,20 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
        current->flags = (current->flags & ~PF_MEMALLOC) | flags;
 }
 
+#ifdef CONFIG_MEMBARRIER
+enum {
+       MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY        = (1U << 0),
+       MEMBARRIER_STATE_SWITCH_MM                      = (1U << 1),
+};
+
+static inline void membarrier_execve(struct task_struct *t)
+{
+       atomic_set(&t->mm->membarrier_state, 0);
+}
+#else
+static inline void membarrier_execve(struct task_struct *t)
+{
+}
+#endif
+
 #endif /* _LINUX_SCHED_MM_H */
index d7b6dab956ec4a130cd835d255b3a93bab764e54..7d065abc7a470d7b7efdadb634788c695d809af5 100644 (file)
@@ -71,14 +71,6 @@ struct sched_domain_shared {
        atomic_t        ref;
        atomic_t        nr_busy_cpus;
        int             has_idle_cores;
-
-       /*
-        * Some variables from the most recent sd_lb_stats for this domain,
-        * used by wake_affine().
-        */
-       unsigned long   nr_running;
-       unsigned long   load;
-       unsigned long   capacity;
 };
 
 struct sched_domain {
index 82b171e1aa0b8e0f074b0300f630c909a91d6a16..da803dfc7a3980a732ed328db276e12a1ae75e03 100644 (file)
@@ -231,7 +231,7 @@ struct sctp_datahdr {
        __be32 tsn;
        __be16 stream;
        __be16 ssn;
-       __be32 ppid;
+       __u32 ppid;
        __u8  payload[0];
 };
 
@@ -716,28 +716,28 @@ struct sctp_reconf_chunk {
 
 struct sctp_strreset_outreq {
        struct sctp_paramhdr param_hdr;
-       __u32 request_seq;
-       __u32 response_seq;
-       __u32 send_reset_at_tsn;
-       __u16 list_of_streams[0];
+       __be32 request_seq;
+       __be32 response_seq;
+       __be32 send_reset_at_tsn;
+       __be16 list_of_streams[0];
 };
 
 struct sctp_strreset_inreq {
        struct sctp_paramhdr param_hdr;
-       __u32 request_seq;
-       __u16 list_of_streams[0];
+       __be32 request_seq;
+       __be16 list_of_streams[0];
 };
 
 struct sctp_strreset_tsnreq {
        struct sctp_paramhdr param_hdr;
-       __u32 request_seq;
+       __be32 request_seq;
 };
 
 struct sctp_strreset_addstrm {
        struct sctp_paramhdr param_hdr;
-       __u32 request_seq;
-       __u16 number_of_streams;
-       __u16 reserved;
+       __be32 request_seq;
+       __be16 number_of_streams;
+       __be16 reserved;
 };
 
 enum {
@@ -752,16 +752,16 @@ enum {
 
 struct sctp_strreset_resp {
        struct sctp_paramhdr param_hdr;
-       __u32 response_seq;
-       __u32 result;
+       __be32 response_seq;
+       __be32 result;
 };
 
 struct sctp_strreset_resptsn {
        struct sctp_paramhdr param_hdr;
-       __u32 response_seq;
-       __u32 result;
-       __u32 senders_next_tsn;
-       __u32 receivers_next_tsn;
+       __be32 response_seq;
+       __be32 result;
+       __be32 senders_next_tsn;
+       __be32 receivers_next_tsn;
 };
 
 #endif /* __LINUX_SCTP_H__ */
index ecc296c137cd2e3e926821dc357e5c00563bedd4..c8bef436b61df236f5b600622ef54da883e1d7a7 100644 (file)
@@ -3,7 +3,8 @@
 
 #include <uapi/linux/seccomp.h>
 
-#define SECCOMP_FILTER_FLAG_MASK       (SECCOMP_FILTER_FLAG_TSYNC)
+#define SECCOMP_FILTER_FLAG_MASK       (SECCOMP_FILTER_FLAG_TSYNC | \
+                                        SECCOMP_FILTER_FLAG_LOG)
 
 #ifdef CONFIG_SECCOMP
 
index 12910cf19869c7db32d1ca34ddda0931e80570d1..c149aa7bedf306c8a04d9fd61cb8930af12d416f 100644 (file)
@@ -55,7 +55,7 @@ smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
 }
 
 void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-                                        const struct cpumask *);
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+                                         const struct cpumask *);
 
 #endif
index 39af9bc0f653ec97f739b2e803385cba669d1aa7..62be8966e8370535ac6cb9b15b7c154e23381dda 100644 (file)
@@ -78,6 +78,7 @@ void synchronize_srcu(struct srcu_struct *sp);
 
 /**
  * srcu_read_lock_held - might we be in SRCU read-side critical section?
+ * @sp: The srcu_struct structure to check
  *
  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
  * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
index 73e97a08d3d0a9be1e5467b11d1b0a0ad235b063..cf30f50224721a27e412c3a959928844a622d96d 100644 (file)
@@ -9,13 +9,16 @@
 /*
  * Simple wait queues
  *
- * While these are very similar to the other/complex wait queues (wait.h) the
- * most important difference is that the simple waitqueue allows for
- * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
- * times.
+ * While these are very similar to regular wait queues (wait.h) the most
+ * important difference is that the simple waitqueue allows for deterministic
+ * behaviour -- IOW it has strictly bounded IRQ and lock hold times.
  *
- * In order to make this so, we had to drop a fair number of features of the
- * other waitqueue code; notably:
+ * Mainly, this is accomplished by two things. Firstly not allowing swake_up_all
+ * from IRQ disabled, and dropping the lock upon every wakeup, giving a higher
+ * priority task a chance to run.
+ *
+ * Secondly, we had to drop a fair number of features of the other waitqueue
+ * code; notably:
  *
  *  - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
  *    all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
  *  - the exclusive mode; because this requires preserving the list order
  *    and this is hard.
  *
- *  - custom wake functions; because you cannot give any guarantees about
- *    random code.
- *
- * As a side effect of this; the data structures are slimmer.
+ *  - custom wake callback functions; because you cannot give any guarantees
+ *    about random code. This also allows swait to be used in RT, such that
+ *    raw spinlock can be used for the swait queue head.
  *
- * One would recommend using this wait queue where possible.
+ * As a side effect of these; the data structures are slimmer albeit more ad-hoc.
+ * For all the above, note that simple wait queues should _only_ be used under
+ * very specific realtime constraints -- it is best to stick with the regular
+ * wait queues in most cases.
  */
 
 struct task_struct;
index 95606a2d556fd299f487240078922abf408e435d..a78186d826d7963d99f168908f4a5ec386de04e1 100644 (file)
@@ -221,21 +221,25 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
        }                                                               \
        static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 
-#ifdef TIF_FSCHECK
 /*
  * Called before coming back to user-mode. Returning to user-mode with an
  * address limit different than USER_DS can allow to overwrite kernel memory.
  */
 static inline void addr_limit_user_check(void)
 {
-
+#ifdef TIF_FSCHECK
        if (!test_thread_flag(TIF_FSCHECK))
                return;
+#endif
 
-       BUG_ON(!segment_eq(get_fs(), USER_DS));
+       if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS),
+                                 "Invalid address limit on user-mode return"))
+               force_sig(SIGKILL, current);
+
+#ifdef TIF_FSCHECK
        clear_thread_flag(TIF_FSCHECK);
-}
 #endif
+}
 
 asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
                               qid_t id, void __user *addr);
index 905d769d8ddcfdbf9657371dda29744bcedb53f5..5f7eeab990fe7eb2fa9a3ce16821f574eeee8f94 100644 (file)
@@ -42,7 +42,7 @@ enum {
 #define THREAD_ALIGN   THREAD_SIZE
 #endif
 
-#ifdef CONFIG_DEBUG_STACK_USAGE
+#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
 # define THREADINFO_GFP                (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
                                 __GFP_ZERO)
 #else
index e6789b8757d5021439c332751e52726bde83bec1..6383c528b1484fed5731b3074e1d1accf414ff31 100644 (file)
@@ -168,6 +168,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
 #define setup_pinned_deferrable_timer_on_stack(timer, fn, data)                \
        __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
 
+#define TIMER_DATA_TYPE                unsigned long
+#define TIMER_FUNC_TYPE                void (*)(TIMER_DATA_TYPE)
+
+static inline void timer_setup(struct timer_list *timer,
+                              void (*callback)(struct timer_list *),
+                              unsigned int flags)
+{
+       __setup_timer(timer, (TIMER_FUNC_TYPE)callback,
+                     (TIMER_DATA_TYPE)timer, flags);
+}
+
+#define from_timer(var, callback_timer, timer_fieldname) \
+       container_of(callback_timer, typeof(*var), timer_fieldname)
+
 /**
  * timer_pending - is a timer pending?
  * @timer: the timer in question
index 7f11050746aecb21dc6f12e9b9d58e6d05d7d9d9..2e0f22298fe952748edbc68b35d5e3435335e53a 100644 (file)
@@ -272,6 +272,7 @@ struct trace_event_call {
        int                             perf_refcount;
        struct hlist_head __percpu      *perf_events;
        struct bpf_prog                 *prog;
+       struct perf_event               *bpf_prog_owner;
 
        int     (*perf_perm)(struct trace_event_call *,
                             struct perf_event *);
index 93568bd0a3520bb7402f04d90cf04ac99c81cfbe..06a6765da074449e6f1fe42ee05e711e898ad372 100644 (file)
@@ -271,7 +271,7 @@ static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
 static inline struct dst_entry *dst_clone(struct dst_entry *dst)
 {
        if (dst)
-               atomic_inc(&dst->__refcnt);
+               dst_hold(dst);
        return dst;
 }
 
@@ -311,21 +311,6 @@ static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb
        __skb_dst_copy(nskb, oskb->_skb_refdst);
 }
 
-/**
- * skb_dst_force - makes sure skb dst is refcounted
- * @skb: buffer
- *
- * If dst is not yet refcounted, let's do it
- */
-static inline void skb_dst_force(struct sk_buff *skb)
-{
-       if (skb_dst_is_noref(skb)) {
-               WARN_ON(!rcu_read_lock_held());
-               skb->_skb_refdst &= ~SKB_DST_NOREF;
-               dst_clone(skb_dst(skb));
-       }
-}
-
 /**
  * dst_hold_safe - Take a reference on a dst if possible
  * @dst: pointer to dst entry
@@ -339,16 +324,17 @@ static inline bool dst_hold_safe(struct dst_entry *dst)
 }
 
 /**
- * skb_dst_force_safe - makes sure skb dst is refcounted
+ * skb_dst_force - makes sure skb dst is refcounted
  * @skb: buffer
  *
  * If dst is not yet refcounted and not destroyed, grab a ref on it.
  */
-static inline void skb_dst_force_safe(struct sk_buff *skb)
+static inline void skb_dst_force(struct sk_buff *skb)
 {
        if (skb_dst_is_noref(skb)) {
                struct dst_entry *dst = skb_dst(skb);
 
+               WARN_ON(!rcu_read_lock_held());
                if (!dst_hold_safe(dst))
                        dst = NULL;
 
index 4e6131cd3f43dfb711049a1925abbea6074ec25b..ac1a2317941e7186abd01d3df0bf9ebc055be38b 100644 (file)
@@ -146,6 +146,7 @@ static void fq_tin_enqueue(struct fq *fq,
                           fq_flow_get_default_t get_default_func)
 {
        struct fq_flow *flow;
+       bool oom;
 
        lockdep_assert_held(&fq->lock);
 
@@ -167,8 +168,8 @@ static void fq_tin_enqueue(struct fq *fq,
        }
 
        __skb_queue_tail(&flow->queue, skb);
-
-       if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) {
+       oom = (fq->memory_usage > fq->memory_limit);
+       while (fq->backlog > fq->limit || oom) {
                flow = list_first_entry_or_null(&fq->backlogs,
                                                struct fq_flow,
                                                backlogchain);
@@ -183,8 +184,10 @@ static void fq_tin_enqueue(struct fq *fq,
 
                flow->tin->overlimit++;
                fq->overlimit++;
-               if (fq->memory_usage > fq->memory_limit)
+               if (oom) {
                        fq->overmemory++;
+                       oom = (fq->memory_usage > fq->memory_limit);
+               }
        }
 }
 
index aa95053dfc78d35d04aef276e2a5dce7343f72a0..db8162dd8c0bcbcaffcb1a0f6da1be139a5008d4 100644 (file)
@@ -96,7 +96,7 @@ struct inet_request_sock {
        kmemcheck_bitfield_end(flags);
        u32                     ir_mark;
        union {
-               struct ip_options_rcu   *opt;
+               struct ip_options_rcu __rcu     *ireq_opt;
 #if IS_ENABLED(CONFIG_IPV6)
                struct {
                        struct ipv6_txoptions   *ipv6_opt;
@@ -132,6 +132,12 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
        return sk->sk_bound_dev_if;
 }
 
+static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
+{
+       return rcu_dereference_check(ireq->ireq_opt,
+                                    refcount_read(&ireq->req.rsk_refcnt) > 0);
+}
+
 struct inet_cork {
        unsigned int            flags;
        __be32                  addr;
index e51cf5f815977adcff74d11cdd8ea1b4a5343ce1..14c289393071945821880c39b6100dbe57df08ac 100644 (file)
@@ -773,7 +773,10 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
  */
 static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
 {
-       return nla_put(skb, attrtype, sizeof(u8), &value);
+       /* temporary variables to work around GCC PR81715 with asan-stack=1 */
+       u8 tmp = value;
+
+       return nla_put(skb, attrtype, sizeof(u8), &tmp);
 }
 
 /**
@@ -784,7 +787,9 @@ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
  */
 static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
 {
-       return nla_put(skb, attrtype, sizeof(u16), &value);
+       u16 tmp = value;
+
+       return nla_put(skb, attrtype, sizeof(u16), &tmp);
 }
 
 /**
@@ -795,7 +800,9 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
  */
 static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
 {
-       return nla_put(skb, attrtype, sizeof(__be16), &value);
+       __be16 tmp = value;
+
+       return nla_put(skb, attrtype, sizeof(__be16), &tmp);
 }
 
 /**
@@ -806,7 +813,9 @@ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
  */
 static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
 {
-       return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+       __be16 tmp = value;
+
+       return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
 }
 
 /**
@@ -817,7 +826,9 @@ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
  */
 static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
 {
-       return nla_put(skb, attrtype, sizeof(__le16), &value);
+       __le16 tmp = value;
+
+       return nla_put(skb, attrtype, sizeof(__le16), &tmp);
 }
 
 /**
@@ -828,7 +839,9 @@ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
  */
 static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
 {
-       return nla_put(skb, attrtype, sizeof(u32), &value);
+       u32 tmp = value;
+
+       return nla_put(skb, attrtype, sizeof(u32), &tmp);
 }
 
 /**
@@ -839,7 +852,9 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
  */
 static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
 {
-       return nla_put(skb, attrtype, sizeof(__be32), &value);
+       __be32 tmp = value;
+
+       return nla_put(skb, attrtype, sizeof(__be32), &tmp);
 }
 
 /**
@@ -850,7 +865,9 @@ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
  */
 static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
 {
-       return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+       __be32 tmp = value;
+
+       return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
 }
 
 /**
@@ -861,7 +878,9 @@ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
  */
 static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
 {
-       return nla_put(skb, attrtype, sizeof(__le32), &value);
+       __le32 tmp = value;
+
+       return nla_put(skb, attrtype, sizeof(__le32), &tmp);
 }
 
 /**
@@ -874,7 +893,9 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
 static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
                                    u64 value, int padattr)
 {
-       return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
+       u64 tmp = value;
+
+       return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
 }
 
 /**
@@ -887,7 +908,9 @@ static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
 static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
                               int padattr)
 {
-       return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr);
+       __be64 tmp = value;
+
+       return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr);
 }
 
 /**
@@ -900,7 +923,9 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
 static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
                                int padattr)
 {
-       return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value,
+       __be64 tmp = value;
+
+       return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp,
                            padattr);
 }
 
@@ -914,7 +939,9 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
 static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
                               int padattr)
 {
-       return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr);
+       __le64 tmp = value;
+
+       return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr);
 }
 
 /**
@@ -925,7 +952,9 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
  */
 static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
 {
-       return nla_put(skb, attrtype, sizeof(s8), &value);
+       s8 tmp = value;
+
+       return nla_put(skb, attrtype, sizeof(s8), &tmp);
 }
 
 /**
@@ -936,7 +965,9 @@ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
  */
 static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
 {
-       return nla_put(skb, attrtype, sizeof(s16), &value);
+       s16 tmp = value;
+
+       return nla_put(skb, attrtype, sizeof(s16), &tmp);
 }
 
 /**
@@ -947,7 +978,9 @@ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
  */
 static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
 {
-       return nla_put(skb, attrtype, sizeof(s32), &value);
+       s32 tmp = value;
+
+       return nla_put(skb, attrtype, sizeof(s32), &tmp);
 }
 
 /**
@@ -960,7 +993,9 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
 static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
                              int padattr)
 {
-       return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr);
+       s64 tmp = value;
+
+       return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr);
 }
 
 /**
@@ -1010,7 +1045,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
 static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
                                  __be32 addr)
 {
-       return nla_put_be32(skb, attrtype, addr);
+       __be32 tmp = addr;
+
+       return nla_put_be32(skb, attrtype, tmp);
 }
 
 /**
index e80edd8879efc70f96dc0fbe91714c48714edc54..3009547f3c66347b38be87831ec17d7aa7a149ed 100644 (file)
@@ -2,6 +2,7 @@
 #define __NET_PKT_CLS_H
 
 #include <linux/pkt_cls.h>
+#include <linux/workqueue.h>
 #include <net/sch_generic.h>
 #include <net/act_api.h>
 
@@ -17,6 +18,8 @@ struct tcf_walker {
 int register_tcf_proto_ops(struct tcf_proto_ops *ops);
 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
 
+bool tcf_queue_work(struct work_struct *work);
+
 #ifdef CONFIG_NET_CLS
 struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                bool create);
index 65ba335b0e7e66bb7f1b4bd279d31e616e0dd31e..4fc75f7ae23beb47674b04df27ac6ef4594679fe 100644 (file)
@@ -39,8 +39,8 @@
 
 /* This is used to register protocols. */
 struct net_protocol {
-       void                    (*early_demux)(struct sk_buff *skb);
-       void                    (*early_demux_handler)(struct sk_buff *skb);
+       int                     (*early_demux)(struct sk_buff *skb);
+       int                     (*early_demux_handler)(struct sk_buff *skb);
        int                     (*handler)(struct sk_buff *skb);
        void                    (*err_handler)(struct sk_buff *skb, u32 info);
        unsigned int            no_policy:1,
index 1b09a9368c68d46f0c5ee8ce3cefe566000c1ec1..d538e6db1afef1e7ab50d8b491949e8ccdeca4a8 100644 (file)
@@ -175,7 +175,9 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
        fl4->fl4_gre_key = gre_key;
        return ip_route_output_key(net, fl4);
 }
-
+int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+                         u8 tos, struct net_device *dev,
+                         struct in_device *in_dev, u32 *itag);
 int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
                         u8 tos, struct net_device *devin);
 int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
@@ -190,7 +192,7 @@ static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
        rcu_read_lock();
        err = ip_route_input_noref(skb, dst, src, tos, devin);
        if (!err) {
-               skb_dst_force_safe(skb);
+               skb_dst_force(skb);
                if (!skb_dst(skb))
                        err = -EINVAL;
        }
index 135f5a2dd93122dd905557028068a31aeea37cb0..0dec8a23be574cd54e4dab927f6df5c7879c33aa 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/dynamic_queue_limits.h>
 #include <linux/list.h>
 #include <linux/refcount.h>
+#include <linux/workqueue.h>
 #include <net/gen_stats.h>
 #include <net/rtnetlink.h>
 
@@ -271,6 +272,7 @@ struct tcf_chain {
 
 struct tcf_block {
        struct list_head chain_list;
+       struct work_struct work;
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
index 2db3d3a9ce1d2cd053954bff3d66d06a9246e5c1..88233cf8b8d4f1c4d59a7a897b7a4c4af9f8c779 100644 (file)
@@ -261,7 +261,7 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
                                    struct sctp_fwdtsn_skip *skiplist);
 struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc);
 struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc,
-                                         __u16 stream_num, __u16 *stream_list,
+                                         __u16 stream_num, __be16 *stream_list,
                                          bool out, bool in);
 struct sctp_chunk *sctp_make_strreset_tsnreq(
                                        const struct sctp_association *asoc);
index b8c86ec1a8f5a4b3f025de849b8a6da5772b7427..231dc42f1da687b647bcf43d2fb856e252f4b881 100644 (file)
@@ -130,7 +130,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
 
 struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
        const struct sctp_association *asoc, __u16 flags,
-       __u16 stream_num, __u16 *stream_list, gfp_t gfp);
+       __u16 stream_num, __be16 *stream_list, gfp_t gfp);
 
 struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
        const struct sctp_association *asoc, __u16 flags,
index 03a362568357acc7278a318423dd3873103f90ca..a6b9a8d1a6df3f72df8f1aac0f577257fa6452d0 100644 (file)
@@ -856,7 +856,7 @@ void sk_stream_write_space(struct sock *sk);
 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
        /* dont let skb dst not refcounted, we are going to leave rcu lock */
-       skb_dst_force_safe(skb);
+       skb_dst_force(skb);
 
        if (!sk->sk_backlog.tail)
                sk->sk_backlog.head = skb;
index 7dc131d62ad5c3a5a8cb67c91d1f3d86e7207410..d96b59f45ebaac1280f1ca53bb923a0147ade6a9 100644 (file)
@@ -74,10 +74,9 @@ struct strparser {
        u32 unrecov_intr : 1;
 
        struct sk_buff **skb_nextp;
-       struct timer_list msg_timer;
        struct sk_buff *skb_head;
        unsigned int need_bytes;
-       struct delayed_work delayed_work;
+       struct delayed_work msg_timer_work;
        struct work_struct work;
        struct strp_stats stats;
        struct strp_callbacks cb;
index b510f284427aabc1f508d24d29d0f812e5e0aa61..33599d17522d6a19b9d9a316cc1579cd5e71ee32 100644 (file)
@@ -345,7 +345,7 @@ void tcp_v4_err(struct sk_buff *skb, u32);
 
 void tcp_shutdown(struct sock *sk, int how);
 
-void tcp_v4_early_demux(struct sk_buff *skb);
+int tcp_v4_early_demux(struct sk_buff *skb);
 int tcp_v4_rcv(struct sk_buff *skb);
 
 int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
@@ -544,7 +544,6 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
                     int min_tso_segs);
 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
                               int nonagle);
-bool tcp_may_send_now(struct sock *sk);
 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 void tcp_retransmit_timer(struct sock *sk);
@@ -841,6 +840,12 @@ struct tcp_skb_cb {
                        struct inet6_skb_parm   h6;
 #endif
                } header;       /* For incoming skbs */
+               struct {
+                       __u32 key;
+                       __u32 flags;
+                       struct bpf_map *map;
+                       void *data_end;
+               } bpf;
        };
 };
 
index 12dfbfe2e2d7853427e244f9d6e2e39ca19bd41e..6c759c8594e25c7f9f79dc6bf76325c39e705f94 100644 (file)
@@ -259,7 +259,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
        return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err);
 }
 
-void udp_v4_early_demux(struct sk_buff *skb);
+int udp_v4_early_demux(struct sk_buff *skb);
 bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
 int udp_get_port(struct sock *sk, unsigned short snum,
                 int (*saddr_cmp)(const struct sock *,
index bdb1279a415b39f0597a02a6eb36aa6bd5e7ded5..e8608b2dc844fb27c0e7e5ceb359fe357ecf6553 100644 (file)
@@ -285,7 +285,7 @@ enum ib_tm_cap_flags {
        IB_TM_CAP_RC                = 1 << 0,
 };
 
-struct ib_xrq_caps {
+struct ib_tm_caps {
        /* Max size of RNDV header */
        u32 max_rndv_hdr_size;
        /* Max number of entries in tag matching list */
@@ -358,7 +358,7 @@ struct ib_device_attr {
        struct ib_rss_caps      rss_caps;
        u32                     max_wq_type_rq;
        u32                     raw_packet_caps; /* Use ib_raw_packet_caps enum */
-       struct ib_xrq_caps      xrq_caps;
+       struct ib_tm_caps       tm_caps;
 };
 
 enum ib_mtu {
@@ -1739,7 +1739,7 @@ struct ib_mr {
        u32                lkey;
        u32                rkey;
        u64                iova;
-       u32                length;
+       u64                length;
        unsigned int       page_size;
        bool               need_inval;
        union {
index 82e93ee94708c9f7de073b3e6c47826664525075..67c5a9f223f77aa19d6f5c8b7badba1ecf138299 100644 (file)
@@ -192,6 +192,7 @@ struct scsi_device {
        unsigned no_dif:1;      /* T10 PI (DIF) should be disabled */
        unsigned broken_fua:1;          /* Don't set FUA bit */
        unsigned lun_in_cdb:1;          /* Store LUN bits in CDB[1] */
+       unsigned unmap_limit_for_ws:1;  /* Use the UNMAP limit for WRITE SAME */
 
        atomic_t disk_events_disable_depth; /* disable depth for disk events */
 
index 9592570e092a3bba93065d6e66a21665f8bd52ae..36b03013d6291cf9fda3c99eec690d3841f0ca5e 100644 (file)
@@ -29,5 +29,6 @@
 #define BLIST_TRY_VPD_PAGES    0x10000000 /* Attempt to read VPD pages */
 #define BLIST_NO_RSOC          0x20000000 /* don't try to issue RSOC */
 #define BLIST_MAX_1024         0x40000000 /* maximum 1024 sector cdb length */
+#define BLIST_UNMAP_LIMIT_WS   0x80000000 /* Use UNMAP limit for WRITE SAME */
 
 #endif
index 6183d20a01fbe8f94bff514c8c8caa7585185b62..b266d2a3bcb1d099ed4a16237a8f06ab820714d5 100644 (file)
@@ -434,7 +434,6 @@ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
                                                unsigned int target_id);
 extern void iscsi_remove_session(struct iscsi_cls_session *session);
 extern void iscsi_free_session(struct iscsi_cls_session *session);
-extern int iscsi_destroy_session(struct iscsi_cls_session *session);
 extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
                                                int dd_size, uint32_t cid);
 extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
index bd7246de58e7c4d0cdca0cbd233aece13a311b35..a1f1152bc687613b87d7d4a1f73a3d139ef9f354 100644 (file)
@@ -248,6 +248,9 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
                             void *private_data);
 void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
 #define snd_ctl_sync_vmaster_hook(kctl)        snd_ctl_sync_vmaster(kctl, true)
+int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
+                                int (*func)(struct snd_kcontrol *, void *),
+                                void *arg);
 
 /*
  * Helper functions for jack-detection controls
index d0509db6d0ec56b8034388dcf1d3492c2dd7955e..f89cd5ee1c7aae51f6975d31b5e43007fc4499e0 100644 (file)
@@ -95,6 +95,7 @@ enum {
 #define AC_VERB_SET_EAPD_BTLENABLE             0x70c
 #define AC_VERB_SET_DIGI_CONVERT_1             0x70d
 #define AC_VERB_SET_DIGI_CONVERT_2             0x70e
+#define AC_VERB_SET_DIGI_CONVERT_3             0x73e
 #define AC_VERB_SET_VOLUME_KNOB_CONTROL                0x70f
 #define AC_VERB_SET_GPIO_DATA                  0x715
 #define AC_VERB_SET_GPIO_MASK                  0x716
index a03acd0d398a433d0d93af683416e5eb1e0688e6..695257ae64acccc1df2fc68e8b93b2744d5c98e8 100644 (file)
@@ -60,6 +60,7 @@ struct snd_virmidi_dev {
        int port;                       /* created/attached port */
        unsigned int flags;             /* SNDRV_VIRMIDI_* */
        rwlock_t filelist_lock;
+       struct rw_semaphore filelist_sem;
        struct list_head filelist;
 };
 
index ae1409ffe99a00817f02a2cc51771840712fc1c3..3c8b7f6256701b6c6eeb05b2bae9387fa95df692 100644 (file)
@@ -114,7 +114,10 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
         * Preemption ignores task state, therefore preempted tasks are always
         * RUNNING (we will not have dequeued if state != RUNNING).
         */
-       return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
+       if (preempt)
+               return TASK_STATE_MAX;
+
+       return __get_task_state(p);
 }
 #endif /* CREATE_TRACE_POINTS */
 
@@ -152,12 +155,14 @@ TRACE_EVENT(sched_switch,
 
        TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
                __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
-               __entry->prev_state & (TASK_STATE_MAX-1) ?
-                 __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
-                               { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
-                               { 16, "Z" }, { 32, "X" }, { 64, "x" },
-                               { 128, "K" }, { 256, "W" }, { 512, "P" },
-                               { 1024, "N" }) : "R",
+
+               (__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
+                 __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
+                               { 0x01, "S" }, { 0x02, "D" }, { 0x04, "T" },
+                               { 0x08, "t" }, { 0x10, "X" }, { 0x20, "Z" },
+                               { 0x40, "P" }, { 0x80, "I" }) :
+                 "R",
+
                __entry->prev_state & TASK_STATE_MAX ? "+" : "",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
 );
index 43ab5c402f98f3c2dc15ccc49ec475530a39e401..0d7948ce21282ad01e340d018f8dfcf811dca26b 100644 (file)
@@ -312,7 +312,7 @@ union bpf_attr {
  *     jump into another BPF program
  *     @ctx: context pointer passed to next program
  *     @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
- *     @index: index inside array that selects specific program to run
+ *     @index: 32-bit index inside array that selects specific program to run
  *     Return: 0 on success or negative error
  *
  * int bpf_clone_redirect(skb, ifindex, flags)
@@ -575,7 +575,7 @@ union bpf_attr {
  *     @map: pointer to sockmap
  *     @key: key to lookup sock in map
  *     @flags: reserved for future use
- *     Return: SK_REDIRECT
+ *     Return: SK_PASS
  *
  * int bpf_sock_map_update(skops, map, key, flags)
  *     @skops: pointer to bpf_sock_ops
@@ -786,8 +786,8 @@ struct xdp_md {
 };
 
 enum sk_action {
-       SK_ABORTED = 0,
-       SK_DROP,
+       SK_DROP = 0,
+       SK_PASS,
        SK_REDIRECT,
 };
 
index 412c06a624c83674e02f1d6669c4201d2a6c6e1b..ccaea525340b9d857a9f4fe8a5834def9c9367be 100644 (file)
@@ -269,9 +269,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY    _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR       4
-#define DM_VERSION_MINOR       36
+#define DM_VERSION_MINOR       37
 #define DM_VERSION_PATCHLEVEL  0
-#define DM_VERSION_EXTRA       "-ioctl (2017-06-09)"
+#define DM_VERSION_EXTRA       "-ioctl (2017-09-20)"
 
 /* Status bits */
 #define DM_READONLY_FLAG       (1 << 0) /* In/Out */
index 9c041dae8e2ca70ee8bf72d734db67c6118096d1..5bd1b1de4ea0670caf18ffb28e36ba9e35592605 100644 (file)
@@ -1753,6 +1753,8 @@ enum ethtool_reset_flags {
  *     %ethtool_link_mode_bit_indices for the link modes, and other
  *     link features that the link partner advertised through
  *     autonegotiation; 0 if unknown or not applicable.  Read-only.
+ * @transceiver: Used to distinguish different possible PHY types,
+ *     reported consistently by PHYLIB.  Read-only.
  *
  * If autonegotiation is disabled, the speed and @duplex represent the
  * fixed link mode and are writable if the driver supports multiple
@@ -1804,7 +1806,9 @@ struct ethtool_link_settings {
        __u8    eth_tp_mdix;
        __u8    eth_tp_mdix_ctrl;
        __s8    link_mode_masks_nwords;
-       __u32   reserved[8];
+       __u8    transceiver;
+       __u8    reserved1[3];
+       __u32   reserved[7];
        __u32   link_mode_masks[0];
        /* layout of link_mode_masks fields:
         * __u32 map_supported[link_mode_masks_nwords];
index 7b4567bacfc206a02d4e8a680cf8ecd81b87a84a..26283fefdf5fd5f86719fb954c02ae3b2492cd30 100644 (file)
 #ifndef KFD_IOCTL_H_INCLUDED
 #define KFD_IOCTL_H_INCLUDED
 
-#include <linux/types.h>
+#include <drm/drm.h>
 #include <linux/ioctl.h>
 
 #define KFD_IOCTL_MAJOR_VERSION 1
 #define KFD_IOCTL_MINOR_VERSION 1
 
 struct kfd_ioctl_get_version_args {
-       uint32_t major_version; /* from KFD */
-       uint32_t minor_version; /* from KFD */
+       __u32 major_version;    /* from KFD */
+       __u32 minor_version;    /* from KFD */
 };
 
 /* For kfd_ioctl_create_queue_args.queue_type. */
@@ -43,36 +43,36 @@ struct kfd_ioctl_get_version_args {
 #define KFD_MAX_QUEUE_PRIORITY         15
 
 struct kfd_ioctl_create_queue_args {
-       uint64_t ring_base_address;     /* to KFD */
-       uint64_t write_pointer_address; /* from KFD */
-       uint64_t read_pointer_address;  /* from KFD */
-       uint64_t doorbell_offset;       /* from KFD */
-
-       uint32_t ring_size;             /* to KFD */
-       uint32_t gpu_id;                /* to KFD */
-       uint32_t queue_type;            /* to KFD */
-       uint32_t queue_percentage;      /* to KFD */
-       uint32_t queue_priority;        /* to KFD */
-       uint32_t queue_id;              /* from KFD */
-
-       uint64_t eop_buffer_address;    /* to KFD */
-       uint64_t eop_buffer_size;       /* to KFD */
-       uint64_t ctx_save_restore_address; /* to KFD */
-       uint64_t ctx_save_restore_size; /* to KFD */
+       __u64 ring_base_address;        /* to KFD */
+       __u64 write_pointer_address;    /* from KFD */
+       __u64 read_pointer_address;     /* from KFD */
+       __u64 doorbell_offset;  /* from KFD */
+
+       __u32 ring_size;                /* to KFD */
+       __u32 gpu_id;           /* to KFD */
+       __u32 queue_type;               /* to KFD */
+       __u32 queue_percentage; /* to KFD */
+       __u32 queue_priority;   /* to KFD */
+       __u32 queue_id;         /* from KFD */
+
+       __u64 eop_buffer_address;       /* to KFD */
+       __u64 eop_buffer_size;  /* to KFD */
+       __u64 ctx_save_restore_address; /* to KFD */
+       __u64 ctx_save_restore_size;    /* to KFD */
 };
 
 struct kfd_ioctl_destroy_queue_args {
-       uint32_t queue_id;              /* to KFD */
-       uint32_t pad;
+       __u32 queue_id;         /* to KFD */
+       __u32 pad;
 };
 
 struct kfd_ioctl_update_queue_args {
-       uint64_t ring_base_address;     /* to KFD */
+       __u64 ring_base_address;        /* to KFD */
 
-       uint32_t queue_id;              /* to KFD */
-       uint32_t ring_size;             /* to KFD */
-       uint32_t queue_percentage;      /* to KFD */
-       uint32_t queue_priority;        /* to KFD */
+       __u32 queue_id;         /* to KFD */
+       __u32 ring_size;                /* to KFD */
+       __u32 queue_percentage; /* to KFD */
+       __u32 queue_priority;   /* to KFD */
 };
 
 /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
@@ -80,13 +80,13 @@ struct kfd_ioctl_update_queue_args {
 #define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
 
 struct kfd_ioctl_set_memory_policy_args {
-       uint64_t alternate_aperture_base;       /* to KFD */
-       uint64_t alternate_aperture_size;       /* to KFD */
+       __u64 alternate_aperture_base;  /* to KFD */
+       __u64 alternate_aperture_size;  /* to KFD */
 
-       uint32_t gpu_id;                        /* to KFD */
-       uint32_t default_policy;                /* to KFD */
-       uint32_t alternate_policy;              /* to KFD */
-       uint32_t pad;
+       __u32 gpu_id;                   /* to KFD */
+       __u32 default_policy;           /* to KFD */
+       __u32 alternate_policy;         /* to KFD */
+       __u32 pad;
 };
 
 /*
@@ -97,26 +97,26 @@ struct kfd_ioctl_set_memory_policy_args {
  */
 
 struct kfd_ioctl_get_clock_counters_args {
-       uint64_t gpu_clock_counter;     /* from KFD */
-       uint64_t cpu_clock_counter;     /* from KFD */
-       uint64_t system_clock_counter;  /* from KFD */
-       uint64_t system_clock_freq;     /* from KFD */
+       __u64 gpu_clock_counter;        /* from KFD */
+       __u64 cpu_clock_counter;        /* from KFD */
+       __u64 system_clock_counter;     /* from KFD */
+       __u64 system_clock_freq;        /* from KFD */
 
-       uint32_t gpu_id;                /* to KFD */
-       uint32_t pad;
+       __u32 gpu_id;           /* to KFD */
+       __u32 pad;
 };
 
 #define NUM_OF_SUPPORTED_GPUS 7
 
 struct kfd_process_device_apertures {
-       uint64_t lds_base;              /* from KFD */
-       uint64_t lds_limit;             /* from KFD */
-       uint64_t scratch_base;          /* from KFD */
-       uint64_t scratch_limit;         /* from KFD */
-       uint64_t gpuvm_base;            /* from KFD */
-       uint64_t gpuvm_limit;           /* from KFD */
-       uint32_t gpu_id;                /* from KFD */
-       uint32_t pad;
+       __u64 lds_base;         /* from KFD */
+       __u64 lds_limit;                /* from KFD */
+       __u64 scratch_base;             /* from KFD */
+       __u64 scratch_limit;            /* from KFD */
+       __u64 gpuvm_base;               /* from KFD */
+       __u64 gpuvm_limit;              /* from KFD */
+       __u32 gpu_id;           /* from KFD */
+       __u32 pad;
 };
 
 struct kfd_ioctl_get_process_apertures_args {
@@ -124,8 +124,8 @@ struct kfd_ioctl_get_process_apertures_args {
                        process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */
 
        /* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
-       uint32_t num_of_nodes;
-       uint32_t pad;
+       __u32 num_of_nodes;
+       __u32 pad;
 };
 
 #define MAX_ALLOWED_NUM_POINTS    100
@@ -133,25 +133,25 @@ struct kfd_ioctl_get_process_apertures_args {
 #define MAX_ALLOWED_WAC_BUFF_SIZE  128
 
 struct kfd_ioctl_dbg_register_args {
-       uint32_t gpu_id;                /* to KFD */
-       uint32_t pad;
+       __u32 gpu_id;           /* to KFD */
+       __u32 pad;
 };
 
 struct kfd_ioctl_dbg_unregister_args {
-       uint32_t gpu_id;                /* to KFD */
-       uint32_t pad;
+       __u32 gpu_id;           /* to KFD */
+       __u32 pad;
 };
 
 struct kfd_ioctl_dbg_address_watch_args {
-       uint64_t content_ptr;           /* a pointer to the actual content */
-       uint32_t gpu_id;                /* to KFD */
-       uint32_t buf_size_in_bytes;     /*including gpu_id and buf_size */
+       __u64 content_ptr;              /* a pointer to the actual content */
+       __u32 gpu_id;           /* to KFD */
+       __u32 buf_size_in_bytes;        /*including gpu_id and buf_size */
 };
 
 struct kfd_ioctl_dbg_wave_control_args {
-       uint64_t content_ptr;           /* a pointer to the actual content */
-       uint32_t gpu_id;                /* to KFD */
-       uint32_t buf_size_in_bytes;     /*including gpu_id and buf_size */
+       __u64 content_ptr;              /* a pointer to the actual content */
+       __u32 gpu_id;           /* to KFD */
+       __u32 buf_size_in_bytes;        /*including gpu_id and buf_size */
 };
 
 /* Matching HSA_EVENTTYPE */
@@ -172,44 +172,44 @@ struct kfd_ioctl_dbg_wave_control_args {
 #define KFD_SIGNAL_EVENT_LIMIT                 256
 
 struct kfd_ioctl_create_event_args {
-       uint64_t event_page_offset;     /* from KFD */
-       uint32_t event_trigger_data;    /* from KFD - signal events only */
-       uint32_t event_type;            /* to KFD */
-       uint32_t auto_reset;            /* to KFD */
-       uint32_t node_id;               /* to KFD - only valid for certain
+       __u64 event_page_offset;        /* from KFD */
+       __u32 event_trigger_data;       /* from KFD - signal events only */
+       __u32 event_type;               /* to KFD */
+       __u32 auto_reset;               /* to KFD */
+       __u32 node_id;          /* to KFD - only valid for certain
                                                        event types */
-       uint32_t event_id;              /* from KFD */
-       uint32_t event_slot_index;      /* from KFD */
+       __u32 event_id;         /* from KFD */
+       __u32 event_slot_index; /* from KFD */
 };
 
 struct kfd_ioctl_destroy_event_args {
-       uint32_t event_id;              /* to KFD */
-       uint32_t pad;
+       __u32 event_id;         /* to KFD */
+       __u32 pad;
 };
 
 struct kfd_ioctl_set_event_args {
-       uint32_t event_id;              /* to KFD */
-       uint32_t pad;
+       __u32 event_id;         /* to KFD */
+       __u32 pad;
 };
 
 struct kfd_ioctl_reset_event_args {
-       uint32_t event_id;              /* to KFD */
-       uint32_t pad;
+       __u32 event_id;         /* to KFD */
+       __u32 pad;
 };
 
 struct kfd_memory_exception_failure {
-       uint32_t NotPresent;    /* Page not present or supervisor privilege */
-       uint32_t ReadOnly;      /* Write access to a read-only page */
-       uint32_t NoExecute;     /* Execute access to a page marked NX */
-       uint32_t pad;
+       __u32 NotPresent;       /* Page not present or supervisor privilege */
+       __u32 ReadOnly; /* Write access to a read-only page */
+       __u32 NoExecute;        /* Execute access to a page marked NX */
+       __u32 pad;
 };
 
 /* memory exception data*/
 struct kfd_hsa_memory_exception_data {
        struct kfd_memory_exception_failure failure;
-       uint64_t va;
-       uint32_t gpu_id;
-       uint32_t pad;
+       __u64 va;
+       __u32 gpu_id;
+       __u32 pad;
 };
 
 /* Event data*/
@@ -217,19 +217,19 @@ struct kfd_event_data {
        union {
                struct kfd_hsa_memory_exception_data memory_exception_data;
        };                              /* From KFD */
-       uint64_t kfd_event_data_ext;    /* pointer to an extension structure
+       __u64 kfd_event_data_ext;       /* pointer to an extension structure
                                           for future exception types */
-       uint32_t event_id;              /* to KFD */
-       uint32_t pad;
+       __u32 event_id;         /* to KFD */
+       __u32 pad;
 };
 
 struct kfd_ioctl_wait_events_args {
-       uint64_t events_ptr;            /* pointed to struct
+       __u64 events_ptr;               /* pointed to struct
                                           kfd_event_data array, to KFD */
-       uint32_t num_events;            /* to KFD */
-       uint32_t wait_for_all;          /* to KFD */
-       uint32_t timeout;               /* to KFD */
-       uint32_t wait_result;           /* from KFD */
+       __u32 num_events;               /* to KFD */
+       __u32 wait_for_all;             /* to KFD */
+       __u32 timeout;          /* to KFD */
+       __u32 wait_result;              /* from KFD */
 };
 
 struct kfd_ioctl_set_scratch_backing_va_args {
index 6d47b3249d8ad84a2fcf2b38585338ccbf05a99f..4e01ad7ffe9831c63f90a46d40445e467365beec 100644 (file)
  *                          (non-running threads are de facto in such a
  *                          state). This only covers threads from the
  *                          same processes as the caller thread. This
- *                          command returns 0. The "expedited" commands
- *                          complete faster than the non-expedited ones,
- *                          they never block, but have the downside of
- *                          causing extra overhead.
+ *                          command returns 0 on success. The
+ *                          "expedited" commands complete faster than
+ *                          the non-expedited ones, they never block,
+ *                          but have the downside of causing extra
+ *                          overhead. A process needs to register its
+ *                          intent to use the private expedited command
+ *                          prior to using it, otherwise this command
+ *                          returns -EPERM.
+ * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
+ *                          Register the process intent to use
+ *                          MEMBARRIER_CMD_PRIVATE_EXPEDITED. Always
+ *                          returns 0.
  *
  * Command to be passed to the membarrier system call. The commands need to
  * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
  * the value 0.
  */
 enum membarrier_cmd {
-       MEMBARRIER_CMD_QUERY                    = 0,
-       MEMBARRIER_CMD_SHARED                   = (1 << 0),
+       MEMBARRIER_CMD_QUERY                            = 0,
+       MEMBARRIER_CMD_SHARED                           = (1 << 0),
        /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
        /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
-       MEMBARRIER_CMD_PRIVATE_EXPEDITED        = (1 << 3),
+       MEMBARRIER_CMD_PRIVATE_EXPEDITED                = (1 << 3),
+       MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED       = (1 << 4),
 };
 
 #endif /* _UAPI_LINUX_MEMBARRIER_H */
index b97725af2ac0ef14893ec459701a2d1ae4ac76a4..da161b56c79e443afacce238d4a120b373673db9 100644 (file)
@@ -23,6 +23,7 @@ enum xt_bpf_modes {
        XT_BPF_MODE_FD_PINNED,
        XT_BPF_MODE_FD_ELF,
 };
+#define XT_BPF_MODE_PATH_PINNED XT_BPF_MODE_FD_PINNED
 
 struct xt_bpf_info_v1 {
        __u16 mode;
index 6217ff8500a1d818fd1002fbd6f81c0c11974665..84fc2914b7fbf94616f5af46754c494af055e97c 100644 (file)
@@ -376,7 +376,7 @@ struct sctp_remote_error {
        __u16 sre_type;
        __u16 sre_flags;
        __u32 sre_length;
-       __u16 sre_error;
+       __be16 sre_error;
        sctp_assoc_t sre_assoc_id;
        __u8 sre_data[0];
 };
index 0f238a43ff1e7e5ebd8bd746a055fa910c10b51d..f6bc1dea32479f080de8f0d380b4cfaf2a547aae 100644 (file)
 #define SECCOMP_MODE_FILTER    2 /* uses user-supplied filter. */
 
 /* Valid operations for seccomp syscall. */
-#define SECCOMP_SET_MODE_STRICT        0
-#define SECCOMP_SET_MODE_FILTER        1
+#define SECCOMP_SET_MODE_STRICT                0
+#define SECCOMP_SET_MODE_FILTER                1
+#define SECCOMP_GET_ACTION_AVAIL       2
 
 /* Valid flags for SECCOMP_SET_MODE_FILTER */
 #define SECCOMP_FILTER_FLAG_TSYNC      1
+#define SECCOMP_FILTER_FLAG_LOG                2
 
 /*
  * All BPF programs must return a 32-bit value.
  * The bottom 16-bits are for optional return data.
- * The upper 16-bits are ordered from least permissive values to most.
+ * The upper 16-bits are ordered from least permissive values to most,
+ * as a signed value (so 0x8000000 is negative).
  *
  * The ordering ensures that a min_t() over composed return values always
  * selects the least permissive choice.
  */
-#define SECCOMP_RET_KILL       0x00000000U /* kill the task immediately */
-#define SECCOMP_RET_TRAP       0x00030000U /* disallow and force a SIGSYS */
-#define SECCOMP_RET_ERRNO      0x00050000U /* returns an errno */
-#define SECCOMP_RET_TRACE      0x7ff00000U /* pass to a tracer or disallow */
-#define SECCOMP_RET_ALLOW      0x7fff0000U /* allow */
+#define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */
+#define SECCOMP_RET_KILL_THREAD         0x00000000U /* kill the thread */
+#define SECCOMP_RET_KILL        SECCOMP_RET_KILL_THREAD
+#define SECCOMP_RET_TRAP        0x00030000U /* disallow and force a SIGSYS */
+#define SECCOMP_RET_ERRNO       0x00050000U /* returns an errno */
+#define SECCOMP_RET_TRACE       0x7ff00000U /* pass to a tracer or disallow */
+#define SECCOMP_RET_LOG                 0x7ffc0000U /* allow after logging */
+#define SECCOMP_RET_ALLOW       0x7fff0000U /* allow */
 
 /* Masks for the return value sections. */
+#define SECCOMP_RET_ACTION_FULL        0xffff0000U
 #define SECCOMP_RET_ACTION     0x7fff0000U
 #define SECCOMP_RET_DATA       0x0000ffffU
 
index dd5f21e758057f0f3ac98ee0f04823aaffeaa53f..856de39d0b8900c49bb1097c7844e016b24312e3 100644 (file)
@@ -23,6 +23,7 @@
 #define SPIDEV_H
 
 #include <linux/types.h>
+#include <linux/ioctl.h>
 
 /* User space versions of kernel symbols for SPI clocking modes,
  * matching <linux/spi/spi.h>
index ce1169af39d72c58663e08f4ff7e0c175f467507..2a5d63040a0b0c7a713b9dcffa8dfd3cc7ac382f 100644 (file)
@@ -780,6 +780,7 @@ struct usb_interface_assoc_descriptor {
        __u8  iFunction;
 } __attribute__ ((packed));
 
+#define USB_DT_INTERFACE_ASSOCIATION_SIZE      8
 
 /*-------------------------------------------------------------------------*/
 
index 9a0b6479fe0c626a05cc86da382181c81b1db73f..d4e0b53bfc75ccdf03585c7b402389a21aba4dca 100644 (file)
@@ -261,7 +261,7 @@ struct ib_uverbs_ex_query_device_resp {
        struct ib_uverbs_rss_caps rss_caps;
        __u32  max_wq_type_rq;
        __u32 raw_packet_caps;
-       struct ib_uverbs_tm_caps xrq_caps;
+       struct ib_uverbs_tm_caps tm_caps;
 };
 
 struct ib_uverbs_query_port {
index 415dbc6e43fd7ac6c256e2b30b704567ab5966d2..6adc2a955340627c590e1e06e8ab146b3c058f12 100644 (file)
@@ -84,16 +84,6 @@ static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
        BUG();
 }
 
-/* TODO: this shouldn't be here but it is because the frontend drivers
- * are using it (its rolled in headers) even though we won't hit the code path.
- * So for right now just punt with this.
- */
-static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
-{
-       BUG();
-       return NULL;
-}
-
 extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                                   struct gnttab_map_grant_ref *kmap_ops,
                                   struct page **pages, unsigned int count);
index 78cb2461012ef530f2755a0a6a8458428ed19043..3c1faaa2af4aad761c427a092c929aeb1acb05f6 100644 (file)
@@ -1033,7 +1033,7 @@ endif
 
 choice
        prompt "Compiler optimization level"
-       default CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
+       default CC_OPTIMIZE_FOR_PERFORMANCE
 
 config CC_OPTIMIZE_FOR_PERFORMANCE
        bool "Optimize for performance"
index 1b3adfe3c60e259e7366aa76a23b14e7c1a173b5..badac463e2c8ea7a035c73acd385d4d58211e6ac 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1154,7 +1154,7 @@ static int put_compat_shm_info(struct shm_info *ip,
        info.shm_swp = ip->shm_swp;
        info.swap_attempts = ip->swap_attempts;
        info.swap_successes = ip->swap_successes;
-       return copy_to_user(up, &info, sizeof(info));
+       return copy_to_user(uip, &info, sizeof(info));
 }
 
 static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
@@ -1237,7 +1237,7 @@ COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
                err = shmctl_stat(ns, shmid, cmd, &sem64);
                if (err < 0)
                        return err;
-               if (copy_compat_shmid_to_user(&sem64, uptr, version))
+               if (copy_compat_shmid_to_user(uptr, &sem64, version))
                        err = -EFAULT;
                return err;
 
index 98c0f00c3f5e05007287de1a1636c4e6722beed7..e2636737b69bd8bdd1a690e5453616effb9800d5 100644 (file)
@@ -98,7 +98,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
        array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
 
        if (array_size >= U32_MAX - PAGE_SIZE ||
-           elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
+           bpf_array_alloc_percpu(array)) {
                bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }
index 917cc04a0a94083d46f0a927e18f0228bf356c94..7b62df86be1d7e56044f947985b6c67ee5746c0d 100644 (file)
@@ -1022,7 +1022,7 @@ select_insn:
                struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
                struct bpf_array *array = container_of(map, struct bpf_array, map);
                struct bpf_prog *prog;
-               u64 index = BPF_R3;
+               u32 index = BPF_R3;
 
                if (unlikely(index >= array->map.max_entries))
                        goto out;
index 959c9a07f318b14f6ab4719973f6010de061aadc..e745d6a88224f5b5e9a8241dc7dee5d35e4832de 100644 (file)
@@ -69,14 +69,17 @@ static LIST_HEAD(dev_map_list);
 
 static u64 dev_map_bitmap_size(const union bpf_attr *attr)
 {
-       return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long);
+       return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
 }
 
 static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 {
        struct bpf_dtab *dtab;
+       int err = -EINVAL;
        u64 cost;
-       int err;
+
+       if (!capable(CAP_NET_ADMIN))
+               return ERR_PTR(-EPERM);
 
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -108,9 +111,12 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
        if (err)
                goto free_dtab;
 
+       err = -ENOMEM;
+
        /* A per cpu bitfield with a bit per possible net device */
-       dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr),
-                                           __alignof__(unsigned long));
+       dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
+                                               __alignof__(unsigned long),
+                                               GFP_KERNEL | __GFP_NOWARN);
        if (!dtab->flush_needed)
                goto free_dtab;
 
@@ -128,7 +134,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 free_dtab:
        free_percpu(dtab->flush_needed);
        kfree(dtab);
-       return ERR_PTR(-ENOMEM);
+       return ERR_PTR(err);
 }
 
 static void dev_map_free(struct bpf_map *map)
index 431126f31ea3c90648366295e2b77fd3bb79b6e7..6533f08d1238e136895a5cf0665be31d7b23df51 100644 (file)
@@ -317,10 +317,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                 */
                goto free_htab;
 
-       if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
-               /* make sure the size for pcpu_alloc() is reasonable */
-               goto free_htab;
-
        htab->elem_size = sizeof(struct htab_elem) +
                          round_up(htab->map.key_size, 8);
        if (percpu)
index e833ed91435832dc4d822ad4ed1c4a6a8a5893cf..be1dde967208eb19d1f92872275df5fdce437024 100644 (file)
@@ -363,6 +363,7 @@ out:
        putname(pname);
        return ret;
 }
+EXPORT_SYMBOL_GPL(bpf_obj_get_user);
 
 static void bpf_evict_inode(struct inode *inode)
 {
index 6424ce0e49698abee1da6242a0a8494d8ba0f03f..66f00a2b27f44af12362ae3c8df2fbca4d82edce 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/workqueue.h>
 #include <linux/list.h>
 #include <net/strparser.h>
+#include <net/tcp.h>
 
 struct bpf_stab {
        struct bpf_map map;
@@ -92,6 +93,14 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
        return rcu_dereference_sk_user_data(sk);
 }
 
+/* compute the linear packet data range [data, data_end) for skb when
+ * sk_skb type programs are in use.
+ */
+static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
+{
+       TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
+}
+
 static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
 {
        struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
@@ -101,12 +110,20 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
                return SK_DROP;
 
        skb_orphan(skb);
+       /* We need to ensure that BPF metadata for maps is also cleared
+        * when we orphan the skb so that we don't have the possibility
+        * to reference a stale map.
+        */
+       TCP_SKB_CB(skb)->bpf.map = NULL;
        skb->sk = psock->sock;
-       bpf_compute_data_end(skb);
+       bpf_compute_data_end_sk_skb(skb);
+       preempt_disable();
        rc = (*prog->bpf_func)(skb, prog->insnsi);
+       preempt_enable();
        skb->sk = NULL;
 
-       return rc;
+       return rc == SK_PASS ?
+               (TCP_SKB_CB(skb)->bpf.map ? SK_REDIRECT : SK_PASS) : SK_DROP;
 }
 
 static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
@@ -114,17 +131,10 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
        struct sock *sk;
        int rc;
 
-       /* Because we use per cpu values to feed input from sock redirect
-        * in BPF program to do_sk_redirect_map() call we need to ensure we
-        * are not preempted. RCU read lock is not sufficient in this case
-        * with CONFIG_PREEMPT_RCU enabled so we must be explicit here.
-        */
-       preempt_disable();
        rc = smap_verdict_func(psock, skb);
        switch (rc) {
        case SK_REDIRECT:
-               sk = do_sk_redirect_map();
-               preempt_enable();
+               sk = do_sk_redirect_map(skb);
                if (likely(sk)) {
                        struct smap_psock *peer = smap_psock_sk(sk);
 
@@ -141,8 +151,6 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
        /* Fall through and free skb otherwise */
        case SK_DROP:
        default:
-               if (rc != SK_REDIRECT)
-                       preempt_enable();
                kfree_skb(skb);
        }
 }
@@ -369,7 +377,7 @@ static int smap_parse_func_strparser(struct strparser *strp,
         * any socket yet.
         */
        skb->sk = psock->sock;
-       bpf_compute_data_end(skb);
+       bpf_compute_data_end_sk_skb(skb);
        rc = (*prog->bpf_func)(skb, prog->insnsi);
        skb->sk = NULL;
        rcu_read_unlock();
@@ -487,6 +495,9 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
        int err = -EINVAL;
        u64 cost;
 
+       if (!capable(CAP_NET_ADMIN))
+               return ERR_PTR(-EPERM);
+
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
@@ -840,6 +851,12 @@ static int sock_map_update_elem(struct bpf_map *map,
                return -EINVAL;
        }
 
+       if (skops.sk->sk_type != SOCK_STREAM ||
+           skops.sk->sk_protocol != IPPROTO_TCP) {
+               fput(socket->file);
+               return -EOPNOTSUPP;
+       }
+
        err = sock_map_ctx_update_elem(&skops, map, key, flags);
        fput(socket->file);
        return err;
index cb17e1cd1d434dc2e052a2a9fb0aea967fcf4417..25d074920a009ff682d97bf88e68f466c79bd564 100644 (file)
@@ -186,15 +186,17 @@ static int bpf_map_alloc_id(struct bpf_map *map)
 
 static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
 {
+       unsigned long flags;
+
        if (do_idr_lock)
-               spin_lock_bh(&map_idr_lock);
+               spin_lock_irqsave(&map_idr_lock, flags);
        else
                __acquire(&map_idr_lock);
 
        idr_remove(&map_idr, map->id);
 
        if (do_idr_lock)
-               spin_unlock_bh(&map_idr_lock);
+               spin_unlock_irqrestore(&map_idr_lock, flags);
        else
                __release(&map_idr_lock);
 }
index 799b2451ef2df42b17aadfe679a09dee9a0848c9..c48ca2a34b5e131420f4795c4a0eaf9d9a64861d 100644 (file)
@@ -653,6 +653,10 @@ static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
 {
        struct bpf_verifier_state *parent = state->parent;
 
+       if (regno == BPF_REG_FP)
+               /* We don't need to worry about FP liveness because it's read-only */
+               return;
+
        while (parent) {
                /* if read wasn't screened by an earlier write ... */
                if (state->regs[regno].live & REG_LIVE_WRITTEN)
@@ -1112,7 +1116,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
                /* ctx accesses must be at a fixed offset, so that we can
                 * determine what type of data were returned.
                 */
-               if (!tnum_is_const(reg->var_off)) {
+               if (reg->off) {
+                       verbose("dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
+                               regno, reg->off, off - reg->off);
+                       return -EACCES;
+               }
+               if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
                        char tn_buf[48];
 
                        tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
@@ -1120,7 +1129,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
                                tn_buf, off, size);
                        return -EACCES;
                }
-               off += reg->var_off.value;
                err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
                if (!err && t == BPF_READ && value_regno >= 0) {
                        /* ctx access returns either a scalar, or a
@@ -2345,6 +2353,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                 * copy register state to dest reg
                                 */
                                regs[insn->dst_reg] = regs[insn->src_reg];
+                               regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
                        } else {
                                /* R1 = (u32) R2 */
                                if (is_pointer_value(env, insn->src_reg)) {
@@ -2421,12 +2430,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 }
 
 static void find_good_pkt_pointers(struct bpf_verifier_state *state,
-                                  struct bpf_reg_state *dst_reg)
+                                  struct bpf_reg_state *dst_reg,
+                                  bool range_right_open)
 {
        struct bpf_reg_state *regs = state->regs, *reg;
+       u16 new_range;
        int i;
 
-       if (dst_reg->off < 0)
+       if (dst_reg->off < 0 ||
+           (dst_reg->off == 0 && range_right_open))
                /* This doesn't give us any range */
                return;
 
@@ -2437,9 +2449,13 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
                 */
                return;
 
-       /* LLVM can generate four kind of checks:
+       new_range = dst_reg->off;
+       if (range_right_open)
+               new_range--;
+
+       /* Examples for register markings:
         *
-        * Type 1/2:
+        * pkt_data in dst register:
         *
         *   r2 = r3;
         *   r2 += 8;
@@ -2456,7 +2472,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
         *     r2=pkt(id=n,off=8,r=0)
         *     r3=pkt(id=n,off=0,r=0)
         *
-        * Type 3/4:
+        * pkt_data in src register:
         *
         *   r2 = r3;
         *   r2 += 8;
@@ -2474,7 +2490,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
         *     r3=pkt(id=n,off=0,r=0)
         *
         * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
-        * so that range of bytes [r3, r3 + 8) is safe to access.
+        * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
+        * and [r3, r3 + 8-1) respectively is safe to access depending on
+        * the check.
         */
 
        /* If our ids match, then we must have the same max_value.  And we
@@ -2485,14 +2503,14 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
        for (i = 0; i < MAX_BPF_REG; i++)
                if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
                        /* keep the maximum range already checked */
-                       regs[i].range = max_t(u16, regs[i].range, dst_reg->off);
+                       regs[i].range = max(regs[i].range, new_range);
 
        for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
                if (state->stack_slot_type[i] != STACK_SPILL)
                        continue;
                reg = &state->spilled_regs[i / BPF_REG_SIZE];
                if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
-                       reg->range = max_t(u16, reg->range, dst_reg->off);
+                       reg->range = max(reg->range, new_range);
        }
 }
 
@@ -2856,19 +2874,43 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
        } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
                   dst_reg->type == PTR_TO_PACKET &&
                   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
-               find_good_pkt_pointers(this_branch, dst_reg);
+               /* pkt_data' > pkt_end */
+               find_good_pkt_pointers(this_branch, dst_reg, false);
+       } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
+                  dst_reg->type == PTR_TO_PACKET_END &&
+                  regs[insn->src_reg].type == PTR_TO_PACKET) {
+               /* pkt_end > pkt_data' */
+               find_good_pkt_pointers(other_branch, &regs[insn->src_reg], true);
+       } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
+                  dst_reg->type == PTR_TO_PACKET &&
+                  regs[insn->src_reg].type == PTR_TO_PACKET_END) {
+               /* pkt_data' < pkt_end */
+               find_good_pkt_pointers(other_branch, dst_reg, true);
        } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
+                  dst_reg->type == PTR_TO_PACKET_END &&
+                  regs[insn->src_reg].type == PTR_TO_PACKET) {
+               /* pkt_end < pkt_data' */
+               find_good_pkt_pointers(this_branch, &regs[insn->src_reg], false);
+       } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
                   dst_reg->type == PTR_TO_PACKET &&
                   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
-               find_good_pkt_pointers(other_branch, dst_reg);
+               /* pkt_data' >= pkt_end */
+               find_good_pkt_pointers(this_branch, dst_reg, true);
        } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
                   dst_reg->type == PTR_TO_PACKET_END &&
                   regs[insn->src_reg].type == PTR_TO_PACKET) {
-               find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);
+               /* pkt_end >= pkt_data' */
+               find_good_pkt_pointers(other_branch, &regs[insn->src_reg], false);
+       } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
+                  dst_reg->type == PTR_TO_PACKET &&
+                  regs[insn->src_reg].type == PTR_TO_PACKET_END) {
+               /* pkt_data' <= pkt_end */
+               find_good_pkt_pointers(other_branch, dst_reg, false);
        } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
                   dst_reg->type == PTR_TO_PACKET_END &&
                   regs[insn->src_reg].type == PTR_TO_PACKET) {
-               find_good_pkt_pointers(this_branch, &regs[insn->src_reg]);
+               /* pkt_end <= pkt_data' */
+               find_good_pkt_pointers(this_branch, &regs[insn->src_reg], true);
        } else if (is_pointer_value(env, insn->dst_reg)) {
                verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
                return -EACCES;
@@ -4205,7 +4247,12 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                }
 
                if (insn->imm == BPF_FUNC_redirect_map) {
-                       u64 addr = (unsigned long)prog;
+                       /* Note, we cannot use prog directly as imm as subsequent
+                        * rewrites would still change the prog pointer. The only
+                        * stable address we can use is aux, which also works with
+                        * prog clones during blinding.
+                        */
+                       u64 addr = (unsigned long)prog->aux;
                        struct bpf_insn r4_ld[] = {
                                BPF_LD_IMM64(BPF_REG_4, addr),
                                *insn,
index d6551cd452380b6c1f398e2410e47d83fb208b53..44857278eb8aa6a2bbf27b7eb12137ef42628170 100644 (file)
@@ -2311,6 +2311,14 @@ out_release_tset:
                list_del_init(&cset->mg_node);
        }
        spin_unlock_irq(&css_set_lock);
+
+       /*
+        * Re-initialize the cgroup_taskset structure in case it is reused
+        * again in another cgroup_migrate_add_task()/cgroup_migrate_execute()
+        * iteration.
+        */
+       tset->nr_tasks = 0;
+       tset->csets    = &tset->src_csets;
        return ret;
 }
 
index acf5308fad51f17706197ca6550ce7ae3c8b5378..04892a82f6ac36c92324806b66a1c1855880c8f7 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/lockdep.h>
 #include <linux/tick.h>
 #include <linux/irq.h>
+#include <linux/nmi.h>
 #include <linux/smpboot.h>
 #include <linux/relay.h>
 #include <linux/slab.h>
  * @bringup:   Single callback bringup or teardown selector
  * @cb_state:  The state for a single callback (install/uninstall)
  * @result:    Result of the operation
- * @done:      Signal completion to the issuer of the task
+ * @done_up:   Signal completion to the issuer of the task for cpu-up
+ * @done_down: Signal completion to the issuer of the task for cpu-down
  */
 struct cpuhp_cpu_state {
        enum cpuhp_state        state;
        enum cpuhp_state        target;
+       enum cpuhp_state        fail;
 #ifdef CONFIG_SMP
        struct task_struct      *thread;
        bool                    should_run;
@@ -58,18 +61,39 @@ struct cpuhp_cpu_state {
        bool                    single;
        bool                    bringup;
        struct hlist_node       *node;
+       struct hlist_node       *last;
        enum cpuhp_state        cb_state;
        int                     result;
-       struct completion       done;
+       struct completion       done_up;
+       struct completion       done_down;
 #endif
 };
 
-static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
+static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
+       .fail = CPUHP_INVALID,
+};
 
 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
-static struct lock_class_key cpuhp_state_key;
-static struct lockdep_map cpuhp_state_lock_map =
-       STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
+static struct lockdep_map cpuhp_state_up_map =
+       STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
+static struct lockdep_map cpuhp_state_down_map =
+       STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
+
+
+static void inline cpuhp_lock_acquire(bool bringup)
+{
+       lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
+}
+
+static void inline cpuhp_lock_release(bool bringup)
+{
+       lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
+}
+#else
+
+static void inline cpuhp_lock_acquire(bool bringup) { }
+static void inline cpuhp_lock_release(bool bringup) { }
+
 #endif
 
 /**
@@ -123,13 +147,16 @@ static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
 /**
  * cpuhp_invoke_callback _ Invoke the callbacks for a given state
  * @cpu:       The cpu for which the callback should be invoked
- * @step:      The step in the state machine
+ * @state:     The state to do callbacks for
  * @bringup:   True if the bringup callback should be invoked
+ * @node:      For multi-instance, do a single entry callback for install/remove
+ * @lastp:     For multi-instance rollback, remember how far we got
  *
  * Called from cpu hotplug and from the state register machinery.
  */
 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
-                                bool bringup, struct hlist_node *node)
+                                bool bringup, struct hlist_node *node,
+                                struct hlist_node **lastp)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        struct cpuhp_step *step = cpuhp_get_step(state);
@@ -137,7 +164,17 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
        int (*cb)(unsigned int cpu);
        int ret, cnt;
 
+       if (st->fail == state) {
+               st->fail = CPUHP_INVALID;
+
+               if (!(bringup ? step->startup.single : step->teardown.single))
+                       return 0;
+
+               return -EAGAIN;
+       }
+
        if (!step->multi_instance) {
+               WARN_ON_ONCE(lastp && *lastp);
                cb = bringup ? step->startup.single : step->teardown.single;
                if (!cb)
                        return 0;
@@ -152,6 +189,7 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
 
        /* Single invocation for instance add/remove */
        if (node) {
+               WARN_ON_ONCE(lastp && *lastp);
                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
                ret = cbm(cpu, node);
                trace_cpuhp_exit(cpu, st->state, state, ret);
@@ -161,13 +199,23 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
        /* State transition. Invoke on all instances */
        cnt = 0;
        hlist_for_each(node, &step->list) {
+               if (lastp && node == *lastp)
+                       break;
+
                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
                ret = cbm(cpu, node);
                trace_cpuhp_exit(cpu, st->state, state, ret);
-               if (ret)
-                       goto err;
+               if (ret) {
+                       if (!lastp)
+                               goto err;
+
+                       *lastp = node;
+                       return ret;
+               }
                cnt++;
        }
+       if (lastp)
+               *lastp = NULL;
        return 0;
 err:
        /* Rollback the instances if one failed */
@@ -178,12 +226,39 @@ err:
        hlist_for_each(node, &step->list) {
                if (!cnt--)
                        break;
-               cbm(cpu, node);
+
+               trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
+               ret = cbm(cpu, node);
+               trace_cpuhp_exit(cpu, st->state, state, ret);
+               /*
+                * Rollback must not fail,
+                */
+               WARN_ON_ONCE(ret);
        }
        return ret;
 }
 
 #ifdef CONFIG_SMP
+static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
+{
+       struct completion *done = bringup ? &st->done_up : &st->done_down;
+       wait_for_completion(done);
+}
+
+static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
+{
+       struct completion *done = bringup ? &st->done_up : &st->done_down;
+       complete(done);
+}
+
+/*
+ * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
+ */
+static bool cpuhp_is_atomic_state(enum cpuhp_state state)
+{
+       return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
+}
+
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
 static DEFINE_MUTEX(cpu_add_remove_lock);
 bool cpuhp_tasks_frozen;
@@ -271,14 +346,79 @@ void cpu_hotplug_enable(void)
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
+static inline enum cpuhp_state
+cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
+{
+       enum cpuhp_state prev_state = st->state;
+
+       st->rollback = false;
+       st->last = NULL;
+
+       st->target = target;
+       st->single = false;
+       st->bringup = st->state < target;
+
+       return prev_state;
+}
+
+static inline void
+cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
+{
+       st->rollback = true;
+
+       /*
+        * If we have st->last we need to undo partial multi_instance of this
+        * state first. Otherwise start undo at the previous state.
+        */
+       if (!st->last) {
+               if (st->bringup)
+                       st->state--;
+               else
+                       st->state++;
+       }
+
+       st->target = prev_state;
+       st->bringup = !st->bringup;
+}
+
+/* Regular hotplug invocation of the AP hotplug thread */
+static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
+{
+       if (!st->single && st->state == st->target)
+               return;
+
+       st->result = 0;
+       /*
+        * Make sure the above stores are visible before should_run becomes
+        * true. Paired with the mb() above in cpuhp_thread_fun()
+        */
+       smp_mb();
+       st->should_run = true;
+       wake_up_process(st->thread);
+       wait_for_ap_thread(st, st->bringup);
+}
+
+static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
+{
+       enum cpuhp_state prev_state;
+       int ret;
+
+       prev_state = cpuhp_set_state(st, target);
+       __cpuhp_kick_ap(st);
+       if ((ret = st->result)) {
+               cpuhp_reset_state(st, prev_state);
+               __cpuhp_kick_ap(st);
+       }
+
+       return ret;
+}
 
 static int bringup_wait_for_ap(unsigned int cpu)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 
        /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
-       wait_for_completion(&st->done);
+       wait_for_ap_thread(st, true);
        if (WARN_ON_ONCE((!cpu_online(cpu))))
                return -ECANCELED;
 
@@ -286,12 +426,10 @@ static int bringup_wait_for_ap(unsigned int cpu)
        stop_machine_unpark(cpu);
        kthread_unpark(st->thread);
 
-       /* Should we go further up ? */
-       if (st->target > CPUHP_AP_ONLINE_IDLE) {
-               __cpuhp_kick_ap_work(st);
-               wait_for_completion(&st->done);
-       }
-       return st->result;
+       if (st->target <= CPUHP_AP_ONLINE_IDLE)
+               return 0;
+
+       return cpuhp_kick_ap(st, st->target);
 }
 
 static int bringup_cpu(unsigned int cpu)
@@ -317,32 +455,6 @@ static int bringup_cpu(unsigned int cpu)
 /*
  * Hotplug state machine related functions
  */
-static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
-{
-       for (st->state++; st->state < st->target; st->state++) {
-               struct cpuhp_step *step = cpuhp_get_step(st->state);
-
-               if (!step->skip_onerr)
-                       cpuhp_invoke_callback(cpu, st->state, true, NULL);
-       }
-}
-
-static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
-                               enum cpuhp_state target)
-{
-       enum cpuhp_state prev_state = st->state;
-       int ret = 0;
-
-       for (; st->state > target; st->state--) {
-               ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
-               if (ret) {
-                       st->target = prev_state;
-                       undo_cpu_down(cpu, st);
-                       break;
-               }
-       }
-       return ret;
-}
 
 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
@@ -350,7 +462,7 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
                struct cpuhp_step *step = cpuhp_get_step(st->state);
 
                if (!step->skip_onerr)
-                       cpuhp_invoke_callback(cpu, st->state, false, NULL);
+                       cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
        }
 }
 
@@ -362,7 +474,7 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 
        while (st->state < target) {
                st->state++;
-               ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
+               ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
                if (ret) {
                        st->target = prev_state;
                        undo_cpu_up(cpu, st);
@@ -379,7 +491,8 @@ static void cpuhp_create(unsigned int cpu)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 
-       init_completion(&st->done);
+       init_completion(&st->done_up);
+       init_completion(&st->done_down);
 }
 
 static int cpuhp_should_run(unsigned int cpu)
@@ -389,69 +502,90 @@ static int cpuhp_should_run(unsigned int cpu)
        return st->should_run;
 }
 
-/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
-static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
-{
-       enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
-
-       return cpuhp_down_callbacks(cpu, st, target);
-}
-
-/* Execute the online startup callbacks. Used to be CPU_ONLINE */
-static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
-{
-       return cpuhp_up_callbacks(cpu, st, st->target);
-}
-
 /*
  * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
  * callbacks when a state gets [un]installed at runtime.
+ *
+ * Each invocation of this function by the smpboot thread does a single AP
+ * state callback.
+ *
+ * It has 3 modes of operation:
+ *  - single: runs st->cb_state
+ *  - up:     runs ++st->state, while st->state < st->target
+ *  - down:   runs st->state--, while st->state > st->target
+ *
+ * When complete or on error, should_run is cleared and the completion is fired.
  */
 static void cpuhp_thread_fun(unsigned int cpu)
 {
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
-       int ret = 0;
+       bool bringup = st->bringup;
+       enum cpuhp_state state;
 
        /*
-        * Paired with the mb() in cpuhp_kick_ap_work and
-        * cpuhp_invoke_ap_callback, so the work set is consistent visible.
+        * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
+        * that if we see ->should_run we also see the rest of the state.
         */
        smp_mb();
-       if (!st->should_run)
+
+       if (WARN_ON_ONCE(!st->should_run))
                return;
 
-       st->should_run = false;
+       cpuhp_lock_acquire(bringup);
 
-       lock_map_acquire(&cpuhp_state_lock_map);
-       /* Single callback invocation for [un]install ? */
        if (st->single) {
-               if (st->cb_state < CPUHP_AP_ONLINE) {
-                       local_irq_disable();
-                       ret = cpuhp_invoke_callback(cpu, st->cb_state,
-                                                   st->bringup, st->node);
-                       local_irq_enable();
+               state = st->cb_state;
+               st->should_run = false;
+       } else {
+               if (bringup) {
+                       st->state++;
+                       state = st->state;
+                       st->should_run = (st->state < st->target);
+                       WARN_ON_ONCE(st->state > st->target);
                } else {
-                       ret = cpuhp_invoke_callback(cpu, st->cb_state,
-                                                   st->bringup, st->node);
+                       state = st->state;
+                       st->state--;
+                       st->should_run = (st->state > st->target);
+                       WARN_ON_ONCE(st->state < st->target);
                }
-       } else if (st->rollback) {
-               BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
+       }
+
+       WARN_ON_ONCE(!cpuhp_is_ap_state(state));
 
-               undo_cpu_down(cpu, st);
-               st->rollback = false;
+       if (st->rollback) {
+               struct cpuhp_step *step = cpuhp_get_step(state);
+               if (step->skip_onerr)
+                       goto next;
+       }
+
+       if (cpuhp_is_atomic_state(state)) {
+               local_irq_disable();
+               st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
+               local_irq_enable();
+
+               /*
+                * STARTING/DYING must not fail!
+                */
+               WARN_ON_ONCE(st->result);
        } else {
-               /* Cannot happen .... */
-               BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
-
-               /* Regular hotplug work */
-               if (st->state < st->target)
-                       ret = cpuhp_ap_online(cpu, st);
-               else if (st->state > st->target)
-                       ret = cpuhp_ap_offline(cpu, st);
+               st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
+       }
+
+       if (st->result) {
+               /*
+                * If we fail on a rollback, we're up a creek without no
+                * paddle, no way forward, no way back. We loose, thanks for
+                * playing.
+                */
+               WARN_ON_ONCE(st->rollback);
+               st->should_run = false;
        }
-       lock_map_release(&cpuhp_state_lock_map);
-       st->result = ret;
-       complete(&st->done);
+
+next:
+       cpuhp_lock_release(bringup);
+
+       if (!st->should_run)
+               complete_ap_thread(st, bringup);
 }
 
 /* Invoke a single callback on a remote cpu */
@@ -460,62 +594,69 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
                         struct hlist_node *node)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+       int ret;
 
        if (!cpu_online(cpu))
                return 0;
 
-       lock_map_acquire(&cpuhp_state_lock_map);
-       lock_map_release(&cpuhp_state_lock_map);
+       cpuhp_lock_acquire(false);
+       cpuhp_lock_release(false);
+
+       cpuhp_lock_acquire(true);
+       cpuhp_lock_release(true);
 
        /*
         * If we are up and running, use the hotplug thread. For early calls
         * we invoke the thread function directly.
         */
        if (!st->thread)
-               return cpuhp_invoke_callback(cpu, state, bringup, node);
+               return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
 
+       st->rollback = false;
+       st->last = NULL;
+
+       st->node = node;
+       st->bringup = bringup;
        st->cb_state = state;
        st->single = true;
-       st->bringup = bringup;
-       st->node = node;
+
+       __cpuhp_kick_ap(st);
 
        /*
-        * Make sure the above stores are visible before should_run becomes
-        * true. Paired with the mb() above in cpuhp_thread_fun()
+        * If we failed and did a partial, do a rollback.
         */
-       smp_mb();
-       st->should_run = true;
-       wake_up_process(st->thread);
-       wait_for_completion(&st->done);
-       return st->result;
-}
+       if ((ret = st->result) && st->last) {
+               st->rollback = true;
+               st->bringup = !bringup;
+
+               __cpuhp_kick_ap(st);
+       }
 
-/* Regular hotplug invocation of the AP hotplug thread */
-static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
-{
-       st->result = 0;
-       st->single = false;
        /*
-        * Make sure the above stores are visible before should_run becomes
-        * true. Paired with the mb() above in cpuhp_thread_fun()
+        * Clean up the leftovers so the next hotplug operation wont use stale
+        * data.
         */
-       smp_mb();
-       st->should_run = true;
-       wake_up_process(st->thread);
+       st->node = st->last = NULL;
+       return ret;
 }
 
 static int cpuhp_kick_ap_work(unsigned int cpu)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
-       enum cpuhp_state state = st->state;
+       enum cpuhp_state prev_state = st->state;
+       int ret;
+
+       cpuhp_lock_acquire(false);
+       cpuhp_lock_release(false);
+
+       cpuhp_lock_acquire(true);
+       cpuhp_lock_release(true);
 
-       trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
-       lock_map_acquire(&cpuhp_state_lock_map);
-       lock_map_release(&cpuhp_state_lock_map);
-       __cpuhp_kick_ap_work(st);
-       wait_for_completion(&st->done);
-       trace_cpuhp_exit(cpu, st->state, state, st->result);
-       return st->result;
+       trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
+       ret = cpuhp_kick_ap(st, st->target);
+       trace_cpuhp_exit(cpu, st->state, prev_state, ret);
+
+       return ret;
 }
 
 static struct smp_hotplug_thread cpuhp_threads = {
@@ -581,6 +722,7 @@ static int take_cpu_down(void *_param)
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
        int err, cpu = smp_processor_id();
+       int ret;
 
        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
@@ -594,8 +736,13 @@ static int take_cpu_down(void *_param)
        WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
        st->state--;
        /* Invoke the former CPU_DYING callbacks */
-       for (; st->state > target; st->state--)
-               cpuhp_invoke_callback(cpu, st->state, false, NULL);
+       for (; st->state > target; st->state--) {
+               ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
+               /*
+                * DYING must not fail!
+                */
+               WARN_ON_ONCE(ret);
+       }
 
        /* Give up timekeeping duties */
        tick_handover_do_timer();
@@ -639,7 +786,7 @@ static int takedown_cpu(unsigned int cpu)
         *
         * Wait for the stop thread to go away.
         */
-       wait_for_completion(&st->done);
+       wait_for_ap_thread(st, false);
        BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
 
        /* Interrupts are moved away from the dying cpu, reenable alloc/free */
@@ -658,7 +805,7 @@ static void cpuhp_complete_idle_dead(void *arg)
 {
        struct cpuhp_cpu_state *st = arg;
 
-       complete(&st->done);
+       complete_ap_thread(st, false);
 }
 
 void cpuhp_report_idle_dead(void)
@@ -676,11 +823,32 @@ void cpuhp_report_idle_dead(void)
                                 cpuhp_complete_idle_dead, st, 0);
 }
 
-#else
-#define takedown_cpu           NULL
-#endif
+static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
+{
+       for (st->state++; st->state < st->target; st->state++) {
+               struct cpuhp_step *step = cpuhp_get_step(st->state);
 
-#ifdef CONFIG_HOTPLUG_CPU
+               if (!step->skip_onerr)
+                       cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
+       }
+}
+
+static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
+                               enum cpuhp_state target)
+{
+       enum cpuhp_state prev_state = st->state;
+       int ret = 0;
+
+       for (; st->state > target; st->state--) {
+               ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
+               if (ret) {
+                       st->target = prev_state;
+                       undo_cpu_down(cpu, st);
+                       break;
+               }
+       }
+       return ret;
+}
 
 /* Requires cpu_add_remove_lock to be held */
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
@@ -699,13 +867,13 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 
        cpuhp_tasks_frozen = tasks_frozen;
 
-       prev_state = st->state;
-       st->target = target;
+       prev_state = cpuhp_set_state(st, target);
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread.
         */
        if (st->state > CPUHP_TEARDOWN_CPU) {
+               st->target = max((int)target, CPUHP_TEARDOWN_CPU);
                ret = cpuhp_kick_ap_work(cpu);
                /*
                 * The AP side has done the error rollback already. Just
@@ -720,6 +888,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
                 */
                if (st->state > CPUHP_TEARDOWN_CPU)
                        goto out;
+
+               st->target = target;
        }
        /*
         * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
@@ -727,13 +897,17 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
         */
        ret = cpuhp_down_callbacks(cpu, st, target);
        if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
-               st->target = prev_state;
-               st->rollback = true;
-               cpuhp_kick_ap_work(cpu);
+               cpuhp_reset_state(st, prev_state);
+               __cpuhp_kick_ap(st);
        }
 
 out:
        cpus_write_unlock();
+       /*
+        * Do post unplug cleanup. This is still protected against
+        * concurrent CPU hotplug via cpu_add_remove_lock.
+        */
+       lockup_detector_cleanup();
        return ret;
 }
 
@@ -754,11 +928,15 @@ out:
        cpu_maps_update_done();
        return err;
 }
+
 int cpu_down(unsigned int cpu)
 {
        return do_cpu_down(cpu, CPUHP_OFFLINE);
 }
 EXPORT_SYMBOL(cpu_down);
+
+#else
+#define takedown_cpu           NULL
 #endif /*CONFIG_HOTPLUG_CPU*/
 
 /**
@@ -772,11 +950,16 @@ void notify_cpu_starting(unsigned int cpu)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
+       int ret;
 
        rcu_cpu_starting(cpu);  /* Enables RCU usage on this CPU. */
        while (st->state < target) {
                st->state++;
-               cpuhp_invoke_callback(cpu, st->state, true, NULL);
+               ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
+               /*
+                * STARTING must not fail!
+                */
+               WARN_ON_ONCE(ret);
        }
 }
 
@@ -794,7 +977,7 @@ void cpuhp_online_idle(enum cpuhp_state state)
                return;
 
        st->state = CPUHP_AP_ONLINE_IDLE;
-       complete(&st->done);
+       complete_ap_thread(st, true);
 }
 
 /* Requires cpu_add_remove_lock to be held */
@@ -829,7 +1012,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 
        cpuhp_tasks_frozen = tasks_frozen;
 
-       st->target = target;
+       cpuhp_set_state(st, target);
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread once more.
@@ -1296,6 +1479,10 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
        struct cpuhp_step *sp = cpuhp_get_step(state);
        int ret;
 
+       /*
+        * If there's nothing to do, we done.
+        * Relies on the union for multi_instance.
+        */
        if ((bringup && !sp->startup.single) ||
            (!bringup && !sp->teardown.single))
                return 0;
@@ -1307,9 +1494,9 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
        if (cpuhp_is_ap_state(state))
                ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
        else
-               ret = cpuhp_invoke_callback(cpu, state, bringup, node);
+               ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
 #else
-       ret = cpuhp_invoke_callback(cpu, state, bringup, node);
+       ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
 #endif
        BUG_ON(ret && !bringup);
        return ret;
@@ -1641,9 +1828,55 @@ static ssize_t show_cpuhp_target(struct device *dev,
 }
 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
 
+
+static ssize_t write_cpuhp_fail(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
+{
+       struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
+       struct cpuhp_step *sp;
+       int fail, ret;
+
+       ret = kstrtoint(buf, 10, &fail);
+       if (ret)
+               return ret;
+
+       /*
+        * Cannot fail STARTING/DYING callbacks.
+        */
+       if (cpuhp_is_atomic_state(fail))
+               return -EINVAL;
+
+       /*
+        * Cannot fail anything that doesn't have callbacks.
+        */
+       mutex_lock(&cpuhp_state_mutex);
+       sp = cpuhp_get_step(fail);
+       if (!sp->startup.single && !sp->teardown.single)
+               ret = -EINVAL;
+       mutex_unlock(&cpuhp_state_mutex);
+       if (ret)
+               return ret;
+
+       st->fail = fail;
+
+       return count;
+}
+
+static ssize_t show_cpuhp_fail(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
+
+       return sprintf(buf, "%d\n", st->fail);
+}
+
+static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
+
 static struct attribute *cpuhp_cpu_attrs[] = {
        &dev_attr_state.attr,
        &dev_attr_target.attr,
+       &dev_attr_fail.attr,
        NULL
 };
 
index 3e691b75b2db2eab410208b7312687270e1fe765..9d93db81fa36e683724a50762b1afd0b4057bb7b 100644 (file)
@@ -662,7 +662,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
        /*
         * Do not update time when cgroup is not active
         */
-       if (cgrp == event->cgrp)
+       if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
                __update_cgrp_time(event->cgrp);
 }
 
@@ -8171,6 +8171,7 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
                }
        }
        event->tp_event->prog = prog;
+       event->tp_event->bpf_prog_owner = event;
 
        return 0;
 }
@@ -8185,7 +8186,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
                return;
 
        prog = event->tp_event->prog;
-       if (prog) {
+       if (prog && event->tp_event->bpf_prog_owner == event) {
                event->tp_event->prog = NULL;
                bpf_prog_put(prog);
        }
@@ -8954,6 +8955,14 @@ static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
 
 static void free_pmu_context(struct pmu *pmu)
 {
+       /*
+        * Static contexts such as perf_sw_context have a global lifetime
+        * and may be shared between different PMUs. Avoid freeing them
+        * when a single PMU is going away.
+        */
+       if (pmu->task_ctx_nr > perf_invalid_context)
+               return;
+
        mutex_lock(&pmus_lock);
        free_percpu(pmu->pmu_cpu_context);
        mutex_unlock(&pmus_lock);
index af71a84e12eea343c6047184599fedf7f5592e65..f684d8e5fa2be2fd10e4d5e2f1e65a5c2afeb629 100644 (file)
@@ -412,6 +412,19 @@ err:
        return NULL;
 }
 
+static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
+{
+       if (rb->aux_overwrite)
+               return false;
+
+       if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
+               rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
+               return true;
+       }
+
+       return false;
+}
+
 /*
  * Commit the data written by hardware into the ring buffer by adjusting
  * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
@@ -451,10 +464,8 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
        }
 
        rb->user_page->aux_head = rb->aux_head;
-       if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
+       if (rb_need_aux_wakeup(rb))
                wakeup = true;
-               rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
-       }
 
        if (wakeup) {
                if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
@@ -484,9 +495,8 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
        rb->aux_head += size;
 
        rb->user_page->aux_head = rb->aux_head;
-       if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
+       if (rb_need_aux_wakeup(rb)) {
                perf_output_wakeup(handle);
-               rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
                handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
        }
 
index 3481ababd06aa6cdd2aedd1fee0e56026acddc3b..f6cad39f35dfbe441abc5fc740458a6574e7d529 100644 (file)
@@ -1600,18 +1600,19 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
        struct waitid_info info = {.status = 0};
        long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
        int signo = 0;
+
        if (err > 0) {
                signo = SIGCHLD;
                err = 0;
-       }
-
-       if (!err) {
                if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
                        return -EFAULT;
        }
        if (!infop)
                return err;
 
+       if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
+               return -EFAULT;
+
        user_access_begin();
        unsafe_put_user(signo, &infop->si_signo, Efault);
        unsafe_put_user(0, &infop->si_errno, Efault);
@@ -1723,21 +1724,23 @@ COMPAT_SYSCALL_DEFINE5(waitid,
        if (err > 0) {
                signo = SIGCHLD;
                err = 0;
-       }
-
-       if (!err && uru) {
-               /* kernel_waitid() overwrites everything in ru */
-               if (COMPAT_USE_64BIT_TIME)
-                       err = copy_to_user(uru, &ru, sizeof(ru));
-               else
-                       err = put_compat_rusage(&ru, uru);
-               if (err)
-                       return -EFAULT;
+               if (uru) {
+                       /* kernel_waitid() overwrites everything in ru */
+                       if (COMPAT_USE_64BIT_TIME)
+                               err = copy_to_user(uru, &ru, sizeof(ru));
+                       else
+                               err = put_compat_rusage(&ru, uru);
+                       if (err)
+                               return -EFAULT;
+               }
        }
 
        if (!infop)
                return err;
 
+       if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
+               return -EFAULT;
+
        user_access_begin();
        unsafe_put_user(signo, &infop->si_signo, Efault);
        unsafe_put_user(0, &infop->si_errno, Efault);
index 38c2412401a1b118d53d3cff92eba24753134aed..9aa1cc41ecf79c8c2cc5fb9dbd3fbd418025c0dd 100644 (file)
@@ -102,15 +102,7 @@ int core_kernel_data(unsigned long addr)
 
 int __kernel_text_address(unsigned long addr)
 {
-       if (core_kernel_text(addr))
-               return 1;
-       if (is_module_text_address(addr))
-               return 1;
-       if (is_ftrace_trampoline(addr))
-               return 1;
-       if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
-               return 1;
-       if (is_bpf_text_address(addr))
+       if (kernel_text_address(addr))
                return 1;
        /*
         * There might be init symbols in saved stacktraces.
@@ -127,17 +119,42 @@ int __kernel_text_address(unsigned long addr)
 
 int kernel_text_address(unsigned long addr)
 {
+       bool no_rcu;
+       int ret = 1;
+
        if (core_kernel_text(addr))
                return 1;
+
+       /*
+        * If a stack dump happens while RCU is not watching, then
+        * RCU needs to be notified that it requires to start
+        * watching again. This can happen either by tracing that
+        * triggers a stack trace, or a WARN() that happens during
+        * coming back from idle, or cpu on or offlining.
+        *
+        * is_module_text_address() as well as the kprobe slots
+        * and is_bpf_text_address() require RCU to be watching.
+        */
+       no_rcu = !rcu_is_watching();
+
+       /* Treat this like an NMI as it can happen anywhere */
+       if (no_rcu)
+               rcu_nmi_enter();
+
        if (is_module_text_address(addr))
-               return 1;
+               goto out;
        if (is_ftrace_trampoline(addr))
-               return 1;
+               goto out;
        if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
-               return 1;
+               goto out;
        if (is_bpf_text_address(addr))
-               return 1;
-       return 0;
+               goto out;
+       ret = 0;
+out:
+       if (no_rcu)
+               rcu_nmi_exit();
+
+       return ret;
 }
 
 /*
index 10646182440fa4c3a3a8da7d9fdb7b45f01aab26..07cc743698d3668ef7fb4442c6de0e12de7f02f5 100644 (file)
@@ -215,6 +215,10 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
                if (!s)
                        continue;
 
+#ifdef CONFIG_DEBUG_KMEMLEAK
+               /* Clear stale pointers from reused stack. */
+               memset(s->addr, 0, THREAD_SIZE);
+#endif
                tsk->stack_vm_area = s;
                return s->addr;
        }
@@ -946,6 +950,24 @@ void mmput(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(mmput);
 
+#ifdef CONFIG_MMU
+static void mmput_async_fn(struct work_struct *work)
+{
+       struct mm_struct *mm = container_of(work, struct mm_struct,
+                                           async_put_work);
+
+       __mmput(mm);
+}
+
+void mmput_async(struct mm_struct *mm)
+{
+       if (atomic_dec_and_test(&mm->mm_users)) {
+               INIT_WORK(&mm->async_put_work, mmput_async_fn);
+               schedule_work(&mm->async_put_work);
+       }
+}
+#endif
+
 /**
  * set_mm_exe_file - change a reference to the mm's executable file
  *
index 3d38eaf0549209ddb72e83780df6167a4423e8a2..0518a0bfc746bab4f257f2fa81ef7361f99ff113 100644 (file)
@@ -821,8 +821,6 @@ static void get_pi_state(struct futex_pi_state *pi_state)
 /*
  * Drops a reference to the pi_state object and frees or caches it
  * when the last reference is gone.
- *
- * Must be called with the hb lock held.
  */
 static void put_pi_state(struct futex_pi_state *pi_state)
 {
@@ -837,16 +835,22 @@ static void put_pi_state(struct futex_pi_state *pi_state)
         * and has cleaned up the pi_state already
         */
        if (pi_state->owner) {
-               raw_spin_lock_irq(&pi_state->owner->pi_lock);
-               list_del_init(&pi_state->list);
-               raw_spin_unlock_irq(&pi_state->owner->pi_lock);
+               struct task_struct *owner;
 
-               rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
+               raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+               owner = pi_state->owner;
+               if (owner) {
+                       raw_spin_lock(&owner->pi_lock);
+                       list_del_init(&pi_state->list);
+                       raw_spin_unlock(&owner->pi_lock);
+               }
+               rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
+               raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
        }
 
-       if (current->pi_state_cache)
+       if (current->pi_state_cache) {
                kfree(pi_state);
-       else {
+       } else {
                /*
                 * pi_state->list is already empty.
                 * clear pi_state->owner.
@@ -907,13 +911,14 @@ void exit_pi_state_list(struct task_struct *curr)
                raw_spin_unlock_irq(&curr->pi_lock);
 
                spin_lock(&hb->lock);
-
-               raw_spin_lock_irq(&curr->pi_lock);
+               raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+               raw_spin_lock(&curr->pi_lock);
                /*
                 * We dropped the pi-lock, so re-check whether this
                 * task still owns the PI-state:
                 */
                if (head->next != next) {
+                       raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
                        spin_unlock(&hb->lock);
                        continue;
                }
@@ -922,9 +927,10 @@ void exit_pi_state_list(struct task_struct *curr)
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                pi_state->owner = NULL;
-               raw_spin_unlock_irq(&curr->pi_lock);
+               raw_spin_unlock(&curr->pi_lock);
 
                get_pi_state(pi_state);
+               raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
                spin_unlock(&hb->lock);
 
                rt_mutex_futex_unlock(&pi_state->pi_mutex);
@@ -1208,6 +1214,10 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
 
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &p->pi_state_list);
+       /*
+        * Assignment without holding pi_state->pi_mutex.wait_lock is safe
+        * because there is no concurrency as the object is not published yet.
+        */
        pi_state->owner = p;
        raw_spin_unlock_irq(&p->pi_lock);
 
@@ -2878,6 +2888,7 @@ retry:
                raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
                spin_unlock(&hb->lock);
 
+               /* drops pi_state->pi_mutex.wait_lock */
                ret = wake_futex_pi(uaddr, uval, pi_state);
 
                put_pi_state(pi_state);
index f51b7b6d245177292bb3dbdc2ab25ebc7ddf42b2..5a2ef92c2782c59c177e1f6c2eb5b4b17fb75a49 100644 (file)
@@ -202,7 +202,7 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
 
        irqd_clr_managed_shutdown(d);
 
-       if (cpumask_any_and(aff, cpu_online_mask) > nr_cpu_ids) {
+       if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
                /*
                 * Catch code which fiddles with enable_irq() on a managed
                 * and potentially shutdown IRQ. Chained interrupt
@@ -265,8 +265,8 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
                        irq_setup_affinity(desc);
                        break;
                case IRQ_STARTUP_MANAGED:
+                       irq_do_set_affinity(d, aff, false);
                        ret = __irq_startup(desc);
-                       irq_set_affinity_locked(d, aff, false);
                        break;
                case IRQ_STARTUP_ABORT:
                        return 0;
index 638eb9c83d9f75b0e73c0d80e2c87f6e4c2e02f3..9eb09aef0313cecaea99844c653016469cc9321f 100644 (file)
 static inline bool irq_needs_fixup(struct irq_data *d)
 {
        const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
+       unsigned int cpu = smp_processor_id();
 
-       return cpumask_test_cpu(smp_processor_id(), m);
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+       /*
+        * The cpumask_empty() check is a workaround for interrupt chips,
+        * which do not implement effective affinity, but the architecture has
+        * enabled the config switch. Use the general affinity mask instead.
+        */
+       if (cpumask_empty(m))
+               m = irq_data_get_affinity_mask(d);
+
+       /*
+        * Sanity check. If the mask is not empty when excluding the outgoing
+        * CPU then it must contain at least one online CPU. The outgoing CPU
+        * has been removed from the online mask already.
+        */
+       if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
+           cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
+               /*
+                * If this happens then there was a missed IRQ fixup at some
+                * point. Warn about it and enforce fixup.
+                */
+               pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
+                       cpumask_pr_args(m), d->irq, cpu);
+               return true;
+       }
+#endif
+       return cpumask_test_cpu(cpu, m);
 }
 
 static bool migrate_one_irq(struct irq_desc *desc)
index f7086b78ad6e1e1bcb3995537b024d7ed057ef0e..c26c5bb6b491f75f76f1190cdc21989f79d17e09 100644 (file)
@@ -135,17 +135,26 @@ void irq_gc_ack_clr_bit(struct irq_data *d)
 }
 
 /**
- * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
+ * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
  * @d: irq_data
+ *
+ * This generic implementation of the irq_mask_ack method is for chips
+ * with separate enable/disable registers instead of a single mask
+ * register and where a pending interrupt is acknowledged by setting a
+ * bit.
+ *
+ * Note: This is the only permutation currently used.  Similar generic
+ * functions should be added here if other permutations are required.
  */
-void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
+void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
 {
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;
 
        irq_gc_lock(gc);
-       irq_reg_writel(gc, mask, ct->regs.mask);
+       irq_reg_writel(gc, mask, ct->regs.disable);
+       *ct->mask_cache &= ~mask;
        irq_reg_writel(gc, mask, ct->regs.ack);
        irq_gc_unlock(gc);
 }
@@ -322,7 +331,6 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
                /* Calc pointer to the next generic chip */
                tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
        }
-       d->name = name;
        return 0;
 }
 EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);
index e84b7056bb083349b8326ee5dae71284a4fc22cb..ac4644e92b499949b1a11652ddde3e3bccba8072 100644 (file)
@@ -945,7 +945,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
        struct irq_desc *desc;
        struct irq_domain *domain;
        struct radix_tree_iter iter;
-       void **slot;
+       void __rcu **slot;
        int i;
 
        seq_printf(m, " %-16s  %-6s  %-10s  %-10s  %s\n",
@@ -1453,7 +1453,7 @@ out_free_desc:
 /* The irq_data was moved, fix the revmap to refer to the new location */
 static void irq_domain_fix_revmap(struct irq_data *d)
 {
-       void **slot;
+       void __rcu **slot;
 
        if (d->hwirq < d->domain->revmap_size)
                return; /* Not using radix tree. */
index 573dc52b0806054bc485c1290d0ed58ccfbb5c87..4bff6a10ae8ec7efb76dd8e677e86016ca04355a 100644 (file)
@@ -168,6 +168,19 @@ void irq_set_thread_affinity(struct irq_desc *desc)
                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
 }
 
+static void irq_validate_effective_affinity(struct irq_data *data)
+{
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+       const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
+       struct irq_chip *chip = irq_data_get_irq_chip(data);
+
+       if (!cpumask_empty(m))
+               return;
+       pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
+                    chip->name, data->irq);
+#endif
+}
+
 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
                        bool force)
 {
@@ -175,12 +188,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        int ret;
 
+       if (!chip || !chip->irq_set_affinity)
+               return -EINVAL;
+
        ret = chip->irq_set_affinity(data, mask, force);
        switch (ret) {
        case IRQ_SET_MASK_OK:
        case IRQ_SET_MASK_OK_DONE:
                cpumask_copy(desc->irq_common_data.affinity, mask);
        case IRQ_SET_MASK_OK_NOCOPY:
+               irq_validate_effective_affinity(data);
                irq_set_thread_affinity(desc);
                ret = 0;
        }
@@ -1643,6 +1660,10 @@ const void *free_irq(unsigned int irq, void *dev_id)
 #endif
 
        action = __free_irq(irq, dev_id);
+
+       if (!action)
+               return NULL;
+
        devname = action->name;
        kfree(action);
        return devname;
index ea34ed8bb9529c7b7a2a38b0a4e050995b4ba97e..055bb2962a0b656269b1220d7cb4ead7978301e2 100644 (file)
@@ -131,7 +131,7 @@ static int kcmp_epoll_target(struct task_struct *task1,
        if (filp_epoll) {
                filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff);
                fput(filp_epoll);
-       } else
+       }
 
        if (IS_ERR(filp_tgt))
                return PTR_ERR(filp_tgt);
index b9628e43c78f60711f8a8c21693aef5c7041f2bb..bf8c8fd72589ddeeae34662a8d76352afa890678 100644 (file)
@@ -830,6 +830,41 @@ int klp_register_patch(struct klp_patch *patch)
 }
 EXPORT_SYMBOL_GPL(klp_register_patch);
 
+/*
+ * Remove parts of patches that touch a given kernel module. The list of
+ * patches processed might be limited. When limit is NULL, all patches
+ * will be handled.
+ */
+static void klp_cleanup_module_patches_limited(struct module *mod,
+                                              struct klp_patch *limit)
+{
+       struct klp_patch *patch;
+       struct klp_object *obj;
+
+       list_for_each_entry(patch, &klp_patches, list) {
+               if (patch == limit)
+                       break;
+
+               klp_for_each_object(patch, obj) {
+                       if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
+                               continue;
+
+                       /*
+                        * Only unpatch the module if the patch is enabled or
+                        * is in transition.
+                        */
+                       if (patch->enabled || patch == klp_transition_patch) {
+                               pr_notice("reverting patch '%s' on unloading module '%s'\n",
+                                         patch->mod->name, obj->mod->name);
+                               klp_unpatch_object(obj);
+                       }
+
+                       klp_free_object_loaded(obj);
+                       break;
+               }
+       }
+}
+
 int klp_module_coming(struct module *mod)
 {
        int ret;
@@ -894,7 +929,7 @@ err:
        pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
                patch->mod->name, obj->mod->name, obj->mod->name);
        mod->klp_alive = false;
-       klp_free_object_loaded(obj);
+       klp_cleanup_module_patches_limited(mod, patch);
        mutex_unlock(&klp_mutex);
 
        return ret;
@@ -902,9 +937,6 @@ err:
 
 void klp_module_going(struct module *mod)
 {
-       struct klp_patch *patch;
-       struct klp_object *obj;
-
        if (WARN_ON(mod->state != MODULE_STATE_GOING &&
                    mod->state != MODULE_STATE_COMING))
                return;
@@ -917,25 +949,7 @@ void klp_module_going(struct module *mod)
         */
        mod->klp_alive = false;
 
-       list_for_each_entry(patch, &klp_patches, list) {
-               klp_for_each_object(patch, obj) {
-                       if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
-                               continue;
-
-                       /*
-                        * Only unpatch the module if the patch is enabled or
-                        * is in transition.
-                        */
-                       if (patch->enabled || patch == klp_transition_patch) {
-                               pr_notice("reverting patch '%s' on unloading module '%s'\n",
-                                         patch->mod->name, obj->mod->name);
-                               klp_unpatch_object(obj);
-                       }
-
-                       klp_free_object_loaded(obj);
-                       break;
-               }
-       }
+       klp_cleanup_module_patches_limited(mod, NULL);
 
        mutex_unlock(&klp_mutex);
 }
index 44c8d0d17170af808ab0f11c4e556d49044a6c78..e36e652d996fe682157c52768949f0b9c7f1e6d1 100644 (file)
@@ -1873,10 +1873,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
               struct held_lock *next, int distance, struct stack_trace *trace,
               int (*save)(struct stack_trace *trace))
 {
+       struct lock_list *uninitialized_var(target_entry);
        struct lock_list *entry;
-       int ret;
        struct lock_list this;
-       struct lock_list *uninitialized_var(target_entry);
+       int ret;
 
        /*
         * Prove that the new <prev> -> <next> dependency would not
@@ -1890,8 +1890,17 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
        this.class = hlock_class(next);
        this.parent = NULL;
        ret = check_noncircular(&this, hlock_class(prev), &target_entry);
-       if (unlikely(!ret))
+       if (unlikely(!ret)) {
+               if (!trace->entries) {
+                       /*
+                        * If @save fails here, the printing might trigger
+                        * a WARN but because of the !nr_entries it should
+                        * not do bad things.
+                        */
+                       save(trace);
+               }
                return print_circular_bug(&this, target_entry, next, prev, trace);
+       }
        else if (unlikely(ret < 0))
                return print_bfs_bug(ret);
 
@@ -1938,7 +1947,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
                return print_bfs_bug(ret);
 
 
-       if (save && !save(trace))
+       if (!trace->entries && !save(trace))
                return 0;
 
        /*
@@ -1958,20 +1967,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
        if (!ret)
                return 0;
 
-       /*
-        * Debugging printouts:
-        */
-       if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
-               graph_unlock();
-               printk("\n new dependency: ");
-               print_lock_name(hlock_class(prev));
-               printk(KERN_CONT " => ");
-               print_lock_name(hlock_class(next));
-               printk(KERN_CONT "\n");
-               dump_stack();
-               if (!graph_lock())
-                       return 0;
-       }
        return 2;
 }
 
@@ -1986,8 +1981,12 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
        int depth = curr->lockdep_depth;
        struct held_lock *hlock;
-       struct stack_trace trace;
-       int (*save)(struct stack_trace *trace) = save_trace;
+       struct stack_trace trace = {
+               .nr_entries = 0,
+               .max_entries = 0,
+               .entries = NULL,
+               .skip = 0,
+       };
 
        /*
         * Debugging checks.
@@ -2018,17 +2017,10 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
                         */
                        if (hlock->read != 2 && hlock->check) {
                                int ret = check_prev_add(curr, hlock, next,
-                                                        distance, &trace, save);
+                                                        distance, &trace, save_trace);
                                if (!ret)
                                        return 0;
 
-                               /*
-                                * Stop saving stack_trace if save_trace() was
-                                * called at least once:
-                                */
-                               if (save && ret == 2)
-                                       save = NULL;
-
                                /*
                                 * Stop after the first non-trylock entry,
                                 * as non-trylock entries have added their
index 02f660666ab8976ea51215427050d1fe3807326f..1fefe6dcafd7403a9b172e11ebd0f41d35332211 100644 (file)
@@ -612,6 +612,33 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);
 
+       /*
+       * __rwsem_down_write_failed_common(sem)
+       *   rwsem_optimistic_spin(sem)
+       *     osq_unlock(sem->osq)
+       *   ...
+       *   atomic_long_add_return(&sem->count)
+       *
+       *      - VS -
+       *
+       *              __up_write()
+       *                if (atomic_long_sub_return_release(&sem->count) < 0)
+       *                  rwsem_wake(sem)
+       *                    osq_is_locked(&sem->osq)
+       *
+       * And __up_write() must observe !osq_is_locked() when it observes the
+       * atomic_long_add_return() in order to not miss a wakeup.
+       *
+       * This boils down to:
+       *
+       * [S.rel] X = 1                [RmW] r0 = (Y += 0)
+       *         MB                         RMB
+       * [RmW]   Y += 1               [L]   r1 = X
+       *
+       * exists (r0=1 /\ r1=0)
+       */
+       smp_rmb();
+
        /*
         * If a spinner is present, it is not necessary to do the wakeup.
         * Try to do wakeup only if the trylock succeeds to minimize
index 6bcbfbf1a8fdfd2f1008cde707db9a798a68cdc6..403ab9cdb949a0483bd82c811a3383eed77e5246 100644 (file)
@@ -350,7 +350,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
        pgprot_t pgprot = PAGE_KERNEL;
        struct dev_pagemap *pgmap;
        struct page_map *page_map;
-       int error, nid, is_ram;
+       int error, nid, is_ram, i = 0;
 
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
@@ -448,6 +448,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
                list_del(&page->lru);
                page->pgmap = pgmap;
                percpu_ref_get(ref);
+               if (!(++i % 1024))
+                       cond_resched();
        }
        devres_add(dev, page_map);
        return __va(res->start);
index 60b2d8101355fedcfd8dfee99f8a2fa467ea99d9..cc9108c2a1fde209f5fe8b41e4cfca2e9e4d1310 100644 (file)
@@ -224,7 +224,7 @@ char *parse_args(const char *doing,
        }                                                               \
        int param_get_##name(char *buffer, const struct kernel_param *kp) \
        {                                                               \
-               return scnprintf(buffer, PAGE_SIZE, format,             \
+               return scnprintf(buffer, PAGE_SIZE, format "\n",        \
                                *((type *)kp->arg));                    \
        }                                                               \
        const struct kernel_param_ops param_ops_##name = {                      \
@@ -236,14 +236,14 @@ char *parse_args(const char *doing,
        EXPORT_SYMBOL(param_ops_##name)
 
 
-STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", kstrtou8);
-STANDARD_PARAM_DEF(short, short, "%hi", kstrtos16);
-STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", kstrtou16);
-STANDARD_PARAM_DEF(int, int, "%i", kstrtoint);
-STANDARD_PARAM_DEF(uint, unsigned int, "%u", kstrtouint);
-STANDARD_PARAM_DEF(long, long, "%li", kstrtol);
-STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", kstrtoul);
-STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull);
+STANDARD_PARAM_DEF(byte,       unsigned char,          "%hhu", kstrtou8);
+STANDARD_PARAM_DEF(short,      short,                  "%hi",  kstrtos16);
+STANDARD_PARAM_DEF(ushort,     unsigned short,         "%hu",  kstrtou16);
+STANDARD_PARAM_DEF(int,                int,                    "%i",   kstrtoint);
+STANDARD_PARAM_DEF(uint,       unsigned int,           "%u",   kstrtouint);
+STANDARD_PARAM_DEF(long,       long,                   "%li",  kstrtol);
+STANDARD_PARAM_DEF(ulong,      unsigned long,          "%lu",  kstrtoul);
+STANDARD_PARAM_DEF(ullong,     unsigned long long,     "%llu", kstrtoull);
 
 int param_set_charp(const char *val, const struct kernel_param *kp)
 {
@@ -270,7 +270,7 @@ EXPORT_SYMBOL(param_set_charp);
 
 int param_get_charp(char *buffer, const struct kernel_param *kp)
 {
-       return scnprintf(buffer, PAGE_SIZE, "%s", *((char **)kp->arg));
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", *((char **)kp->arg));
 }
 EXPORT_SYMBOL(param_get_charp);
 
@@ -301,7 +301,7 @@ EXPORT_SYMBOL(param_set_bool);
 int param_get_bool(char *buffer, const struct kernel_param *kp)
 {
        /* Y and N chosen as being relatively non-coder friendly */
-       return sprintf(buffer, "%c", *(bool *)kp->arg ? 'Y' : 'N');
+       return sprintf(buffer, "%c\n", *(bool *)kp->arg ? 'Y' : 'N');
 }
 EXPORT_SYMBOL(param_get_bool);
 
@@ -360,7 +360,7 @@ EXPORT_SYMBOL(param_set_invbool);
 
 int param_get_invbool(char *buffer, const struct kernel_param *kp)
 {
-       return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 'N' : 'Y');
+       return sprintf(buffer, "%c\n", (*(bool *)kp->arg) ? 'N' : 'Y');
 }
 EXPORT_SYMBOL(param_get_invbool);
 
@@ -460,8 +460,9 @@ static int param_array_get(char *buffer, const struct kernel_param *kp)
        struct kernel_param p = *kp;
 
        for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) {
+               /* Replace \n with comma */
                if (i)
-                       buffer[off++] = ',';
+                       buffer[off - 1] = ',';
                p.arg = arr->elem + arr->elemsize * i;
                check_kparam_locked(p.mod);
                ret = arr->ops->get(buffer + off, &p);
@@ -507,7 +508,7 @@ EXPORT_SYMBOL(param_set_copystring);
 int param_get_string(char *buffer, const struct kernel_param *kp)
 {
        const struct kparam_string *kps = kp->str;
-       return strlcpy(buffer, kps->string, kps->maxlen);
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", kps->string);
 }
 EXPORT_SYMBOL(param_get_string);
 
@@ -549,10 +550,6 @@ static ssize_t param_attr_show(struct module_attribute *mattr,
        kernel_param_lock(mk->mod);
        count = attribute->param->ops->get(buf, attribute->param);
        kernel_param_unlock(mk->mod);
-       if (count > 0) {
-               strcat(buf, "\n");
-               ++count;
-       }
        return count;
 }
 
@@ -600,7 +597,7 @@ EXPORT_SYMBOL(kernel_param_unlock);
 /*
  * add_sysfs_param - add a parameter to sysfs
  * @mk: struct module_kobject
- * @kparam: the actual parameter definition to add to sysfs
+ * @kp: the actual parameter definition to add to sysfs
  * @name: name of parameter
  *
  * Create a kobject if for a (per-module) parameter if mp NULL, and
index 3e2b4f519009709c61b8de20e7f159efd159df04..ccd2d20e6b067f6a7c3bb83a5848e9c0b333fade 100644 (file)
@@ -120,22 +120,26 @@ static void s2idle_loop(void)
                 * frozen processes + suspended devices + idle processors.
                 * Thus s2idle_enter() should be called right after
                 * all devices have been suspended.
+                *
+                * Wakeups during the noirq suspend of devices may be spurious,
+                * so prevent them from terminating the loop right away.
                 */
                error = dpm_noirq_suspend_devices(PMSG_SUSPEND);
                if (!error)
                        s2idle_enter();
+               else if (error == -EBUSY && pm_wakeup_pending())
+                       error = 0;
 
-               dpm_noirq_resume_devices(PMSG_RESUME);
-               if (error && (error != -EBUSY || !pm_wakeup_pending())) {
-                       dpm_noirq_end();
-                       break;
-               }
-
-               if (s2idle_ops && s2idle_ops->wake)
+               if (!error && s2idle_ops && s2idle_ops->wake)
                        s2idle_ops->wake();
 
+               dpm_noirq_resume_devices(PMSG_RESUME);
+
                dpm_noirq_end();
 
+               if (error)
+                       break;
+
                if (s2idle_ops && s2idle_ops->sync)
                        s2idle_ops->sync();
 
index 729a8706751db4230bf3e3275192475fb44f9c3a..6d5880089ff6b7785db69e0facb432cab4aab5fc 100644 (file)
@@ -854,7 +854,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 /**
  * call_srcu() - Queue a callback for invocation after an SRCU grace period
  * @sp: srcu_struct in queue the callback
- * @head: structure to be used for queueing the SRCU callback.
+ * @rhp: structure to be used for queueing the SRCU callback.
  * @func: function to be invoked after the SRCU grace period
  *
  * The callback function will be invoked some time after a full SRCU
index 50d1861f7759b40ff0248281e742a6a4fac1e852..3f943efcf61c1e3c31373799f475c80967ae3fd0 100644 (file)
@@ -85,6 +85,9 @@ void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
 }
 
 /**
+ * rcu_sync_enter_start - Force readers onto slow path for multiple updates
+ * @rsp: Pointer to rcu_sync structure to use for synchronization
+ *
  * Must be called after rcu_sync_init() and before first use.
  *
  * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
@@ -142,7 +145,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
 
 /**
  * rcu_sync_func() - Callback function managing reader access to fastpath
- * @rsp: Pointer to rcu_sync structure to use for synchronization
+ * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
  *
  * This function is passed to one of the call_rcu() functions by
  * rcu_sync_exit(), so that it is invoked after a grace period following the
@@ -158,9 +161,9 @@ void rcu_sync_enter(struct rcu_sync *rsp)
  * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
  * can again use their fastpaths.
  */
-static void rcu_sync_func(struct rcu_head *rcu)
+static void rcu_sync_func(struct rcu_head *rhp)
 {
-       struct rcu_sync *rsp = container_of(rcu, struct rcu_sync, cb_head);
+       struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
        unsigned long flags;
 
        BUG_ON(rsp->gp_state != GP_PASSED);
index 1250e4bd4b85e690c7db2b061a87599e8651a606..3e3650e94ae6b1dd26fea711a8961d627c16623b 100644 (file)
@@ -882,6 +882,11 @@ void rcu_irq_exit(void)
 
        RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
        rdtp = this_cpu_ptr(&rcu_dynticks);
+
+       /* Page faults can happen in NMI handlers, so check... */
+       if (rdtp->dynticks_nmi_nesting)
+               return;
+
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     rdtp->dynticks_nesting < 1);
        if (rdtp->dynticks_nesting <= 1) {
@@ -1015,6 +1020,11 @@ void rcu_irq_enter(void)
 
        RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
        rdtp = this_cpu_ptr(&rcu_dynticks);
+
+       /* Page faults can happen in NMI handlers, so check... */
+       if (rdtp->dynticks_nmi_nesting)
+               return;
+
        oldval = rdtp->dynticks_nesting;
        rdtp->dynticks_nesting++;
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
@@ -3087,9 +3097,10 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
  * read-side critical sections have completed. call_rcu_sched() assumes
  * that the read-side critical sections end on enabling of preemption
  * or on voluntary preemption.
- * RCU read-side critical sections are delimited by :
- *  - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
- *  - anything that disables preemption.
+ * RCU read-side critical sections are delimited by:
+ *
+ * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
+ * - anything that disables preemption.
  *
  *  These may be nested.
  *
@@ -3114,11 +3125,12 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
  * handler. This means that read-side critical sections in process
  * context must not be interrupted by softirqs. This interface is to be
  * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by :
- *  - rcu_read_lock() and  rcu_read_unlock(), if in interrupt context.
- *  OR
- *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
- *  These may be nested.
+ * RCU read-side critical sections are delimited by:
+ *
+ * - rcu_read_lock() and  rcu_read_unlock(), if in interrupt context, OR
+ * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
+ *
+ * These may be nested.
  *
  * See the description of call_rcu() for more detailed information on
  * memory ordering guarantees.
index 18a6966567daf30517a9f832e4a88b20ae705401..d17c5da523a0bc817a6b32413ee7e0ba3e7a6b2e 100644 (file)
@@ -5166,6 +5166,28 @@ void sched_show_task(struct task_struct *p)
        put_task_stack(p);
 }
 
+static inline bool
+state_filter_match(unsigned long state_filter, struct task_struct *p)
+{
+       /* no filter, everything matches */
+       if (!state_filter)
+               return true;
+
+       /* filter, but doesn't match */
+       if (!(p->state & state_filter))
+               return false;
+
+       /*
+        * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
+        * TASK_KILLABLE).
+        */
+       if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
+               return false;
+
+       return true;
+}
+
+
 void show_state_filter(unsigned long state_filter)
 {
        struct task_struct *g, *p;
@@ -5188,7 +5210,7 @@ void show_state_filter(unsigned long state_filter)
                 */
                touch_nmi_watchdog();
                touch_all_softlockup_watchdogs();
-               if (!state_filter || (p->state & state_filter))
+               if (state_filter_match(state_filter, p))
                        sched_show_task(p);
        }
 
index 01217fb5a5de9bd007506020cffb5052ab3abba8..2f93e4a2d9f623915d0023f9b3a7d8b7d7b95cf7 100644 (file)
@@ -466,8 +466,6 @@ static char *task_group_path(struct task_group *tg)
 }
 #endif
 
-static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
-
 static void
 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
index 70ba32e08a231858326211b7f64a3ae6a3f4dbd3..d3f3094856fe14df9adc8837f569087d5543e7e7 100644 (file)
@@ -5356,91 +5356,62 @@ static int wake_wide(struct task_struct *p)
        return 1;
 }
 
-struct llc_stats {
-       unsigned long   nr_running;
-       unsigned long   load;
-       unsigned long   capacity;
-       int             has_capacity;
-};
+/*
+ * The purpose of wake_affine() is to quickly determine on which CPU we can run
+ * soonest. For the purpose of speed we only consider the waking and previous
+ * CPU.
+ *
+ * wake_affine_idle() - only considers 'now', it check if the waking CPU is (or
+ *                     will be) idle.
+ *
+ * wake_affine_weight() - considers the weight to reflect the average
+ *                       scheduling latency of the CPUs. This seems to work
+ *                       for the overloaded case.
+ */
 
-static bool get_llc_stats(struct llc_stats *stats, int cpu)
+static bool
+wake_affine_idle(struct sched_domain *sd, struct task_struct *p,
+                int this_cpu, int prev_cpu, int sync)
 {
-       struct sched_domain_shared *sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
-
-       if (!sds)
-               return false;
+       if (idle_cpu(this_cpu))
+               return true;
 
-       stats->nr_running       = READ_ONCE(sds->nr_running);
-       stats->load             = READ_ONCE(sds->load);
-       stats->capacity         = READ_ONCE(sds->capacity);
-       stats->has_capacity     = stats->nr_running < per_cpu(sd_llc_size, cpu);
+       if (sync && cpu_rq(this_cpu)->nr_running == 1)
+               return true;
 
-       return true;
+       return false;
 }
 
-/*
- * Can a task be moved from prev_cpu to this_cpu without causing a load
- * imbalance that would trigger the load balancer?
- *
- * Since we're running on 'stale' values, we might in fact create an imbalance
- * but recomputing these values is expensive, as that'd mean iteration 2 cache
- * domains worth of CPUs.
- */
 static bool
-wake_affine_llc(struct sched_domain *sd, struct task_struct *p,
-               int this_cpu, int prev_cpu, int sync)
+wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
+                  int this_cpu, int prev_cpu, int sync)
 {
-       struct llc_stats prev_stats, this_stats;
        s64 this_eff_load, prev_eff_load;
        unsigned long task_load;
 
-       if (!get_llc_stats(&prev_stats, prev_cpu) ||
-           !get_llc_stats(&this_stats, this_cpu))
-               return false;
+       this_eff_load = target_load(this_cpu, sd->wake_idx);
+       prev_eff_load = source_load(prev_cpu, sd->wake_idx);
 
-       /*
-        * If sync wakeup then subtract the (maximum possible)
-        * effect of the currently running task from the load
-        * of the current LLC.
-        */
        if (sync) {
                unsigned long current_load = task_h_load(current);
 
-               /* in this case load hits 0 and this LLC is considered 'idle' */
-               if (current_load > this_stats.load)
+               if (current_load > this_eff_load)
                        return true;
 
-               this_stats.load -= current_load;
+               this_eff_load -= current_load;
        }
 
-       /*
-        * The has_capacity stuff is not SMT aware, but by trying to balance
-        * the nr_running on both ends we try and fill the domain at equal
-        * rates, thereby first consuming cores before siblings.
-        */
-
-       /* if the old cache has capacity, stay there */
-       if (prev_stats.has_capacity && prev_stats.nr_running < this_stats.nr_running+1)
-               return false;
-
-       /* if this cache has capacity, come here */
-       if (this_stats.has_capacity && this_stats.nr_running+1 < prev_stats.nr_running)
-               return true;
-
-       /*
-        * Check to see if we can move the load without causing too much
-        * imbalance.
-        */
        task_load = task_h_load(p);
 
-       this_eff_load = 100;
-       this_eff_load *= prev_stats.capacity;
-
-       prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
-       prev_eff_load *= this_stats.capacity;
+       this_eff_load += task_load;
+       if (sched_feat(WA_BIAS))
+               this_eff_load *= 100;
+       this_eff_load *= capacity_of(prev_cpu);
 
-       this_eff_load *= this_stats.load + task_load;
-       prev_eff_load *= prev_stats.load - task_load;
+       prev_eff_load -= task_load;
+       if (sched_feat(WA_BIAS))
+               prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
+       prev_eff_load *= capacity_of(this_cpu);
 
        return this_eff_load <= prev_eff_load;
 }
@@ -5449,22 +5420,13 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
                       int prev_cpu, int sync)
 {
        int this_cpu = smp_processor_id();
-       bool affine;
+       bool affine = false;
 
-       /*
-        * Default to no affine wakeups; wake_affine() should not effect a task
-        * placement the load-balancer feels inclined to undo. The conservative
-        * option is therefore to not move tasks when they wake up.
-        */
-       affine = false;
+       if (sched_feat(WA_IDLE) && !affine)
+               affine = wake_affine_idle(sd, p, this_cpu, prev_cpu, sync);
 
-       /*
-        * If the wakeup is across cache domains, try to evaluate if movement
-        * makes sense, otherwise rely on select_idle_siblings() to do
-        * placement inside the cache domain.
-        */
-       if (!cpus_share_cache(prev_cpu, this_cpu))
-               affine = wake_affine_llc(sd, p, this_cpu, prev_cpu, sync);
+       if (sched_feat(WA_WEIGHT) && !affine)
+               affine = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
 
        schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
        if (affine) {
@@ -7600,7 +7562,6 @@ static inline enum fbq_type fbq_classify_rq(struct rq *rq)
  */
 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 {
-       struct sched_domain_shared *shared = env->sd->shared;
        struct sched_domain *child = env->sd->child;
        struct sched_group *sg = env->sd->groups;
        struct sg_lb_stats *local = &sds->local_stat;
@@ -7672,22 +7633,6 @@ next_group:
                if (env->dst_rq->rd->overload != overload)
                        env->dst_rq->rd->overload = overload;
        }
-
-       if (!shared)
-               return;
-
-       /*
-        * Since these are sums over groups they can contain some CPUs
-        * multiple times for the NUMA domains.
-        *
-        * Currently only wake_affine_llc() and find_busiest_group()
-        * uses these numbers, only the last is affected by this problem.
-        *
-        * XXX fix that.
-        */
-       WRITE_ONCE(shared->nr_running,  sds->total_running);
-       WRITE_ONCE(shared->load,        sds->total_load);
-       WRITE_ONCE(shared->capacity,    sds->total_capacity);
 }
 
 /**
@@ -8097,6 +8042,13 @@ static int should_we_balance(struct lb_env *env)
        struct sched_group *sg = env->sd->groups;
        int cpu, balance_cpu = -1;
 
+       /*
+        * Ensure the balancing environment is consistent; can happen
+        * when the softirq triggers 'during' hotplug.
+        */
+       if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
+               return 0;
+
        /*
         * In the newly idle case, we will allow all the cpu's
         * to do the newly idle load balance.
index d3fb15555291e0bfb4ce2e837f38cc1676a664d4..319ed0e8a3477fca87edd42afbf7726b4ea36f0a 100644 (file)
@@ -81,3 +81,6 @@ SCHED_FEAT(RT_RUNTIME_SHARE, true)
 SCHED_FEAT(LB_MIN, false)
 SCHED_FEAT(ATTACH_AGE_LOAD, true)
 
+SCHED_FEAT(WA_IDLE, true)
+SCHED_FEAT(WA_WEIGHT, true)
+SCHED_FEAT(WA_BIAS, true)
index a92fddc227471e05a77d9f8c4c585b84ba8f28dd..dd7908743dab696facd9dd32f4399ee952228151 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/membarrier.h>
 #include <linux/tick.h>
 #include <linux/cpumask.h>
+#include <linux/atomic.h>
 
 #include "sched.h"     /* for cpu_rq(). */
 
  * except MEMBARRIER_CMD_QUERY.
  */
 #define MEMBARRIER_CMD_BITMASK \
-       (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED)
+       (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED       \
+       | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED)
 
 static void ipi_mb(void *info)
 {
        smp_mb();       /* IPIs should be serializing but paranoid. */
 }
 
-static void membarrier_private_expedited(void)
+static int membarrier_private_expedited(void)
 {
        int cpu;
        bool fallback = false;
        cpumask_var_t tmpmask;
 
+       if (!(atomic_read(&current->mm->membarrier_state)
+                       & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
+               return -EPERM;
+
        if (num_online_cpus() == 1)
-               return;
+               return 0;
 
        /*
         * Matches memory barriers around rq->curr modification in
@@ -94,6 +100,24 @@ static void membarrier_private_expedited(void)
         * rq->curr modification in scheduler.
         */
        smp_mb();       /* exit from system call is not a mb */
+       return 0;
+}
+
+static void membarrier_register_private_expedited(void)
+{
+       struct task_struct *p = current;
+       struct mm_struct *mm = p->mm;
+
+       /*
+        * We need to consider threads belonging to different thread
+        * groups, which use the same mm. (CLONE_VM but not
+        * CLONE_THREAD).
+        */
+       if (atomic_read(&mm->membarrier_state)
+                       & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)
+               return;
+       atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
+                       &mm->membarrier_state);
 }
 
 /**
@@ -144,7 +168,9 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
                        synchronize_sched();
                return 0;
        case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
-               membarrier_private_expedited();
+               return membarrier_private_expedited();
+       case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
+               membarrier_register_private_expedited();
                return 0;
        default:
                return -EINVAL;
index 98b59b5db90baae53b091afe807c133e6027d081..0ae832e13b974041002c508f03a7c9cc8d6d7a6d 100644 (file)
 #include <linux/audit.h>
 #include <linux/compat.h>
 #include <linux/coredump.h>
+#include <linux/kmemleak.h>
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 #include <linux/seccomp.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
+#include <linux/sysctl.h>
 
 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
 #include <asm/syscall.h>
@@ -42,6 +44,7 @@
  *         get/put helpers should be used when accessing an instance
  *         outside of a lifetime-guarded section.  In general, this
  *         is only needed for handling filters shared across tasks.
+ * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
  * @prev: points to a previously installed, or inherited, filter
  * @prog: the BPF program to evaluate
  *
@@ -57,6 +60,7 @@
  */
 struct seccomp_filter {
        refcount_t usage;
+       bool log;
        struct seccomp_filter *prev;
        struct bpf_prog *prog;
 };
@@ -171,10 +175,15 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
 /**
  * seccomp_run_filters - evaluates all seccomp filters against @sd
  * @sd: optional seccomp data to be passed to filters
+ * @match: stores struct seccomp_filter that resulted in the return value,
+ *         unless filter returned SECCOMP_RET_ALLOW, in which case it will
+ *         be unchanged.
  *
  * Returns valid seccomp BPF response codes.
  */
-static u32 seccomp_run_filters(const struct seccomp_data *sd)
+#define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
+static u32 seccomp_run_filters(const struct seccomp_data *sd,
+                              struct seccomp_filter **match)
 {
        struct seccomp_data sd_local;
        u32 ret = SECCOMP_RET_ALLOW;
@@ -184,7 +193,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd)
 
        /* Ensure unexpected behavior doesn't result in failing open. */
        if (unlikely(WARN_ON(f == NULL)))
-               return SECCOMP_RET_KILL;
+               return SECCOMP_RET_KILL_PROCESS;
 
        if (!sd) {
                populate_seccomp_data(&sd_local);
@@ -198,8 +207,10 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd)
        for (; f; f = f->prev) {
                u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
 
-               if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
+               if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
                        ret = cur_ret;
+                       *match = f;
+               }
        }
        return ret;
 }
@@ -444,6 +455,10 @@ static long seccomp_attach_filter(unsigned int flags,
                        return ret;
        }
 
+       /* Set log flag, if present. */
+       if (flags & SECCOMP_FILTER_FLAG_LOG)
+               filter->log = true;
+
        /*
         * If there is an existing filter, make it the prev and don't drop its
         * task reference.
@@ -458,14 +473,19 @@ static long seccomp_attach_filter(unsigned int flags,
        return 0;
 }
 
+static void __get_seccomp_filter(struct seccomp_filter *filter)
+{
+       /* Reference count is bounded by the number of total processes. */
+       refcount_inc(&filter->usage);
+}
+
 /* get_seccomp_filter - increments the reference count of the filter on @tsk */
 void get_seccomp_filter(struct task_struct *tsk)
 {
        struct seccomp_filter *orig = tsk->seccomp.filter;
        if (!orig)
                return;
-       /* Reference count is bounded by the number of total processes. */
-       refcount_inc(&orig->usage);
+       __get_seccomp_filter(orig);
 }
 
 static inline void seccomp_filter_free(struct seccomp_filter *filter)
@@ -476,10 +496,8 @@ static inline void seccomp_filter_free(struct seccomp_filter *filter)
        }
 }
 
-/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
-void put_seccomp_filter(struct task_struct *tsk)
+static void __put_seccomp_filter(struct seccomp_filter *orig)
 {
-       struct seccomp_filter *orig = tsk->seccomp.filter;
        /* Clean up single-reference branches iteratively. */
        while (orig && refcount_dec_and_test(&orig->usage)) {
                struct seccomp_filter *freeme = orig;
@@ -488,6 +506,12 @@ void put_seccomp_filter(struct task_struct *tsk)
        }
 }
 
+/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
+void put_seccomp_filter(struct task_struct *tsk)
+{
+       __put_seccomp_filter(tsk->seccomp.filter);
+}
+
 static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
 {
        memset(info, 0, sizeof(*info));
@@ -514,6 +538,65 @@ static void seccomp_send_sigsys(int syscall, int reason)
 }
 #endif /* CONFIG_SECCOMP_FILTER */
 
+/* For use with seccomp_actions_logged */
+#define SECCOMP_LOG_KILL_PROCESS       (1 << 0)
+#define SECCOMP_LOG_KILL_THREAD                (1 << 1)
+#define SECCOMP_LOG_TRAP               (1 << 2)
+#define SECCOMP_LOG_ERRNO              (1 << 3)
+#define SECCOMP_LOG_TRACE              (1 << 4)
+#define SECCOMP_LOG_LOG                        (1 << 5)
+#define SECCOMP_LOG_ALLOW              (1 << 6)
+
+static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS |
+                                   SECCOMP_LOG_KILL_THREAD  |
+                                   SECCOMP_LOG_TRAP  |
+                                   SECCOMP_LOG_ERRNO |
+                                   SECCOMP_LOG_TRACE |
+                                   SECCOMP_LOG_LOG;
+
+static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
+                              bool requested)
+{
+       bool log = false;
+
+       switch (action) {
+       case SECCOMP_RET_ALLOW:
+               break;
+       case SECCOMP_RET_TRAP:
+               log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
+               break;
+       case SECCOMP_RET_ERRNO:
+               log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
+               break;
+       case SECCOMP_RET_TRACE:
+               log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
+               break;
+       case SECCOMP_RET_LOG:
+               log = seccomp_actions_logged & SECCOMP_LOG_LOG;
+               break;
+       case SECCOMP_RET_KILL_THREAD:
+               log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD;
+               break;
+       case SECCOMP_RET_KILL_PROCESS:
+       default:
+               log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS;
+       }
+
+       /*
+        * Force an audit message to be emitted when the action is RET_KILL_*,
+        * RET_LOG, or the FILTER_FLAG_LOG bit was set and the action is
+        * allowed to be logged by the admin.
+        */
+       if (log)
+               return __audit_seccomp(syscall, signr, action);
+
+       /*
+        * Let the audit subsystem decide if the action should be audited based
+        * on whether the current task itself is being audited.
+        */
+       return audit_seccomp(syscall, signr, action);
+}
+
 /*
  * Secure computing mode 1 allows only read/write/exit/sigreturn.
  * To be fully secure this must be combined with rlimit
@@ -539,7 +622,7 @@ static void __secure_computing_strict(int this_syscall)
 #ifdef SECCOMP_DEBUG
        dump_stack();
 #endif
-       audit_seccomp(this_syscall, SIGKILL, SECCOMP_RET_KILL);
+       seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
        do_exit(SIGKILL);
 }
 
@@ -566,6 +649,7 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
                            const bool recheck_after_trace)
 {
        u32 filter_ret, action;
+       struct seccomp_filter *match = NULL;
        int data;
 
        /*
@@ -574,9 +658,9 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
         */
        rmb();
 
-       filter_ret = seccomp_run_filters(sd);
+       filter_ret = seccomp_run_filters(sd, &match);
        data = filter_ret & SECCOMP_RET_DATA;
-       action = filter_ret & SECCOMP_RET_ACTION;
+       action = filter_ret & SECCOMP_RET_ACTION_FULL;
 
        switch (action) {
        case SECCOMP_RET_ERRNO:
@@ -637,14 +721,25 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
 
                return 0;
 
+       case SECCOMP_RET_LOG:
+               seccomp_log(this_syscall, 0, action, true);
+               return 0;
+
        case SECCOMP_RET_ALLOW:
+               /*
+                * Note that the "match" filter will always be NULL for
+                * this action since SECCOMP_RET_ALLOW is the starting
+                * state in seccomp_run_filters().
+                */
                return 0;
 
-       case SECCOMP_RET_KILL:
+       case SECCOMP_RET_KILL_THREAD:
+       case SECCOMP_RET_KILL_PROCESS:
        default:
-               audit_seccomp(this_syscall, SIGSYS, action);
+               seccomp_log(this_syscall, SIGSYS, action, true);
                /* Dump core only if this is the last remaining thread. */
-               if (get_nr_threads(current) == 1) {
+               if (action == SECCOMP_RET_KILL_PROCESS ||
+                   get_nr_threads(current) == 1) {
                        siginfo_t info;
 
                        /* Show the original registers in the dump. */
@@ -653,13 +748,16 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
                        seccomp_init_siginfo(&info, this_syscall, data);
                        do_coredump(&info);
                }
-               do_exit(SIGSYS);
+               if (action == SECCOMP_RET_KILL_PROCESS)
+                       do_group_exit(SIGSYS);
+               else
+                       do_exit(SIGSYS);
        }
 
        unreachable();
 
 skip:
-       audit_seccomp(this_syscall, 0, action);
+       seccomp_log(this_syscall, 0, action, match ? match->log : false);
        return -1;
 }
 #else
@@ -794,6 +892,29 @@ static inline long seccomp_set_mode_filter(unsigned int flags,
 }
 #endif
 
+static long seccomp_get_action_avail(const char __user *uaction)
+{
+       u32 action;
+
+       if (copy_from_user(&action, uaction, sizeof(action)))
+               return -EFAULT;
+
+       switch (action) {
+       case SECCOMP_RET_KILL_PROCESS:
+       case SECCOMP_RET_KILL_THREAD:
+       case SECCOMP_RET_TRAP:
+       case SECCOMP_RET_ERRNO:
+       case SECCOMP_RET_TRACE:
+       case SECCOMP_RET_LOG:
+       case SECCOMP_RET_ALLOW:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
 /* Common entry point for both prctl and syscall. */
 static long do_seccomp(unsigned int op, unsigned int flags,
                       const char __user *uargs)
@@ -805,6 +926,11 @@ static long do_seccomp(unsigned int op, unsigned int flags,
                return seccomp_set_mode_strict();
        case SECCOMP_SET_MODE_FILTER:
                return seccomp_set_mode_filter(flags, uargs);
+       case SECCOMP_GET_ACTION_AVAIL:
+               if (flags != 0)
+                       return -EINVAL;
+
+               return seccomp_get_action_avail(uargs);
        default:
                return -EINVAL;
        }
@@ -908,13 +1034,13 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
        if (!data)
                goto out;
 
-       get_seccomp_filter(task);
+       __get_seccomp_filter(filter);
        spin_unlock_irq(&task->sighand->siglock);
 
        if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
                ret = -EFAULT;
 
-       put_seccomp_filter(task);
+       __put_seccomp_filter(filter);
        return ret;
 
 out:
@@ -922,3 +1048,185 @@ out:
        return ret;
 }
 #endif
+
+#ifdef CONFIG_SYSCTL
+
+/* Human readable action names for friendly sysctl interaction */
+#define SECCOMP_RET_KILL_PROCESS_NAME  "kill_process"
+#define SECCOMP_RET_KILL_THREAD_NAME   "kill_thread"
+#define SECCOMP_RET_TRAP_NAME          "trap"
+#define SECCOMP_RET_ERRNO_NAME         "errno"
+#define SECCOMP_RET_TRACE_NAME         "trace"
+#define SECCOMP_RET_LOG_NAME           "log"
+#define SECCOMP_RET_ALLOW_NAME         "allow"
+
+static const char seccomp_actions_avail[] =
+                               SECCOMP_RET_KILL_PROCESS_NAME   " "
+                               SECCOMP_RET_KILL_THREAD_NAME    " "
+                               SECCOMP_RET_TRAP_NAME           " "
+                               SECCOMP_RET_ERRNO_NAME          " "
+                               SECCOMP_RET_TRACE_NAME          " "
+                               SECCOMP_RET_LOG_NAME            " "
+                               SECCOMP_RET_ALLOW_NAME;
+
+struct seccomp_log_name {
+       u32             log;
+       const char      *name;
+};
+
+static const struct seccomp_log_name seccomp_log_names[] = {
+       { SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
+       { SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
+       { SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
+       { SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
+       { SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
+       { SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
+       { SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
+       { }
+};
+
+static bool seccomp_names_from_actions_logged(char *names, size_t size,
+                                             u32 actions_logged)
+{
+       const struct seccomp_log_name *cur;
+       bool append_space = false;
+
+       for (cur = seccomp_log_names; cur->name && size; cur++) {
+               ssize_t ret;
+
+               if (!(actions_logged & cur->log))
+                       continue;
+
+               if (append_space) {
+                       ret = strscpy(names, " ", size);
+                       if (ret < 0)
+                               return false;
+
+                       names += ret;
+                       size -= ret;
+               } else
+                       append_space = true;
+
+               ret = strscpy(names, cur->name, size);
+               if (ret < 0)
+                       return false;
+
+               names += ret;
+               size -= ret;
+       }
+
+       return true;
+}
+
+static bool seccomp_action_logged_from_name(u32 *action_logged,
+                                           const char *name)
+{
+       const struct seccomp_log_name *cur;
+
+       for (cur = seccomp_log_names; cur->name; cur++) {
+               if (!strcmp(cur->name, name)) {
+                       *action_logged = cur->log;
+                       return true;
+               }
+       }
+
+       return false;
+}
+
+static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
+{
+       char *name;
+
+       *actions_logged = 0;
+       while ((name = strsep(&names, " ")) && *name) {
+               u32 action_logged = 0;
+
+               if (!seccomp_action_logged_from_name(&action_logged, name))
+                       return false;
+
+               *actions_logged |= action_logged;
+       }
+
+       return true;
+}
+
+static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
+                                         void __user *buffer, size_t *lenp,
+                                         loff_t *ppos)
+{
+       char names[sizeof(seccomp_actions_avail)];
+       struct ctl_table table;
+       int ret;
+
+       if (write && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       memset(names, 0, sizeof(names));
+
+       if (!write) {
+               if (!seccomp_names_from_actions_logged(names, sizeof(names),
+                                                      seccomp_actions_logged))
+                       return -EINVAL;
+       }
+
+       table = *ro_table;
+       table.data = names;
+       table.maxlen = sizeof(names);
+       ret = proc_dostring(&table, write, buffer, lenp, ppos);
+       if (ret)
+               return ret;
+
+       if (write) {
+               u32 actions_logged;
+
+               if (!seccomp_actions_logged_from_names(&actions_logged,
+                                                      table.data))
+                       return -EINVAL;
+
+               if (actions_logged & SECCOMP_LOG_ALLOW)
+                       return -EINVAL;
+
+               seccomp_actions_logged = actions_logged;
+       }
+
+       return 0;
+}
+
+static struct ctl_path seccomp_sysctl_path[] = {
+       { .procname = "kernel", },
+       { .procname = "seccomp", },
+       { }
+};
+
+static struct ctl_table seccomp_sysctl_table[] = {
+       {
+               .procname       = "actions_avail",
+               .data           = (void *) &seccomp_actions_avail,
+               .maxlen         = sizeof(seccomp_actions_avail),
+               .mode           = 0444,
+               .proc_handler   = proc_dostring,
+       },
+       {
+               .procname       = "actions_logged",
+               .mode           = 0644,
+               .proc_handler   = seccomp_actions_logged_handler,
+       },
+       { }
+};
+
+static int __init seccomp_sysctl_init(void)
+{
+       struct ctl_table_header *hdr;
+
+       hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
+       if (!hdr)
+               pr_warn("seccomp: sysctl registration failed\n");
+       else
+               kmemleak_not_leak(hdr);
+
+       return 0;
+}
+
+device_initcall(seccomp_sysctl_init)
+
+#endif /* CONFIG_SYSCTL */
index 1d71c051a9515c6acecd4be823465a4760e7cefa..5043e7433f4b15879a6498ed3d1ca6cfa2876f83 100644 (file)
@@ -344,39 +344,30 @@ EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
  * by the client, but only by calling this function.
  * This function can only be called on a registered smp_hotplug_thread.
  */
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-                                        const struct cpumask *new)
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+                                         const struct cpumask *new)
 {
        struct cpumask *old = plug_thread->cpumask;
-       cpumask_var_t tmp;
+       static struct cpumask tmp;
        unsigned int cpu;
 
-       if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
-               return -ENOMEM;
-
-       get_online_cpus();
+       lockdep_assert_cpus_held();
        mutex_lock(&smpboot_threads_lock);
 
        /* Park threads that were exclusively enabled on the old mask. */
-       cpumask_andnot(tmp, old, new);
-       for_each_cpu_and(cpu, tmp, cpu_online_mask)
+       cpumask_andnot(&tmp, old, new);
+       for_each_cpu_and(cpu, &tmp, cpu_online_mask)
                smpboot_park_thread(plug_thread, cpu);
 
        /* Unpark threads that are exclusively enabled on the new mask. */
-       cpumask_andnot(tmp, new, old);
-       for_each_cpu_and(cpu, tmp, cpu_online_mask)
+       cpumask_andnot(&tmp, new, old);
+       for_each_cpu_and(cpu, &tmp, cpu_online_mask)
                smpboot_unpark_thread(plug_thread, cpu);
 
        cpumask_copy(old, new);
 
        mutex_unlock(&smpboot_threads_lock);
-       put_online_cpus();
-
-       free_cpumask_var(tmp);
-
-       return 0;
 }
-EXPORT_SYMBOL_GPL(smpboot_update_cpumask_percpu_thread);
 
 static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
 
index 6648fbbb8157fc12703d02fa0fdc9ff85c527ac4..d9c31bc2eaea2c95a7a7be5a5321700b84d8f640 100644 (file)
@@ -367,7 +367,8 @@ static struct ctl_table kern_table[] = {
                .data           = &sysctl_sched_time_avg,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
        },
 #ifdef CONFIG_SCHEDSTATS
        {
@@ -871,9 +872,9 @@ static struct ctl_table kern_table[] = {
 #if defined(CONFIG_LOCKUP_DETECTOR)
        {
                .procname       = "watchdog",
-               .data           = &watchdog_user_enabled,
-               .maxlen         = sizeof (int),
-               .mode           = 0644,
+               .data           = &watchdog_user_enabled,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
                .proc_handler   = proc_watchdog,
                .extra1         = &zero,
                .extra2         = &one,
@@ -889,16 +890,12 @@ static struct ctl_table kern_table[] = {
        },
        {
                .procname       = "nmi_watchdog",
-               .data           = &nmi_watchdog_enabled,
-               .maxlen         = sizeof (int),
-               .mode           = 0644,
+               .data           = &nmi_watchdog_user_enabled,
+               .maxlen         = sizeof(int),
+               .mode           = NMI_WATCHDOG_SYSCTL_PERM,
                .proc_handler   = proc_nmi_watchdog,
                .extra1         = &zero,
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
                .extra2         = &one,
-#else
-               .extra2         = &zero,
-#endif
        },
        {
                .procname       = "watchdog_cpumask",
@@ -910,9 +907,9 @@ static struct ctl_table kern_table[] = {
 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
        {
                .procname       = "soft_watchdog",
-               .data           = &soft_watchdog_enabled,
-               .maxlen         = sizeof (int),
-               .mode           = 0644,
+               .data           = &soft_watchdog_user_enabled,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
                .proc_handler   = proc_soft_watchdog,
                .extra1         = &zero,
                .extra2         = &one,
@@ -2185,8 +2182,6 @@ static int do_proc_douintvec_conv(unsigned long *lvalp,
                                  int write, void *data)
 {
        if (write) {
-               if (*lvalp > UINT_MAX)
-                       return -EINVAL;
                if (*lvalp > UINT_MAX)
                        return -EINVAL;
                *valp = *lvalp;
index 2a685b45b73be4159bd310d8c4530a87df9e4cec..45a3928544ce599183c415471f4624be0583ea40 100644 (file)
@@ -648,6 +648,12 @@ int blk_trace_startstop(struct request_queue *q, int start)
 }
 EXPORT_SYMBOL_GPL(blk_trace_startstop);
 
+/*
+ * When reading or writing the blktrace sysfs files, the references to the
+ * opened sysfs or device files should prevent the underlying block device
+ * from being removed. So no further delete protection is really needed.
+ */
+
 /**
  * blk_trace_ioctl: - handle the ioctls associated with tracing
  * @bdev:      the block device
@@ -665,7 +671,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
        if (!q)
                return -ENXIO;
 
-       mutex_lock(&bdev->bd_mutex);
+       mutex_lock(&q->blk_trace_mutex);
 
        switch (cmd) {
        case BLKTRACESETUP:
@@ -691,7 +697,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
                break;
        }
 
-       mutex_unlock(&bdev->bd_mutex);
+       mutex_unlock(&q->blk_trace_mutex);
        return ret;
 }
 
@@ -1727,7 +1733,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
        if (q == NULL)
                goto out_bdput;
 
-       mutex_lock(&bdev->bd_mutex);
+       mutex_lock(&q->blk_trace_mutex);
 
        if (attr == &dev_attr_enable) {
                ret = sprintf(buf, "%u\n", !!q->blk_trace);
@@ -1746,7 +1752,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
                ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
 
 out_unlock_bdev:
-       mutex_unlock(&bdev->bd_mutex);
+       mutex_unlock(&q->blk_trace_mutex);
 out_bdput:
        bdput(bdev);
 out:
@@ -1788,7 +1794,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
        if (q == NULL)
                goto out_bdput;
 
-       mutex_lock(&bdev->bd_mutex);
+       mutex_lock(&q->blk_trace_mutex);
 
        if (attr == &dev_attr_enable) {
                if (value)
@@ -1814,7 +1820,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
        }
 
 out_unlock_bdev:
-       mutex_unlock(&bdev->bd_mutex);
+       mutex_unlock(&q->blk_trace_mutex);
 out_bdput:
        bdput(bdev);
 out:
index 6abfafd7f173c49ab60221768654a7b08f7d7f22..8319e09e15b945f14f9046edeb885e173ef26652 100644 (file)
@@ -4954,9 +4954,6 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
 
-static unsigned long save_global_trampoline;
-static unsigned long save_global_flags;
-
 static int __init set_graph_function(char *str)
 {
        strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
@@ -6808,17 +6805,6 @@ void unregister_ftrace_graph(void)
        unregister_pm_notifier(&ftrace_suspend_notifier);
        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-       /*
-        * Function graph does not allocate the trampoline, but
-        * other global_ops do. We need to reset the ALLOC_TRAMP flag
-        * if one was used.
-        */
-       global_ops.trampoline = save_global_trampoline;
-       if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
-               global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
-#endif
-
  out:
        mutex_unlock(&ftrace_lock);
 }
index 5360b7aec57ac1ccd1eea33b090f67ccb4b4e8cf..752e5daf0896fc529876f8801a95e23118713338 100644 (file)
@@ -4020,11 +4020,17 @@ static int tracing_open(struct inode *inode, struct file *file)
        /* If this file was open for write, then erase contents */
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                int cpu = tracing_get_cpu(inode);
+               struct trace_buffer *trace_buf = &tr->trace_buffer;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+               if (tr->current_trace->print_max)
+                       trace_buf = &tr->max_buffer;
+#endif
 
                if (cpu == RING_BUFFER_ALL_CPUS)
-                       tracing_reset_online_cpus(&tr->trace_buffer);
+                       tracing_reset_online_cpus(trace_buf);
                else
-                       tracing_reset(&tr->trace_buffer, cpu);
+                       tracing_reset(trace_buf, cpu);
        }
 
        if (file->f_mode & FMODE_READ) {
@@ -5358,6 +5364,13 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
        if (t == tr->current_trace)
                goto out;
 
+       /* Some tracers won't work on kernel command line */
+       if (system_state < SYSTEM_RUNNING && t->noboot) {
+               pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
+                       t->name);
+               goto out;
+       }
+
        /* Some tracers are only allowed for the top level buffer */
        if (!trace_ok_for_array(t, tr)) {
                ret = -EINVAL;
@@ -5667,7 +5680,7 @@ static int tracing_wait_pipe(struct file *filp)
                 *
                 * iter->pos will be 0 if we haven't read anything.
                 */
-               if (!tracing_is_on() && iter->pos)
+               if (!tracer_tracing_is_on(iter->tr) && iter->pos)
                        break;
 
                mutex_unlock(&iter->mutex);
index fb5d54d0d1b3f297087e96f03a22003fc887d359..652c682707cdd4f489a00c3b14df0b97ae8d2425 100644 (file)
@@ -444,6 +444,8 @@ struct tracer {
 #ifdef CONFIG_TRACER_MAX_TRACE
        bool                    use_max_tr;
 #endif
+       /* True if tracer cannot be enabled in kernel param */
+       bool                    noboot;
 };
 
 
index cd7480d0a201e335cd7adc20c9cb0a8dba16f189..dca78fc48439d21a80cd6576269eded8bfe13623 100644 (file)
@@ -282,6 +282,7 @@ static struct tracer mmio_tracer __read_mostly =
        .close          = mmio_close,
        .read           = mmio_read,
        .print_line     = mmio_print_line,
+       .noboot         = true,
 };
 
 __init static int init_mmio_trace(void)
index bac629af2285748bbdac64b6b5c632f2fcaed49d..c738e764e2a55cfd3303a3262d748c94a917a86d 100644 (file)
@@ -656,15 +656,6 @@ int trace_print_lat_context(struct trace_iterator *iter)
        return !trace_seq_has_overflowed(s);
 }
 
-static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
-
-static int task_state_char(unsigned long state)
-{
-       int bit = state ? __ffs(state) + 1 : 0;
-
-       return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
-}
-
 /**
  * ftrace_find_event - find a registered event
  * @type: the type of event to look for
@@ -930,8 +921,8 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
 
        trace_assign_type(field, iter->ent);
 
-       T = task_state_char(field->next_state);
-       S = task_state_char(field->prev_state);
+       T = __task_state_to_char(field->next_state);
+       S = __task_state_to_char(field->prev_state);
        trace_find_cmdline(field->next_pid, comm);
        trace_seq_printf(&iter->seq,
                         " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
@@ -966,8 +957,8 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
        trace_assign_type(field, iter->ent);
 
        if (!S)
-               S = task_state_char(field->prev_state);
-       T = task_state_char(field->next_state);
+               S = __task_state_to_char(field->prev_state);
+       T = __task_state_to_char(field->next_state);
        trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
                         field->prev_pid,
                         field->prev_prio,
@@ -1002,8 +993,8 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
        trace_assign_type(field, iter->ent);
 
        if (!S)
-               S = task_state_char(field->prev_state);
-       T = task_state_char(field->next_state);
+               S = __task_state_to_char(field->prev_state);
+       T = __task_state_to_char(field->next_state);
 
        SEQ_PUT_HEX_FIELD(s, field->prev_pid);
        SEQ_PUT_HEX_FIELD(s, field->prev_prio);
index ddec53b6764617e8fe431d93bf9f970a5015b4be..0c331978b1a636e9f2736f77bd93e97415c1ca10 100644 (file)
@@ -397,10 +397,10 @@ tracing_sched_switch_trace(struct trace_array *tr,
        entry   = ring_buffer_event_data(event);
        entry->prev_pid                 = prev->pid;
        entry->prev_prio                = prev->prio;
-       entry->prev_state               = prev->state;
+       entry->prev_state               = __get_task_state(prev);
        entry->next_pid                 = next->pid;
        entry->next_prio                = next->prio;
-       entry->next_state               = next->state;
+       entry->next_state               = __get_task_state(next);
        entry->next_cpu = task_cpu(next);
 
        if (!call_filter_check_discard(call, entry, buffer, event))
@@ -425,10 +425,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        entry   = ring_buffer_event_data(event);
        entry->prev_pid                 = curr->pid;
        entry->prev_prio                = curr->prio;
-       entry->prev_state               = curr->state;
+       entry->prev_state               = __get_task_state(curr);
        entry->next_pid                 = wakee->pid;
        entry->next_prio                = wakee->prio;
-       entry->next_state               = wakee->state;
+       entry->next_state               = __get_task_state(wakee);
        entry->next_cpu                 = task_cpu(wakee);
 
        if (!call_filter_check_discard(call, entry, buffer, event))
index a4df67cbc711459df2aa8b4a313eb449a472ee45..49cb41412eece8bb7dff9bedf5fd7991ade07b76 100644 (file)
@@ -96,23 +96,9 @@ check_stack(unsigned long ip, unsigned long *stack)
        if (in_nmi())
                return;
 
-       /*
-        * There's a slight chance that we are tracing inside the
-        * RCU infrastructure, and rcu_irq_enter() will not work
-        * as expected.
-        */
-       if (unlikely(rcu_irq_enter_disabled()))
-               return;
-
        local_irq_save(flags);
        arch_spin_lock(&stack_trace_max_lock);
 
-       /*
-        * RCU may not be watching, make it see us.
-        * The stack trace code uses rcu_sched.
-        */
-       rcu_irq_enter();
-
        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;
@@ -205,7 +191,6 @@ check_stack(unsigned long ip, unsigned long *stack)
        }
 
  out:
-       rcu_irq_exit();
        arch_spin_unlock(&stack_trace_max_lock);
        local_irq_restore(flags);
 }
index f5d52024f6b72a9d1354b1a44c12c3e3b6af06a9..6bcb854909c0b6f1563469ae3517fa1f89c43268 100644 (file)
 #include <linux/kvm_para.h>
 #include <linux/kthread.h>
 
-/* Watchdog configuration */
-static DEFINE_MUTEX(watchdog_proc_mutex);
-
-int __read_mostly nmi_watchdog_enabled;
+static DEFINE_MUTEX(watchdog_mutex);
 
 #if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
-unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED |
-                                               NMI_WATCHDOG_ENABLED;
+# define WATCHDOG_DEFAULT      (SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
+# define NMI_WATCHDOG_DEFAULT  1
 #else
-unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
+# define WATCHDOG_DEFAULT      (SOFT_WATCHDOG_ENABLED)
+# define NMI_WATCHDOG_DEFAULT  0
 #endif
 
+unsigned long __read_mostly watchdog_enabled;
+int __read_mostly watchdog_user_enabled = 1;
+int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
+int __read_mostly soft_watchdog_user_enabled = 1;
+int __read_mostly watchdog_thresh = 10;
+int __read_mostly nmi_watchdog_available;
+
+struct cpumask watchdog_allowed_mask __read_mostly;
+
+struct cpumask watchdog_cpumask __read_mostly;
+unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
+
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
-/* boot commands */
 /*
  * Should we panic when a soft-lockup or hard-lockup occurs:
  */
@@ -56,9 +65,9 @@ unsigned int __read_mostly hardlockup_panic =
  * kernel command line parameters are parsed, because otherwise it is not
  * possible to override this in hardlockup_panic_setup().
  */
-void hardlockup_detector_disable(void)
+void __init hardlockup_detector_disable(void)
 {
-       watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+       nmi_watchdog_user_enabled = 0;
 }
 
 static int __init hardlockup_panic_setup(char *str)
@@ -68,48 +77,24 @@ static int __init hardlockup_panic_setup(char *str)
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
-               watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+               nmi_watchdog_user_enabled = 0;
        else if (!strncmp(str, "1", 1))
-               watchdog_enabled |= NMI_WATCHDOG_ENABLED;
+               nmi_watchdog_user_enabled = 1;
        return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
 
-#endif
-
-#ifdef CONFIG_SOFTLOCKUP_DETECTOR
-int __read_mostly soft_watchdog_enabled;
-#endif
-
-int __read_mostly watchdog_user_enabled;
-int __read_mostly watchdog_thresh = 10;
-
-#ifdef CONFIG_SMP
-int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+# ifdef CONFIG_SMP
 int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
-#endif
-struct cpumask watchdog_cpumask __read_mostly;
-unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
 
-/*
- * The 'watchdog_running' variable is set to 1 when the watchdog threads
- * are registered/started and is set to 0 when the watchdog threads are
- * unregistered/stopped, so it is an indicator whether the threads exist.
- */
-static int __read_mostly watchdog_running;
-/*
- * If a subsystem has a need to deactivate the watchdog temporarily, it
- * can use the suspend/resume interface to achieve this. The content of
- * the 'watchdog_suspended' variable reflects this state. Existing threads
- * are parked/unparked by the lockup_detector_{suspend|resume} functions
- * (see comment blocks pertaining to those functions for further details).
- *
- * 'watchdog_suspended' also prevents threads from being registered/started
- * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
- * of 'watchdog_running' cannot change while the watchdog is deactivated
- * temporarily (see related code in 'proc' handlers).
- */
-int __read_mostly watchdog_suspended;
+static int __init hardlockup_all_cpu_backtrace_setup(char *str)
+{
+       sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
+       return 1;
+}
+__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
+# endif /* CONFIG_SMP */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
 /*
  * These functions can be overridden if an architecture implements its
@@ -121,36 +106,68 @@ int __read_mostly watchdog_suspended;
  */
 int __weak watchdog_nmi_enable(unsigned int cpu)
 {
+       hardlockup_detector_perf_enable();
        return 0;
 }
+
 void __weak watchdog_nmi_disable(unsigned int cpu)
 {
+       hardlockup_detector_perf_disable();
 }
 
-/*
- * watchdog_nmi_reconfigure can be implemented to be notified after any
- * watchdog configuration change. The arch hardlockup watchdog should
- * respond to the following variables:
- * - nmi_watchdog_enabled
+/* Return 0, if a NMI watchdog is available. Error code otherwise */
+int __weak __init watchdog_nmi_probe(void)
+{
+       return hardlockup_detector_perf_init();
+}
+
+/**
+ * watchdog_nmi_stop - Stop the watchdog for reconfiguration
+ *
+ * The reconfiguration steps are:
+ * watchdog_nmi_stop();
+ * update_variables();
+ * watchdog_nmi_start();
+ */
+void __weak watchdog_nmi_stop(void) { }
+
+/**
+ * watchdog_nmi_start - Start the watchdog after reconfiguration
+ *
+ * Counterpart to watchdog_nmi_stop().
+ *
+ * The following variables have been updated in update_variables() and
+ * contain the currently valid configuration:
+ * - watchdog_enabled
  * - watchdog_thresh
  * - watchdog_cpumask
- * - sysctl_hardlockup_all_cpu_backtrace
- * - hardlockup_panic
- * - watchdog_suspended
  */
-void __weak watchdog_nmi_reconfigure(void)
+void __weak watchdog_nmi_start(void) { }
+
+/**
+ * lockup_detector_update_enable - Update the sysctl enable bit
+ *
+ * Caller needs to make sure that the NMI/perf watchdogs are off, so this
+ * can't race with watchdog_nmi_disable().
+ */
+static void lockup_detector_update_enable(void)
 {
+       watchdog_enabled = 0;
+       if (!watchdog_user_enabled)
+               return;
+       if (nmi_watchdog_available && nmi_watchdog_user_enabled)
+               watchdog_enabled |= NMI_WATCHDOG_ENABLED;
+       if (soft_watchdog_user_enabled)
+               watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
 }
 
-
 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
 
-/* Helper for online, unparked cpus. */
-#define for_each_watchdog_cpu(cpu) \
-       for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
-
-atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+/* Global variables, exported for sysctl */
+unsigned int __read_mostly softlockup_panic =
+                       CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
 
+static bool softlockup_threads_initialized __read_mostly;
 static u64 __read_mostly sample_period;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -164,50 +181,40 @@ static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static unsigned long soft_lockup_nmi_warn;
 
-unsigned int __read_mostly softlockup_panic =
-                       CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
-
 static int __init softlockup_panic_setup(char *str)
 {
        softlockup_panic = simple_strtoul(str, NULL, 0);
-
        return 1;
 }
 __setup("softlockup_panic=", softlockup_panic_setup);
 
 static int __init nowatchdog_setup(char *str)
 {
-       watchdog_enabled = 0;
+       watchdog_user_enabled = 0;
        return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
 
 static int __init nosoftlockup_setup(char *str)
 {
-       watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
+       soft_watchdog_user_enabled = 0;
        return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
 
 #ifdef CONFIG_SMP
+int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+
 static int __init softlockup_all_cpu_backtrace_setup(char *str)
 {
-       sysctl_softlockup_all_cpu_backtrace =
-               !!simple_strtol(str, NULL, 0);
+       sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
        return 1;
 }
 __setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static int __init hardlockup_all_cpu_backtrace_setup(char *str)
-{
-       sysctl_hardlockup_all_cpu_backtrace =
-               !!simple_strtol(str, NULL, 0);
-       return 1;
-}
-__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
-#endif
 #endif
 
+static void __lockup_detector_cleanup(void);
+
 /*
  * Hard-lockup warnings should be triggered after just a few seconds. Soft-
  * lockups can have false positives under extreme conditions. So we generally
@@ -278,11 +285,15 @@ void touch_all_softlockup_watchdogs(void)
        int cpu;
 
        /*
-        * this is done lockless
-        * do we care if a 0 races with a timestamp?
-        * all it means is the softlock check starts one cycle later
+        * watchdog_mutex cannot be taken here, as this might be called
+        * from (soft)interrupt context, so the access to
+        * watchdog_allowed_mask might race with a concurrent update.
+        *
+        * The watchdog time stamp can race against a concurrent real
+        * update as well, the only side effect might be a cycle delay for
+        * the softlockup check.
         */
-       for_each_watchdog_cpu(cpu)
+       for_each_cpu(cpu, &watchdog_allowed_mask)
                per_cpu(watchdog_touch_ts, cpu) = 0;
        wq_watchdog_touch(-1);
 }
@@ -322,9 +333,6 @@ static void watchdog_interrupt_count(void)
        __this_cpu_inc(hrtimer_interrupts);
 }
 
-static int watchdog_enable_all_cpus(void);
-static void watchdog_disable_all_cpus(void);
-
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
@@ -333,7 +341,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
-       if (atomic_read(&watchdog_park_in_progress) != 0)
+       if (!watchdog_enabled)
                return HRTIMER_NORESTART;
 
        /* kick the hardlockup detector */
@@ -447,32 +455,38 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio)
 
 static void watchdog_enable(unsigned int cpu)
 {
-       struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+       struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
 
-       /* kick off the timer for the hardlockup detector */
+       /*
+        * Start the timer first to prevent the NMI watchdog triggering
+        * before the timer has a chance to fire.
+        */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;
-
-       /* Enable the perf event */
-       watchdog_nmi_enable(cpu);
-
-       /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);
 
-       /* initialize timestamp */
-       watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
+       /* Initialize timestamp */
        __touch_watchdog();
+       /* Enable the perf event */
+       if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
+               watchdog_nmi_enable(cpu);
+
+       watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
 }
 
 static void watchdog_disable(unsigned int cpu)
 {
-       struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+       struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
 
        watchdog_set_prio(SCHED_NORMAL, 0);
-       hrtimer_cancel(hrtimer);
-       /* disable the perf event */
+       /*
+        * Disable the perf event first. That prevents that a large delay
+        * between disabling the timer and disabling the perf event causes
+        * the perf NMI to detect a false positive.
+        */
        watchdog_nmi_disable(cpu);
+       hrtimer_cancel(hrtimer);
 }
 
 static void watchdog_cleanup(unsigned int cpu, bool online)
@@ -499,21 +513,6 @@ static void watchdog(unsigned int cpu)
        __this_cpu_write(soft_lockup_hrtimer_cnt,
                         __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();
-
-       /*
-        * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
-        * failure path. Check for failures that can occur asynchronously -
-        * for example, when CPUs are on-lined - and shut down the hardware
-        * perf event on each CPU accordingly.
-        *
-        * The only non-obvious place this bit can be cleared is through
-        * watchdog_nmi_enable(), so a pr_info() is placed there.  Placing a
-        * pr_info here would be too noisy as it would result in a message
-        * every few seconds if the hardlockup was disabled but the softlockup
-        * enabled.
-        */
-       if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
-               watchdog_nmi_disable(cpu);
 }
 
 static struct smp_hotplug_thread watchdog_threads = {
@@ -527,295 +526,174 @@ static struct smp_hotplug_thread watchdog_threads = {
        .unpark                 = watchdog_enable,
 };
 
-/*
- * park all watchdog threads that are specified in 'watchdog_cpumask'
- *
- * This function returns an error if kthread_park() of a watchdog thread
- * fails. In this situation, the watchdog threads of some CPUs can already
- * be parked and the watchdog threads of other CPUs can still be runnable.
- * Callers are expected to handle this special condition as appropriate in
- * their context.
- *
- * This function may only be called in a context that is protected against
- * races with CPU hotplug - for example, via get_online_cpus().
- */
-static int watchdog_park_threads(void)
+static void softlockup_update_smpboot_threads(void)
 {
-       int cpu, ret = 0;
+       lockdep_assert_held(&watchdog_mutex);
 
-       atomic_set(&watchdog_park_in_progress, 1);
+       if (!softlockup_threads_initialized)
+               return;
 
-       for_each_watchdog_cpu(cpu) {
-               ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
-               if (ret)
-                       break;
-       }
-
-       atomic_set(&watchdog_park_in_progress, 0);
-
-       return ret;
+       smpboot_update_cpumask_percpu_thread(&watchdog_threads,
+                                            &watchdog_allowed_mask);
 }
 
-/*
- * unpark all watchdog threads that are specified in 'watchdog_cpumask'
- *
- * This function may only be called in a context that is protected against
- * races with CPU hotplug - for example, via get_online_cpus().
- */
-static void watchdog_unpark_threads(void)
+/* Temporarily park all watchdog threads */
+static void softlockup_park_all_threads(void)
 {
-       int cpu;
-
-       for_each_watchdog_cpu(cpu)
-               kthread_unpark(per_cpu(softlockup_watchdog, cpu));
+       cpumask_clear(&watchdog_allowed_mask);
+       softlockup_update_smpboot_threads();
 }
 
-static int update_watchdog_all_cpus(void)
+/* Unpark enabled threads */
+static void softlockup_unpark_threads(void)
 {
-       int ret;
-
-       ret = watchdog_park_threads();
-       if (ret)
-               return ret;
-
-       watchdog_unpark_threads();
-
-       return 0;
+       cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
+       softlockup_update_smpboot_threads();
 }
 
-static int watchdog_enable_all_cpus(void)
+static void lockup_detector_reconfigure(void)
 {
-       int err = 0;
-
-       if (!watchdog_running) {
-               err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
-                                                            &watchdog_cpumask);
-               if (err)
-                       pr_err("Failed to create watchdog threads, disabled\n");
-               else
-                       watchdog_running = 1;
-       } else {
-               /*
-                * Enable/disable the lockup detectors or
-                * change the sample period 'on the fly'.
-                */
-               err = update_watchdog_all_cpus();
-
-               if (err) {
-                       watchdog_disable_all_cpus();
-                       pr_err("Failed to update lockup detectors, disabled\n");
-               }
-       }
-
-       if (err)
-               watchdog_enabled = 0;
-
-       return err;
+       cpus_read_lock();
+       watchdog_nmi_stop();
+       softlockup_park_all_threads();
+       set_sample_period();
+       lockup_detector_update_enable();
+       if (watchdog_enabled && watchdog_thresh)
+               softlockup_unpark_threads();
+       watchdog_nmi_start();
+       cpus_read_unlock();
+       /*
+        * Must be called outside the cpus locked section to prevent
+        * recursive locking in the perf code.
+        */
+       __lockup_detector_cleanup();
 }
 
-static void watchdog_disable_all_cpus(void)
+/*
+ * Create the watchdog thread infrastructure and configure the detector(s).
+ *
+ * The threads are not unparked as watchdog_allowed_mask is empty.  When
+ * the threads are successfully initialized, take the proper locks and
+ * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
+ */
+static __init void lockup_detector_setup(void)
 {
-       if (watchdog_running) {
-               watchdog_running = 0;
-               smpboot_unregister_percpu_thread(&watchdog_threads);
-       }
-}
+       int ret;
 
-#ifdef CONFIG_SYSCTL
-static int watchdog_update_cpus(void)
-{
-       return smpboot_update_cpumask_percpu_thread(
-                   &watchdog_threads, &watchdog_cpumask);
-}
-#endif
+       /*
+        * If sysctl is off and watchdog got disabled on the command line,
+        * nothing to do here.
+        */
+       lockup_detector_update_enable();
 
-#else /* SOFTLOCKUP */
-static int watchdog_park_threads(void)
-{
-       return 0;
-}
+       if (!IS_ENABLED(CONFIG_SYSCTL) &&
+           !(watchdog_enabled && watchdog_thresh))
+               return;
 
-static void watchdog_unpark_threads(void)
-{
-}
+       ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
+                                                    &watchdog_allowed_mask);
+       if (ret) {
+               pr_err("Failed to initialize soft lockup detector threads\n");
+               return;
+       }
 
-static int watchdog_enable_all_cpus(void)
-{
-       return 0;
+       mutex_lock(&watchdog_mutex);
+       softlockup_threads_initialized = true;
+       lockup_detector_reconfigure();
+       mutex_unlock(&watchdog_mutex);
 }
 
-static void watchdog_disable_all_cpus(void)
+#else /* CONFIG_SOFTLOCKUP_DETECTOR */
+static inline int watchdog_park_threads(void) { return 0; }
+static inline void watchdog_unpark_threads(void) { }
+static inline int watchdog_enable_all_cpus(void) { return 0; }
+static inline void watchdog_disable_all_cpus(void) { }
+static void lockup_detector_reconfigure(void)
 {
+       cpus_read_lock();
+       watchdog_nmi_stop();
+       lockup_detector_update_enable();
+       watchdog_nmi_start();
+       cpus_read_unlock();
 }
-
-#ifdef CONFIG_SYSCTL
-static int watchdog_update_cpus(void)
+static inline void lockup_detector_setup(void)
 {
-       return 0;
+       lockup_detector_reconfigure();
 }
-#endif
+#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
 
-static void set_sample_period(void)
+static void __lockup_detector_cleanup(void)
 {
+       lockdep_assert_held(&watchdog_mutex);
+       hardlockup_detector_perf_cleanup();
 }
-#endif /* SOFTLOCKUP */
 
-/*
- * Suspend the hard and soft lockup detector by parking the watchdog threads.
+/**
+ * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
+ *
+ * Caller must not hold the cpu hotplug rwsem.
  */
-int lockup_detector_suspend(void)
+void lockup_detector_cleanup(void)
 {
-       int ret = 0;
-
-       get_online_cpus();
-       mutex_lock(&watchdog_proc_mutex);
-       /*
-        * Multiple suspend requests can be active in parallel (counted by
-        * the 'watchdog_suspended' variable). If the watchdog threads are
-        * running, the first caller takes care that they will be parked.
-        * The state of 'watchdog_running' cannot change while a suspend
-        * request is active (see related code in 'proc' handlers).
-        */
-       if (watchdog_running && !watchdog_suspended)
-               ret = watchdog_park_threads();
-
-       if (ret == 0)
-               watchdog_suspended++;
-       else {
-               watchdog_disable_all_cpus();
-               pr_err("Failed to suspend lockup detectors, disabled\n");
-               watchdog_enabled = 0;
-       }
-
-       watchdog_nmi_reconfigure();
-
-       mutex_unlock(&watchdog_proc_mutex);
-
-       return ret;
+       mutex_lock(&watchdog_mutex);
+       __lockup_detector_cleanup();
+       mutex_unlock(&watchdog_mutex);
 }
 
-/*
- * Resume the hard and soft lockup detector by unparking the watchdog threads.
+/**
+ * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
+ *
+ * Special interface for parisc. It prevents lockup detector warnings from
+ * the default pm_poweroff() function which busy loops forever.
  */
-void lockup_detector_resume(void)
+void lockup_detector_soft_poweroff(void)
 {
-       mutex_lock(&watchdog_proc_mutex);
-
-       watchdog_suspended--;
-       /*
-        * The watchdog threads are unparked if they were previously running
-        * and if there is no more active suspend request.
-        */
-       if (watchdog_running && !watchdog_suspended)
-               watchdog_unpark_threads();
-
-       watchdog_nmi_reconfigure();
-
-       mutex_unlock(&watchdog_proc_mutex);
-       put_online_cpus();
+       watchdog_enabled = 0;
 }
 
 #ifdef CONFIG_SYSCTL
 
-/*
- * Update the run state of the lockup detectors.
- */
-static int proc_watchdog_update(void)
+/* Propagate any changes to the watchdog threads */
+static void proc_watchdog_update(void)
 {
-       int err = 0;
-
-       /*
-        * Watchdog threads won't be started if they are already active.
-        * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
-        * care of this. If those threads are already active, the sample
-        * period will be updated and the lockup detectors will be enabled
-        * or disabled 'on the fly'.
-        */
-       if (watchdog_enabled && watchdog_thresh)
-               err = watchdog_enable_all_cpus();
-       else
-               watchdog_disable_all_cpus();
-
-       watchdog_nmi_reconfigure();
-
-       return err;
-
+       /* Remove impossible cpus to keep sysctl output clean. */
+       cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
+       lockup_detector_reconfigure();
 }
 
 /*
  * common function for watchdog, nmi_watchdog and soft_watchdog parameter
  *
- * caller             | table->data points to | 'which' contains the flag(s)
- * -------------------|-----------------------|-----------------------------
- * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
- *                    |                       | with SOFT_WATCHDOG_ENABLED
- * -------------------|-----------------------|-----------------------------
- * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
- * -------------------|-----------------------|-----------------------------
- * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
+ * caller             | table->data points to      | 'which'
+ * -------------------|----------------------------|--------------------------
+ * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
+ *                    |                            | SOFT_WATCHDOG_ENABLED
+ * -------------------|----------------------------|--------------------------
+ * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
+ * -------------------|----------------------------|--------------------------
+ * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
  */
 static int proc_watchdog_common(int which, struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       int err, old, new;
-       int *watchdog_param = (int *)table->data;
+       int err, old, *param = table->data;
 
-       get_online_cpus();
-       mutex_lock(&watchdog_proc_mutex);
+       mutex_lock(&watchdog_mutex);
 
-       if (watchdog_suspended) {
-               /* no parameter changes allowed while watchdog is suspended */
-               err = -EAGAIN;
-               goto out;
-       }
-
-       /*
-        * If the parameter is being read return the state of the corresponding
-        * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
-        * run state of the lockup detectors.
-        */
        if (!write) {
-               *watchdog_param = (watchdog_enabled & which) != 0;
+               /*
+                * On read synchronize the userspace interface. This is a
+                * racy snapshot.
+                */
+               *param = (watchdog_enabled & which) != 0;
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        } else {
+               old = READ_ONCE(*param);
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-               if (err)
-                       goto out;
-
-               /*
-                * There is a race window between fetching the current value
-                * from 'watchdog_enabled' and storing the new value. During
-                * this race window, watchdog_nmi_enable() can sneak in and
-                * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
-                * The 'cmpxchg' detects this race and the loop retries.
-                */
-               do {
-                       old = watchdog_enabled;
-                       /*
-                        * If the parameter value is not zero set the
-                        * corresponding bit(s), else clear it(them).
-                        */
-                       if (*watchdog_param)
-                               new = old | which;
-                       else
-                               new = old & ~which;
-               } while (cmpxchg(&watchdog_enabled, old, new) != old);
-
-               /*
-                * Update the run state of the lockup detectors. There is _no_
-                * need to check the value returned by proc_watchdog_update()
-                * and to restore the previous value of 'watchdog_enabled' as
-                * both lockup detectors are disabled if proc_watchdog_update()
-                * returns an error.
-                */
-               if (old == new)
-                       goto out;
-
-               err = proc_watchdog_update();
+               if (!err && old != READ_ONCE(*param))
+                       proc_watchdog_update();
        }
-out:
-       mutex_unlock(&watchdog_proc_mutex);
-       put_online_cpus();
+       mutex_unlock(&watchdog_mutex);
        return err;
 }
 
@@ -835,6 +713,8 @@ int proc_watchdog(struct ctl_table *table, int write,
 int proc_nmi_watchdog(struct ctl_table *table, int write,
                      void __user *buffer, size_t *lenp, loff_t *ppos)
 {
+       if (!nmi_watchdog_available && write)
+               return -ENOTSUPP;
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
 }
@@ -855,39 +735,17 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
 int proc_watchdog_thresh(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       int err, old, new;
-
-       get_online_cpus();
-       mutex_lock(&watchdog_proc_mutex);
+       int err, old;
 
-       if (watchdog_suspended) {
-               /* no parameter changes allowed while watchdog is suspended */
-               err = -EAGAIN;
-               goto out;
-       }
+       mutex_lock(&watchdog_mutex);
 
-       old = ACCESS_ONCE(watchdog_thresh);
+       old = READ_ONCE(watchdog_thresh);
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 
-       if (err || !write)
-               goto out;
-
-       /*
-        * Update the sample period. Restore on failure.
-        */
-       new = ACCESS_ONCE(watchdog_thresh);
-       if (old == new)
-               goto out;
+       if (!err && write && old != READ_ONCE(watchdog_thresh))
+               proc_watchdog_update();
 
-       set_sample_period();
-       err = proc_watchdog_update();
-       if (err) {
-               watchdog_thresh = old;
-               set_sample_period();
-       }
-out:
-       mutex_unlock(&watchdog_proc_mutex);
-       put_online_cpus();
+       mutex_unlock(&watchdog_mutex);
        return err;
 }
 
@@ -902,45 +760,19 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write,
 {
        int err;
 
-       get_online_cpus();
-       mutex_lock(&watchdog_proc_mutex);
-
-       if (watchdog_suspended) {
-               /* no parameter changes allowed while watchdog is suspended */
-               err = -EAGAIN;
-               goto out;
-       }
+       mutex_lock(&watchdog_mutex);
 
        err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
-       if (!err && write) {
-               /* Remove impossible cpus to keep sysctl output cleaner. */
-               cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
-                           cpu_possible_mask);
-
-               if (watchdog_running) {
-                       /*
-                        * Failure would be due to being unable to allocate
-                        * a temporary cpumask, so we are likely not in a
-                        * position to do much else to make things better.
-                        */
-                       if (watchdog_update_cpus() != 0)
-                               pr_err("cpumask update failed\n");
-               }
+       if (!err && write)
+               proc_watchdog_update();
 
-               watchdog_nmi_reconfigure();
-       }
-out:
-       mutex_unlock(&watchdog_proc_mutex);
-       put_online_cpus();
+       mutex_unlock(&watchdog_mutex);
        return err;
 }
-
 #endif /* CONFIG_SYSCTL */
 
 void __init lockup_detector_init(void)
 {
-       set_sample_period();
-
 #ifdef CONFIG_NO_HZ_FULL
        if (tick_nohz_full_enabled()) {
                pr_info("Disabling watchdog on nohz_full cores by default\n");
@@ -951,6 +783,7 @@ void __init lockup_detector_init(void)
        cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
 #endif
 
-       if (watchdog_enabled)
-               watchdog_enable_all_cpus();
+       if (!watchdog_nmi_probe())
+               nmi_watchdog_available = true;
+       lockup_detector_setup();
 }
index 3a09ea1b1d3d5e6e284d058052403ac1396804ca..71a62ceacdc883aa77a711e4b1bfba8c3185dceb 100644 (file)
 static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+static struct cpumask dead_events_mask;
 
 static unsigned long hardlockup_allcpu_dumped;
+static unsigned int watchdog_cpus;
 
 void arch_touch_nmi_watchdog(void)
 {
@@ -103,15 +105,12 @@ static struct perf_event_attr wd_hw_attr = {
 
 /* Callback function for perf event subsystem */
 static void watchdog_overflow_callback(struct perf_event *event,
-                struct perf_sample_data *data,
-                struct pt_regs *regs)
+                                      struct perf_sample_data *data,
+                                      struct pt_regs *regs)
 {
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;
 
-       if (atomic_read(&watchdog_park_in_progress) != 0)
-               return;
-
        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
@@ -160,104 +159,131 @@ static void watchdog_overflow_callback(struct perf_event *event,
        return;
 }
 
-/*
- * People like the simple clean cpu node info on boot.
- * Reduce the watchdog noise by only printing messages
- * that are different from what cpu0 displayed.
- */
-static unsigned long firstcpu_err;
-static atomic_t watchdog_cpus;
-
-int watchdog_nmi_enable(unsigned int cpu)
+static int hardlockup_detector_event_create(void)
 {
+       unsigned int cpu = smp_processor_id();
        struct perf_event_attr *wd_attr;
-       struct perf_event *event = per_cpu(watchdog_ev, cpu);
-       int firstcpu = 0;
-
-       /* nothing to do if the hard lockup detector is disabled */
-       if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
-               goto out;
-
-       /* is it already setup and enabled? */
-       if (event && event->state > PERF_EVENT_STATE_OFF)
-               goto out;
-
-       /* it is setup but not enabled */
-       if (event != NULL)
-               goto out_enable;
-
-       if (atomic_inc_return(&watchdog_cpus) == 1)
-               firstcpu = 1;
+       struct perf_event *evt;
 
        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
 
        /* Try to register using hardware perf events */
-       event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
+       evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
+                                              watchdog_overflow_callback, NULL);
+       if (IS_ERR(evt)) {
+               pr_info("Perf event create on CPU %d failed with %ld\n", cpu,
+                       PTR_ERR(evt));
+               return PTR_ERR(evt);
+       }
+       this_cpu_write(watchdog_ev, evt);
+       return 0;
+}
 
-       /* save the first cpu's error for future comparision */
-       if (firstcpu && IS_ERR(event))
-               firstcpu_err = PTR_ERR(event);
+/**
+ * hardlockup_detector_perf_enable - Enable the local event
+ */
+void hardlockup_detector_perf_enable(void)
+{
+       if (hardlockup_detector_event_create())
+               return;
 
-       if (!IS_ERR(event)) {
-               /* only print for the first cpu initialized */
-               if (firstcpu || firstcpu_err)
-                       pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
-               goto out_save;
-       }
+       if (!watchdog_cpus++)
+               pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
 
-       /*
-        * Disable the hard lockup detector if _any_ CPU fails to set up
-        * set up the hardware perf event. The watchdog() function checks
-        * the NMI_WATCHDOG_ENABLED bit periodically.
-        *
-        * The barriers are for syncing up watchdog_enabled across all the
-        * cpus, as clear_bit() does not use barriers.
-        */
-       smp_mb__before_atomic();
-       clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
-       smp_mb__after_atomic();
-
-       /* skip displaying the same error again */
-       if (!firstcpu && (PTR_ERR(event) == firstcpu_err))
-               return PTR_ERR(event);
-
-       /* vary the KERN level based on the returned errno */
-       if (PTR_ERR(event) == -EOPNOTSUPP)
-               pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
-       else if (PTR_ERR(event) == -ENOENT)
-               pr_warn("disabled (cpu%i): hardware events not enabled\n",
-                        cpu);
-       else
-               pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
-                       cpu, PTR_ERR(event));
-
-       pr_info("Shutting down hard lockup detector on all cpus\n");
-
-       return PTR_ERR(event);
-
-       /* success path */
-out_save:
-       per_cpu(watchdog_ev, cpu) = event;
-out_enable:
-       perf_event_enable(per_cpu(watchdog_ev, cpu));
-out:
-       return 0;
+       perf_event_enable(this_cpu_read(watchdog_ev));
 }
 
-void watchdog_nmi_disable(unsigned int cpu)
+/**
+ * hardlockup_detector_perf_disable - Disable the local event
+ */
+void hardlockup_detector_perf_disable(void)
 {
-       struct perf_event *event = per_cpu(watchdog_ev, cpu);
+       struct perf_event *event = this_cpu_read(watchdog_ev);
 
        if (event) {
                perf_event_disable(event);
+               cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
+               watchdog_cpus--;
+       }
+}
+
+/**
+ * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
+ *
+ * Called from lockup_detector_cleanup(). Serialized by the caller.
+ */
+void hardlockup_detector_perf_cleanup(void)
+{
+       int cpu;
+
+       for_each_cpu(cpu, &dead_events_mask) {
+               struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+               /*
+                * Required because for_each_cpu() reports  unconditionally
+                * CPU0 as set on UP kernels. Sigh.
+                */
+               if (event)
+                       perf_event_release_kernel(event);
                per_cpu(watchdog_ev, cpu) = NULL;
+       }
+       cpumask_clear(&dead_events_mask);
+}
+
+/**
+ * hardlockup_detector_perf_stop - Globally stop watchdog events
+ *
+ * Special interface for x86 to handle the perf HT bug.
+ */
+void __init hardlockup_detector_perf_stop(void)
+{
+       int cpu;
+
+       lockdep_assert_cpus_held();
+
+       for_each_online_cpu(cpu) {
+               struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+               if (event)
+                       perf_event_disable(event);
+       }
+}
 
-               /* should be in cleanup, but blocks oprofile */
-               perf_event_release_kernel(event);
+/**
+ * hardlockup_detector_perf_restart - Globally restart watchdog events
+ *
+ * Special interface for x86 to handle the perf HT bug.
+ */
+void __init hardlockup_detector_perf_restart(void)
+{
+       int cpu;
+
+       lockdep_assert_cpus_held();
+
+       if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+               return;
+
+       for_each_online_cpu(cpu) {
+               struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+               if (event)
+                       perf_event_enable(event);
+       }
+}
+
+/**
+ * hardlockup_detector_perf_init - Probe whether NMI event is available at all
+ */
+int __init hardlockup_detector_perf_init(void)
+{
+       int ret = hardlockup_detector_event_create();
 
-               /* watchdog_nmi_enable() expects this to be zero initially. */
-               if (atomic_dec_and_test(&watchdog_cpus))
-                       firstcpu_err = 0;
+       if (ret) {
+               pr_info("Perf NMI watchdog permanently disabled\n");
+       } else {
+               perf_event_release_kernel(this_cpu_read(watchdog_ev));
+               this_cpu_write(watchdog_ev, NULL);
        }
+       return ret;
 }
index 64d0edf428f850f2e5cfed94970cb74491eb6b61..a2dccfe1acec34bbda97a292344b997055e56d9a 100644 (file)
@@ -68,6 +68,7 @@ enum {
         * attach_mutex to avoid changing binding state while
         * worker_attach_to_pool() is in progress.
         */
+       POOL_MANAGER_ACTIVE     = 1 << 0,       /* being managed */
        POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
 
        /* worker flags */
@@ -165,7 +166,6 @@ struct worker_pool {
                                                /* L: hash of busy workers */
 
        /* see manage_workers() for details on the two manager mutexes */
-       struct mutex            manager_arb;    /* manager arbitration */
        struct worker           *manager;       /* L: purely informational */
        struct mutex            attach_mutex;   /* attach/detach exclusion */
        struct list_head        workers;        /* A: attached workers */
@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);    /* protects pools and workqueues list */
 static DEFINE_SPINLOCK(wq_mayday_lock);        /* protects wq->maydays list */
+static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
 
 static LIST_HEAD(workqueues);          /* PR: list of all workqueues */
 static bool workqueue_freezing;                /* PL: have wqs started freezing? */
@@ -801,7 +802,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
 /* Do we have too many workers and should some go away? */
 static bool too_many_workers(struct worker_pool *pool)
 {
-       bool managing = mutex_is_locked(&pool->manager_arb);
+       bool managing = pool->flags & POOL_MANAGER_ACTIVE;
        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
        int nr_busy = pool->nr_workers - nr_idle;
 
@@ -1980,24 +1981,17 @@ static bool manage_workers(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
 
-       /*
-        * Anyone who successfully grabs manager_arb wins the arbitration
-        * and becomes the manager.  mutex_trylock() on pool->manager_arb
-        * failure while holding pool->lock reliably indicates that someone
-        * else is managing the pool and the worker which failed trylock
-        * can proceed to executing work items.  This means that anyone
-        * grabbing manager_arb is responsible for actually performing
-        * manager duties.  If manager_arb is grabbed and released without
-        * actual management, the pool may stall indefinitely.
-        */
-       if (!mutex_trylock(&pool->manager_arb))
+       if (pool->flags & POOL_MANAGER_ACTIVE)
                return false;
+
+       pool->flags |= POOL_MANAGER_ACTIVE;
        pool->manager = worker;
 
        maybe_create_worker(pool);
 
        pool->manager = NULL;
-       mutex_unlock(&pool->manager_arb);
+       pool->flags &= ~POOL_MANAGER_ACTIVE;
+       wake_up(&wq_manager_wait);
        return true;
 }
 
@@ -3248,7 +3242,6 @@ static int init_worker_pool(struct worker_pool *pool)
        setup_timer(&pool->mayday_timer, pool_mayday_timeout,
                    (unsigned long)pool);
 
-       mutex_init(&pool->manager_arb);
        mutex_init(&pool->attach_mutex);
        INIT_LIST_HEAD(&pool->workers);
 
@@ -3318,13 +3311,15 @@ static void put_unbound_pool(struct worker_pool *pool)
        hash_del(&pool->hash_node);
 
        /*
-        * Become the manager and destroy all workers.  Grabbing
-        * manager_arb prevents @pool's workers from blocking on
-        * attach_mutex.
+        * Become the manager and destroy all workers.  This prevents
+        * @pool's workers from blocking on attach_mutex.  We're the last
+        * manager and @pool gets freed with the flag set.
         */
-       mutex_lock(&pool->manager_arb);
-
        spin_lock_irq(&pool->lock);
+       wait_event_lock_irq(wq_manager_wait,
+                           !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+       pool->flags |= POOL_MANAGER_ACTIVE;
+
        while ((worker = first_idle_worker(pool)))
                destroy_worker(worker);
        WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3338,8 +3333,6 @@ static void put_unbound_pool(struct worker_pool *pool)
        if (pool->detach_completion)
                wait_for_completion(pool->detach_completion);
 
-       mutex_unlock(&pool->manager_arb);
-
        /* shut down the timers */
        del_timer_sync(&pool->idle_timer);
        del_timer_sync(&pool->mayday_timer);
index b19c491cbc4e7934ab5bc78549c95423b91b2f7a..dfdad67d8f6cce470171986addcad7b61c09f382 100644 (file)
@@ -219,7 +219,8 @@ config FRAME_WARN
        range 0 8192
        default 0 if KASAN
        default 2048 if GCC_PLUGIN_LATENT_ENTROPY
-       default 1024 if !64BIT
+       default 1280 if (!64BIT && PARISC)
+       default 1024 if (!64BIT && !PARISC)
        default 2048 if 64BIT
        help
          Tell gcc to warn at build time for stack frames larger than this.
@@ -1091,8 +1092,8 @@ config PROVE_LOCKING
        select DEBUG_MUTEXES
        select DEBUG_RT_MUTEXES if RT_MUTEXES
        select DEBUG_LOCK_ALLOC
-       select LOCKDEP_CROSSRELEASE
-       select LOCKDEP_COMPLETIONS
+       select LOCKDEP_CROSSRELEASE if BROKEN
+       select LOCKDEP_COMPLETIONS if BROKEN
        select TRACE_IRQFLAGS
        default n
        help
@@ -1589,6 +1590,54 @@ config LATENCYTOP
 
 source kernel/trace/Kconfig
 
+config PROVIDE_OHCI1394_DMA_INIT
+       bool "Remote debugging over FireWire early on boot"
+       depends on PCI && X86
+       help
+         If you want to debug problems which hang or crash the kernel early
+         on boot and the crashing machine has a FireWire port, you can use
+         this feature to remotely access the memory of the crashed machine
+         over FireWire. This employs remote DMA as part of the OHCI1394
+         specification which is now the standard for FireWire controllers.
+
+         With remote DMA, you can monitor the printk buffer remotely using
+         firescope and access all memory below 4GB using fireproxy from gdb.
+         Even controlling a kernel debugger is possible using remote DMA.
+
+         Usage:
+
+         If ohci1394_dma=early is used as boot parameter, it will initialize
+         all OHCI1394 controllers which are found in the PCI config space.
+
+         As all changes to the FireWire bus such as enabling and disabling
+         devices cause a bus reset and thereby disable remote DMA for all
+         devices, be sure to have the cable plugged and FireWire enabled on
+         the debugging host before booting the debug target for debugging.
+
+         This code (~1k) is freed after boot. By then, the firewire stack
+         in charge of the OHCI-1394 controllers should be used instead.
+
+         See Documentation/debugging-via-ohci1394.txt for more information.
+
+config DMA_API_DEBUG
+       bool "Enable debugging of DMA-API usage"
+       depends on HAVE_DMA_API_DEBUG
+       help
+         Enable this option to debug the use of the DMA API by device drivers.
+         With this option you will be able to detect common bugs in device
+         drivers like double-freeing of DMA mappings or freeing mappings that
+         were never allocated.
+
+         This also attempts to catch cases where a page owned by DMA is
+         accessed by the cpu in a way that could cause data corruption.  For
+         example, this enables cow_user_page() to check that the source page is
+         not undergoing DMA.
+
+         This option causes a performance degradation.  Use only if you want to
+         debug device drivers and dma interactions.
+
+         If unsure, say N.
+
 menu "Runtime Testing"
 
 config LKDTM
@@ -1748,56 +1797,6 @@ config TEST_PARMAN
 
          If unsure, say N.
 
-endmenu # runtime tests
-
-config PROVIDE_OHCI1394_DMA_INIT
-       bool "Remote debugging over FireWire early on boot"
-       depends on PCI && X86
-       help
-         If you want to debug problems which hang or crash the kernel early
-         on boot and the crashing machine has a FireWire port, you can use
-         this feature to remotely access the memory of the crashed machine
-         over FireWire. This employs remote DMA as part of the OHCI1394
-         specification which is now the standard for FireWire controllers.
-
-         With remote DMA, you can monitor the printk buffer remotely using
-         firescope and access all memory below 4GB using fireproxy from gdb.
-         Even controlling a kernel debugger is possible using remote DMA.
-
-         Usage:
-
-         If ohci1394_dma=early is used as boot parameter, it will initialize
-         all OHCI1394 controllers which are found in the PCI config space.
-
-         As all changes to the FireWire bus such as enabling and disabling
-         devices cause a bus reset and thereby disable remote DMA for all
-         devices, be sure to have the cable plugged and FireWire enabled on
-         the debugging host before booting the debug target for debugging.
-
-         This code (~1k) is freed after boot. By then, the firewire stack
-         in charge of the OHCI-1394 controllers should be used instead.
-
-         See Documentation/debugging-via-ohci1394.txt for more information.
-
-config DMA_API_DEBUG
-       bool "Enable debugging of DMA-API usage"
-       depends on HAVE_DMA_API_DEBUG
-       help
-         Enable this option to debug the use of the DMA API by device drivers.
-         With this option you will be able to detect common bugs in device
-         drivers like double-freeing of DMA mappings or freeing mappings that
-         were never allocated.
-
-         This also attempts to catch cases where a page owned by DMA is
-         accessed by the cpu in a way that could cause data corruption.  For
-         example, this enables cow_user_page() to check that the source page is
-         not undergoing DMA.
-
-         This option causes a performance degradation.  Use only if you want to
-         debug device drivers and dma interactions.
-
-         If unsure, say N.
-
 config TEST_LKM
        tristate "Test module loading with 'hello world' module"
        default n
@@ -1872,18 +1871,6 @@ config TEST_UDELAY
 
          If unsure, say N.
 
-config MEMTEST
-       bool "Memtest"
-       depends on HAVE_MEMBLOCK
-       ---help---
-         This option adds a kernel parameter 'memtest', which allows memtest
-         to be set.
-               memtest=0, mean disabled; -- default
-               memtest=1, mean do 1 test pattern;
-               ...
-               memtest=17, mean do 17 test patterns.
-         If you are unsure how to answer this question, answer N.
-
 config TEST_STATIC_KEYS
        tristate "Test static keys"
        default n
@@ -1893,16 +1880,6 @@ config TEST_STATIC_KEYS
 
          If unsure, say N.
 
-config BUG_ON_DATA_CORRUPTION
-       bool "Trigger a BUG when data corruption is detected"
-       select DEBUG_LIST
-       help
-         Select this option if the kernel should BUG when it encounters
-         data corruption in kernel memory structures when they get checked
-         for validity.
-
-         If unsure, say N.
-
 config TEST_KMOD
        tristate "kmod stress tester"
        default n
@@ -1940,6 +1917,29 @@ config TEST_DEBUG_VIRTUAL
 
          If unsure, say N.
 
+endmenu # runtime tests
+
+config MEMTEST
+       bool "Memtest"
+       depends on HAVE_MEMBLOCK
+       ---help---
+         This option adds a kernel parameter 'memtest', which allows memtest
+         to be set.
+               memtest=0, mean disabled; -- default
+               memtest=1, mean do 1 test pattern;
+               ...
+               memtest=17, mean do 17 test patterns.
+         If you are unsure how to answer this question, answer N.
+
+config BUG_ON_DATA_CORRUPTION
+       bool "Trigger a BUG when data corruption is detected"
+       select DEBUG_LIST
+       help
+         Select this option if the kernel should BUG when it encounters
+         data corruption in kernel memory structures when they get checked
+         for validity.
+
+         If unsure, say N.
 
 source "samples/Kconfig"
 
index 155c55d8db5fccb3ded0047d1fb1fcfef7673072..4e53be8bc590dc2030a930aec5a2cac8c4fa6a30 100644 (file)
@@ -598,21 +598,31 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
                if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
                        goto all_leaves_cluster_together;
 
-               /* Otherwise we can just insert a new node ahead of the old
-                * one.
+               /* Otherwise all the old leaves cluster in the same slot, but
+                * the new leaf wants to go into a different slot - so we
+                * create a new node (n0) to hold the new leaf and a pointer to
+                * a new node (n1) holding all the old leaves.
+                *
+                * This can be done by falling through to the node splitting
+                * path.
                 */
-               goto present_leaves_cluster_but_not_new_leaf;
+               pr_devel("present leaves cluster but not new leaf\n");
        }
 
 split_node:
        pr_devel("split node\n");
 
-       /* We need to split the current node; we know that the node doesn't
-        * simply contain a full set of leaves that cluster together (it
-        * contains meta pointers and/or non-clustering leaves).
+       /* We need to split the current node.  The node must contain anything
+        * from a single leaf (in the one leaf case, this leaf will cluster
+        * with the new leaf) and the rest meta-pointers, to all leaves, some
+        * of which may cluster.
+        *
+        * It won't contain the case in which all the current leaves plus the
+        * new leaves want to cluster in the same slot.
         *
         * We need to expel at least two leaves out of a set consisting of the
-        * leaves in the node and the new leaf.
+        * leaves in the node and the new leaf.  The current meta pointers can
+        * just be copied as they shouldn't cluster with any of the leaves.
         *
         * We need a new node (n0) to replace the current one and a new node to
         * take the expelled nodes (n1).
@@ -717,33 +727,6 @@ found_slot_for_multiple_occupancy:
        pr_devel("<--%s() = ok [split node]\n", __func__);
        return true;
 
-present_leaves_cluster_but_not_new_leaf:
-       /* All the old leaves cluster in the same slot, but the new leaf wants
-        * to go into a different slot, so we create a new node to hold the new
-        * leaf and a pointer to a new node holding all the old leaves.
-        */
-       pr_devel("present leaves cluster but not new leaf\n");
-
-       new_n0->back_pointer = node->back_pointer;
-       new_n0->parent_slot = node->parent_slot;
-       new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
-       new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
-       new_n1->parent_slot = edit->segment_cache[0];
-       new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
-       edit->adjust_count_on = new_n0;
-
-       for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
-               new_n1->slots[i] = node->slots[i];
-
-       new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
-       edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
-
-       edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
-       edit->set[0].to = assoc_array_node_to_ptr(new_n0);
-       edit->excised_meta[0] = assoc_array_node_to_ptr(node);
-       pr_devel("<--%s() = ok [insert node before]\n", __func__);
-       return true;
-
 all_leaves_cluster_together:
        /* All the leaves, new and old, want to cluster together in this node
         * in the same slot, so we have to replace this node with a shortcut to
index 03d7c63837aecb36f74037212e9e7dd421e2e4c9..6ba6fcd92dd10cd2c78898b35c65a0540b19bfb3 100644 (file)
@@ -87,6 +87,12 @@ static int digsig_verify_rsa(struct key *key,
        down_read(&key->sem);
        ukp = user_key_payload_locked(key);
 
+       if (!ukp) {
+               /* key was revoked before we acquired its semaphore */
+               err = -EKEYREVOKED;
+               goto err1;
+       }
+
        if (ukp->datalen < sizeof(*pkh))
                goto err1;
 
index f9adf4805fd740c3283c7b5b1361e12052869aba..edd9b2be1651fa99ae3a6274bda38c1bd7159a64 100644 (file)
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -146,8 +146,8 @@ EXPORT_SYMBOL(idr_get_next_ext);
  * idr_alloc() and idr_remove() (as long as the ID being removed is not
  * the one being replaced!).
  *
- * Returns: 0 on success.  %-ENOENT indicates that @id was not found.
- * %-EINVAL indicates that @id or @ptr were not valid.
+ * Returns: the old value on success.  %-ENOENT indicates that @id was not
+ * found.  %-EINVAL indicates that @id or @ptr were not valid.
  */
 void *idr_replace(struct idr *idr, void *ptr, int id)
 {
index 52c8dd6d8e8290fa8df614e0017542a600b9c536..1c1c06ddc20a8a961d7d9f73e43d65b695636d5d 100644 (file)
@@ -687,8 +687,10 @@ EXPORT_SYMBOL(_copy_from_iter_full_nocache);
 
 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 {
-       size_t v = n + offset;
-       if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page))))
+       struct page *head = compound_head(page);
+       size_t v = n + offset + page_address(page) - page_address(head);
+
+       if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
                return true;
        WARN_ON(1);
        return false;
index e590523ea4761425df5e112a2c2aab873dbaa90d..f237a09a58627bfd4df2628c173f57afd89a83de 100644 (file)
@@ -294,6 +294,26 @@ static void cleanup_uevent_env(struct subprocess_info *info)
 }
 #endif
 
+static void zap_modalias_env(struct kobj_uevent_env *env)
+{
+       static const char modalias_prefix[] = "MODALIAS=";
+       int i;
+
+       for (i = 0; i < env->envp_idx;) {
+               if (strncmp(env->envp[i], modalias_prefix,
+                           sizeof(modalias_prefix) - 1)) {
+                       i++;
+                       continue;
+               }
+
+               if (i != env->envp_idx - 1)
+                       memmove(&env->envp[i], &env->envp[i + 1],
+                               sizeof(env->envp[i]) * env->envp_idx - 1);
+
+               env->envp_idx--;
+       }
+}
+
 /**
  * kobject_uevent_env - send an uevent with environmental data
  *
@@ -409,16 +429,29 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
                }
        }
 
-       /*
-        * Mark "add" and "remove" events in the object to ensure proper
-        * events to userspace during automatic cleanup. If the object did
-        * send an "add" event, "remove" will automatically generated by
-        * the core, if not already done by the caller.
-        */
-       if (action == KOBJ_ADD)
+       switch (action) {
+       case KOBJ_ADD:
+               /*
+                * Mark "add" event so we can make sure we deliver "remove"
+                * event to userspace during automatic cleanup. If
+                * the object did send an "add" event, "remove" will
+                * automatically generated by the core, if not already done
+                * by the caller.
+                */
                kobj->state_add_uevent_sent = 1;
-       else if (action == KOBJ_REMOVE)
+               break;
+
+       case KOBJ_REMOVE:
                kobj->state_remove_uevent_sent = 1;
+               break;
+
+       case KOBJ_UNBIND:
+               zap_modalias_env(env);
+               break;
+
+       default:
+               break;
+       }
 
        mutex_lock(&uevent_sock_mutex);
        /* we will send an event, so request a new sequence number */
index cd0b5c964bd041853a74116780c4446c4dc355f9..2b827b8a1d8c9945dfa04dc94bfda82216e045f8 100644 (file)
@@ -2031,11 +2031,13 @@ void locking_selftest(void)
        print_testname("mixed read-lock/lock-write ABBA");
        pr_cont("             |");
        dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
+#ifdef CONFIG_PROVE_LOCKING
        /*
         * Lockdep does indeed fail here, but there's nothing we can do about
         * that now.  Don't kill lockdep for it.
         */
        unexpected_testcase_failures--;
+#endif
 
        pr_cont("             |");
        dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
index bd3574312b827c606fe93c8d0a8a8c0ca0c6542d..141734d255e4b941f888e32ee53885c26cfc8fbd 100644 (file)
@@ -85,8 +85,8 @@ static FORCE_INLINE int LZ4_decompress_generic(
        const BYTE * const lowLimit = lowPrefix - dictSize;
 
        const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
-       const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
-       const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
+       static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
+       static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
 
        const int safeDecode = (endOnInput == endOnInputSize);
        const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
index 08f8043cac619d4dc59efda9aa2830514ba1c199..d01f4713523904b6c1da5022eb273f227b129095 100644 (file)
@@ -48,7 +48,9 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
        if (time_is_before_jiffies(rs->begin + rs->interval)) {
                if (rs->missed) {
                        if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
-                               pr_warn("%s: %d callbacks suppressed\n", func, rs->missed);
+                               printk_deferred(KERN_WARNING
+                                               "%s: %d callbacks suppressed\n",
+                                               func, rs->missed);
                                rs->missed = 0;
                        }
                }
index 707ca5d677c676a599442604d918c215d7709138..ddd7dde87c3ca0db910d67a567f2a68c8d8e847e 100644 (file)
@@ -735,9 +735,9 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
  * rhashtable_walk_start - Start a hash table walk
  * @iter:      Hash table iterator
  *
- * Start a hash table walk.  Note that we take the RCU lock in all
- * cases including when we return an error.  So you must always call
- * rhashtable_walk_stop to clean up.
+ * Start a hash table walk at the current iterator position.  Note that we take
+ * the RCU lock in all cases including when we return an error.  So you must
+ * always call rhashtable_walk_stop to clean up.
  *
  * Returns zero if successful.
  *
@@ -846,7 +846,8 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_next);
  * rhashtable_walk_stop - Finish a hash table walk
  * @iter:      Hash table iterator
  *
- * Finish a hash table walk.
+ * Finish a hash table walk.  Does not reset the iterator to the start of the
+ * hash table.
  */
 void rhashtable_walk_stop(struct rhashtable_iter *iter)
        __releases(RCU)
index 5696a35184e4a3a086573c6020b9f32fcede5fe2..69557c74ef9f8e4dbe1994dce8d1099456717cf2 100644 (file)
@@ -11,7 +11,7 @@
  * ==========================================================================
  *
  *   A finite state machine consists of n states (struct ts_fsm_token)
- *   representing the pattern as a finite automation. The data is read
+ *   representing the pattern as a finite automaton. The data is read
  *   sequentially on an octet basis. Every state token specifies the number
  *   of recurrences and the type of value accepted which can be either a
  *   specific character or ctype based set of characters. The available
index 632f783e65f1cfef531dba412f0568fd9f7849e8..ffbe66cbb0ed60ce9c602ed3d2dc6e4b643155e3 100644 (file)
@@ -27,7 +27,7 @@
  *
  *   [1] Cormen, Leiserson, Rivest, Stein
  *       Introdcution to Algorithms, 2nd Edition, MIT Press
- *   [2] See finite automation theory
+ *   [2] See finite automaton theory
  */
 
 #include <linux/module.h>
index c0da318c020e6c6d666ac8cf7cb92b2d4a47ceae..022e52bd83703e50408cc170b5ba91bcdb038aff 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -460,7 +460,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 
        trace_cma_alloc(pfn, page, count, align);
 
-       if (ret) {
+       if (ret && !(gfp_mask & __GFP_NOWARN)) {
                pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
                        __func__, count, ret);
                cma_debug_show_areas(cma);
index fb548e4c7bd4b44f426a8e45c5230508c57d36e6..03d31a87534160f23eadb0aefeb48feca89191ab 100644 (file)
@@ -1999,17 +1999,14 @@ void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
        if (pgdat->kcompactd_max_order < order)
                pgdat->kcompactd_max_order = order;
 
-       /*
-        * Pairs with implicit barrier in wait_event_freezable()
-        * such that wakeups are not missed in the lockless
-        * waitqueue_active() call.
-        */
-       smp_acquire__after_ctrl_dep();
-
        if (pgdat->kcompactd_classzone_idx > classzone_idx)
                pgdat->kcompactd_classzone_idx = classzone_idx;
 
-       if (!waitqueue_active(&pgdat->kcompactd_wait))
+       /*
+        * Pairs with implicit barrier in wait_event_freezable()
+        * such that wakeups are not missed.
+        */
+       if (!wq_has_sleeper(&pgdat->kcompactd_wait))
                return;
 
        if (!kcompactd_node_suitable(pgdat))
index 870971e209670c99a335bc05903f7f12326b9882..594d73fef8b43bae852f4f7ace1e8cfc46b23690 100644 (file)
@@ -620,6 +620,14 @@ int file_check_and_advance_wb_err(struct file *file)
                trace_file_check_and_advance_wb_err(file, old);
                spin_unlock(&file->f_lock);
        }
+
+       /*
+        * We're mostly using this function as a drop in replacement for
+        * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
+        * that the legacy code would have had on these flags.
+        */
+       clear_bit(AS_EIO, &mapping->flags);
+       clear_bit(AS_ENOSPC, &mapping->flags);
        return err;
 }
 EXPORT_SYMBOL(file_check_and_advance_wb_err);
@@ -2926,9 +2934,15 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
         * we're writing.  Either one is a pretty crazy thing to do,
         * so we don't support it 100%.  If this invalidation
         * fails, tough, the write still worked...
+        *
+        * Most of the time we do not need this since dio_complete() will do
+        * the invalidation for us. However there are some file systems that
+        * do not end up with dio_complete() being called, so let's not break
+        * them by removing it completely
         */
-       invalidate_inode_pages2_range(mapping,
-                               pos >> PAGE_SHIFT, end);
+       if (mapping->nrpages)
+               invalidate_inode_pages2_range(mapping,
+                                       pos >> PAGE_SHIFT, end);
 
        if (written > 0) {
                pos += written;
index 15dd7415f7b3f1a1b418db0e38b074771d2bec2f..6cb60f46cce55761b0ff9d3523be69a706523972 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1990,6 +1990,7 @@ static void stable_tree_append(struct rmap_item *rmap_item,
  */
 static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 {
+       struct mm_struct *mm = rmap_item->mm;
        struct rmap_item *tree_rmap_item;
        struct page *tree_page = NULL;
        struct stable_node *stable_node;
@@ -2062,9 +2063,11 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
        if (ksm_use_zero_pages && (checksum == zero_checksum)) {
                struct vm_area_struct *vma;
 
-               vma = find_mergeable_vma(rmap_item->mm, rmap_item->address);
+               down_read(&mm->mmap_sem);
+               vma = find_mergeable_vma(mm, rmap_item->address);
                err = try_to_merge_one_page(vma, page,
                                            ZERO_PAGE(rmap_item->address));
+               up_read(&mm->mmap_sem);
                /*
                 * In case of failure, the page was not really empty, so we
                 * need to continue. Otherwise we're done.
index 7a40fa2be858acbc79cc79d887c1af575a3d2026..f141f0c80ff338f9c2a5a5244afca23440f9bdc3 100644 (file)
@@ -325,12 +325,12 @@ static int memcg_init_list_lru_node(struct list_lru_node *nlru)
 {
        int size = memcg_nr_cache_ids;
 
-       nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
+       nlru->memcg_lrus = kvmalloc(size * sizeof(void *), GFP_KERNEL);
        if (!nlru->memcg_lrus)
                return -ENOMEM;
 
        if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
-               kfree(nlru->memcg_lrus);
+               kvfree(nlru->memcg_lrus);
                return -ENOMEM;
        }
 
@@ -340,7 +340,7 @@ static int memcg_init_list_lru_node(struct list_lru_node *nlru)
 static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
 {
        __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
-       kfree(nlru->memcg_lrus);
+       kvfree(nlru->memcg_lrus);
 }
 
 static int memcg_update_list_lru_node(struct list_lru_node *nlru,
@@ -351,12 +351,12 @@ static int memcg_update_list_lru_node(struct list_lru_node *nlru,
        BUG_ON(old_size > new_size);
 
        old = nlru->memcg_lrus;
-       new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
+       new = kvmalloc(new_size * sizeof(void *), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
        if (__memcg_init_list_lru_node(new, old_size, new_size)) {
-               kfree(new);
+               kvfree(new);
                return -ENOMEM;
        }
 
@@ -373,7 +373,7 @@ static int memcg_update_list_lru_node(struct list_lru_node *nlru,
        nlru->memcg_lrus = new;
        spin_unlock_irq(&nlru->lock);
 
-       kfree(old);
+       kvfree(old);
        return 0;
 }
 
index 21261ff0466fb99d1254ee3927dd1730642d5356..fd70d6aabc3e82b7ead501e2f834e043908a959e 100644 (file)
@@ -625,18 +625,26 @@ static int madvise_inject_error(int behavior,
 {
        struct page *page;
        struct zone *zone;
+       unsigned int order;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       for (; start < end; start += PAGE_SIZE <<
-                               compound_order(compound_head(page))) {
+
+       for (; start < end; start += PAGE_SIZE << order) {
                int ret;
 
                ret = get_user_pages_fast(start, 1, 0, &page);
                if (ret != 1)
                        return ret;
 
+               /*
+                * When soft offlining hugepages, after migrating the page
+                * we dissolve it, therefore in the second loop "page" will
+                * no longer be a compound page, and order will be 0.
+                */
+               order = compound_order(compound_head(page));
+
                if (PageHWPoison(page)) {
                        put_page(page);
                        continue;
@@ -749,6 +757,9 @@ madvise_behavior_valid(int behavior)
  *  MADV_DONTFORK - omit this area from child's address space when forking:
  *             typically, to avoid COWing pages pinned by get_user_pages().
  *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
+ *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
+ *              range after a fork.
+ *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
  *  MADV_HWPOISON - trigger memory error handler as if the given memory range
  *             were corrupted by unrecoverable hardware memory failure.
  *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
@@ -769,7 +780,9 @@ madvise_behavior_valid(int behavior)
  *  zero    - success
  *  -EINVAL - start + len < 0, start is not page-aligned,
  *             "behavior" is not a valid value, or application
- *             is attempting to release locked or shared pages.
+ *             is attempting to release locked or shared pages,
+ *             or the specified address range includes file, Huge TLB,
+ *             MAP_SHARED or VM_PFNMAP range.
  *  -ENOMEM - addresses in the specified range are not currently
  *             mapped, or are outside the AS of the process.
  *  -EIO    - an I/O error occurred while paging in data.
index 15af3da5af02f6acbccff3551a71461a4f4396d5..661f046ad3181f65eccfd9bf3832e395e27aa226 100644 (file)
@@ -1777,6 +1777,10 @@ static void drain_local_stock(struct work_struct *dummy)
        struct memcg_stock_pcp *stock;
        unsigned long flags;
 
+       /*
+        * The only protection from memory hotplug vs. drain_stock races is
+        * that we always operate on local CPU stock here with IRQ disabled
+        */
        local_irq_save(flags);
 
        stock = this_cpu_ptr(&memcg_stock);
@@ -1821,27 +1825,33 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
        /* If someone's already draining, avoid adding running more workers. */
        if (!mutex_trylock(&percpu_charge_mutex))
                return;
-       /* Notify other cpus that system-wide "drain" is running */
-       get_online_cpus();
+       /*
+        * Notify other cpus that system-wide "drain" is running
+        * We do not care about races with the cpu hotplug because cpu down
+        * as well as workers from this path always operate on the local
+        * per-cpu data. CPU up doesn't touch memcg_stock at all.
+        */
        curcpu = get_cpu();
        for_each_online_cpu(cpu) {
                struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
                struct mem_cgroup *memcg;
 
                memcg = stock->cached;
-               if (!memcg || !stock->nr_pages)
+               if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
                        continue;
-               if (!mem_cgroup_is_descendant(memcg, root_memcg))
+               if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
+                       css_put(&memcg->css);
                        continue;
+               }
                if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
                        if (cpu == curcpu)
                                drain_local_stock(&stock->work);
                        else
                                schedule_work_on(cpu, &stock->work);
                }
+               css_put(&memcg->css);
        }
        put_cpu();
-       put_online_cpus();
        mutex_unlock(&percpu_charge_mutex);
 }
 
@@ -5648,7 +5658,8 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
 {
        VM_BUG_ON_PAGE(PageLRU(page), page);
-       VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);
+       VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
+                       !PageHWPoison(page) , page);
 
        if (!page->mem_cgroup)
                return;
@@ -5817,21 +5828,6 @@ void mem_cgroup_sk_alloc(struct sock *sk)
        if (!mem_cgroup_sockets_enabled)
                return;
 
-       /*
-        * Socket cloning can throw us here with sk_memcg already
-        * filled. It won't however, necessarily happen from
-        * process context. So the test for root memcg given
-        * the current task's memcg won't help us in this case.
-        *
-        * Respecting the original socket's memcg is a better
-        * decision in this case.
-        */
-       if (sk->sk_memcg) {
-               BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
-               css_get(&sk->sk_memcg->css);
-               return;
-       }
-
        rcu_read_lock();
        memcg = mem_cgroup_from_task(current);
        if (memcg == root_mem_cgroup)
index ec4e15494901665f99329f2ad094cd3ed3ceed2e..a728bed16c206902de6498921a1d130d141ff7b7 100644 (file)
@@ -845,7 +845,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                 * vm_normal_page() so that we do not have to special case all
                 * call site of vm_normal_page().
                 */
-               if (likely(pfn < highest_memmap_pfn)) {
+               if (likely(pfn <= highest_memmap_pfn)) {
                        struct page *page = pfn_to_page(pfn);
 
                        if (is_device_public_page(page)) {
index e882cb6da99425bad30e4017c5feeadacba2e8ce..d4b5f29906b96465207df76897739d2eba518886 100644 (file)
@@ -328,6 +328,7 @@ int __ref __add_pages(int nid, unsigned long phys_start_pfn,
                if (err && (err != -EEXIST))
                        break;
                err = 0;
+               cond_resched();
        }
        vmemmap_populate_print_last();
 out:
@@ -337,7 +338,7 @@ EXPORT_SYMBOL_GPL(__add_pages);
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
-static int find_smallest_section_pfn(int nid, struct zone *zone,
+static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
                                     unsigned long start_pfn,
                                     unsigned long end_pfn)
 {
@@ -362,7 +363,7 @@ static int find_smallest_section_pfn(int nid, struct zone *zone,
 }
 
 /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
-static int find_biggest_section_pfn(int nid, struct zone *zone,
+static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
                                    unsigned long start_pfn,
                                    unsigned long end_pfn)
 {
@@ -550,7 +551,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms,
                return ret;
 
        scn_nr = __section_nr(ms);
-       start_pfn = section_nr_to_pfn(scn_nr);
+       start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
        __remove_zone(zone, start_pfn);
 
        sparse_remove_one_section(zone, ms, map_offset);
index 006ba625c0b8d4edb6b3ed2b20ce307c254b42be..a2af6d58a68fc087feddf1181ecb511bde86cecd 100644 (file)
@@ -1920,8 +1920,11 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
        struct page *page;
 
        page = __alloc_pages(gfp, order, nid);
-       if (page && page_to_nid(page) == nid)
-               inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
+       if (page && page_to_nid(page) == nid) {
+               preempt_disable();
+               __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
+               preempt_enable();
+       }
        return page;
 }
 
index 6954c1435833133f910a08a9cd8e0e1516084296..e00814ca390ea46dab6174e3c7c183e7bd4faf73 100644 (file)
@@ -2146,8 +2146,9 @@ static int migrate_vma_collect_hole(unsigned long start,
        unsigned long addr;
 
        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-               migrate->src[migrate->npages++] = MIGRATE_PFN_MIGRATE;
+               migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
                migrate->dst[migrate->npages] = 0;
+               migrate->npages++;
                migrate->cpages++;
        }
 
index 99736e026712c42c73c676c5e1889f86d7d05d53..dee0f75c301337af62156d2ae46d5c5391cc6127 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/ratelimit.h>
 #include <linux/kthread.h>
 #include <linux/init.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/tlb.h>
 #include "internal.h"
@@ -494,6 +495,21 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
                goto unlock_oom;
        }
 
+       /*
+        * If the mm has notifiers then we would need to invalidate them around
+        * unmap_page_range and that is risky because notifiers can sleep and
+        * what they do is basically nondeterministic.  So let's have a short
+        * sleep to give the oom victim some more time.
+        * TODO: we really want to get rid of this ugly hack and make sure that
+        * notifiers cannot block for unbounded amount of time and add
+        * mmu_notifier_invalidate_range_{start,end} around unmap_page_range
+        */
+       if (mm_has_notifiers(mm)) {
+               up_read(&mm->mmap_sem);
+               schedule_timeout_idle(HZ);
+               goto unlock_oom;
+       }
+
        /*
         * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
         * work on the mm anymore. The check for MMF_OOM_SKIP must run
index c841af88836ad63d548a2f007220203e4a8ddf9a..77e4d3c5c57b72dcd7e411a03707c26dc85c7c04 100644 (file)
@@ -1190,7 +1190,7 @@ static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
 }
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-static void init_reserved_page(unsigned long pfn)
+static void __meminit init_reserved_page(unsigned long pfn)
 {
        pg_data_t *pgdat;
        int nid, zid;
@@ -5367,6 +5367,7 @@ not_early:
 
                        __init_single_page(page, pfn, zone, nid);
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+                       cond_resched();
                } else {
                        __init_single_pfn(pfn, zone, nid);
                }
index 6a03946469a99eb535851194f519893a5e8a2d11..53afbb919a1c858734513a43360ec8ef7d7bb5a8 100644 (file)
@@ -6,17 +6,6 @@
 
 #include "internal.h"
 
-static inline bool check_pmd(struct page_vma_mapped_walk *pvmw)
-{
-       pmd_t pmde;
-       /*
-        * Make sure we don't re-load pmd between present and !trans_huge check.
-        * We need a consistent view.
-        */
-       pmde = READ_ONCE(*pvmw->pmd);
-       return pmd_present(pmde) && !pmd_trans_huge(pmde);
-}
-
 static inline bool not_found(struct page_vma_mapped_walk *pvmw)
 {
        page_vma_mapped_walk_done(pvmw);
@@ -116,6 +105,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
+       pmd_t pmde;
 
        /* The only possible pmd mapping has been handled on last iteration */
        if (pvmw->pmd && !pvmw->pte)
@@ -148,7 +138,13 @@ restart:
        if (!pud_present(*pud))
                return false;
        pvmw->pmd = pmd_offset(pud, pvmw->address);
-       if (pmd_trans_huge(*pvmw->pmd) || is_pmd_migration_entry(*pvmw->pmd)) {
+       /*
+        * Make sure the pmd value isn't cached in a register by the
+        * compiler and used as a stale value after we've observed a
+        * subsequent update.
+        */
+       pmde = READ_ONCE(*pvmw->pmd);
+       if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
                pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                if (likely(pmd_trans_huge(*pvmw->pmd))) {
                        if (pvmw->flags & PVMW_MIGRATION)
@@ -167,17 +163,15 @@ restart:
                                                return not_found(pvmw);
                                        return true;
                                }
-                       } else
-                               WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
+                       }
                        return not_found(pvmw);
                } else {
                        /* THP pmd was split under us: handle on pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                }
-       } else {
-               if (!check_pmd(pvmw))
-                       return false;
+       } else if (!pmd_present(pmde)) {
+               return false;
        }
        if (!map_pte(pvmw))
                goto next_pte;
index 6142484e88f79c3e946b30de136a2886c075ffe7..7a58460bfd27a303c7ab4d3c4f10abac02c15603 100644 (file)
@@ -73,7 +73,7 @@ static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk,
                     last_alloc + 1 : 0;
 
        as_len = 0;
-       start = chunk->start_offset;
+       start = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
 
        /*
         * If a bit is set in the allocation map, the bound_map identifies
index 59d44d61f5f198b26be07ff264395a6b30cc091d..a0e0c82c1e4cd22324e87a86a5df7301f35b8a83 100644 (file)
@@ -353,6 +353,8 @@ static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
                                        block->contig_hint_start);
                        return;
                }
+               /* reset to satisfy the second predicate above */
+               block_off = 0;
 
                *bits = block->right_free;
                *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
@@ -407,6 +409,8 @@ static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
                        *bit_off = pcpu_block_off_to_off(i, block->first_free);
                        return;
                }
+               /* reset to satisfy the second predicate above */
+               block_off = 0;
 
                *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
                                 align);
@@ -1325,7 +1329,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
  * @gfp: allocation flags
  *
  * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
- * contain %GFP_KERNEL, the allocation is atomic.
+ * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
+ * then no warning will be triggered on invalid or failed allocation
+ * requests.
  *
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
@@ -1333,10 +1339,11 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                                 gfp_t gfp)
 {
+       bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
+       bool do_warn = !(gfp & __GFP_NOWARN);
        static int warn_limit = 10;
        struct pcpu_chunk *chunk;
        const char *err;
-       bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
        int slot, off, cpu, ret;
        unsigned long flags;
        void __percpu *ptr;
@@ -1357,7 +1364,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
        if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
                     !is_power_of_2(align))) {
-               WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
+               WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
                     size, align);
                return NULL;
        }
@@ -1478,7 +1485,7 @@ fail_unlock:
 fail:
        trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
 
-       if (!is_atomic && warn_limit) {
+       if (!is_atomic && do_warn && warn_limit) {
                pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
                        size, align, is_atomic, err);
                dump_stack();
@@ -1503,7 +1510,9 @@ fail:
  *
  * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
  * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
- * be called from any context but is a lot more likely to fail.
+ * be called from any context but is a lot more likely to fail. If @gfp
+ * has __GFP_NOWARN then no warning will be triggered on invalid or failed
+ * allocation requests.
  *
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
index 6bb4deb12e78b8e3724c40446069e2cd0731b4a4..d908c8769b48478b6ba738b5053f0e25a70c0732 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/uaccess.h>
 #include <asm/sections.h>
 
-const int rodata_test_data = 0xC3;
+static const int rodata_test_data = 0xC3;
 
 void rodata_test(void)
 {
index 904a83be82de81a24a73adfac8e573db32f7118a..80164599ca5d1921d2c4b8f773f2a7c8b019cce7 100644 (file)
@@ -165,9 +165,9 @@ static int init_memcg_params(struct kmem_cache *s,
        if (!memcg_nr_cache_ids)
                return 0;
 
-       arr = kzalloc(sizeof(struct memcg_cache_array) +
-                     memcg_nr_cache_ids * sizeof(void *),
-                     GFP_KERNEL);
+       arr = kvzalloc(sizeof(struct memcg_cache_array) +
+                      memcg_nr_cache_ids * sizeof(void *),
+                      GFP_KERNEL);
        if (!arr)
                return -ENOMEM;
 
@@ -178,15 +178,23 @@ static int init_memcg_params(struct kmem_cache *s,
 static void destroy_memcg_params(struct kmem_cache *s)
 {
        if (is_root_cache(s))
-               kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
+               kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
+}
+
+static void free_memcg_params(struct rcu_head *rcu)
+{
+       struct memcg_cache_array *old;
+
+       old = container_of(rcu, struct memcg_cache_array, rcu);
+       kvfree(old);
 }
 
 static int update_memcg_params(struct kmem_cache *s, int new_array_size)
 {
        struct memcg_cache_array *old, *new;
 
-       new = kzalloc(sizeof(struct memcg_cache_array) +
-                     new_array_size * sizeof(void *), GFP_KERNEL);
+       new = kvzalloc(sizeof(struct memcg_cache_array) +
+                      new_array_size * sizeof(void *), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
@@ -198,7 +206,7 @@ static int update_memcg_params(struct kmem_cache *s, int new_array_size)
 
        rcu_assign_pointer(s->memcg_params.memcg_caches, new);
        if (old)
-               kfree_rcu(old, rcu);
+               call_rcu(&old->rcu, free_memcg_params);
        return 0;
 }
 
index 9295ae960d6680165f67db4405698c3b48e3b84e..a77d68f2c1b61de1bc8102656e7e482a442963b1 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -575,7 +575,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
                            void *arg)
 {
        if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
-           !PageUnevictable(page)) {
+           !PageSwapCache(page) && !PageUnevictable(page)) {
                bool active = PageActive(page);
 
                del_page_from_lru_list(page, lruvec,
@@ -665,7 +665,7 @@ void deactivate_file_page(struct page *page)
 void mark_page_lazyfree(struct page *page)
 {
        if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
-           !PageUnevictable(page)) {
+           !PageSwapCache(page) && !PageUnevictable(page)) {
                struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
 
                get_page(page);
index 71ce2d1ccbf7357238306d5e900a8bee7944e1cb..05b6803f0cce205ca58410b6932c5af22a6dd7cb 100644 (file)
@@ -39,10 +39,6 @@ struct address_space *swapper_spaces[MAX_SWAPFILES];
 static unsigned int nr_swapper_spaces[MAX_SWAPFILES];
 bool swap_vma_readahead = true;
 
-#define SWAP_RA_MAX_ORDER_DEFAULT      3
-
-static int swap_ra_max_order = SWAP_RA_MAX_ORDER_DEFAULT;
-
 #define SWAP_RA_WIN_SHIFT      (PAGE_SHIFT / 2)
 #define SWAP_RA_HITS_MASK      ((1UL << SWAP_RA_WIN_SHIFT) - 1)
 #define SWAP_RA_HITS_MAX       SWAP_RA_HITS_MASK
@@ -242,6 +238,17 @@ int add_to_swap(struct page *page)
                 * clear SWAP_HAS_CACHE flag.
                 */
                goto fail;
+       /*
+        * Normally the page will be dirtied in unmap because its pte should be
+        * dirty. A special case is MADV_FREE page. The page's pte could have
+        * dirty bit cleared but the page's SwapBacked bit is still set because
+        * clearing the dirty bit and SwapBacked bit is not lock protected. For
+        * such a page, unmap will not set the dirty bit, so page reclaim will
+        * not write the page out. This can cause data corruption when the page
+        * is swapped in later. Always setting the dirty bit for the page solves
+        * the problem.
+        */
+       set_page_dirty(page);
 
        return 1;
 
@@ -653,6 +660,13 @@ struct page *swap_readahead_detect(struct vm_fault *vmf,
        pte_t *tpte;
 #endif
 
+       max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
+                            SWAP_RA_ORDER_CEILING);
+       if (max_win == 1) {
+               swap_ra->win = 1;
+               return NULL;
+       }
+
        faddr = vmf->address;
        entry = pte_to_swp_entry(vmf->orig_pte);
        if ((unlikely(non_swap_entry(entry))))
@@ -661,12 +675,6 @@ struct page *swap_readahead_detect(struct vm_fault *vmf,
        if (page)
                return page;
 
-       max_win = 1 << READ_ONCE(swap_ra_max_order);
-       if (max_win == 1) {
-               swap_ra->win = 1;
-               return NULL;
-       }
-
        fpfn = PFN_DOWN(faddr);
        swap_ra_info = GET_SWAP_RA_VAL(vma);
        pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info));
@@ -775,32 +783,8 @@ static struct kobj_attribute vma_ra_enabled_attr =
        __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
               vma_ra_enabled_store);
 
-static ssize_t vma_ra_max_order_show(struct kobject *kobj,
-                                    struct kobj_attribute *attr, char *buf)
-{
-       return sprintf(buf, "%d\n", swap_ra_max_order);
-}
-static ssize_t vma_ra_max_order_store(struct kobject *kobj,
-                                     struct kobj_attribute *attr,
-                                     const char *buf, size_t count)
-{
-       int err, v;
-
-       err = kstrtoint(buf, 10, &v);
-       if (err || v > SWAP_RA_ORDER_CEILING || v <= 0)
-               return -EINVAL;
-
-       swap_ra_max_order = v;
-
-       return count;
-}
-static struct kobj_attribute vma_ra_max_order_attr =
-       __ATTR(vma_ra_max_order, 0644, vma_ra_max_order_show,
-              vma_ra_max_order_store);
-
 static struct attribute *swap_attrs[] = {
        &vma_ra_enabled_attr.attr,
-       &vma_ra_max_order_attr.attr,
        NULL,
 };
 
index 8a43db6284ebcb9c40dfc3532d454c783ea61412..673942094328a710b059b2b50e149ce7eb3d5f11 100644 (file)
@@ -1695,11 +1695,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        for (i = 0; i < area->nr_pages; i++) {
                struct page *page;
 
-               if (fatal_signal_pending(current)) {
-                       area->nr_pages = i;
-                       goto fail_no_warn;
-               }
-
                if (node == NUMA_NO_NODE)
                        page = alloc_page(alloc_mask|highmem_mask);
                else
@@ -1723,7 +1718,6 @@ fail:
        warn_alloc(gfp_mask, NULL,
                          "vmalloc: allocation failure, allocated %ld of %ld bytes",
                          (area->nr_pages*PAGE_SIZE), area->size);
-fail_no_warn:
        vfree(area->addr);
        return NULL;
 }
index 486550df32be8589eea953d739ea30e62876b702..b2ba2ba585f3c0ccb422531ae9dc26909ae463d4 100644 (file)
@@ -250,6 +250,7 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
 
        WARN_ON(!list_empty(&zhdr->buddy));
        set_bit(PAGE_STALE, &page->private);
+       clear_bit(NEEDS_COMPACTING, &page->private);
        spin_lock(&pool->lock);
        if (!list_empty(&page->lru))
                list_del(&page->lru);
@@ -303,7 +304,6 @@ static void free_pages_work(struct work_struct *w)
                list_del(&zhdr->buddy);
                if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
                        continue;
-               clear_bit(NEEDS_COMPACTING, &page->private);
                spin_unlock(&pool->stale_lock);
                cancel_work_sync(&zhdr->work);
                free_z3fold_page(page);
@@ -624,10 +624,8 @@ lookup:
         * stale pages list. cancel_work_sync() can sleep so we must make
         * sure it won't be called in case we're in atomic context.
         */
-       if (zhdr && (can_sleep || !work_pending(&zhdr->work) ||
-           !unlikely(work_busy(&zhdr->work)))) {
+       if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
                list_del(&zhdr->buddy);
-               clear_bit(NEEDS_COMPACTING, &page->private);
                spin_unlock(&pool->stale_lock);
                if (can_sleep)
                        cancel_work_sync(&zhdr->work);
@@ -875,16 +873,18 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
                                goto next;
                }
 next:
+               spin_lock(&pool->lock);
                if (test_bit(PAGE_HEADLESS, &page->private)) {
                        if (ret == 0) {
+                               spin_unlock(&pool->lock);
                                free_z3fold_page(page);
                                return 0;
                        }
                } else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
                        atomic64_dec(&pool->pages_nr);
+                       spin_unlock(&pool->lock);
                        return 0;
                }
-               spin_lock(&pool->lock);
 
                /*
                 * Add to the beginning of LRU.
index e2ed69850489bb79a6a055ee3264456a31acee3e..0bc31de9071a2112dbbd622dbb5646a5fe86fd3c 100644 (file)
@@ -21,6 +21,12 @@ bool vlan_do_receive(struct sk_buff **skbp)
        if (unlikely(!skb))
                return false;
 
+       if (unlikely(!(vlan_dev->flags & IFF_UP))) {
+               kfree_skb(skb);
+               *skbp = NULL;
+               return false;
+       }
+
        skb->dev = vlan_dev;
        if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
                /* Our lower layer thinks this is not local, let's make sure.
index c18115d22f00be37f68d9bbdbb04f626de6cf3c4..db82a40875e8da3c9ad39881046d27bd8f0ee596 100644 (file)
@@ -126,14 +126,4 @@ config BT_DEBUGFS
          Provide extensive information about internal Bluetooth states
          in debugfs.
 
-config BT_LEGACY_IOCTL
-       bool "Enable legacy ioctl interfaces"
-       depends on BT && BT_BREDR
-       default y
-       help
-         Enable support for legacy ioctl interfaces.  This is only needed
-         for old and deprecated applications using direct ioctl calls for
-         controller management.  Since Linux 3.4 all configuration and
-         setup is done via mgmt interface and this is no longer needed.
-
 source "drivers/bluetooth/Kconfig"
index 0bad296fe0af970f5989b8a121039b99863088b9..65d734c165bd6368b8607d0b5078d345b5e642a8 100644 (file)
@@ -878,7 +878,6 @@ static int hci_sock_release(struct socket *sock)
        return 0;
 }
 
-#ifdef CONFIG_BT_LEGACY_IOCTL
 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 {
        bdaddr_t bdaddr;
@@ -1050,7 +1049,6 @@ done:
        release_sock(sk);
        return err;
 }
-#endif
 
 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                         int addr_len)
@@ -1971,11 +1969,7 @@ static const struct proto_ops hci_sock_ops = {
        .getname        = hci_sock_getname,
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
-#ifdef CONFIG_BT_LEGACY_IOCTL
        .ioctl          = hci_sock_ioctl,
-#else
-       .ioctl          = sock_no_ioctl,
-#endif
        .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
index 3bc890716c89cb5fdeafcbd3e5062ed9bb6a9882..de21527308093240614481966bed7f43d7af07f3 100644 (file)
@@ -573,7 +573,7 @@ static int br_process_vlan_info(struct net_bridge *br,
                }
                *vinfo_last = NULL;
 
-               return 0;
+               return err;
        }
 
        return br_vlan_info(br, p, cmd, vinfo_curr);
index 2585b100ebbbc332a33209c2b0bf3643eb0dd961..276b60262981c95a9fccd508e8d8123212d535de 100644 (file)
@@ -65,8 +65,8 @@ static int ebt_broute(struct sk_buff *skb)
 
 static int __net_init broute_net_init(struct net *net)
 {
-       net->xt.broute_table = ebt_register_table(net, &broute_table, NULL);
-       return PTR_ERR_OR_ZERO(net->xt.broute_table);
+       return ebt_register_table(net, &broute_table, NULL,
+                                 &net->xt.broute_table);
 }
 
 static void __net_exit broute_net_exit(struct net *net)
index 45a00dbdbcad647f2342caa9fc9181f27b7f3a77..c41da5fac84f49a9cf5e58eaab88b3beb2d81fba 100644 (file)
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_filter[] = {
 
 static int __net_init frame_filter_net_init(struct net *net)
 {
-       net->xt.frame_filter = ebt_register_table(net, &frame_filter, ebt_ops_filter);
-       return PTR_ERR_OR_ZERO(net->xt.frame_filter);
+       return ebt_register_table(net, &frame_filter, ebt_ops_filter,
+                                 &net->xt.frame_filter);
 }
 
 static void __net_exit frame_filter_net_exit(struct net *net)
index 57cd5bb154e7071096f3205cb5200e277e04c69d..08df7406ecb3835a664a695a239d73f62eeaf457 100644 (file)
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_nat[] = {
 
 static int __net_init frame_nat_net_init(struct net *net)
 {
-       net->xt.frame_nat = ebt_register_table(net, &frame_nat, ebt_ops_nat);
-       return PTR_ERR_OR_ZERO(net->xt.frame_nat);
+       return ebt_register_table(net, &frame_nat, ebt_ops_nat,
+                                 &net->xt.frame_nat);
 }
 
 static void __net_exit frame_nat_net_exit(struct net *net)
index 83951f978445e5b9daede1eac715cec0d9f42987..3b3dcf719e0783e74ecf9018b3fd1728a31f6393 100644 (file)
@@ -1169,9 +1169,8 @@ static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
        kfree(table);
 }
 
-struct ebt_table *
-ebt_register_table(struct net *net, const struct ebt_table *input_table,
-                  const struct nf_hook_ops *ops)
+int ebt_register_table(struct net *net, const struct ebt_table *input_table,
+                      const struct nf_hook_ops *ops, struct ebt_table **res)
 {
        struct ebt_table_info *newinfo;
        struct ebt_table *t, *table;
@@ -1183,7 +1182,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
            repl->entries == NULL || repl->entries_size == 0 ||
            repl->counters != NULL || input_table->private != NULL) {
                BUGPRINT("Bad table data for ebt_register_table!!!\n");
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
        }
 
        /* Don't add one table to multiple lists. */
@@ -1252,16 +1251,18 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
        list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
        mutex_unlock(&ebt_mutex);
 
+       WRITE_ONCE(*res, table);
+
        if (!ops)
-               return table;
+               return 0;
 
        ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
        if (ret) {
                __ebt_unregister_table(net, table);
-               return ERR_PTR(ret);
+               *res = NULL;
        }
 
-       return table;
+       return ret;
 free_unlock:
        mutex_unlock(&ebt_mutex);
 free_chainstack:
@@ -1276,7 +1277,7 @@ free_newinfo:
 free_table:
        kfree(table);
 out:
-       return ERR_PTR(ret);
+       return ret;
 }
 
 void ebt_unregister_table(struct net *net, struct ebt_table *table,
index 88edac0f3e366398d0c1e0de023b90b0669498f9..ecd5c703d11e85c32eaee87321af56da03e7e07d 100644 (file)
@@ -78,7 +78,7 @@ MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
 static struct kmem_cache *rcv_cache __read_mostly;
 
 /* table of registered CAN protocols */
-static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
+static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
 static DEFINE_MUTEX(proto_tab_lock);
 
 static atomic_t skbcounter = ATOMIC_INIT(0);
@@ -788,7 +788,7 @@ int can_proto_register(const struct can_proto *cp)
 
        mutex_lock(&proto_tab_lock);
 
-       if (proto_tab[proto]) {
+       if (rcu_access_pointer(proto_tab[proto])) {
                pr_err("can: protocol %d already registered\n", proto);
                err = -EBUSY;
        } else
@@ -812,7 +812,7 @@ void can_proto_unregister(const struct can_proto *cp)
        int proto = cp->protocol;
 
        mutex_lock(&proto_tab_lock);
-       BUG_ON(proto_tab[proto] != cp);
+       BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
        RCU_INIT_POINTER(proto_tab[proto], NULL);
        mutex_unlock(&proto_tab_lock);
 
@@ -875,9 +875,14 @@ static int can_pernet_init(struct net *net)
        spin_lock_init(&net->can.can_rcvlists_lock);
        net->can.can_rx_alldev_list =
                kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
-
+       if (!net->can.can_rx_alldev_list)
+               goto out;
        net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL);
+       if (!net->can.can_stats)
+               goto out_free_alldev_list;
        net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL);
+       if (!net->can.can_pstats)
+               goto out_free_can_stats;
 
        if (IS_ENABLED(CONFIG_PROC_FS)) {
                /* the statistics are updated every second (timer triggered) */
@@ -892,6 +897,13 @@ static int can_pernet_init(struct net *net)
        }
 
        return 0;
+
+ out_free_can_stats:
+       kfree(net->can.can_stats);
+ out_free_alldev_list:
+       kfree(net->can.can_rx_alldev_list);
+ out:
+       return -ENOMEM;
 }
 
 static void can_pernet_exit(struct net *net)
index 47a8748d953afbf460238804d3d7d3d8b087f474..13690334efa31b978cff2ed6432af626f00cab76 100644 (file)
@@ -1493,13 +1493,14 @@ static int bcm_init(struct sock *sk)
 static int bcm_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
-       struct net *net = sock_net(sk);
+       struct net *net;
        struct bcm_sock *bo;
        struct bcm_op *op, *next;
 
-       if (sk == NULL)
+       if (!sk)
                return 0;
 
+       net = sock_net(sk);
        bo = bcm_sk(sk);
 
        /* remove bcm_ops, timer, rx_unregister(), etc. */
index f358d0bfa76b35cb978e9e92b57aea38a4e3b391..79d14d70b7ea7e032ecffec839b2f44ee4fe7cdc 100644 (file)
@@ -2445,19 +2445,34 @@ static void apply_upmap(struct ceph_osdmap *osdmap,
 
        pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
        if (pg) {
-               for (i = 0; i < raw->size; i++) {
-                       for (j = 0; j < pg->pg_upmap_items.len; j++) {
-                               int from = pg->pg_upmap_items.from_to[j][0];
-                               int to = pg->pg_upmap_items.from_to[j][1];
-
-                               if (from == raw->osds[i]) {
-                                       if (!(to != CRUSH_ITEM_NONE &&
-                                             to < osdmap->max_osd &&
-                                             osdmap->osd_weight[to] == 0))
-                                               raw->osds[i] = to;
+               /*
+                * Note: this approach does not allow a bidirectional swap,
+                * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
+                */
+               for (i = 0; i < pg->pg_upmap_items.len; i++) {
+                       int from = pg->pg_upmap_items.from_to[i][0];
+                       int to = pg->pg_upmap_items.from_to[i][1];
+                       int pos = -1;
+                       bool exists = false;
+
+                       /* make sure replacement doesn't already appear */
+                       for (j = 0; j < raw->size; j++) {
+                               int osd = raw->osds[j];
+
+                               if (osd == to) {
+                                       exists = true;
                                        break;
                                }
+                               /* ignore mapping if target is marked out */
+                               if (osd == from && pos < 0 &&
+                                   !(to != CRUSH_ITEM_NONE &&
+                                     to < osdmap->max_osd &&
+                                     osdmap->osd_weight[to] == 0)) {
+                                       pos = j;
+                               }
                        }
+                       if (!exists && pos >= 0)
+                               raw->osds[pos] = to;
                }
        }
 }
index 6ded6c821d7a21f296d14db02552ea1c71643051..22381719718c4fbd5d63b8836e3161026b59a459 100644 (file)
@@ -185,6 +185,13 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
                ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen);
        }
 
+       /*
+        * check that the length of the messages copied in is the same as
+        * what we get from the first loop
+        */
+       if ((char *)kcmsg - (char *)kcmsg_base != kcmlen)
+               goto Einval;
+
        /* Ok, looks like we made it.  Hook it up and return success. */
        kmsg->msg_control = kcmsg_base;
        kmsg->msg_controllen = kcmlen;
index fb766d906148e7d1e85a654eaf6048c1cdf49364..11596a302a265212cb5dfe40f51b5b01fb20d0ce 100644 (file)
@@ -1147,9 +1147,8 @@ static int dev_alloc_name_ns(struct net *net,
        return ret;
 }
 
-static int dev_get_valid_name(struct net *net,
-                             struct net_device *dev,
-                             const char *name)
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+                      const char *name)
 {
        BUG_ON(!net);
 
@@ -1165,6 +1164,7 @@ static int dev_get_valid_name(struct net *net,
 
        return 0;
 }
+EXPORT_SYMBOL(dev_get_valid_name);
 
 /**
  *     dev_change_name - change name of a device
@@ -1948,8 +1948,12 @@ again:
                goto again;
        }
 out_unlock:
-       if (pt_prev)
-               pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
+       if (pt_prev) {
+               if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
+                       pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
+               else
+                       kfree_skb(skb2);
+       }
        rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
@@ -3892,6 +3896,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
                __skb_pull(skb, off);
        else if (off < 0)
                __skb_push(skb, -off);
+       skb->mac_header += off;
 
        switch (act) {
        case XDP_REDIRECT:
index 709a4e6fb447fda886046308de5b613a88ff9dfa..f9c7a88cd98183fd10e4b00bf84e8ec5452c88a8 100644 (file)
@@ -303,7 +303,18 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
        case SIOCSIFTXQLEN:
                if (ifr->ifr_qlen < 0)
                        return -EINVAL;
-               dev->tx_queue_len = ifr->ifr_qlen;
+               if (dev->tx_queue_len ^ ifr->ifr_qlen) {
+                       unsigned int orig_len = dev->tx_queue_len;
+
+                       dev->tx_queue_len = ifr->ifr_qlen;
+                       err = call_netdevice_notifiers(
+                                       NETDEV_CHANGE_TX_QUEUE_LEN, dev);
+                       err = notifier_to_errno(err);
+                       if (err) {
+                               dev->tx_queue_len = orig_len;
+                               return err;
+                       }
+               }
                return 0;
 
        case SIOCSIFNAME:
index 6a582ae4c5d972360736a966f02729bf22a64599..9a9a3d77e3274fc3e115fe73470f18bc93be6364 100644 (file)
@@ -436,7 +436,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
 EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32);
 
 /* return false if legacy contained non-0 deprecated fields
- * transceiver/maxtxpkt/maxrxpkt. rest of ksettings always updated
+ * maxtxpkt/maxrxpkt. rest of ksettings always updated
  */
 static bool
 convert_legacy_settings_to_link_ksettings(
@@ -451,8 +451,7 @@ convert_legacy_settings_to_link_ksettings(
         * deprecated legacy fields, and they should not use
         * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS
         */
-       if (legacy_settings->transceiver ||
-           legacy_settings->maxtxpkt ||
+       if (legacy_settings->maxtxpkt ||
            legacy_settings->maxrxpkt)
                retval = false;
 
@@ -525,6 +524,8 @@ convert_link_ksettings_to_legacy_settings(
                = link_ksettings->base.eth_tp_mdix;
        legacy_settings->eth_tp_mdix_ctrl
                = link_ksettings->base.eth_tp_mdix_ctrl;
+       legacy_settings->transceiver
+               = link_ksettings->base.transceiver;
        return retval;
 }
 
index 24dd33dd9f0452994623e26e3a379f0bf56190ce..6ae94f825f72eb810b5252e10dd89cae66f8cbef 100644 (file)
@@ -989,10 +989,14 @@ static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 
 bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
-       bool ret = __sk_filter_charge(sk, fp);
-       if (ret)
-               refcount_inc(&fp->refcnt);
-       return ret;
+       if (!refcount_inc_not_zero(&fp->refcnt))
+               return false;
+
+       if (!__sk_filter_charge(sk, fp)) {
+               sk_filter_release(fp);
+               return false;
+       }
+       return true;
 }
 
 static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
@@ -1794,7 +1798,7 @@ struct redirect_info {
        u32 flags;
        struct bpf_map *map;
        struct bpf_map *map_to_flush;
-       const struct bpf_prog *map_owner;
+       unsigned long   map_owner;
 };
 
 static DEFINE_PER_CPU(struct redirect_info, redirect_info);
@@ -1835,31 +1839,32 @@ static const struct bpf_func_proto bpf_redirect_proto = {
        .arg2_type      = ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_sk_redirect_map, struct bpf_map *, map, u32, key, u64, flags)
+BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
+          struct bpf_map *, map, u32, key, u64, flags)
 {
-       struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+       struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 
+       /* If user passes invalid input drop the packet. */
        if (unlikely(flags))
-               return SK_ABORTED;
+               return SK_DROP;
 
-       ri->ifindex = key;
-       ri->flags = flags;
-       ri->map = map;
+       tcb->bpf.key = key;
+       tcb->bpf.flags = flags;
+       tcb->bpf.map = map;
 
-       return SK_REDIRECT;
+       return SK_PASS;
 }
 
-struct sock *do_sk_redirect_map(void)
+struct sock *do_sk_redirect_map(struct sk_buff *skb)
 {
-       struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+       struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
        struct sock *sk = NULL;
 
-       if (ri->map) {
-               sk = __sock_map_lookup_elem(ri->map, ri->ifindex);
+       if (tcb->bpf.map) {
+               sk = __sock_map_lookup_elem(tcb->bpf.map, tcb->bpf.key);
 
-               ri->ifindex = 0;
-               ri->map = NULL;
-               /* we do not clear flags for future lookup */
+               tcb->bpf.key = 0;
+               tcb->bpf.map = NULL;
        }
 
        return sk;
@@ -1869,9 +1874,10 @@ static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
        .func           = bpf_sk_redirect_map,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
-       .arg1_type      = ARG_CONST_MAP_PTR,
-       .arg2_type      = ARG_ANYTHING,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
+       .arg4_type      = ARG_ANYTHING,
 };
 
 BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
@@ -2500,11 +2506,17 @@ void xdp_do_flush_map(void)
 }
 EXPORT_SYMBOL_GPL(xdp_do_flush_map);
 
+static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
+                                  unsigned long aux)
+{
+       return (unsigned long)xdp_prog->aux != aux;
+}
+
 static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
                               struct bpf_prog *xdp_prog)
 {
        struct redirect_info *ri = this_cpu_ptr(&redirect_info);
-       const struct bpf_prog *map_owner = ri->map_owner;
+       unsigned long map_owner = ri->map_owner;
        struct bpf_map *map = ri->map;
        struct net_device *fwd = NULL;
        u32 index = ri->ifindex;
@@ -2512,9 +2524,9 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
 
        ri->ifindex = 0;
        ri->map = NULL;
-       ri->map_owner = NULL;
+       ri->map_owner = 0;
 
-       if (unlikely(map_owner != xdp_prog)) {
+       if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
                err = -EFAULT;
                map = NULL;
                goto err;
@@ -2574,7 +2586,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
                            struct bpf_prog *xdp_prog)
 {
        struct redirect_info *ri = this_cpu_ptr(&redirect_info);
-       const struct bpf_prog *map_owner = ri->map_owner;
+       unsigned long map_owner = ri->map_owner;
        struct bpf_map *map = ri->map;
        struct net_device *fwd = NULL;
        u32 index = ri->ifindex;
@@ -2583,10 +2595,10 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 
        ri->ifindex = 0;
        ri->map = NULL;
-       ri->map_owner = NULL;
+       ri->map_owner = 0;
 
        if (map) {
-               if (unlikely(map_owner != xdp_prog)) {
+               if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
                        err = -EFAULT;
                        map = NULL;
                        goto err;
@@ -2632,7 +2644,7 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
        ri->ifindex = ifindex;
        ri->flags = flags;
        ri->map = NULL;
-       ri->map_owner = NULL;
+       ri->map_owner = 0;
 
        return XDP_REDIRECT;
 }
@@ -2646,7 +2658,7 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = {
 };
 
 BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
-          const struct bpf_prog *, map_owner)
+          unsigned long, map_owner)
 {
        struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 
@@ -3673,7 +3685,6 @@ static bool sk_skb_is_valid_access(int off, int size,
 {
        if (type == BPF_WRITE) {
                switch (off) {
-               case bpf_ctx_range(struct __sk_buff, mark):
                case bpf_ctx_range(struct __sk_buff, tc_index):
                case bpf_ctx_range(struct __sk_buff, priority):
                        break;
@@ -3683,6 +3694,7 @@ static bool sk_skb_is_valid_access(int off, int size,
        }
 
        switch (off) {
+       case bpf_ctx_range(struct __sk_buff, mark):
        case bpf_ctx_range(struct __sk_buff, tc_classid):
                return false;
        case bpf_ctx_range(struct __sk_buff, data):
@@ -4232,6 +4244,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
        return insn - insn_buf;
 }
 
+static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
+                                    const struct bpf_insn *si,
+                                    struct bpf_insn *insn_buf,
+                                    struct bpf_prog *prog, u32 *target_size)
+{
+       struct bpf_insn *insn = insn_buf;
+       int off;
+
+       switch (si->off) {
+       case offsetof(struct __sk_buff, data_end):
+               off  = si->off;
+               off -= offsetof(struct __sk_buff, data_end);
+               off += offsetof(struct sk_buff, cb);
+               off += offsetof(struct tcp_skb_cb, bpf.data_end);
+               *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
+                                     si->src_reg, off);
+               break;
+       default:
+               return bpf_convert_ctx_access(type, si, insn_buf, prog,
+                                             target_size);
+       }
+
+       return insn - insn_buf;
+}
+
 const struct bpf_verifier_ops sk_filter_prog_ops = {
        .get_func_proto         = sk_filter_func_proto,
        .is_valid_access        = sk_filter_is_valid_access,
@@ -4290,7 +4327,7 @@ const struct bpf_verifier_ops sock_ops_prog_ops = {
 const struct bpf_verifier_ops sk_skb_prog_ops = {
        .get_func_proto         = sk_skb_func_proto,
        .is_valid_access        = sk_skb_is_valid_access,
-       .convert_ctx_access     = bpf_convert_ctx_access,
+       .convert_ctx_access     = sk_skb_convert_ctx_access,
        .gen_prologue           = sk_skb_prologue,
 };
 
index a78fd61da0ec50a0cf2f8f6216a15944086d0e89..5ace48926b196666265a7f95b77779cbdd1ff848 100644 (file)
@@ -1483,7 +1483,10 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_LINKINFO]         = { .type = NLA_NESTED },
        [IFLA_NET_NS_PID]       = { .type = NLA_U32 },
        [IFLA_NET_NS_FD]        = { .type = NLA_U32 },
-       [IFLA_IFALIAS]          = { .type = NLA_STRING, .len = IFALIASZ-1 },
+       /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
+        * allow 0-length string (needed to remove an alias).
+        */
+       [IFLA_IFALIAS]          = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
        [IFLA_VFINFO_LIST]      = {. type = NLA_NESTED },
        [IFLA_VF_PORTS]         = { .type = NLA_NESTED },
        [IFLA_PORT_SELF]        = { .type = NLA_NESTED },
@@ -2093,7 +2096,7 @@ static int do_setlink(const struct sk_buff *skb,
                                dev->tx_queue_len = orig_len;
                                goto errout;
                        }
-                       status |= DO_SETLINK_NOTIFY;
+                       status |= DO_SETLINK_MODIFIED;
                }
        }
 
@@ -2248,7 +2251,7 @@ static int do_setlink(const struct sk_buff *skb,
 
 errout:
        if (status & DO_SETLINK_MODIFIED) {
-               if (status & DO_SETLINK_NOTIFY)
+               if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
                        netdev_state_change(dev);
 
                if (err < 0)
@@ -3854,6 +3857,9 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
                return -EMSGSIZE;
 
        ifsm = nlmsg_data(nlh);
+       ifsm->family = PF_UNSPEC;
+       ifsm->pad1 = 0;
+       ifsm->pad2 = 0;
        ifsm->ifindex = dev->ifindex;
        ifsm->filter_mask = filter_mask;
 
@@ -4276,13 +4282,17 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
 
        switch (event) {
        case NETDEV_REBOOT:
+       case NETDEV_CHANGEMTU:
        case NETDEV_CHANGEADDR:
        case NETDEV_CHANGENAME:
        case NETDEV_FEAT_CHANGE:
        case NETDEV_BONDING_FAILOVER:
+       case NETDEV_POST_TYPE_CHANGE:
        case NETDEV_NOTIFY_PEERS:
+       case NETDEV_CHANGEUPPER:
        case NETDEV_RESEND_IGMP:
        case NETDEV_CHANGEINFODATA:
+       case NETDEV_CHANGE_TX_QUEUE_LEN:
                rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
                                   GFP_KERNEL);
                break;
index 16982de649b97b92423a4f9f5eac1e98ca803370..24656076906d2a0f3b70b030c977e854caba2487 100644 (file)
@@ -1124,9 +1124,13 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
 
        err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
        if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
+               struct sock *save_sk = skb->sk;
+
                /* Streams do not free skb on error. Reset to prev state. */
                msg->msg_iter = orig_iter;
+               skb->sk = sk;
                ___pskb_trim(skb, orig_len);
+               skb->sk = save_sk;
                return err;
        }
 
@@ -1896,7 +1900,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
        }
 
        /* If we need update frag list, we are in troubles.
-        * Certainly, it possible to add an offset to skb data,
+        * Certainly, it is possible to add an offset to skb data,
         * but taking into account that pulling is expected to
         * be very rare operation, it is worth to fight against
         * further bloating skb head and crucify ourselves here instead.
index 9b7b6bbb2a23e7652a1f34a305f29d49de00bc8c..415f441c63b9e2ff8feb010f44ca27303c72aaa1 100644 (file)
@@ -1654,6 +1654,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 
                sock_copy(newsk, sk);
 
+               newsk->sk_prot_creator = sk->sk_prot;
+
                /* SANITY */
                if (likely(newsk->sk_net_refcnt))
                        get_net(sock_net(newsk));
@@ -1675,20 +1677,28 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                newsk->sk_dst_pending_confirm = 0;
                newsk->sk_wmem_queued   = 0;
                newsk->sk_forward_alloc = 0;
+
+               /* sk->sk_memcg will be populated at accept() time */
+               newsk->sk_memcg = NULL;
+
                atomic_set(&newsk->sk_drops, 0);
                newsk->sk_send_head     = NULL;
                newsk->sk_userlocks     = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
                atomic_set(&newsk->sk_zckey, 0);
 
                sock_reset_flag(newsk, SOCK_DONE);
+               cgroup_sk_alloc(&newsk->sk_cgrp_data);
 
-               filter = rcu_dereference_protected(newsk->sk_filter, 1);
+               rcu_read_lock();
+               filter = rcu_dereference(sk->sk_filter);
                if (filter != NULL)
                        /* though it's an empty new sock, the charging may fail
                         * if sysctl_optmem_max was changed between creation of
                         * original socket and cloning
                         */
                        is_charged = sk_filter_charge(newsk, filter);
+               RCU_INIT_POINTER(newsk->sk_filter, filter);
+               rcu_read_unlock();
 
                if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
                        /* We need to make sure that we don't uncharge the new
@@ -1709,9 +1719,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                newsk->sk_incoming_cpu = raw_smp_processor_id();
                atomic64_set(&newsk->sk_cookie, 0);
 
-               mem_cgroup_sk_alloc(newsk);
-               cgroup_sk_alloc(&newsk->sk_cgrp_data);
-
                /*
                 * Before updating sk_refcnt, we must commit prior changes to memory
                 * (Documentation/RCU/rculist_nulls.txt for details)
index eed1ebf7f29d0fac552074b127e5636fecede65f..b1e0dbea1e8cac4283aa2e659ac4f23d9c291a36 100644 (file)
@@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
         * soft irq of receive path or setsockopt from process context
         */
        spin_lock_bh(&reuseport_lock);
-       WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
-                                           lockdep_is_held(&reuseport_lock)),
-                 "multiple allocations for the same socket");
+
+       /* Allocation attempts can occur concurrently via the setsockopt path
+        * and the bind/hash path.  Nothing to do when we lose the race.
+        */
+       if (rcu_dereference_protected(sk->sk_reuseport_cb,
+                                     lockdep_is_held(&reuseport_lock)))
+               goto out;
+
        reuse = __reuseport_alloc(INIT_SOCKS);
        if (!reuse) {
                spin_unlock_bh(&reuseport_lock);
@@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
        reuse->num_socks = 1;
        rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
 
+out:
        spin_unlock_bh(&reuseport_lock);
 
        return 0;
index 001c08696334bba0ceb896c116e595b814af0667..e65fcb45c3f6c1edc70fc9898ebe6404175b102f 100644 (file)
@@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
        sk_daddr_set(newsk, ireq->ir_rmt_addr);
        sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
        newinet->inet_saddr     = ireq->ir_loc_addr;
-       newinet->inet_opt       = ireq->opt;
-       ireq->opt          = NULL;
+       RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
        newinet->mc_index  = inet_iif(skb);
        newinet->mc_ttl    = ip_hdr(skb)->ttl;
        newinet->inet_id   = jiffies;
@@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
        if (__inet_inherit_port(sk, newsk) < 0)
                goto put_and_exit;
        *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
-
+       if (*own_req)
+               ireq->ireq_opt = NULL;
+       else
+               newinet->inet_opt = NULL;
        return newsk;
 
 exit_overflow:
@@ -441,6 +443,7 @@ exit:
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
 put_and_exit:
+       newinet->inet_opt = NULL;
        inet_csk_prepare_forced_close(newsk);
        dccp_done(newsk);
        goto exit;
@@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
                                                              ireq->ir_rmt_addr);
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
-                                           ireq->opt);
+                                           ireq_opt_deref(ireq));
                err = net_xmit_eval(err);
        }
 
@@ -548,7 +551,7 @@ out:
 static void dccp_v4_reqsk_destructor(struct request_sock *req)
 {
        dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
-       kfree(inet_rsk(req)->opt);
+       kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
 }
 
 void dccp_syn_ack_timeout(const struct request_sock *req)
index 8737412c7b27f33125c14c8aa6e64c4ce5cd2dc8..e1d4d898a007df9ba0911fd612caac4a48a50dc2 100644 (file)
@@ -224,7 +224,7 @@ static int dns_resolver_match_preparse(struct key_match_data *match_data)
 static void dns_resolver_describe(const struct key *key, struct seq_file *m)
 {
        seq_puts(m, key->description);
-       if (key_is_instantiated(key)) {
+       if (key_is_positive(key)) {
                int err = PTR_ERR(key->payload.data[dns_key_error]);
 
                if (err)
index 873af0108e243fc269f591ec7d7c59a9c8b374fc..045d8a1762793142de2619497da3034f0af55e0c 100644 (file)
@@ -496,14 +496,15 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index,
                if (!ethernet)
                        return -EINVAL;
                ethernet_dev = of_find_net_device_by_node(ethernet);
+               if (!ethernet_dev)
+                       return -EPROBE_DEFER;
        } else {
                ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]);
+               if (!ethernet_dev)
+                       return -EPROBE_DEFER;
                dev_put(ethernet_dev);
        }
 
-       if (!ethernet_dev)
-               return -EPROBE_DEFER;
-
        if (!dst->cpu_dp) {
                dst->cpu_dp = port;
                dst->cpu_dp->netdev = ethernet_dev;
index 2afa99506f8b4f8ac051c5972acbf5dbd0c9bec3..865e29e62bad87e7fede6599ace0645ebbd2d196 100644 (file)
@@ -1301,28 +1301,33 @@ int dsa_slave_create(struct dsa_port *port, const char *name)
        p->old_duplex = -1;
 
        port->netdev = slave_dev;
-       ret = register_netdev(slave_dev);
-       if (ret) {
-               netdev_err(master, "error %d registering interface %s\n",
-                          ret, slave_dev->name);
-               port->netdev = NULL;
-               free_percpu(p->stats64);
-               free_netdev(slave_dev);
-               return ret;
-       }
 
        netif_carrier_off(slave_dev);
 
        ret = dsa_slave_phy_setup(p, slave_dev);
        if (ret) {
                netdev_err(master, "error %d setting up slave phy\n", ret);
-               unregister_netdev(slave_dev);
-               free_percpu(p->stats64);
-               free_netdev(slave_dev);
-               return ret;
+               goto out_free;
+       }
+
+       ret = register_netdev(slave_dev);
+       if (ret) {
+               netdev_err(master, "error %d registering interface %s\n",
+                          ret, slave_dev->name);
+               goto out_phy;
        }
 
        return 0;
+
+out_phy:
+       phy_disconnect(p->phy);
+       if (of_phy_is_fixed_link(p->dp->dn))
+               of_phy_deregister_fixed_link(p->dp->dn);
+out_free:
+       free_percpu(p->stats64);
+       free_netdev(slave_dev);
+       port->netdev = NULL;
+       return ret;
 }
 
 void dsa_slave_destroy(struct net_device *slave_dev)
index 91a2557942fa8533564943f1f8e8d9df4d7df141..f48fe6fc7e8c413d7d7e4d7d37d1d859a566e8fb 100644 (file)
@@ -70,11 +70,9 @@ config IP_MULTIPLE_TABLES
          address into account. Furthermore, the TOS (Type-Of-Service) field
          of the packet can be used for routing decisions as well.
 
-         If you are interested in this, please see the preliminary
-         documentation at <http://www.compendium.com.ar/policy-routing.txt>
-         and <ftp://post.tepkom.ru/pub/vol2/Linux/docs/advanced-routing.tex>.
-         You will need supporting software from
-         <ftp://ftp.tux.org/pub/net/ip-routing/>.
+         If you need more information, see the Linux Advanced
+         Routing and Traffic Control documentation at
+         <http://lartc.org/howto/lartc.rpdb.html>
 
          If unsure, say N.
 
index 2ae8f54cb32148f2499f78ecbf29259db36bd207..82178cc69c9618bae69c096290a7a96a8b8bade0 100644 (file)
@@ -1951,7 +1951,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
        buf = NULL;
 
        req_inet = inet_rsk(req);
-       opt = xchg(&req_inet->opt, opt);
+       opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
        if (opt)
                kfree_rcu(opt, rcu);
 
@@ -1973,11 +1973,13 @@ req_setattr_failure:
  * values on failure.
  *
  */
-static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
+static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
 {
+       struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
        int hdr_delta = 0;
-       struct ip_options_rcu *opt = *opt_ptr;
 
+       if (!opt || opt->opt.cipso == 0)
+               return 0;
        if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
                u8 cipso_len;
                u8 cipso_off;
@@ -2039,14 +2041,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
  */
 void cipso_v4_sock_delattr(struct sock *sk)
 {
-       int hdr_delta;
-       struct ip_options_rcu *opt;
        struct inet_sock *sk_inet;
+       int hdr_delta;
 
        sk_inet = inet_sk(sk);
-       opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
-       if (!opt || opt->opt.cipso == 0)
-               return;
 
        hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
        if (sk_inet->is_icsk && hdr_delta > 0) {
@@ -2066,15 +2064,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
  */
 void cipso_v4_req_delattr(struct request_sock *req)
 {
-       struct ip_options_rcu *opt;
-       struct inet_request_sock *req_inet;
-
-       req_inet = inet_rsk(req);
-       opt = req_inet->opt;
-       if (!opt || opt->opt.cipso == 0)
-               return;
-
-       cipso_v4_delopt(&req_inet->opt);
+       cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
 }
 
 /**
index 416bb304a281a41970944e5f979eaf4c8aa1ad03..1859c473b21a862b383edebbcf2c1656f9c58b3b 100644 (file)
@@ -86,7 +86,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
                greh = (struct gre_base_hdr *)skb_transport_header(skb);
                pcsum = (__sum16 *)(greh + 1);
 
-               if (gso_partial) {
+               if (gso_partial && skb_is_gso(skb)) {
                        unsigned int partial_adj;
 
                        /* Adjust checksum to account for the fact that
index b9c64b40a83af1e151f553ba0e624fae5060ffd0..b47a59cb3573b3b77aa5cbb9c2739a12ef37a237 100644 (file)
@@ -266,7 +266,7 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
 #if IS_ENABLED(CONFIG_IPV6)
        if (tb->fast_sk_family == AF_INET6)
                return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
-                                           &sk->sk_v6_rcv_saddr,
+                                           inet6_rcv_saddr(sk),
                                            tb->fast_rcv_saddr,
                                            sk->sk_rcv_saddr,
                                            tb->fast_ipv6_only,
@@ -321,13 +321,14 @@ tb_found:
                        goto fail_unlock;
        }
 success:
-       if (!hlist_empty(&tb->owners)) {
+       if (hlist_empty(&tb->owners)) {
                tb->fastreuse = reuse;
                if (sk->sk_reuseport) {
                        tb->fastreuseport = FASTREUSEPORT_ANY;
                        tb->fastuid = uid;
                        tb->fast_rcv_saddr = sk->sk_rcv_saddr;
                        tb->fast_ipv6_only = ipv6_only_sock(sk);
+                       tb->fast_sk_family = sk->sk_family;
 #if IS_ENABLED(CONFIG_IPV6)
                        tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
 #endif
@@ -354,6 +355,7 @@ success:
                                tb->fastuid = uid;
                                tb->fast_rcv_saddr = sk->sk_rcv_saddr;
                                tb->fast_ipv6_only = ipv6_only_sock(sk);
+                               tb->fast_sk_family = sk->sk_family;
 #if IS_ENABLED(CONFIG_IPV6)
                                tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
 #endif
@@ -473,6 +475,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
                }
                spin_unlock_bh(&queue->fastopenq.lock);
        }
+       mem_cgroup_sk_alloc(newsk);
 out:
        release_sock(sk);
        if (req)
@@ -537,9 +540,11 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct net *net = read_pnet(&ireq->ireq_net);
-       struct ip_options_rcu *opt = ireq->opt;
+       struct ip_options_rcu *opt;
        struct rtable *rt;
 
+       opt = ireq_opt_deref(ireq);
+
        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -573,10 +578,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
        struct flowi4 *fl4;
        struct rtable *rt;
 
+       opt = rcu_dereference(ireq->ireq_opt);
        fl4 = &newinet->cork.fl.u.ip4;
 
-       rcu_read_lock();
-       opt = rcu_dereference(newinet->inet_opt);
        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -589,13 +593,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
-       rcu_read_unlock();
        return &rt->dst;
 
 route_err:
        ip_rt_put(rt);
 no_route:
-       rcu_read_unlock();
        __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
 }
index 597bb4cfe805281a5a7cb1f8d1334c2103d34dc8..e7d15fb0d94d9790675356d3144d204b91eab984 100644 (file)
@@ -456,10 +456,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
                        return reuseport_add_sock(sk, sk2);
        }
 
-       /* Initial allocation may have already happened via setsockopt */
-       if (!rcu_access_pointer(sk->sk_reuseport_cb))
-               return reuseport_alloc(sk);
-       return 0;
+       return reuseport_alloc(sk);
 }
 
 int __inet_hash(struct sock *sk, struct sock *osk)
index e7eb590c86ce2b33654c17c61619de74ff07bfd1..b20c8ac640811e1b4c5416134181dd77675db878 100644 (file)
@@ -128,9 +128,9 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
                        break;
                }
                if (cmp == -1)
-                       pp = &(*pp)->rb_left;
+                       pp = &next->rb_left;
                else
-                       pp = &(*pp)->rb_right;
+                       pp = &next->rb_right;
        }
        *parent_p = parent;
        *pp_p = pp;
index 0162fb955b33abf18514cbfd482e72a0ebce6e48..467e44d7587dcf6dc9eeb3845b1fe069b6c8389e 100644 (file)
@@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
        struct ip_tunnel *tunnel;
        struct erspanhdr *ershdr;
        const struct iphdr *iph;
-       __be32 session_id;
        __be32 index;
        int len;
 
@@ -275,8 +274,7 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
        /* The original GRE header does not have key field,
         * Use ERSPAN 10-bit session ID as key.
         */
-       session_id = cpu_to_be32(ntohs(ershdr->session_id));
-       tpi->key = session_id;
+       tpi->key = cpu_to_be32(ntohs(ershdr->session_id) & ID_MASK);
        index = ershdr->md.index;
        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
                                  tpi->flags | TUNNEL_KEY,
@@ -733,7 +731,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
        if (skb_cow_head(skb, dev->needed_headroom))
                goto free_skb;
 
-       if (skb->len > dev->mtu) {
+       if (skb->len - dev->hard_header_len > dev->mtu) {
                pskb_trim(skb, dev->mtu);
                truncate = true;
        }
@@ -1223,6 +1221,7 @@ static int gre_tap_init(struct net_device *dev)
 {
        __gre_tunnel_init(dev);
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+       netif_keep_dst(dev);
 
        return ip_tunnel_init(dev);
 }
@@ -1246,13 +1245,16 @@ static int erspan_tunnel_init(struct net_device *dev)
 
        tunnel->tun_hlen = 8;
        tunnel->parms.iph.protocol = IPPROTO_GRE;
-       t_hlen = tunnel->hlen + sizeof(struct iphdr) + sizeof(struct erspanhdr);
+       tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
+                      sizeof(struct erspanhdr);
+       t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
        dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
        dev->mtu = ETH_DATA_LEN - t_hlen - 4;
        dev->features           |= GRE_FEATURES;
        dev->hw_features        |= GRE_FEATURES;
        dev->priv_flags         |= IFF_LIVE_ADDR_CHANGE;
+       netif_keep_dst(dev);
 
        return ip_tunnel_init(dev);
 }
index fa2dc8f692c631f1ff7fe814c3ee27f0de2a41d8..57fc13c6ab2b7843a4fdb11680c82fc342f465c7 100644 (file)
@@ -311,9 +311,10 @@ drop:
 static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        const struct iphdr *iph = ip_hdr(skb);
-       struct rtable *rt;
+       int (*edemux)(struct sk_buff *skb);
        struct net_device *dev = skb->dev;
-       void (*edemux)(struct sk_buff *skb);
+       struct rtable *rt;
+       int err;
 
        /* if ingress device is enslaved to an L3 master device pass the
         * skb to its handler for processing
@@ -331,7 +332,9 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 
                ipprot = rcu_dereference(inet_protos[protocol]);
                if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
-                       edemux(skb);
+                       err = edemux(skb);
+                       if (unlikely(err))
+                               goto drop_error;
                        /* must reload iph, skb->head might have changed */
                        iph = ip_hdr(skb);
                }
@@ -342,13 +345,10 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
         *      how the packet travels inside Linux networking.
         */
        if (!skb_valid_dst(skb)) {
-               int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
-                                              iph->tos, dev);
-               if (unlikely(err)) {
-                       if (err == -EXDEV)
-                               __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
-                       goto drop;
-               }
+               err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
+                                          iph->tos, dev);
+               if (unlikely(err))
+                       goto drop_error;
        }
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
@@ -399,6 +399,11 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 drop:
        kfree_skb(skb);
        return NET_RX_DROP;
+
+drop_error:
+       if (err == -EXDEV)
+               __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
+       goto drop;
 }
 
 /*
index 5ed63d25095062d44dacfd291e227290d24ea0ed..89453cf62158fbdcb18bf28494e9b54ee143558f 100644 (file)
@@ -168,6 +168,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        struct ip_tunnel_parm *parms = &tunnel->parms;
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *tdev;        /* Device to other host */
+       int pkt_len = skb->len;
        int err;
        int mtu;
 
@@ -229,7 +230,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 
        err = dst_output(tunnel->net, skb->sk, skb);
        if (net_xmit_eval(err) == 0)
-               err = skb->len;
+               err = pkt_len;
        iptunnel_xmit_stats(dev, err);
        return NETDEV_TX_OK;
 
index fb1ad22b5e292d5669c70b5640ad3207c353c6bb..cdd627355ed106ae8228ee4a995f5f3b4588a842 100644 (file)
@@ -128,43 +128,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly;
 
 static int ipip_err(struct sk_buff *skb, u32 info)
 {
-
-/* All the routers (except for Linux) return only
-   8 bytes of packet payload. It means, that precise relaying of
-   ICMP in the real Internet is absolutely infeasible.
- */
+       /* All the routers (except for Linux) return only
+        * 8 bytes of packet payload. It means, that precise relaying of
+        * ICMP in the real Internet is absolutely infeasible.
+        */
        struct net *net = dev_net(skb->dev);
        struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
        const struct iphdr *iph = (const struct iphdr *)skb->data;
-       struct ip_tunnel *t;
-       int err;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
+       struct ip_tunnel *t;
+       int err = 0;
+
+       switch (type) {
+       case ICMP_DEST_UNREACH:
+               switch (code) {
+               case ICMP_SR_FAILED:
+                       /* Impossible event. */
+                       goto out;
+               default:
+                       /* All others are translated to HOST_UNREACH.
+                        * rfc2003 contains "deep thoughts" about NET_UNREACH,
+                        * I believe they are just ether pollution. --ANK
+                        */
+                       break;
+               }
+               break;
+
+       case ICMP_TIME_EXCEEDED:
+               if (code != ICMP_EXC_TTL)
+                       goto out;
+               break;
+
+       case ICMP_REDIRECT:
+               break;
+
+       default:
+               goto out;
+       }
 
-       err = -ENOENT;
        t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
                             iph->daddr, iph->saddr, 0);
-       if (!t)
+       if (!t) {
+               err = -ENOENT;
                goto out;
+       }
 
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
-               ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-                                t->parms.link, 0, iph->protocol, 0);
-               err = 0;
+               ipv4_update_pmtu(skb, net, info, t->parms.link, 0,
+                                iph->protocol, 0);
                goto out;
        }
 
        if (type == ICMP_REDIRECT) {
-               ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
-                             iph->protocol, 0);
-               err = 0;
+               ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0);
                goto out;
        }
 
-       if (t->parms.iph.daddr == 0)
+       if (t->parms.iph.daddr == 0) {
+               err = -ENOENT;
                goto out;
+       }
 
-       err = 0;
        if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
                goto out;
 
index 811689e523c310dc41b71f6ec9fcff3d2749e590..f75fc6b531152a4d1f4fb96052ad47f3e76c9a21 100644 (file)
@@ -330,7 +330,8 @@ static unsigned int ipv4_synproxy_hook(void *priv,
        if (synproxy == NULL)
                return NF_ACCEPT;
 
-       if (nf_is_loopback_packet(skb))
+       if (nf_is_loopback_packet(skb) ||
+           ip_hdr(skb)->protocol != IPPROTO_TCP)
                return NF_ACCEPT;
 
        thoff = ip_hdrlen(skb);
index 94d4cd2d5ea4f4589783528d8e951d3365078bc6..3d9f1c2f81c58afb45a1445f6ed06a97203606a1 100644 (file)
@@ -1520,43 +1520,56 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
 EXPORT_SYMBOL(rt_dst_alloc);
 
 /* called in rcu_read_lock() section */
-static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
-                               u8 tos, struct net_device *dev, int our)
+int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+                         u8 tos, struct net_device *dev,
+                         struct in_device *in_dev, u32 *itag)
 {
-       struct rtable *rth;
-       struct in_device *in_dev = __in_dev_get_rcu(dev);
-       unsigned int flags = RTCF_MULTICAST;
-       u32 itag = 0;
        int err;
 
        /* Primary sanity checks. */
-
        if (!in_dev)
                return -EINVAL;
 
        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
            skb->protocol != htons(ETH_P_IP))
-               goto e_inval;
+               return -EINVAL;
 
        if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
-               goto e_inval;
+               return -EINVAL;
 
        if (ipv4_is_zeronet(saddr)) {
                if (!ipv4_is_local_multicast(daddr))
-                       goto e_inval;
+                       return -EINVAL;
        } else {
                err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
-                                         in_dev, &itag);
+                                         in_dev, itag);
                if (err < 0)
-                       goto e_err;
+                       return err;
        }
+       return 0;
+}
+
+/* called in rcu_read_lock() section */
+static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+                            u8 tos, struct net_device *dev, int our)
+{
+       struct in_device *in_dev = __in_dev_get_rcu(dev);
+       unsigned int flags = RTCF_MULTICAST;
+       struct rtable *rth;
+       u32 itag = 0;
+       int err;
+
+       err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
+       if (err)
+               return err;
+
        if (our)
                flags |= RTCF_LOCAL;
 
        rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
                           IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
        if (!rth)
-               goto e_nobufs;
+               return -ENOBUFS;
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
        rth->dst.tclassid = itag;
@@ -1572,13 +1585,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
        skb_dst_set(skb, &rth->dst);
        return 0;
-
-e_nobufs:
-       return -ENOBUFS;
-e_inval:
-       return -EINVAL;
-e_err:
-       return err;
 }
 
 
@@ -2507,7 +2513,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
        struct rtable *ort = (struct rtable *) dst_orig;
        struct rtable *rt;
 
-       rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
+       rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
        if (rt) {
                struct dst_entry *new = &rt->dst;
 
index b1bb1b3a108232d56aa82383422d68b5ff9da3ed..77cf32a80952fcf3ceff4ada946cc2d0df2411d9 100644 (file)
@@ -355,7 +355,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
        /* We throwed the options of the initial SYN away, so we hope
         * the ACK carries the same options again (see RFC1122 4.2.3.8)
         */
-       ireq->opt = tcp_v4_save_options(sock_net(sk), skb);
+       RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(sock_net(sk), skb));
 
        if (security_inet_conn_request(sk, skb, req)) {
                reqsk_free(req);
index c5d7656beeee29b3c92e1c8824dbf00d3fa32d28..7eec3383702bbab497a12095b55d255532ad5f60 100644 (file)
@@ -6196,7 +6196,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
                struct inet_request_sock *ireq = inet_rsk(req);
 
                kmemcheck_annotate_bitfield(ireq, flags);
-               ireq->opt = NULL;
+               ireq->ireq_opt = NULL;
 #if IS_ENABLED(CONFIG_IPV6)
                ireq->pktopts = NULL;
 #endif
index d9416b5162bc1bdd1acd34fcb4da21cb6b62d0ae..5b027c69cbc540d4e933189f9de5baab5472eadb 100644 (file)
@@ -877,7 +877,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
-                                           ireq->opt);
+                                           ireq_opt_deref(ireq));
                err = net_xmit_eval(err);
        }
 
@@ -889,7 +889,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
  */
 static void tcp_v4_reqsk_destructor(struct request_sock *req)
 {
-       kfree(inet_rsk(req)->opt);
+       kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
 }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -1265,10 +1265,11 @@ static void tcp_v4_init_req(struct request_sock *req,
                            struct sk_buff *skb)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
+       struct net *net = sock_net(sk_listener);
 
        sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
        sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
-       ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb);
+       RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
 }
 
 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
@@ -1355,10 +1356,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        sk_daddr_set(newsk, ireq->ir_rmt_addr);
        sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
        newsk->sk_bound_dev_if = ireq->ir_iif;
-       newinet->inet_saddr           = ireq->ir_loc_addr;
-       inet_opt              = ireq->opt;
-       rcu_assign_pointer(newinet->inet_opt, inet_opt);
-       ireq->opt             = NULL;
+       newinet->inet_saddr   = ireq->ir_loc_addr;
+       inet_opt              = rcu_dereference(ireq->ireq_opt);
+       RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
        newinet->mc_index     = inet_iif(skb);
        newinet->mc_ttl       = ip_hdr(skb)->ttl;
        newinet->rcv_tos      = ip_hdr(skb)->tos;
@@ -1403,9 +1403,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        if (__inet_inherit_port(sk, newsk) < 0)
                goto put_and_exit;
        *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
-       if (*own_req)
+       if (likely(*own_req)) {
                tcp_move_syn(newtp, req);
-
+               ireq->ireq_opt = NULL;
+       } else {
+               newinet->inet_opt = NULL;
+       }
        return newsk;
 
 exit_overflow:
@@ -1416,6 +1419,7 @@ exit:
        tcp_listendrop(sk);
        return NULL;
 put_and_exit:
+       newinet->inet_opt = NULL;
        inet_csk_prepare_forced_close(newsk);
        tcp_done(newsk);
        goto exit;
@@ -1503,23 +1507,23 @@ csum_err:
 }
 EXPORT_SYMBOL(tcp_v4_do_rcv);
 
-void tcp_v4_early_demux(struct sk_buff *skb)
+int tcp_v4_early_demux(struct sk_buff *skb)
 {
        const struct iphdr *iph;
        const struct tcphdr *th;
        struct sock *sk;
 
        if (skb->pkt_type != PACKET_HOST)
-               return;
+               return 0;
 
        if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
-               return;
+               return 0;
 
        iph = ip_hdr(skb);
        th = tcp_hdr(skb);
 
        if (th->doff < sizeof(struct tcphdr) / 4)
-               return;
+               return 0;
 
        sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
                                       iph->saddr, th->source,
@@ -1538,6 +1542,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
                                skb_dst_set_noref(skb, dst);
                }
        }
+       return 0;
 }
 
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
index 1c839c99114cd22bbfbd181cf702acccb3aeb61b..ae60dd3faed0adc71731bc686f878afd4c628d32 100644 (file)
@@ -739,8 +739,10 @@ static void tcp_tsq_handler(struct sock *sk)
                struct tcp_sock *tp = tcp_sk(sk);
 
                if (tp->lost_out > tp->retrans_out &&
-                   tp->snd_cwnd > tcp_packets_in_flight(tp))
+                   tp->snd_cwnd > tcp_packets_in_flight(tp)) {
+                       tcp_mstamp_refresh(tp);
                        tcp_xmit_retransmit_queue(sk);
+               }
 
                tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
                               0, GFP_ATOMIC);
@@ -1806,40 +1808,6 @@ static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
        return !after(end_seq, tcp_wnd_end(tp));
 }
 
-/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
- * should be put on the wire right now.  If so, it returns the number of
- * packets allowed by the congestion window.
- */
-static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
-                                unsigned int cur_mss, int nonagle)
-{
-       const struct tcp_sock *tp = tcp_sk(sk);
-       unsigned int cwnd_quota;
-
-       tcp_init_tso_segs(skb, cur_mss);
-
-       if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
-               return 0;
-
-       cwnd_quota = tcp_cwnd_test(tp, skb);
-       if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
-               cwnd_quota = 0;
-
-       return cwnd_quota;
-}
-
-/* Test if sending is allowed right now. */
-bool tcp_may_send_now(struct sock *sk)
-{
-       const struct tcp_sock *tp = tcp_sk(sk);
-       struct sk_buff *skb = tcp_send_head(sk);
-
-       return skb &&
-               tcp_snd_test(sk, skb, tcp_current_mss(sk),
-                            (tcp_skb_is_last(sk, skb) ?
-                             tp->nonagle : TCP_NAGLE_PUSH));
-}
-
 /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
  * which is put after SKB on the list.  It is very much like
  * tcp_fragment() except that it may make several kinds of assumptions
@@ -2271,6 +2239,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 
        sent_pkts = 0;
 
+       tcp_mstamp_refresh(tp);
        if (!push_one) {
                /* Do MTU probing. */
                result = tcp_mtu_probe(sk);
@@ -2282,7 +2251,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
        }
 
        max_segs = tcp_tso_segs(sk, mss_now);
-       tcp_mstamp_refresh(tp);
        while ((skb = tcp_send_head(sk))) {
                unsigned int limit;
 
@@ -2875,8 +2843,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
                nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
                err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
                             -ENOBUFS;
-               if (!err)
+               if (!err) {
                        skb->skb_mstamp = tp->tcp_mstamp;
+                       tcp_rate_skb_sent(sk, skb);
+               }
        } else {
                err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
        }
@@ -3423,6 +3393,10 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
                goto done;
        }
 
+       /* data was not sent, this is our new send_head */
+       sk->sk_send_head = syn_data;
+       tp->packets_out -= tcp_skb_pcount(syn_data);
+
 fallback:
        /* Send a regular SYN with Fast Open cookie request option */
        if (fo->cookie.len > 0)
@@ -3475,6 +3449,11 @@ int tcp_connect(struct sock *sk)
         */
        tp->snd_nxt = tp->write_seq;
        tp->pushed_seq = tp->write_seq;
+       buff = tcp_send_head(sk);
+       if (unlikely(buff)) {
+               tp->snd_nxt     = TCP_SKB_CB(buff)->seq;
+               tp->pushed_seq  = TCP_SKB_CB(buff)->seq;
+       }
        TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
 
        /* Timer for repeating the SYN until an answer. */
index ef29df8648e4d388547269fe6f972e8ab473419e..ebfbccae62fde187ec5863670c03cd5b5c96258b 100644 (file)
@@ -231,10 +231,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
                }
        }
 
-       /* Initial allocation may have already happened via setsockopt */
-       if (!rcu_access_pointer(sk->sk_reuseport_cb))
-               return reuseport_alloc(sk);
-       return 0;
+       return reuseport_alloc(sk);
 }
 
 /**
@@ -1061,7 +1058,7 @@ back_from_confirm:
                /* ... which is an evident application bug. --ANK */
                release_sock(sk);
 
-               net_dbg_ratelimited("cork app bug 2\n");
+               net_dbg_ratelimited("socket already corked\n");
                err = -EINVAL;
                goto out;
        }
@@ -1144,7 +1141,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
        if (unlikely(!up->pending)) {
                release_sock(sk);
 
-               net_dbg_ratelimited("udp cork app bug 3\n");
+               net_dbg_ratelimited("cork failed\n");
                return -EINVAL;
        }
 
@@ -2221,9 +2218,10 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
        return NULL;
 }
 
-void udp_v4_early_demux(struct sk_buff *skb)
+int udp_v4_early_demux(struct sk_buff *skb)
 {
        struct net *net = dev_net(skb->dev);
+       struct in_device *in_dev = NULL;
        const struct iphdr *iph;
        const struct udphdr *uh;
        struct sock *sk = NULL;
@@ -2234,25 +2232,21 @@ void udp_v4_early_demux(struct sk_buff *skb)
 
        /* validate the packet */
        if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
-               return;
+               return 0;
 
        iph = ip_hdr(skb);
        uh = udp_hdr(skb);
 
-       if (skb->pkt_type == PACKET_BROADCAST ||
-           skb->pkt_type == PACKET_MULTICAST) {
-               struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
+       if (skb->pkt_type == PACKET_MULTICAST) {
+               in_dev = __in_dev_get_rcu(skb->dev);
 
                if (!in_dev)
-                       return;
+                       return 0;
 
-               /* we are supposed to accept bcast packets */
-               if (skb->pkt_type == PACKET_MULTICAST) {
-                       ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
-                                              iph->protocol);
-                       if (!ours)
-                               return;
-               }
+               ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
+                                      iph->protocol);
+               if (!ours)
+                       return 0;
 
                sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
                                                   uh->source, iph->saddr,
@@ -2263,7 +2257,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
        }
 
        if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
-               return;
+               return 0;
 
        skb->sk = sk;
        skb->destructor = sock_efree;
@@ -2272,12 +2266,23 @@ void udp_v4_early_demux(struct sk_buff *skb)
        if (dst)
                dst = dst_check(dst, 0);
        if (dst) {
+               u32 itag = 0;
+
                /* set noref for now.
                 * any place which wants to hold dst has to call
                 * dst_hold_safe()
                 */
                skb_dst_set_noref(skb, dst);
+
+               /* for unconnected multicast sockets we need to validate
+                * the source on each packet
+                */
+               if (!inet_sk(sk)->inet_daddr && in_dev)
+                       return ip_mc_validate_source(skb, iph->daddr,
+                                                    iph->saddr, iph->tos,
+                                                    skb->dev, in_dev, &itag);
        }
+       return 0;
 }
 
 int udp_rcv(struct sk_buff *skb)
index 97658bfc1b58ab8a19026811e3aa917e598e554b..e360d55be5554d1bee56d3f493752ba9ae2c8015 100644 (file)
@@ -120,7 +120,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
                 * will be using a length value equal to only one MSS sized
                 * segment instead of the entire frame.
                 */
-               if (gso_partial) {
+               if (gso_partial && skb_is_gso(skb)) {
                        uh->len = htons(skb_shinfo(skb)->gso_size +
                                        SKB_GSO_CB(skb)->data_offset +
                                        skb->head - (unsigned char *)uh);
index c2e2a78787ec990f4dac2040fb1e26dc150860e2..4a96ebbf8eda5f59a6ff88e836d666a404d2bf0d 100644 (file)
@@ -1399,10 +1399,18 @@ static inline int ipv6_saddr_preferred(int type)
        return 0;
 }
 
-static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev)
+static bool ipv6_use_optimistic_addr(struct net *net,
+                                    struct inet6_dev *idev)
 {
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
-       return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic;
+       if (!idev)
+               return false;
+       if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
+               return false;
+       if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
+               return false;
+
+       return true;
 #else
        return false;
 #endif
@@ -1472,7 +1480,7 @@ static int ipv6_get_saddr_eval(struct net *net,
                /* Rule 3: Avoid deprecated and optimistic addresses */
                u8 avoid = IFA_F_DEPRECATED;
 
-               if (!ipv6_use_optimistic_addr(score->ifa->idev))
+               if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
                        avoid |= IFA_F_OPTIMISTIC;
                ret = ipv6_saddr_preferred(score->addr_type) ||
                      !(score->ifa->flags & avoid);
@@ -2460,7 +2468,8 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
                int max_addresses = in6_dev->cnf.max_addresses;
 
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
-               if (in6_dev->cnf.optimistic_dad &&
+               if ((net->ipv6.devconf_all->optimistic_dad ||
+                    in6_dev->cnf.optimistic_dad) &&
                    !net->ipv6.devconf_all->forwarding && sllao)
                        addr_flags |= IFA_F_OPTIMISTIC;
 #endif
@@ -3051,7 +3060,8 @@ void addrconf_add_linklocal(struct inet6_dev *idev,
        u32 addr_flags = flags | IFA_F_PERMANENT;
 
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
-       if (idev->cnf.optimistic_dad &&
+       if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
+            idev->cnf.optimistic_dad) &&
            !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
                addr_flags |= IFA_F_OPTIMISTIC;
 #endif
@@ -3810,7 +3820,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
                goto out;
 
        if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
-           idev->cnf.accept_dad < 1 ||
+           (dev_net(dev)->ipv6.devconf_all->accept_dad < 1 &&
+            idev->cnf.accept_dad < 1) ||
            !(ifp->flags&IFA_F_TENTATIVE) ||
            ifp->flags & IFA_F_NODAD) {
                bump_id = ifp->flags & IFA_F_TENTATIVE;
@@ -3841,7 +3852,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
         */
        if (ifp->flags & IFA_F_OPTIMISTIC) {
                ip6_ins_rt(ifp->rt);
-               if (ipv6_use_optimistic_addr(idev)) {
+               if (ipv6_use_optimistic_addr(dev_net(dev), idev)) {
                        /* Because optimistic nodes can use this address,
                         * notify listeners. If DAD fails, RTM_DELADDR is sent.
                         */
@@ -3897,7 +3908,9 @@ static void addrconf_dad_work(struct work_struct *w)
                action = DAD_ABORT;
                ifp->state = INET6_IFADDR_STATE_POSTDAD;
 
-               if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6 &&
+               if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
+                    idev->cnf.accept_dad > 1) &&
+                   !idev->cnf.disable_ipv6 &&
                    !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
                        struct in6_addr addr;
 
@@ -4940,9 +4953,10 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
 
        /* Don't send DELADDR notification for TENTATIVE address,
         * since NEWADDR notification is sent only after removing
-        * TENTATIVE flag.
+        * TENTATIVE flag, if DAD has not failed.
         */
-       if (ifa->flags & IFA_F_TENTATIVE && event == RTM_DELADDR)
+       if (ifa->flags & IFA_F_TENTATIVE && !(ifa->flags & IFA_F_DADFAILED) &&
+           event == RTM_DELADDR)
                return;
 
        skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
index 8081bafe441b83f60f414114bfdc3529d6ea0a09..15535ee327c5780e80feb050c2ab4e0d1cc3e99c 100644 (file)
@@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
        }
        opt_space->dst1opt = fopt->dst1opt;
        opt_space->opt_flen = fopt->opt_flen;
+       opt_space->tot_len = fopt->tot_len;
        return opt_space;
 }
 EXPORT_SYMBOL_GPL(fl6_merge_options);
index b7a72d40933441f835708f55e2d8af371661a5fb..59c121b932ac5140380e4592568e455caa64287e 100644 (file)
@@ -408,13 +408,16 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        case ICMPV6_DEST_UNREACH:
                net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
                                    t->parms.name);
-               break;
+               if (code != ICMPV6_PORT_UNREACH)
+                       break;
+               return;
        case ICMPV6_TIME_EXCEED:
                if (code == ICMPV6_EXC_HOPLIMIT) {
                        net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
                                            t->parms.name);
+                       break;
                }
-               break;
+               return;
        case ICMPV6_PARAMPROB:
                teli = 0;
                if (code == ICMPV6_HDR_FIELD)
@@ -430,7 +433,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                        net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
                                            t->parms.name);
                }
-               break;
+               return;
        case ICMPV6_PKT_TOOBIG:
                mtu = be32_to_cpu(info) - offset - t->tun_hlen;
                if (t->dev->type == ARPHRD_ETHER)
@@ -438,7 +441,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                if (mtu < IPV6_MIN_MTU)
                        mtu = IPV6_MIN_MTU;
                t->dev->mtu = mtu;
-               break;
+               return;
        }
 
        if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
@@ -500,8 +503,8 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
                               __u32 *pmtu, __be16 proto)
 {
        struct ip6_tnl *tunnel = netdev_priv(dev);
-       __be16 protocol = (dev->type == ARPHRD_ETHER) ?
-                         htons(ETH_P_TEB) : proto;
+       struct dst_entry *dst = skb_dst(skb);
+       __be16 protocol;
 
        if (dev->type == ARPHRD_ETHER)
                IPCB(skb)->flags = 0;
@@ -515,9 +518,14 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
                tunnel->o_seqno++;
 
        /* Push GRE header. */
+       protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
        gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
                         protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
 
+       /* TooBig packet may have updated dst->dev's mtu */
+       if (dst && dst_mtu(dst) > dst->dev->mtu)
+               dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
+
        return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
                            NEXTHDR_GRE);
 }
@@ -940,24 +948,25 @@ done:
 }
 
 static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
-                       unsigned short type,
-                       const void *daddr, const void *saddr, unsigned int len)
+                        unsigned short type, const void *daddr,
+                        const void *saddr, unsigned int len)
 {
        struct ip6_tnl *t = netdev_priv(dev);
-       struct ipv6hdr *ipv6h = skb_push(skb, t->hlen);
-       __be16 *p = (__be16 *)(ipv6h+1);
+       struct ipv6hdr *ipv6h;
+       __be16 *p;
 
-       ip6_flow_hdr(ipv6h, 0,
-                    ip6_make_flowlabel(dev_net(dev), skb,
-                                       t->fl.u.ip6.flowlabel, true,
-                                       &t->fl.u.ip6));
+       ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
+       ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
+                                                 t->fl.u.ip6.flowlabel,
+                                                 true, &t->fl.u.ip6));
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = NEXTHDR_GRE;
        ipv6h->saddr = t->parms.laddr;
        ipv6h->daddr = t->parms.raddr;
 
-       p[0]            = t->parms.o_flags;
-       p[1]            = htons(type);
+       p = (__be16 *)(ipv6h + 1);
+       p[0] = t->parms.o_flags;
+       p[1] = htons(type);
 
        /*
         *      Set the source hardware address.
@@ -1310,6 +1319,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
        dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+       netif_keep_dst(dev);
 }
 
 static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
index cdb3728faca7746d91e2430f6024f060a82b24fd..4a87f9428ca519b475f8feaceaaa3a225bcfe6d2 100644 (file)
@@ -105,7 +105,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
        for (skb = segs; skb; skb = skb->next) {
                ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
-               if (gso_partial)
+               if (gso_partial && skb_is_gso(skb))
                        payload_len = skb_shinfo(skb)->gso_size +
                                      SKB_GSO_CB(skb)->data_offset +
                                      skb->head - (unsigned char *)(ipv6h + 1);
index 43ca864327c73015f1724879d7ee8268a0de513b..5110a418cc4d0c1040506394460cb482698d8c15 100644 (file)
@@ -1161,11 +1161,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
                if (WARN_ON(v6_cork->opt))
                        return -EINVAL;
 
-               v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
+               v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
                if (unlikely(!v6_cork->opt))
                        return -ENOBUFS;
 
-               v6_cork->opt->tot_len = opt->tot_len;
+               v6_cork->opt->tot_len = sizeof(*opt);
                v6_cork->opt->opt_flen = opt->opt_flen;
                v6_cork->opt->opt_nflen = opt->opt_nflen;
 
index ae73164559d5c4d7f2650ae63c56d76dc93b165c..a1c24443cd9e01de9c6e2d5d68c0f8426e25ceec 100644 (file)
@@ -1043,6 +1043,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
        struct dst_entry *dst = NULL, *ndst = NULL;
        struct net_device *tdev;
        int mtu;
+       unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
        unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
        unsigned int max_headroom = psh_hlen;
        bool use_cache = false;
@@ -1124,7 +1125,7 @@ route_lookup:
                                     t->parms.name);
                goto tx_err_dst_release;
        }
-       mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
+       mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
        if (encap_limit >= 0) {
                max_headroom += 8;
                mtu -= 8;
@@ -1133,7 +1134,7 @@ route_lookup:
                mtu = IPV6_MIN_MTU;
        if (skb_dst(skb) && !t->parms.collect_md)
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
-       if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
+       if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
                *pmtu = mtu;
                err = -EMSGSIZE;
                goto tx_err_dst_release;
@@ -2259,6 +2260,9 @@ static int __init ip6_tunnel_init(void)
 {
        int  err;
 
+       if (!ipv6_mod_enabled())
+               return -EOPNOTSUPP;
+
        err = register_pernet_device(&ip6_tnl_net_ops);
        if (err < 0)
                goto out_pernet;
index 79444a4bfd6d245b66a7edcefe2b5b32801bf2c0..bcdc2d557de13914b5046dece683614eb5f8f8c8 100644 (file)
@@ -445,6 +445,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *tdev;
        struct xfrm_state *x;
+       int pkt_len = skb->len;
        int err = -1;
        int mtu;
 
@@ -502,7 +503,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
 
                u64_stats_update_begin(&tstats->syncp);
-               tstats->tx_bytes += skb->len;
+               tstats->tx_bytes += pkt_len;
                tstats->tx_packets++;
                u64_stats_update_end(&tstats->syncp);
        } else {
index a5cd43d75393db2152fa5a4edb0b505d20fc2f45..437af8c95277f7a3364f2d0492455a172cc22ab8 100644 (file)
@@ -353,7 +353,7 @@ static unsigned int ipv6_synproxy_hook(void *priv,
        nexthdr = ipv6_hdr(skb)->nexthdr;
        thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
                                 &frag_off);
-       if (thoff < 0)
+       if (thoff < 0 || nexthdr != IPPROTO_TCP)
                return NF_ACCEPT;
 
        th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
index 26cc9f483b6d282f0a665bfc4c2c206da7981921..a96d5b385d8fa25bab416d3f6bc836e743b3ca99 100644 (file)
@@ -1325,7 +1325,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
        struct dst_entry *new = NULL;
 
        rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
-                      DST_OBSOLETE_NONE, 0);
+                      DST_OBSOLETE_DEAD, 0);
        if (rt) {
                rt6_info_init(rt);
 
index e2ecfb137297b931f05eb4d511f8e85cc7633336..40d7234c27b991e54f5cfbacb5d7081e8277ae56 100644 (file)
@@ -1015,6 +1015,7 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
                 */
                offset = skb_transport_offset(skb);
                skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+               csum = skb->csum;
 
                skb->ip_summed = CHECKSUM_NONE;
 
index ee485df73ccdd0f154579741462c19ab68574b4f..02d61101b108dcc6b8360e3184f9432cf2192955 100644 (file)
@@ -1314,6 +1314,9 @@ again:
 
                        hlist_del_init(&session->hlist);
 
+                       if (test_and_set_bit(0, &session->dead))
+                               goto again;
+
                        if (session->ref != NULL)
                                (*session->ref)(session);
 
@@ -1685,14 +1688,12 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
 
 /* This function is used by the netlink TUNNEL_DELETE command.
  */
-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
-       l2tp_tunnel_inc_refcount(tunnel);
-       if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
-               l2tp_tunnel_dec_refcount(tunnel);
-               return 1;
+       if (!test_and_set_bit(0, &tunnel->dead)) {
+               l2tp_tunnel_inc_refcount(tunnel);
+               queue_work(l2tp_wq, &tunnel->del_work);
        }
-       return 0;
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
 
@@ -1750,6 +1751,9 @@ EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
  */
 int l2tp_session_delete(struct l2tp_session *session)
 {
+       if (test_and_set_bit(0, &session->dead))
+               return 0;
+
        if (session->ref)
                (*session->ref)(session);
        __l2tp_session_unhash(session);
index a305e0c5925a2ccb202383642a9f7220282d7489..67c79d9b5c6cb418221f1c06df62c48117405b4c 100644 (file)
@@ -76,6 +76,7 @@ struct l2tp_session_cfg {
 struct l2tp_session {
        int                     magic;          /* should be
                                                 * L2TP_SESSION_MAGIC */
+       long                    dead;
 
        struct l2tp_tunnel      *tunnel;        /* back pointer to tunnel
                                                 * context */
@@ -160,6 +161,9 @@ struct l2tp_tunnel_cfg {
 
 struct l2tp_tunnel {
        int                     magic;          /* Should be L2TP_TUNNEL_MAGIC */
+
+       unsigned long           dead;
+
        struct rcu_head rcu;
        rwlock_t                hlist_lock;     /* protect session_hlist */
        bool                    acpt_newsess;   /* Indicates whether this
@@ -254,7 +258,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
                       u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
                       struct l2tp_tunnel **tunnelp);
 void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
 struct l2tp_session *l2tp_session_create(int priv_size,
                                         struct l2tp_tunnel *tunnel,
                                         u32 session_id, u32 peer_session_id,
index 87da9ef61860886d9bf5cc668524ef8b48a60c31..014a7bc2a872514cf4422302a92b692ecda31c27 100644 (file)
@@ -44,7 +44,6 @@ struct l2tp_eth {
        struct net_device       *dev;
        struct sock             *tunnel_sock;
        struct l2tp_session     *session;
-       struct list_head        list;
        atomic_long_t           tx_bytes;
        atomic_long_t           tx_packets;
        atomic_long_t           tx_dropped;
@@ -58,17 +57,6 @@ struct l2tp_eth_sess {
        struct net_device       *dev;
 };
 
-/* per-net private data for this module */
-static unsigned int l2tp_eth_net_id;
-struct l2tp_eth_net {
-       struct list_head l2tp_eth_dev_list;
-       spinlock_t l2tp_eth_lock;
-};
-
-static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
-{
-       return net_generic(net, l2tp_eth_net_id);
-}
 
 static int l2tp_eth_dev_init(struct net_device *dev)
 {
@@ -84,12 +72,6 @@ static int l2tp_eth_dev_init(struct net_device *dev)
 
 static void l2tp_eth_dev_uninit(struct net_device *dev)
 {
-       struct l2tp_eth *priv = netdev_priv(dev);
-       struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));
-
-       spin_lock(&pn->l2tp_eth_lock);
-       list_del_init(&priv->list);
-       spin_unlock(&pn->l2tp_eth_lock);
        dev_put(dev);
 }
 
@@ -273,7 +255,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
        struct l2tp_eth *priv;
        struct l2tp_eth_sess *spriv;
        int rc;
-       struct l2tp_eth_net *pn;
 
        if (cfg->ifname) {
                strlcpy(name, cfg->ifname, IFNAMSIZ);
@@ -305,7 +286,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
        priv = netdev_priv(dev);
        priv->dev = dev;
        priv->session = session;
-       INIT_LIST_HEAD(&priv->list);
 
        priv->tunnel_sock = tunnel->sock;
        session->recv_skb = l2tp_eth_dev_recv;
@@ -326,10 +306,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
        strlcpy(session->ifname, dev->name, IFNAMSIZ);
 
        dev_hold(dev);
-       pn = l2tp_eth_pernet(dev_net(dev));
-       spin_lock(&pn->l2tp_eth_lock);
-       list_add(&priv->list, &pn->l2tp_eth_dev_list);
-       spin_unlock(&pn->l2tp_eth_lock);
 
        return 0;
 
@@ -342,22 +318,6 @@ out:
        return rc;
 }
 
-static __net_init int l2tp_eth_init_net(struct net *net)
-{
-       struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);
-
-       INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
-       spin_lock_init(&pn->l2tp_eth_lock);
-
-       return 0;
-}
-
-static struct pernet_operations l2tp_eth_net_ops = {
-       .init = l2tp_eth_init_net,
-       .id   = &l2tp_eth_net_id,
-       .size = sizeof(struct l2tp_eth_net),
-};
-
 
 static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
        .session_create = l2tp_eth_create,
@@ -371,25 +331,18 @@ static int __init l2tp_eth_init(void)
 
        err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
        if (err)
-               goto out;
-
-       err = register_pernet_device(&l2tp_eth_net_ops);
-       if (err)
-               goto out_unreg;
+               goto err;
 
        pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");
 
        return 0;
 
-out_unreg:
-       l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
-out:
+err:
        return err;
 }
 
 static void __exit l2tp_eth_exit(void)
 {
-       unregister_pernet_device(&l2tp_eth_net_ops);
        l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
 }
 
index 50e3ee9a9d612e6c033ef761d8dfeb1e6f209541..f50452b919d5ece9d70e2c25634bc8fef9962e6b 100644 (file)
@@ -437,11 +437,11 @@ static void pppol2tp_session_close(struct l2tp_session *session)
 
        BUG_ON(session->magic != L2TP_SESSION_MAGIC);
 
-       if (sock) {
+       if (sock)
                inet_shutdown(sock, SEND_SHUTDOWN);
-               /* Don't let the session go away before our socket does */
-               l2tp_session_inc_refcount(session);
-       }
+
+       /* Don't let the session go away before our socket does */
+       l2tp_session_inc_refcount(session);
 }
 
 /* Really kill the session socket. (Called from sock_put() if
@@ -988,6 +988,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
                 session->name, cmd, arg);
 
        sk = ps->sock;
+       if (!sk)
+               return -EBADR;
+
        sock_hold(sk);
 
        switch (cmd) {
index a354f1939e49b83a5c04bcebd54952d33e8dae8d..fb15d3b97cb214078f1533f880350fe280b57c69 100644 (file)
@@ -2727,12 +2727,6 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
        if (!ieee80211_sdata_running(sdata))
                return -ENETDOWN;
 
-       if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
-               ret = drv_set_bitrate_mask(local, sdata, mask);
-               if (ret)
-                       return ret;
-       }
-
        /*
         * If active validate the setting and reject it if it doesn't leave
         * at least one basic rate usable, since we really have to be able
@@ -2748,6 +2742,12 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
                        return -EINVAL;
        }
 
+       if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
+               ret = drv_set_bitrate_mask(local, sdata, mask);
+               if (ret)
+                       return ret;
+       }
+
        for (i = 0; i < NUM_NL80211_BANDS; i++) {
                struct ieee80211_supported_band *sband = wiphy->bands[i];
                int j;
index a98fc2b5e0dc94664a19ba319099385276212c44..938049395f9073169035ded5f506cb0c192c41f7 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright 2015      Intel Deutschland GmbH
+ * Copyright 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -19,6 +19,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <net/mac80211.h>
+#include <crypto/algapi.h>
 #include <asm/unaligned.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
@@ -609,6 +610,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key)
        ieee80211_key_free_common(key);
 }
 
+static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata,
+                                   struct ieee80211_key *old,
+                                   struct ieee80211_key *new)
+{
+       u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP];
+       u8 *tk_old, *tk_new;
+
+       if (!old || new->conf.keylen != old->conf.keylen)
+               return false;
+
+       tk_old = old->conf.key;
+       tk_new = new->conf.key;
+
+       /*
+        * In station mode, don't compare the TX MIC key, as it's never used
+        * and offloaded rekeying may not care to send it to the host. This
+        * is the case in iwlwifi, for example.
+        */
+       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+           new->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
+           new->conf.keylen == WLAN_KEY_LEN_TKIP &&
+           !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+               memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP);
+               memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP);
+               memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
+               memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
+               tk_old = tkip_old;
+               tk_new = tkip_new;
+       }
+
+       return !crypto_memneq(tk_old, tk_new, new->conf.keylen);
+}
+
 int ieee80211_key_link(struct ieee80211_key *key,
                       struct ieee80211_sub_if_data *sdata,
                       struct sta_info *sta)
@@ -620,9 +654,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
 
        pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
        idx = key->conf.keyidx;
-       key->local = sdata->local;
-       key->sdata = sdata;
-       key->sta = sta;
 
        mutex_lock(&sdata->local->key_mtx);
 
@@ -633,6 +664,20 @@ int ieee80211_key_link(struct ieee80211_key *key,
        else
                old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
 
+       /*
+        * Silently accept key re-installation without really installing the
+        * new version of the key to avoid nonce reuse or replay issues.
+        */
+       if (ieee80211_key_identical(sdata, old_key, key)) {
+               ieee80211_key_free_unused(key);
+               ret = 0;
+               goto out;
+       }
+
+       key->local = sdata->local;
+       key->sdata = sdata;
+       key->sta = sta;
+
        increment_tailroom_need_count(sdata);
 
        ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
@@ -648,6 +693,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
                ret = 0;
        }
 
+ out:
        mutex_unlock(&sdata->local->key_mtx);
 
        return ret;
index af3d636534efb8b81fc47547f2fb027f61f5be34..d30f7bd741d0610cd261b4514baf0b1cbf20c607 100644 (file)
@@ -286,6 +286,7 @@ struct ncsi_dev_priv {
        struct work_struct  work;            /* For channel management     */
        struct packet_type  ptype;           /* NCSI packet Rx handler     */
        struct list_head    node;            /* Form NCSI device list      */
+#define NCSI_MAX_VLAN_VIDS     15
        struct list_head    vlan_vids;       /* List of active VLAN IDs */
 };
 
index 6898e7229285a6720115a37d58673f569d4a9bff..f135938bf781eb9c135648599ce06e535c780390 100644 (file)
@@ -187,7 +187,7 @@ static struct ncsi_aen_handler {
 } ncsi_aen_handlers[] = {
        { NCSI_PKT_AEN_LSC,    12, ncsi_aen_handler_lsc    },
        { NCSI_PKT_AEN_CR,      4, ncsi_aen_handler_cr     },
-       { NCSI_PKT_AEN_HNCDSC,  4, ncsi_aen_handler_hncdsc }
+       { NCSI_PKT_AEN_HNCDSC,  8, ncsi_aen_handler_hncdsc }
 };
 
 int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb)
index 3fd3c39e627836117f250fc7d5037415491ee6f5..28c42b22b7489efa1d7fffd51eee2a146ecad636 100644 (file)
@@ -189,6 +189,7 @@ static void ncsi_channel_monitor(unsigned long data)
        struct ncsi_channel *nc = (struct ncsi_channel *)data;
        struct ncsi_package *np = nc->package;
        struct ncsi_dev_priv *ndp = np->ndp;
+       struct ncsi_channel_mode *ncm;
        struct ncsi_cmd_arg nca;
        bool enabled, chained;
        unsigned int monitor_state;
@@ -202,11 +203,15 @@ static void ncsi_channel_monitor(unsigned long data)
        monitor_state = nc->monitor.state;
        spin_unlock_irqrestore(&nc->lock, flags);
 
-       if (!enabled || chained)
+       if (!enabled || chained) {
+               ncsi_stop_channel_monitor(nc);
                return;
+       }
        if (state != NCSI_CHANNEL_INACTIVE &&
-           state != NCSI_CHANNEL_ACTIVE)
+           state != NCSI_CHANNEL_ACTIVE) {
+               ncsi_stop_channel_monitor(nc);
                return;
+       }
 
        switch (monitor_state) {
        case NCSI_CHANNEL_MONITOR_START:
@@ -217,28 +222,28 @@ static void ncsi_channel_monitor(unsigned long data)
                nca.type = NCSI_PKT_CMD_GLS;
                nca.req_flags = 0;
                ret = ncsi_xmit_cmd(&nca);
-               if (ret) {
+               if (ret)
                        netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
                                   ret);
-                       return;
-               }
-
                break;
        case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
                break;
        default:
-               if (!(ndp->flags & NCSI_DEV_HWA) &&
-                   state == NCSI_CHANNEL_ACTIVE) {
+               if (!(ndp->flags & NCSI_DEV_HWA)) {
                        ncsi_report_link(ndp, true);
                        ndp->flags |= NCSI_DEV_RESHUFFLE;
                }
 
+               ncsi_stop_channel_monitor(nc);
+
+               ncm = &nc->modes[NCSI_MODE_LINK];
                spin_lock_irqsave(&nc->lock, flags);
                nc->state = NCSI_CHANNEL_INVISIBLE;
+               ncm->data[2] &= ~0x1;
                spin_unlock_irqrestore(&nc->lock, flags);
 
                spin_lock_irqsave(&ndp->lock, flags);
-               nc->state = NCSI_CHANNEL_INACTIVE;
+               nc->state = NCSI_CHANNEL_ACTIVE;
                list_add_tail_rcu(&nc->link, &ndp->channel_queue);
                spin_unlock_irqrestore(&ndp->lock, flags);
                ncsi_process_next_channel(ndp);
@@ -732,6 +737,10 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
        if (index < 0) {
                netdev_err(ndp->ndev.dev,
                           "Failed to add new VLAN tag, error %d\n", index);
+               if (index == -ENOSPC)
+                       netdev_err(ndp->ndev.dev,
+                                  "Channel %u already has all VLAN filters set\n",
+                                  nc->id);
                return -1;
        }
 
@@ -998,12 +1007,15 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
        struct ncsi_package *np;
        struct ncsi_channel *nc;
        unsigned int cap;
+       bool has_channel = false;
 
        /* The hardware arbitration is disabled if any one channel
         * doesn't support explicitly.
         */
        NCSI_FOR_EACH_PACKAGE(ndp, np) {
                NCSI_FOR_EACH_CHANNEL(np, nc) {
+                       has_channel = true;
+
                        cap = nc->caps[NCSI_CAP_GENERIC].cap;
                        if (!(cap & NCSI_CAP_GENERIC_HWA) ||
                            (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
@@ -1014,8 +1026,13 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
                }
        }
 
-       ndp->flags |= NCSI_DEV_HWA;
-       return true;
+       if (has_channel) {
+               ndp->flags |= NCSI_DEV_HWA;
+               return true;
+       }
+
+       ndp->flags &= ~NCSI_DEV_HWA;
+       return false;
 }
 
 static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
@@ -1403,7 +1420,6 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
 
 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
-       struct ncsi_channel_filter *ncf;
        struct ncsi_dev_priv *ndp;
        unsigned int n_vids = 0;
        struct vlan_vid *vlan;
@@ -1420,7 +1436,6 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
        }
 
        ndp = TO_NCSI_DEV_PRIV(nd);
-       ncf = ndp->hot_channel->filters[NCSI_FILTER_VLAN];
 
        /* Add the VLAN id to our internal list */
        list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
@@ -1431,12 +1446,11 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
                        return 0;
                }
        }
-
-       if (n_vids >= ncf->total) {
-               netdev_info(dev,
-                           "NCSI Channel supports up to %u VLAN tags but %u are already set\n",
-                           ncf->total, n_vids);
-               return -EINVAL;
+       if (n_vids >= NCSI_MAX_VLAN_VIDS) {
+               netdev_warn(dev,
+                           "tried to add vlan id %u but NCSI max already registered (%u)\n",
+                           vid, NCSI_MAX_VLAN_VIDS);
+               return -ENOSPC;
        }
 
        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
index 265b9a892d4171bc74572891edb631dabac98c41..927dad4759d1c9b23e4037b218152c267bafa7d9 100644 (file)
@@ -959,7 +959,7 @@ static struct ncsi_rsp_handler {
        { NCSI_PKT_RSP_EGMF,    4, ncsi_rsp_handler_egmf    },
        { NCSI_PKT_RSP_DGMF,    4, ncsi_rsp_handler_dgmf    },
        { NCSI_PKT_RSP_SNFC,    4, ncsi_rsp_handler_snfc    },
-       { NCSI_PKT_RSP_GVI,    36, ncsi_rsp_handler_gvi     },
+       { NCSI_PKT_RSP_GVI,    40, ncsi_rsp_handler_gvi     },
        { NCSI_PKT_RSP_GC,     32, ncsi_rsp_handler_gc      },
        { NCSI_PKT_RSP_GP,     -1, ncsi_rsp_handler_gp      },
        { NCSI_PKT_RSP_GCPS,  172, ncsi_rsp_handler_gcps    },
index e495b5e484b11c03c26674d999e3dc31422efab9..cf84f7b37cd9dfb828892e23b0aa2603827b6427 100644 (file)
@@ -1191,14 +1191,17 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
              from->family == to->family))
                return -IPSET_ERR_TYPE_MISMATCH;
 
-       if (from->ref_netlink || to->ref_netlink)
+       write_lock_bh(&ip_set_ref_lock);
+
+       if (from->ref_netlink || to->ref_netlink) {
+               write_unlock_bh(&ip_set_ref_lock);
                return -EBUSY;
+       }
 
        strncpy(from_name, from->name, IPSET_MAXNAMELEN);
        strncpy(from->name, to->name, IPSET_MAXNAMELEN);
        strncpy(to->name, from_name, IPSET_MAXNAMELEN);
 
-       write_lock_bh(&ip_set_ref_lock);
        swap(from->ref, to->ref);
        ip_set(inst, from_id) = to;
        ip_set(inst, to_id) = from;
@@ -2072,25 +2075,28 @@ static struct pernet_operations ip_set_net_ops = {
 static int __init
 ip_set_init(void)
 {
-       int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+       int ret = register_pernet_subsys(&ip_set_net_ops);
+
+       if (ret) {
+               pr_err("ip_set: cannot register pernet_subsys.\n");
+               return ret;
+       }
 
+       ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
        if (ret != 0) {
                pr_err("ip_set: cannot register with nfnetlink.\n");
+               unregister_pernet_subsys(&ip_set_net_ops);
                return ret;
        }
+
        ret = nf_register_sockopt(&so_set);
        if (ret != 0) {
                pr_err("SO_SET registry failed: %d\n", ret);
                nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+               unregister_pernet_subsys(&ip_set_net_ops);
                return ret;
        }
-       ret = register_pernet_subsys(&ip_set_net_ops);
-       if (ret) {
-               pr_err("ip_set: cannot register pernet_subsys.\n");
-               nf_unregister_sockopt(&so_set);
-               nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
-               return ret;
-       }
+
        pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL);
        return 0;
 }
@@ -2098,9 +2104,10 @@ ip_set_init(void)
 static void __exit
 ip_set_fini(void)
 {
-       unregister_pernet_subsys(&ip_set_net_ops);
        nf_unregister_sockopt(&so_set);
        nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+
+       unregister_pernet_subsys(&ip_set_net_ops);
        pr_debug("these are the famous last words\n");
 }
 
index f236c0bc7b3f3e171e4303dd7c5379f34011812f..51063d9ed0f75d53be7a59738bf859570cd67d9b 100644 (file)
@@ -1041,12 +1041,24 @@ out:
 static int
 mtype_head(struct ip_set *set, struct sk_buff *skb)
 {
-       const struct htype *h = set->data;
+       struct htype *h = set->data;
        const struct htable *t;
        struct nlattr *nested;
        size_t memsize;
        u8 htable_bits;
 
+       /* If any members have expired, set->elements will be wrong
+        * mytype_expire function will update it with the right count.
+        * we do not hold set->lock here, so grab it first.
+        * set->elements can still be incorrect in the case of a huge set,
+        * because elements might time out during the listing.
+        */
+       if (SET_WITH_TIMEOUT(set)) {
+               spin_lock_bh(&set->lock);
+               mtype_expire(set, h);
+               spin_unlock_bh(&set->lock);
+       }
+
        rcu_read_lock_bh();
        t = rcu_dereference_bh_nfnl(h->table);
        memsize = mtype_ahash_memsize(h, t) + set->ext_size;
index 20bfbd315f61822e53e90273356686eb9f1d3648..613eb212cb48896f1c5a45d4f4dfdd2d6a245c29 100644 (file)
@@ -123,13 +123,12 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
                return ret;
 
        ip &= ip_set_hostmask(h->netmask);
+       e.ip = htonl(ip);
+       if (e.ip == 0)
+               return -IPSET_ERR_HASH_ELEM;
 
-       if (adt == IPSET_TEST) {
-               e.ip = htonl(ip);
-               if (e.ip == 0)
-                       return -IPSET_ERR_HASH_ELEM;
+       if (adt == IPSET_TEST)
                return adtfn(set, &e, &ext, &ext, flags);
-       }
 
        ip_to = ip;
        if (tb[IPSET_ATTR_IP_TO]) {
@@ -148,17 +147,20 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
-       if (retried)
+       if (retried) {
                ip = ntohl(h->next.ip);
-       for (; !before(ip_to, ip); ip += hosts) {
                e.ip = htonl(ip);
-               if (e.ip == 0)
-                       return -IPSET_ERR_HASH_ELEM;
+       }
+       for (; ip <= ip_to;) {
                ret = adtfn(set, &e, &ext, &ext, flags);
-
                if (ret && !ip_set_eexist(ret, flags))
                        return ret;
 
+               ip += hosts;
+               e.ip = htonl(ip);
+               if (e.ip == 0)
+                       return 0;
+
                ret = 0;
        }
        return ret;
index b64cf14e8352f488588af54fc5c650b27f31a09d..f3ba8348cf9df331ea0f36ba1b1bac99123b0895 100644 (file)
@@ -149,7 +149,7 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (retried)
                ip = ntohl(h->next.ip);
-       for (; !before(ip_to, ip); ip++) {
+       for (; ip <= ip_to; ip++) {
                e.ip = htonl(ip);
                ret = adtfn(set, &e, &ext, &ext, flags);
 
index f438740e6c6a4e4ee94d971de8cba7556ea65bec..ddb8039ec1d2736ae21f3160da52e95ff1e022a4 100644 (file)
@@ -178,7 +178,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (retried)
                ip = ntohl(h->next.ip);
-       for (; !before(ip_to, ip); ip++) {
+       for (; ip <= ip_to; ip++) {
                p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
                                                       : port;
                for (; p <= port_to; p++) {
index 6215fb898c509ebcd35d555ee2fc7a2371733f0d..a7f4d7a85420991e196f0a12de449ef1c20fa57b 100644 (file)
@@ -185,7 +185,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (retried)
                ip = ntohl(h->next.ip);
-       for (; !before(ip_to, ip); ip++) {
+       for (; ip <= ip_to; ip++) {
                p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
                                                       : port;
                for (; p <= port_to; p++) {
index 5ab1b99a53c2b4338837a1fc17e067b9fee6a9d7..a2f19b9906e90ebe4b990f9583ab9f95c7a6b68e 100644 (file)
@@ -271,7 +271,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (retried)
                ip = ntohl(h->next.ip);
-       for (; !before(ip_to, ip); ip++) {
+       for (; ip <= ip_to; ip++) {
                e.ip = htonl(ip);
                p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
                                                       : port;
@@ -281,7 +281,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                              ip == ntohl(h->next.ip) &&
                              p == ntohs(h->next.port)
                                ? ntohl(h->next.ip2) : ip2_from;
-                       while (!after(ip2, ip2_to)) {
+                       while (ip2 <= ip2_to) {
                                e.ip2 = htonl(ip2);
                                ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
                                                                &cidr);
index 5d9e895452e744a38c7324bcf37924c977b62727..1c67a1761e458e584b2277b8ae1462cb5fc666bb 100644 (file)
@@ -193,7 +193,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
        }
        if (retried)
                ip = ntohl(h->next.ip);
-       while (!after(ip, ip_to)) {
+       while (ip <= ip_to) {
                e.ip = htonl(ip);
                last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
                ret = adtfn(set, &e, &ext, &ext, flags);
index 44cf11939c916473b024d0b3fddaa6dbb1777ed8..d417074f1c1a298c33a324748e3aa274ed66616b 100644 (file)
@@ -255,7 +255,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (retried)
                ip = ntohl(h->next.ip);
-       while (!after(ip, ip_to)) {
+       while (ip <= ip_to) {
                e.ip = htonl(ip);
                last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
                ret = adtfn(set, &e, &ext, &ext, flags);
index db614e13b193ddb1733bcb098d53d2f12520066f..7f9ae2e9645be84faa6d24f2b7ee8b973ad11b6b 100644 (file)
@@ -250,13 +250,13 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (retried)
                ip = ntohl(h->next.ip[0]);
 
-       while (!after(ip, ip_to)) {
+       while (ip <= ip_to) {
                e.ip[0] = htonl(ip);
                last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
                ip2 = (retried &&
                       ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
                                                   : ip2_from;
-               while (!after(ip2, ip2_to)) {
+               while (ip2 <= ip2_to) {
                        e.ip[1] = htonl(ip2);
                        last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
                        ret = adtfn(set, &e, &ext, &ext, flags);
index 54b64b6cd0cdb2196e1f507909784096af45e11f..e6ef382febe46e8a4d8af045abcb47ae546c8710 100644 (file)
@@ -241,7 +241,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (retried)
                ip = ntohl(h->next.ip);
-       while (!after(ip, ip_to)) {
+       while (ip <= ip_to) {
                e.ip = htonl(ip);
                last = ip_set_range_to_cidr(ip, ip_to, &cidr);
                e.cidr = cidr - 1;
index aff846960ac4423da8ec5a99f4faccf294a812e0..8602f2595a1a1606f6380d6f00af40fa33759630 100644 (file)
@@ -291,7 +291,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (retried)
                ip = ntohl(h->next.ip[0]);
 
-       while (!after(ip, ip_to)) {
+       while (ip <= ip_to) {
                e.ip[0] = htonl(ip);
                ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
                p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
@@ -301,7 +301,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                        ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
                               p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
                                                         : ip2_from;
-                       while (!after(ip2, ip2_to)) {
+                       while (ip2 <= ip2_to) {
                                e.ip[1] = htonl(ip2);
                                ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
                                                                &e.cidr[1]);
index 90d396814798e15d327aafe89c4bbb5a611da480..4527921b1c3ac97b95a48c62f699b71d8b853be0 100644 (file)
@@ -921,6 +921,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
 {
        struct sk_buff *new_skb = NULL;
        struct iphdr *old_iph = NULL;
+       __u8 old_dsfield;
 #ifdef CONFIG_IP_VS_IPV6
        struct ipv6hdr *old_ipv6h = NULL;
 #endif
@@ -945,7 +946,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
                        *payload_len =
                                ntohs(old_ipv6h->payload_len) +
                                sizeof(*old_ipv6h);
-               *dsfield = ipv6_get_dsfield(old_ipv6h);
+               old_dsfield = ipv6_get_dsfield(old_ipv6h);
                *ttl = old_ipv6h->hop_limit;
                if (df)
                        *df = 0;
@@ -960,12 +961,15 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
 
                /* fix old IP header checksum */
                ip_send_check(old_iph);
-               *dsfield = ipv4_get_dsfield(old_iph);
+               old_dsfield = ipv4_get_dsfield(old_iph);
                *ttl = old_iph->ttl;
                if (payload_len)
                        *payload_len = ntohs(old_iph->tot_len);
        }
 
+       /* Implement full-functionality option for ECN encapsulation */
+       *dsfield = INET_ECN_encapsulate(old_dsfield, old_dsfield);
+
        return skb;
 error:
        kfree_skb(skb);
index f393a7086025f6c5e16032baeed63fa1cffba168..af8345fc4fbde30adad608b61a4fd293b4554584 100644 (file)
@@ -429,7 +429,7 @@ nf_nat_setup_info(struct nf_conn *ct,
 
                srchash = hash_by_src(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-               lock = &nf_nat_locks[srchash % ARRAY_SIZE(nf_nat_locks)];
+               lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
                spin_lock_bh(lock);
                hlist_add_head_rcu(&ct->nat_bysource,
                                   &nf_nat_bysource[srchash]);
@@ -532,9 +532,9 @@ static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
        unsigned int h;
 
        h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-       spin_lock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]);
+       spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
        hlist_del_rcu(&ct->nat_bysource);
-       spin_unlock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]);
+       spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
 }
 
 static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
@@ -807,8 +807,8 @@ static int __init nf_nat_init(void)
 
        /* Leave them the same for the moment. */
        nf_nat_htable_size = nf_conntrack_htable_size;
-       if (nf_nat_htable_size < ARRAY_SIZE(nf_nat_locks))
-               nf_nat_htable_size = ARRAY_SIZE(nf_nat_locks);
+       if (nf_nat_htable_size < CONNTRACK_LOCKS)
+               nf_nat_htable_size = CONNTRACK_LOCKS;
 
        nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
        if (!nf_nat_bysource)
@@ -821,7 +821,7 @@ static int __init nf_nat_init(void)
                return ret;
        }
 
-       for (i = 0; i < ARRAY_SIZE(nf_nat_locks); i++)
+       for (i = 0; i < CONNTRACK_LOCKS; i++)
                spin_lock_init(&nf_nat_locks[i]);
 
        nf_ct_helper_expectfn_register(&follow_master_nat);
index 929927171426a6e286bd5cc4412aaa99b77fec77..64e1ee09122582bce81a4ee996064763083bcfa0 100644 (file)
@@ -1048,7 +1048,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
                if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
                        goto nla_put_failure;
 
-               if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
+               if (basechain->stats && nft_dump_stats(skb, basechain->stats))
                        goto nla_put_failure;
        }
 
@@ -1487,8 +1487,8 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
 
                chain2 = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME],
                                                genmask);
-               if (IS_ERR(chain2))
-                       return PTR_ERR(chain2);
+               if (!IS_ERR(chain2))
+                       return -EEXIST;
        }
 
        if (nla[NFTA_CHAIN_COUNTERS]) {
@@ -2741,8 +2741,10 @@ cont:
        list_for_each_entry(i, &ctx->table->sets, list) {
                if (!nft_is_active_next(ctx->net, i))
                        continue;
-               if (!strcmp(set->name, i->name))
+               if (!strcmp(set->name, i->name)) {
+                       kfree(set->name);
                        return -ENFILE;
+               }
        }
        return 0;
 }
index c83a3b5e1c6c2a91b713b6681a794bd79ab3fa08..d8571f4142080a3c121fc90f0b52d81ee9df6712 100644 (file)
@@ -892,7 +892,7 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
                        return ERR_PTR(-EFAULT);
 
-               strlcpy(info->name, compat_tmp.name, sizeof(info->name));
+               memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
                info->num_counters = compat_tmp.num_counters;
                user += sizeof(compat_tmp);
        } else
@@ -905,9 +905,9 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                if (copy_from_user(info, user, sizeof(*info)) != 0)
                        return ERR_PTR(-EFAULT);
 
-               info->name[sizeof(info->name) - 1] = '\0';
                user += sizeof(*info);
        }
+       info->name[sizeof(info->name) - 1] = '\0';
 
        size = sizeof(struct xt_counters);
        size *= info->num_counters;
index 38986a95216cd2c3f7a0f83deedcb42153f5a937..29123934887bbfe5081178f9ce2425c5bb618a9c 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/syscalls.h>
 #include <linux/skbuff.h>
 #include <linux/filter.h>
 #include <linux/bpf.h>
@@ -49,6 +50,22 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
        return 0;
 }
 
+static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
+{
+       mm_segment_t oldfs = get_fs();
+       int retval, fd;
+
+       set_fs(KERNEL_DS);
+       fd = bpf_obj_get_user(path);
+       set_fs(oldfs);
+       if (fd < 0)
+               return fd;
+
+       retval = __bpf_mt_check_fd(fd, ret);
+       sys_close(fd);
+       return retval;
+}
+
 static int bpf_mt_check(const struct xt_mtchk_param *par)
 {
        struct xt_bpf_info *info = par->matchinfo;
@@ -66,9 +83,10 @@ static int bpf_mt_check_v1(const struct xt_mtchk_param *par)
                return __bpf_mt_check_bytecode(info->bpf_program,
                                               info->bpf_program_num_elem,
                                               &info->filter);
-       else if (info->mode == XT_BPF_MODE_FD_PINNED ||
-                info->mode == XT_BPF_MODE_FD_ELF)
+       else if (info->mode == XT_BPF_MODE_FD_ELF)
                return __bpf_mt_check_fd(info->fd, &info->filter);
+       else if (info->mode == XT_BPF_MODE_PATH_PINNED)
+               return __bpf_mt_check_path(info->path, &info->filter);
        else
                return -EINVAL;
 }
index e75ef39669c5a9a5b72c9a1cec8b72020600eae1..575d2153e3b819f32e9a262abddca95a108eee02 100644 (file)
@@ -76,7 +76,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
                        transparent = nf_sk_is_transparent(sk);
 
                if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
-                   transparent)
+                   transparent && sk_fullsock(sk))
                        pskb->mark = sk->sk_mark;
 
                if (sk != skb->sk)
@@ -133,7 +133,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
                        transparent = nf_sk_is_transparent(sk);
 
                if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
-                   transparent)
+                   transparent && sk_fullsock(sk))
                        pskb->mark = sk->sk_mark;
 
                if (sk != skb->sk)
index 327807731b441e8f66bc899cced12edb405687e8..b93148e8e9fb2dc9a22cccf34d168e99b55042de 100644 (file)
@@ -2266,14 +2266,18 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        cb->min_dump_alloc = control->min_dump_alloc;
        cb->skb = skb;
 
+       if (cb->start) {
+               ret = cb->start(cb);
+               if (ret)
+                       goto error_unlock;
+       }
+
        nlk->cb_running = true;
 
        mutex_unlock(nlk->cb_mutex);
 
-       if (cb->start)
-               cb->start(cb);
-
        ret = netlink_dump(sk);
+
        sock_put(sk);
 
        if (ret)
@@ -2303,6 +2307,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
        size_t tlvlen = 0;
        struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
        unsigned int flags = 0;
+       bool nlk_has_extack = nlk->flags & NETLINK_F_EXT_ACK;
 
        /* Error messages get the original request appened, unless the user
         * requests to cap the error message, and get extra error data if
@@ -2313,7 +2318,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
                        payload += nlmsg_len(nlh);
                else
                        flags |= NLM_F_CAPPED;
-               if (nlk->flags & NETLINK_F_EXT_ACK && extack) {
+               if (nlk_has_extack && extack) {
                        if (extack->_msg)
                                tlvlen += nla_total_size(strlen(extack->_msg) + 1);
                        if (extack->bad_attr)
@@ -2322,8 +2327,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
        } else {
                flags |= NLM_F_CAPPED;
 
-               if (nlk->flags & NETLINK_F_EXT_ACK &&
-                   extack && extack->cookie_len)
+               if (nlk_has_extack && extack && extack->cookie_len)
                        tlvlen += nla_total_size(extack->cookie_len);
        }
 
@@ -2351,7 +2355,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
        errmsg->error = err;
        memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
 
-       if (nlk->flags & NETLINK_F_EXT_ACK && extack) {
+       if (nlk_has_extack && extack) {
                if (err) {
                        if (extack->_msg)
                                WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG,
index c26172995511f77bf9ed4c36d55fd1f430f6de5e..2986941164b1952b3b6014ff81d2986b504c334a 100644 (file)
@@ -1684,10 +1684,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 
        mutex_lock(&fanout_mutex);
 
-       err = -EINVAL;
-       if (!po->running)
-               goto out;
-
        err = -EALREADY;
        if (po->fanout)
                goto out;
@@ -1749,7 +1745,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                list_add(&match->list, &fanout_list);
        }
        err = -EINVAL;
-       if (match->type == type &&
+
+       spin_lock(&po->bind_lock);
+       if (po->running &&
+           match->type == type &&
            match->prot_hook.type == po->prot_hook.type &&
            match->prot_hook.dev == po->prot_hook.dev) {
                err = -ENOSPC;
@@ -1761,9 +1760,16 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                        err = 0;
                }
        }
+       spin_unlock(&po->bind_lock);
+
+       if (err && !refcount_read(&match->sk_ref)) {
+               list_del(&match->list);
+               kfree(match);
+       }
+
 out:
        if (err && rollover) {
-               kfree(rollover);
+               kfree_rcu(rollover, rcu);
                po->rollover = NULL;
        }
        mutex_unlock(&fanout_mutex);
@@ -1790,8 +1796,10 @@ static struct packet_fanout *fanout_release(struct sock *sk)
                else
                        f = NULL;
 
-               if (po->rollover)
+               if (po->rollover) {
                        kfree_rcu(po->rollover, rcu);
+                       po->rollover = NULL;
+               }
        }
        mutex_unlock(&fanout_mutex);
 
@@ -2834,6 +2842,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        struct virtio_net_hdr vnet_hdr = { 0 };
        int offset = 0;
        struct packet_sock *po = pkt_sk(sk);
+       bool has_vnet_hdr = false;
        int hlen, tlen, linear;
        int extra_len = 0;
 
@@ -2877,6 +2886,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
                err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
                if (err)
                        goto out_unlock;
+               has_vnet_hdr = true;
        }
 
        if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
@@ -2935,7 +2945,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        skb->priority = sk->sk_priority;
        skb->mark = sockc.mark;
 
-       if (po->has_vnet_hdr) {
+       if (has_vnet_hdr) {
                err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
                if (err)
                        goto out_free;
@@ -3063,13 +3073,15 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
        int ret = 0;
        bool unlisted = false;
 
-       if (po->fanout)
-               return -EINVAL;
-
        lock_sock(sk);
        spin_lock(&po->bind_lock);
        rcu_read_lock();
 
+       if (po->fanout) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
        if (name) {
                dev = dev_get_by_name_rcu(sock_net(sk), name);
                if (!dev) {
@@ -3841,6 +3853,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
        void *data = &val;
        union tpacket_stats_u st;
        struct tpacket_rollover_stats rstats;
+       struct packet_rollover *rollover;
 
        if (level != SOL_PACKET)
                return -ENOPROTOOPT;
@@ -3919,13 +3932,18 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
                       0);
                break;
        case PACKET_ROLLOVER_STATS:
-               if (!po->rollover)
+               rcu_read_lock();
+               rollover = rcu_dereference(po->rollover);
+               if (rollover) {
+                       rstats.tp_all = atomic_long_read(&rollover->num);
+                       rstats.tp_huge = atomic_long_read(&rollover->num_huge);
+                       rstats.tp_failed = atomic_long_read(&rollover->num_failed);
+                       data = &rstats;
+                       lv = sizeof(rstats);
+               }
+               rcu_read_unlock();
+               if (!rollover)
                        return -EINVAL;
-               rstats.tp_all = atomic_long_read(&po->rollover->num);
-               rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
-               rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
-               data = &rstats;
-               lv = sizeof(rstats);
                break;
        case PACKET_TX_HAS_OFF:
                val = po->tp_tx_has_off;
index 6ab39dbcca0197339cb8c2e9ec5676a778c4d23e..8557a1cae04170496887815a0f41e7c1abd2d979 100644 (file)
@@ -661,13 +661,15 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
                        }
                }
 
-               rds_ib_set_wr_signal_state(ic, send, 0);
+               rds_ib_set_wr_signal_state(ic, send, false);
 
                /*
                 * Always signal the last one if we're stopping due to flow control.
                 */
-               if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
-                       send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+               if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
+                       rds_ib_set_wr_signal_state(ic, send, true);
+                       send->s_wr.send_flags |= IB_SEND_SOLICITED;
+               }
 
                if (send->s_wr.send_flags & IB_SEND_SIGNALED)
                        nr_sig++;
@@ -705,11 +707,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
        if (scat == &rm->data.op_sg[rm->data.op_count]) {
                prev->s_op = ic->i_data_op;
                prev->s_wr.send_flags |= IB_SEND_SOLICITED;
-               if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) {
-                       ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
-                       prev->s_wr.send_flags |= IB_SEND_SIGNALED;
-                       nr_sig++;
-               }
+               if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
+                       nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
                ic->i_data_op = NULL;
        }
 
@@ -792,6 +791,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
                send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
                send->s_atomic_wr.swap_mask = 0;
        }
+       send->s_wr.send_flags = 0;
        nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
        send->s_atomic_wr.wr.num_sge = 1;
        send->s_atomic_wr.wr.next = NULL;
index fb17552fd292ef5a67bff1c0da2a19e4ef06c6b8..4b0a8288c98a65195519f60f5162422b6040e9c6 100644 (file)
@@ -308,10 +308,11 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
        call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
                                     gfp);
        /* The socket has been unlocked. */
-       if (!IS_ERR(call))
+       if (!IS_ERR(call)) {
                call->notify_rx = notify_rx;
+               mutex_unlock(&call->user_mutex);
+       }
 
-       mutex_unlock(&call->user_mutex);
        _leave(" = %p", call);
        return call;
 }
index ec986ae528089081ecdf8528437bbd82b7e840e1..a9f9a2ccc66454196dc275d8825acb847661d7a7 100644 (file)
@@ -264,6 +264,7 @@ static int __init sample_init_module(void)
 
 static void __exit sample_cleanup_module(void)
 {
+       rcu_barrier();
        tcf_unregister_action(&act_sample_ops, &sample_net_ops);
 }
 
index 0b2219adf520b33a471dd3b8e2fdf9f46271c3f9..231181c602edbae39fa7aecc345a12a25b35a6bb 100644 (file)
@@ -77,6 +77,8 @@ out:
 }
 EXPORT_SYMBOL(register_tcf_proto_ops);
 
+static struct workqueue_struct *tc_filter_wq;
+
 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
 {
        struct tcf_proto_ops *t;
@@ -86,6 +88,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
         * tcf_proto_ops's destroy() handler.
         */
        rcu_barrier();
+       flush_workqueue(tc_filter_wq);
 
        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head) {
@@ -100,6 +103,12 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
 }
 EXPORT_SYMBOL(unregister_tcf_proto_ops);
 
+bool tcf_queue_work(struct work_struct *work)
+{
+       return queue_work(tc_filter_wq, work);
+}
+EXPORT_SYMBOL(tcf_queue_work);
+
 /* Select new prio value from the range, managed by kernel. */
 
 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
@@ -266,23 +275,30 @@ err_chain_create:
 }
 EXPORT_SYMBOL(tcf_block_get);
 
-void tcf_block_put(struct tcf_block *block)
+static void tcf_block_put_final(struct work_struct *work)
 {
+       struct tcf_block *block = container_of(work, struct tcf_block, work);
        struct tcf_chain *chain, *tmp;
 
-       if (!block)
-               return;
-
-       /* XXX: Standalone actions are not allowed to jump to any chain, and
-        * bound actions should be all removed after flushing. However,
-        * filters are destroyed in RCU callbacks, we have to hold the chains
-        * first, otherwise we would always race with RCU callbacks on this list
-        * without proper locking.
-        */
+       /* At this point, all the chains should have refcnt == 1. */
+       rtnl_lock();
+       list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+               tcf_chain_put(chain);
+       rtnl_unlock();
+       kfree(block);
+}
 
-       /* Wait for existing RCU callbacks to cool down. */
-       rcu_barrier();
+/* XXX: Standalone actions are not allowed to jump to any chain, and bound
+ * actions should be all removed after flushing. However, filters are destroyed
+ * in RCU callbacks, we have to hold the chains first, otherwise we would
+ * always race with RCU callbacks on this list without proper locking.
+ */
+static void tcf_block_put_deferred(struct work_struct *work)
+{
+       struct tcf_block *block = container_of(work, struct tcf_block, work);
+       struct tcf_chain *chain;
 
+       rtnl_lock();
        /* Hold a refcnt for all chains, except 0, in case they are gone. */
        list_for_each_entry(chain, &block->chain_list, list)
                if (chain->index)
@@ -292,13 +308,27 @@ void tcf_block_put(struct tcf_block *block)
        list_for_each_entry(chain, &block->chain_list, list)
                tcf_chain_flush(chain);
 
-       /* Wait for RCU callbacks to release the reference count. */
+       INIT_WORK(&block->work, tcf_block_put_final);
+       /* Wait for RCU callbacks to release the reference count and make
+        * sure their works have been queued before this.
+        */
        rcu_barrier();
+       tcf_queue_work(&block->work);
+       rtnl_unlock();
+}
 
-       /* At this point, all the chains should have refcnt == 1. */
-       list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
-               tcf_chain_put(chain);
-       kfree(block);
+void tcf_block_put(struct tcf_block *block)
+{
+       if (!block)
+               return;
+
+       INIT_WORK(&block->work, tcf_block_put_deferred);
+       /* Wait for existing RCU callbacks to cool down, make sure their works
+        * have been queued before this. We can not flush pending works here
+        * because we are holding the RTNL lock.
+        */
+       rcu_barrier();
+       tcf_queue_work(&block->work);
 }
 EXPORT_SYMBOL(tcf_block_put);
 
@@ -879,6 +909,7 @@ void tcf_exts_destroy(struct tcf_exts *exts)
 #ifdef CONFIG_NET_CLS_ACT
        LIST_HEAD(actions);
 
+       ASSERT_RTNL();
        tcf_exts_to_list(exts, &actions);
        tcf_action_destroy(&actions, TCA_ACT_UNBIND);
        kfree(exts->actions);
@@ -1030,6 +1061,10 @@ EXPORT_SYMBOL(tcf_exts_get_dev);
 
 static int __init tc_filter_init(void)
 {
+       tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
+       if (!tc_filter_wq)
+               return -ENOMEM;
+
        rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
index d89ebafd22390238b868425cb5a8bbd8dec2c268..f177649a24192a144f261d4e4a40b52ce7cdc91a 100644 (file)
@@ -34,7 +34,10 @@ struct basic_filter {
        struct tcf_result       res;
        struct tcf_proto        *tp;
        struct list_head        link;
-       struct rcu_head         rcu;
+       union {
+               struct work_struct      work;
+               struct rcu_head         rcu;
+       };
 };
 
 static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -82,15 +85,26 @@ static int basic_init(struct tcf_proto *tp)
        return 0;
 }
 
-static void basic_delete_filter(struct rcu_head *head)
+static void basic_delete_filter_work(struct work_struct *work)
 {
-       struct basic_filter *f = container_of(head, struct basic_filter, rcu);
+       struct basic_filter *f = container_of(work, struct basic_filter, work);
 
+       rtnl_lock();
        tcf_exts_destroy(&f->exts);
        tcf_em_tree_destroy(&f->ematches);
+       rtnl_unlock();
+
        kfree(f);
 }
 
+static void basic_delete_filter(struct rcu_head *head)
+{
+       struct basic_filter *f = container_of(head, struct basic_filter, rcu);
+
+       INIT_WORK(&f->work, basic_delete_filter_work);
+       tcf_queue_work(&f->work);
+}
+
 static void basic_destroy(struct tcf_proto *tp)
 {
        struct basic_head *head = rtnl_dereference(tp->root);
index 520c5027646aea5146f22d7898acfbc7dee3385a..037a3ae86829946135e2154bd83ddc722b61af17 100644 (file)
@@ -49,7 +49,10 @@ struct cls_bpf_prog {
        struct sock_filter *bpf_ops;
        const char *bpf_name;
        struct tcf_proto *tp;
-       struct rcu_head rcu;
+       union {
+               struct work_struct work;
+               struct rcu_head rcu;
+       };
 };
 
 static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
@@ -257,9 +260,21 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
        kfree(prog);
 }
 
+static void cls_bpf_delete_prog_work(struct work_struct *work)
+{
+       struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);
+
+       rtnl_lock();
+       __cls_bpf_delete_prog(prog);
+       rtnl_unlock();
+}
+
 static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
 {
-       __cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
+       struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);
+
+       INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
+       tcf_queue_work(&prog->work);
 }
 
 static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
index d48452f8797528dd7ada96dae61a82b3905aa0de..a97e069bee89f0010daaa33c610cc1aea7b97811 100644 (file)
@@ -23,7 +23,10 @@ struct cls_cgroup_head {
        struct tcf_exts         exts;
        struct tcf_ematch_tree  ematches;
        struct tcf_proto        *tp;
-       struct rcu_head         rcu;
+       union {
+               struct work_struct      work;
+               struct rcu_head         rcu;
+       };
 };
 
 static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -57,15 +60,26 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
        [TCA_CGROUP_EMATCHES]   = { .type = NLA_NESTED },
 };
 
+static void cls_cgroup_destroy_work(struct work_struct *work)
+{
+       struct cls_cgroup_head *head = container_of(work,
+                                                   struct cls_cgroup_head,
+                                                   work);
+       rtnl_lock();
+       tcf_exts_destroy(&head->exts);
+       tcf_em_tree_destroy(&head->ematches);
+       kfree(head);
+       rtnl_unlock();
+}
+
 static void cls_cgroup_destroy_rcu(struct rcu_head *root)
 {
        struct cls_cgroup_head *head = container_of(root,
                                                    struct cls_cgroup_head,
                                                    rcu);
 
-       tcf_exts_destroy(&head->exts);
-       tcf_em_tree_destroy(&head->ematches);
-       kfree(head);
+       INIT_WORK(&head->work, cls_cgroup_destroy_work);
+       tcf_queue_work(&head->work);
 }
 
 static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
index 2a3a60ec5b8617c48e58d8a9339cda69be68442f..67f3a2af6aab1aadb5b492265d8469a37a1e8fd5 100644 (file)
@@ -57,7 +57,10 @@ struct flow_filter {
        u32                     divisor;
        u32                     baseclass;
        u32                     hashrnd;
-       struct rcu_head         rcu;
+       union {
+               struct work_struct      work;
+               struct rcu_head         rcu;
+       };
 };
 
 static inline u32 addr_fold(void *addr)
@@ -369,14 +372,24 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
        [TCA_FLOW_PERTURB]      = { .type = NLA_U32 },
 };
 
-static void flow_destroy_filter(struct rcu_head *head)
+static void flow_destroy_filter_work(struct work_struct *work)
 {
-       struct flow_filter *f = container_of(head, struct flow_filter, rcu);
+       struct flow_filter *f = container_of(work, struct flow_filter, work);
 
+       rtnl_lock();
        del_timer_sync(&f->perturb_timer);
        tcf_exts_destroy(&f->exts);
        tcf_em_tree_destroy(&f->ematches);
        kfree(f);
+       rtnl_unlock();
+}
+
+static void flow_destroy_filter(struct rcu_head *head)
+{
+       struct flow_filter *f = container_of(head, struct flow_filter, rcu);
+
+       INIT_WORK(&f->work, flow_destroy_filter_work);
+       tcf_queue_work(&f->work);
 }
 
 static int flow_change(struct net *net, struct sk_buff *in_skb,
index 1a267e77c6de93ad58fd609426d3daef27f39c54..5b5722c8b32c1a276f7441fb67baa0d16ba24ff2 100644 (file)
@@ -87,7 +87,10 @@ struct cls_fl_filter {
        struct list_head list;
        u32 handle;
        u32 flags;
-       struct rcu_head rcu;
+       union {
+               struct work_struct work;
+               struct rcu_head rcu;
+       };
        struct net_device *hw_dev;
 };
 
@@ -215,12 +218,22 @@ static int fl_init(struct tcf_proto *tp)
        return 0;
 }
 
-static void fl_destroy_filter(struct rcu_head *head)
+static void fl_destroy_filter_work(struct work_struct *work)
 {
-       struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
+       struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work);
 
+       rtnl_lock();
        tcf_exts_destroy(&f->exts);
        kfree(f);
+       rtnl_unlock();
+}
+
+static void fl_destroy_filter(struct rcu_head *head)
+{
+       struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
+
+       INIT_WORK(&f->work, fl_destroy_filter_work);
+       tcf_queue_work(&f->work);
 }
 
 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
@@ -234,6 +247,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
        tc_cls_common_offload_init(&cls_flower.common, tp);
        cls_flower.command = TC_CLSFLOWER_DESTROY;
        cls_flower.cookie = (unsigned long) f;
+       cls_flower.egress_dev = f->hw_dev != tp->q->dev_queue->dev;
 
        dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower);
 }
@@ -289,6 +303,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
        cls_flower.command = TC_CLSFLOWER_STATS;
        cls_flower.cookie = (unsigned long) f;
        cls_flower.exts = &f->exts;
+       cls_flower.egress_dev = f->hw_dev != tp->q->dev_queue->dev;
 
        dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
                                      &cls_flower);
@@ -922,28 +937,28 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 
                if (!tc_flags_valid(fnew->flags)) {
                        err = -EINVAL;
-                       goto errout;
+                       goto errout_idr;
                }
        }
 
        err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
        if (err)
-               goto errout;
+               goto errout_idr;
 
        err = fl_check_assign_mask(head, &mask);
        if (err)
-               goto errout;
+               goto errout_idr;
 
        if (!tc_skip_sw(fnew->flags)) {
                if (!fold && fl_lookup(head, &fnew->mkey)) {
                        err = -EEXIST;
-                       goto errout;
+                       goto errout_idr;
                }
 
                err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
                                             head->ht_params);
                if (err)
-                       goto errout;
+                       goto errout_idr;
        }
 
        if (!tc_skip_hw(fnew->flags)) {
@@ -952,7 +967,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                                           &mask.key,
                                           fnew);
                if (err)
-                       goto errout;
+                       goto errout_idr;
        }
 
        if (!tc_in_hw(fnew->flags))
@@ -981,6 +996,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
        kfree(tb);
        return 0;
 
+errout_idr:
+       if (fnew->handle)
+               idr_remove_ext(&head->handle_idr, fnew->handle);
 errout:
        tcf_exts_destroy(&fnew->exts);
        kfree(fnew);
index 941245ad07fd4b8bb4fd97cd84fac8791c571276..99183b8621ecb0543af91020167d13c193cad214 100644 (file)
@@ -46,7 +46,10 @@ struct fw_filter {
 #endif /* CONFIG_NET_CLS_IND */
        struct tcf_exts         exts;
        struct tcf_proto        *tp;
-       struct rcu_head         rcu;
+       union {
+               struct work_struct      work;
+               struct rcu_head         rcu;
+       };
 };
 
 static u32 fw_hash(u32 handle)
@@ -119,12 +122,22 @@ static int fw_init(struct tcf_proto *tp)
        return 0;
 }
 
-static void fw_delete_filter(struct rcu_head *head)
+static void fw_delete_filter_work(struct work_struct *work)
 {
-       struct fw_filter *f = container_of(head, struct fw_filter, rcu);
+       struct fw_filter *f = container_of(work, struct fw_filter, work);
 
+       rtnl_lock();
        tcf_exts_destroy(&f->exts);
        kfree(f);
+       rtnl_unlock();
+}
+
+static void fw_delete_filter(struct rcu_head *head)
+{
+       struct fw_filter *f = container_of(head, struct fw_filter, rcu);
+
+       INIT_WORK(&f->work, fw_delete_filter_work);
+       tcf_queue_work(&f->work);
 }
 
 static void fw_destroy(struct tcf_proto *tp)
index 21cc45caf8424a2d9ba75558c11fe1dc8170cd48..c33f711b90198ab8b77bd314c289d09cbfe2d170 100644 (file)
@@ -21,7 +21,10 @@ struct cls_mall_head {
        struct tcf_result res;
        u32 handle;
        u32 flags;
-       struct rcu_head rcu;
+       union {
+               struct work_struct work;
+               struct rcu_head rcu;
+       };
 };
 
 static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -32,6 +35,7 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
        if (tc_skip_sw(head->flags))
                return -1;
 
+       *res = head->res;
        return tcf_exts_exec(skb, &head->exts, res);
 }
 
@@ -40,13 +44,23 @@ static int mall_init(struct tcf_proto *tp)
        return 0;
 }
 
+static void mall_destroy_work(struct work_struct *work)
+{
+       struct cls_mall_head *head = container_of(work, struct cls_mall_head,
+                                                 work);
+       rtnl_lock();
+       tcf_exts_destroy(&head->exts);
+       kfree(head);
+       rtnl_unlock();
+}
+
 static void mall_destroy_rcu(struct rcu_head *rcu)
 {
        struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
                                                  rcu);
 
-       tcf_exts_destroy(&head->exts);
-       kfree(head);
+       INIT_WORK(&head->work, mall_destroy_work);
+       tcf_queue_work(&head->work);
 }
 
 static int mall_replace_hw_filter(struct tcf_proto *tp,
index 9ddde65915d22615aa93036f01ae0368783c94f6..4b14ccd8b8f271aabecb97427fa3fa18f7484747 100644 (file)
@@ -57,7 +57,10 @@ struct route4_filter {
        u32                     handle;
        struct route4_bucket    *bkt;
        struct tcf_proto        *tp;
-       struct rcu_head         rcu;
+       union {
+               struct work_struct      work;
+               struct rcu_head         rcu;
+       };
 };
 
 #define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
@@ -254,12 +257,22 @@ static int route4_init(struct tcf_proto *tp)
        return 0;
 }
 
-static void route4_delete_filter(struct rcu_head *head)
+static void route4_delete_filter_work(struct work_struct *work)
 {
-       struct route4_filter *f = container_of(head, struct route4_filter, rcu);
+       struct route4_filter *f = container_of(work, struct route4_filter, work);
 
+       rtnl_lock();
        tcf_exts_destroy(&f->exts);
        kfree(f);
+       rtnl_unlock();
+}
+
+static void route4_delete_filter(struct rcu_head *head)
+{
+       struct route4_filter *f = container_of(head, struct route4_filter, rcu);
+
+       INIT_WORK(&f->work, route4_delete_filter_work);
+       tcf_queue_work(&f->work);
 }
 
 static void route4_destroy(struct tcf_proto *tp)
index b1f6ed48bc729b9238d1a3502c147ca856b3a996..bdbc541787f87d6bc0592122acef87a80b0b8d03 100644 (file)
@@ -97,7 +97,10 @@ struct rsvp_filter {
 
        u32                             handle;
        struct rsvp_session             *sess;
-       struct rcu_head                 rcu;
+       union {
+               struct work_struct              work;
+               struct rcu_head                 rcu;
+       };
 };
 
 static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
@@ -282,12 +285,22 @@ static int rsvp_init(struct tcf_proto *tp)
        return -ENOBUFS;
 }
 
-static void rsvp_delete_filter_rcu(struct rcu_head *head)
+static void rsvp_delete_filter_work(struct work_struct *work)
 {
-       struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);
+       struct rsvp_filter *f = container_of(work, struct rsvp_filter, work);
 
+       rtnl_lock();
        tcf_exts_destroy(&f->exts);
        kfree(f);
+       rtnl_unlock();
+}
+
+static void rsvp_delete_filter_rcu(struct rcu_head *head)
+{
+       struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);
+
+       INIT_WORK(&f->work, rsvp_delete_filter_work);
+       tcf_queue_work(&f->work);
 }
 
 static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
index 14a7e08b2fa9e2f8a3551f6dbc1fb38f1e8df579..beaa95e09c25c26ba94155f03375d27156e9cbc4 100644 (file)
 struct tcindex_filter_result {
        struct tcf_exts         exts;
        struct tcf_result       res;
-       struct rcu_head         rcu;
+       union {
+               struct work_struct      work;
+               struct rcu_head         rcu;
+       };
 };
 
 struct tcindex_filter {
        u16 key;
        struct tcindex_filter_result result;
        struct tcindex_filter __rcu *next;
-       struct rcu_head rcu;
+       union {
+               struct work_struct work;
+               struct rcu_head rcu;
+       };
 };
 
 
@@ -133,12 +139,34 @@ static int tcindex_init(struct tcf_proto *tp)
        return 0;
 }
 
+static void tcindex_destroy_rexts_work(struct work_struct *work)
+{
+       struct tcindex_filter_result *r;
+
+       r = container_of(work, struct tcindex_filter_result, work);
+       rtnl_lock();
+       tcf_exts_destroy(&r->exts);
+       rtnl_unlock();
+}
+
 static void tcindex_destroy_rexts(struct rcu_head *head)
 {
        struct tcindex_filter_result *r;
 
        r = container_of(head, struct tcindex_filter_result, rcu);
-       tcf_exts_destroy(&r->exts);
+       INIT_WORK(&r->work, tcindex_destroy_rexts_work);
+       tcf_queue_work(&r->work);
+}
+
+static void tcindex_destroy_fexts_work(struct work_struct *work)
+{
+       struct tcindex_filter *f = container_of(work, struct tcindex_filter,
+                                               work);
+
+       rtnl_lock();
+       tcf_exts_destroy(&f->result.exts);
+       kfree(f);
+       rtnl_unlock();
 }
 
 static void tcindex_destroy_fexts(struct rcu_head *head)
@@ -146,8 +174,8 @@ static void tcindex_destroy_fexts(struct rcu_head *head)
        struct tcindex_filter *f = container_of(head, struct tcindex_filter,
                                                rcu);
 
-       tcf_exts_destroy(&f->result.exts);
-       kfree(f);
+       INIT_WORK(&f->work, tcindex_destroy_fexts_work);
+       tcf_queue_work(&f->work);
 }
 
 static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last)
index 10b8d851fc6be96c761bea127776b84d21b4fc05..dadd1b3444970d1be4936e4f1a6cf8fd9a6d14a6 100644 (file)
@@ -68,7 +68,10 @@ struct tc_u_knode {
        u32 __percpu            *pcpu_success;
 #endif
        struct tcf_proto        *tp;
-       struct rcu_head         rcu;
+       union {
+               struct work_struct      work;
+               struct rcu_head         rcu;
+       };
        /* The 'sel' field MUST be the last field in structure to allow for
         * tc_u32_keys allocated at end of structure.
         */
@@ -418,11 +421,21 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
  * this the u32_delete_key_rcu variant does not free the percpu
  * statistics.
  */
+static void u32_delete_key_work(struct work_struct *work)
+{
+       struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);
+
+       rtnl_lock();
+       u32_destroy_key(key->tp, key, false);
+       rtnl_unlock();
+}
+
 static void u32_delete_key_rcu(struct rcu_head *rcu)
 {
        struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
 
-       u32_destroy_key(key->tp, key, false);
+       INIT_WORK(&key->work, u32_delete_key_work);
+       tcf_queue_work(&key->work);
 }
 
 /* u32_delete_key_freepf_rcu is the rcu callback variant
@@ -432,11 +445,21 @@ static void u32_delete_key_rcu(struct rcu_head *rcu)
  * for the variant that should be used with keys return from
  * u32_init_knode()
  */
+static void u32_delete_key_freepf_work(struct work_struct *work)
+{
+       struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);
+
+       rtnl_lock();
+       u32_destroy_key(key->tp, key, true);
+       rtnl_unlock();
+}
+
 static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
 {
        struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
 
-       u32_destroy_key(key->tp, key, true);
+       INIT_WORK(&key->work, u32_delete_key_freepf_work);
+       tcf_queue_work(&key->work);
 }
 
 static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
index c6deb74e3d2f4a007554b9cf78e4ddf7b7b84535..22bc6fc4831116d75027a3871e7854e07b823d71 100644 (file)
@@ -301,6 +301,8 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
        struct Qdisc *q;
 
+       if (!handle)
+               return NULL;
        q = qdisc_match_from_root(dev->qdisc, handle);
        if (q)
                goto out;
index 92237e75dbbc5e3e7dab124fb67baca98ae2160f..bf8c81e07c70457f37eeb167764a2aa33be34ace 100644 (file)
@@ -685,6 +685,7 @@ void qdisc_reset(struct Qdisc *qdisc)
                qdisc->gso_skb = NULL;
        }
        qdisc->q.qlen = 0;
+       qdisc->qstats.backlog = 0;
 }
 EXPORT_SYMBOL(qdisc_reset);
 
index daaf214e5201919ca3681e1670ac1389cb7985a4..3f88b75488b03275b152298bab0e66ea02298caa 100644 (file)
@@ -958,6 +958,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        }
 
        if (cl != NULL) {
+               int old_flags;
+
                if (parentid) {
                        if (cl->cl_parent &&
                            cl->cl_parent->cl_common.classid != parentid)
@@ -978,6 +980,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                }
 
                sch_tree_lock(sch);
+               old_flags = cl->cl_flags;
+
                if (rsc != NULL)
                        hfsc_change_rsc(cl, rsc, cur_time);
                if (fsc != NULL)
@@ -986,10 +990,21 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                        hfsc_change_usc(cl, usc, cur_time);
 
                if (cl->qdisc->q.qlen != 0) {
-                       if (cl->cl_flags & HFSC_RSC)
-                               update_ed(cl, qdisc_peek_len(cl->qdisc));
-                       if (cl->cl_flags & HFSC_FSC)
-                               update_vf(cl, 0, cur_time);
+                       int len = qdisc_peek_len(cl->qdisc);
+
+                       if (cl->cl_flags & HFSC_RSC) {
+                               if (old_flags & HFSC_RSC)
+                                       update_ed(cl, len);
+                               else
+                                       init_ed(cl, len);
+                       }
+
+                       if (cl->cl_flags & HFSC_FSC) {
+                               if (old_flags & HFSC_FSC)
+                                       update_vf(cl, 0, cur_time);
+                               else
+                                       init_vf(cl, len);
+                       }
                }
                sch_tree_unlock(sch);
 
index 92a07141fd07396a816478569f6de18a2aa13ebb..621b5ca3fd1c17c3d7ef7bb1c7677ab98cebbe77 100644 (file)
@@ -421,7 +421,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
 {
        struct dst_entry *dst;
 
-       if (!t)
+       if (sock_owned_by_user(sk) || !t)
                return;
        dst = sctp_transport_dst_check(t);
        if (dst)
@@ -794,7 +794,7 @@ hit:
 struct sctp_hash_cmp_arg {
        const union sctp_addr   *paddr;
        const struct net        *net;
-       u16                     lport;
+       __be16                  lport;
 };
 
 static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
@@ -820,37 +820,37 @@ out:
        return err;
 }
 
-static inline u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
+static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
 {
        const struct sctp_transport *t = data;
        const union sctp_addr *paddr = &t->ipaddr;
        const struct net *net = sock_net(t->asoc->base.sk);
-       u16 lport = htons(t->asoc->base.bind_addr.port);
-       u32 addr;
+       __be16 lport = htons(t->asoc->base.bind_addr.port);
+       __u32 addr;
 
        if (paddr->sa.sa_family == AF_INET6)
                addr = jhash(&paddr->v6.sin6_addr, 16, seed);
        else
-               addr = paddr->v4.sin_addr.s_addr;
+               addr = (__force __u32)paddr->v4.sin_addr.s_addr;
 
-       return  jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 |
+       return  jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
                             (__force __u32)lport, net_hash_mix(net), seed);
 }
 
-static inline u32 sctp_hash_key(const void *data, u32 len, u32 seed)
+static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed)
 {
        const struct sctp_hash_cmp_arg *x = data;
        const union sctp_addr *paddr = x->paddr;
        const struct net *net = x->net;
-       u16 lport = x->lport;
-       u32 addr;
+       __be16 lport = x->lport;
+       __u32 addr;
 
        if (paddr->sa.sa_family == AF_INET6)
                addr = jhash(&paddr->v6.sin6_addr, 16, seed);
        else
-               addr = paddr->v4.sin_addr.s_addr;
+               addr = (__force __u32)paddr->v4.sin_addr.s_addr;
 
-       return  jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 |
+       return  jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
                             (__force __u32)lport, net_hash_mix(net), seed);
 }
 
index 51c4887695909d171285b98ce1be779a3adedbab..a6dfa86c02016e3ff81f10f729a56e6673affc68 100644 (file)
@@ -738,7 +738,7 @@ static int sctp_v6_skb_iif(const struct sk_buff *skb)
 /* Was this packet marked by Explicit Congestion Notification? */
 static int sctp_v6_is_ce(const struct sk_buff *skb)
 {
-       return *((__u32 *)(ipv6_hdr(skb))) & htonl(1 << 20);
+       return *((__u32 *)(ipv6_hdr(skb))) & (__force __u32)htonl(1 << 20);
 }
 
 /* Dump the v6 addr to the seq file. */
@@ -882,8 +882,10 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
                        net = sock_net(&opt->inet.sk);
                        rcu_read_lock();
                        dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
-                       if (!dev ||
-                           !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) {
+                       if (!dev || !(opt->inet.freebind ||
+                                     net->ipv6.sysctl.ip_nonlocal_bind ||
+                                     ipv6_chk_addr(net, &addr->v6.sin6_addr,
+                                                   dev, 0))) {
                                rcu_read_unlock();
                                return 0;
                        }
index 22ed01a76b19cbd8af715f9a31f5c2d39312ce74..a72a7d925d4631e30cfb0743df392ff4cef456ca 100644 (file)
@@ -463,6 +463,7 @@ static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
                .r = r,
                .net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
        };
+       int pos = cb->args[2];
 
        /* eps hashtable dumps
         * args:
@@ -493,7 +494,8 @@ skip:
                goto done;
 
        sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
-                               net, (int *)&cb->args[2], &commp);
+                               net, &pos, &commp);
+       cb->args[2] = pos;
 
 done:
        cb->args[1] = cb->args[4];
index ca8f196b6c6c106386a65c75dccd7bc3451798ff..514465b03829b18c18ae3e890e0899138035bd7a 100644 (file)
@@ -2854,7 +2854,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
                addr_param_len = af->to_addr_param(addr, &addr_param);
                param.param_hdr.type = flags;
                param.param_hdr.length = htons(paramlen + addr_param_len);
-               param.crr_id = i;
+               param.crr_id = htonl(i);
 
                sctp_addto_chunk(retval, paramlen, &param);
                sctp_addto_chunk(retval, addr_param_len, &addr_param);
@@ -2867,7 +2867,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
                addr_param_len = af->to_addr_param(addr, &addr_param);
                param.param_hdr.type = SCTP_PARAM_DEL_IP;
                param.param_hdr.length = htons(paramlen + addr_param_len);
-               param.crr_id = i;
+               param.crr_id = htonl(i);
 
                sctp_addto_chunk(retval, paramlen, &param);
                sctp_addto_chunk(retval, addr_param_len, &addr_param);
@@ -3591,7 +3591,7 @@ static struct sctp_chunk *sctp_make_reconf(const struct sctp_association *asoc,
  */
 struct sctp_chunk *sctp_make_strreset_req(
                                        const struct sctp_association *asoc,
-                                       __u16 stream_num, __u16 *stream_list,
+                                       __u16 stream_num, __be16 *stream_list,
                                        bool out, bool in)
 {
        struct sctp_strreset_outreq outreq;
@@ -3788,7 +3788,8 @@ bool sctp_verify_reconf(const struct sctp_association *asoc,
 {
        struct sctp_reconf_chunk *hdr;
        union sctp_params param;
-       __u16 last = 0, cnt = 0;
+       __be16 last = 0;
+       __u16 cnt = 0;
 
        hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
        sctp_walk_params(param, hdr, params) {
index e6a2974e020e1a4232d94e6c2933eebff5f8acb4..e2d9a4b49c9c98061a1c5b358b65ce5285f68de8 100644 (file)
@@ -1607,12 +1607,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
                        break;
 
                case SCTP_CMD_INIT_FAILED:
-                       sctp_cmd_init_failed(commands, asoc, cmd->obj.err);
+                       sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
                        break;
 
                case SCTP_CMD_ASSOC_FAILED:
                        sctp_cmd_assoc_failed(commands, asoc, event_type,
-                                             subtype, chunk, cmd->obj.err);
+                                             subtype, chunk, cmd->obj.u32);
                        break;
 
                case SCTP_CMD_INIT_COUNTER_INC:
@@ -1680,8 +1680,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
                case SCTP_CMD_PROCESS_CTSN:
                        /* Dummy up a SACK for processing. */
                        sackh.cum_tsn_ack = cmd->obj.be32;
-                       sackh.a_rwnd = asoc->peer.rwnd +
-                                       asoc->outqueue.outstanding_bytes;
+                       sackh.a_rwnd = htonl(asoc->peer.rwnd +
+                                            asoc->outqueue.outstanding_bytes);
                        sackh.num_gap_ack_blocks = 0;
                        sackh.num_dup_tsns = 0;
                        chunk->subh.sack_hdr = &sackh;
index d4730ada7f3233367be7a0e3bb10e286a25602c8..6f45d1713452df982a985e1e882c964259ad9e20 100644 (file)
@@ -170,6 +170,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
        sk_mem_charge(sk, chunk->skb->truesize);
 }
 
+static void sctp_clear_owner_w(struct sctp_chunk *chunk)
+{
+       skb_orphan(chunk->skb);
+}
+
+static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+                                      void (*cb)(struct sctp_chunk *))
+
+{
+       struct sctp_outq *q = &asoc->outqueue;
+       struct sctp_transport *t;
+       struct sctp_chunk *chunk;
+
+       list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+               list_for_each_entry(chunk, &t->transmitted, transmitted_list)
+                       cb(chunk);
+
+       list_for_each_entry(chunk, &q->retransmit, list)
+               cb(chunk);
+
+       list_for_each_entry(chunk, &q->sacked, list)
+               cb(chunk);
+
+       list_for_each_entry(chunk, &q->abandoned, list)
+               cb(chunk);
+
+       list_for_each_entry(chunk, &q->out_chunk_list, list)
+               cb(chunk);
+}
+
 /* Verify that this is a valid address. */
 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
                                   int len)
@@ -4906,6 +4936,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
        struct socket *sock;
        int err = 0;
 
+       /* Do not peel off from one netns to another one. */
+       if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
+               return -EINVAL;
+
        if (!asoc)
                return -EINVAL;
 
@@ -8208,7 +8242,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         * paths won't try to lock it and then oldsk.
         */
        lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
+       sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
        sctp_assoc_migrate(assoc, newsk);
+       sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
 
        /* If the association on the newsk is already closed before accept()
         * is called, set RCV_SHUTDOWN flag.
index 63ea1550371493ec8863627c7a43f46a22f4a4c9..fa8371ff05c43f31a7a0c5947161e08508671bcc 100644 (file)
@@ -118,6 +118,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
        __u16 i, str_nums, *str_list;
        struct sctp_chunk *chunk;
        int retval = -EINVAL;
+       __be16 *nstr_list;
        bool out, in;
 
        if (!asoc->peer.reconf_capable ||
@@ -148,13 +149,18 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
                        if (str_list[i] >= stream->incnt)
                                goto out;
 
+       nstr_list = kcalloc(str_nums, sizeof(__be16), GFP_KERNEL);
+       if (!nstr_list) {
+               retval = -ENOMEM;
+               goto out;
+       }
+
        for (i = 0; i < str_nums; i++)
-               str_list[i] = htons(str_list[i]);
+               nstr_list[i] = htons(str_list[i]);
 
-       chunk = sctp_make_strreset_req(asoc, str_nums, str_list, out, in);
+       chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
 
-       for (i = 0; i < str_nums; i++)
-               str_list[i] = ntohs(str_list[i]);
+       kfree(nstr_list);
 
        if (!chunk) {
                retval = -ENOMEM;
@@ -305,7 +311,7 @@ out:
 }
 
 static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param(
-                       struct sctp_association *asoc, __u32 resp_seq,
+                       struct sctp_association *asoc, __be32 resp_seq,
                        __be16 type)
 {
        struct sctp_chunk *chunk = asoc->strreset_chunk;
@@ -345,8 +351,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
 {
        struct sctp_strreset_outreq *outreq = param.v;
        struct sctp_stream *stream = &asoc->stream;
-       __u16 i, nums, flags = 0, *str_p = NULL;
        __u32 result = SCTP_STRRESET_DENIED;
+       __u16 i, nums, flags = 0;
+       __be16 *str_p = NULL;
        __u32 request_seq;
 
        request_seq = ntohl(outreq->request_seq);
@@ -439,8 +446,9 @@ struct sctp_chunk *sctp_process_strreset_inreq(
        struct sctp_stream *stream = &asoc->stream;
        __u32 result = SCTP_STRRESET_DENIED;
        struct sctp_chunk *chunk = NULL;
-       __u16 i, nums, *str_p;
        __u32 request_seq;
+       __u16 i, nums;
+       __be16 *str_p;
 
        request_seq = ntohl(inreq->request_seq);
        if (TSN_lt(asoc->strreset_inseq, request_seq) ||
@@ -769,7 +777,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
 
        if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) {
                struct sctp_strreset_outreq *outreq;
-               __u16 *str_p;
+               __be16 *str_p;
 
                outreq = (struct sctp_strreset_outreq *)req;
                str_p = outreq->list_of_streams;
@@ -794,7 +802,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
                        nums, str_p, GFP_ATOMIC);
        } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) {
                struct sctp_strreset_inreq *inreq;
-               __u16 *str_p;
+               __be16 *str_p;
 
                /* if the result is performed, it's impossible for inreq */
                if (result == SCTP_STRRESET_PERFORMED)
index 67abc0194f301239fcbf0bd6a1136aeec585e660..5447228bf1a014ad6c95b80355bb1c3827800db7 100644 (file)
@@ -847,7 +847,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
 
 struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
        const struct sctp_association *asoc, __u16 flags, __u16 stream_num,
-       __u16 *stream_list, gfp_t gfp)
+       __be16 *stream_list, gfp_t gfp)
 {
        struct sctp_stream_reset_event *sreset;
        struct sctp_ulpevent *event;
index 8c6d24b2995dd5ced548275acce04d3a6d70f043..745f145d4c4d43316f3cb30083066456497fec3a 100644 (file)
@@ -282,6 +282,7 @@ int smc_netinfo_by_tcpsk(struct socket *clcsock,
                         __be32 *subnet, u8 *prefix_len)
 {
        struct dst_entry *dst = sk_dst_get(clcsock->sk);
+       struct in_device *in_dev;
        struct sockaddr_in addr;
        int rc = -ENOENT;
        int len;
@@ -298,14 +299,17 @@ int smc_netinfo_by_tcpsk(struct socket *clcsock,
        /* get address to which the internal TCP socket is bound */
        kernel_getsockname(clcsock, (struct sockaddr *)&addr, &len);
        /* analyze IPv4 specific data of net_device belonging to TCP socket */
-       for_ifa(dst->dev->ip_ptr) {
-               if (ifa->ifa_address != addr.sin_addr.s_addr)
+       rcu_read_lock();
+       in_dev = __in_dev_get_rcu(dst->dev);
+       for_ifa(in_dev) {
+               if (!inet_ifa_match(addr.sin_addr.s_addr, ifa))
                        continue;
                *prefix_len = inet_mask_len(ifa->ifa_mask);
                *subnet = ifa->ifa_address & ifa->ifa_mask;
                rc = 0;
                break;
-       } endfor_ifa(dst->dev->ip_ptr);
+       } endfor_ifa(in_dev);
+       rcu_read_unlock();
 
 out_rel:
        dst_release(dst);
@@ -509,7 +513,7 @@ decline_rdma:
        /* RDMA setup failed, switch back to TCP */
        smc->use_fallback = true;
        if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
-               rc = smc_clc_send_decline(smc, reason_code, 0);
+               rc = smc_clc_send_decline(smc, reason_code);
                if (rc < sizeof(struct smc_clc_msg_decline))
                        goto out_err;
        }
@@ -804,8 +808,6 @@ static void smc_listen_work(struct work_struct *work)
                rc = local_contact;
                if (rc == -ENOMEM)
                        reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
-               else if (rc == -ENOLINK)
-                       reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
                goto decline_rdma;
        }
        link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
@@ -899,7 +901,7 @@ decline_rdma:
        smc_conn_free(&new_smc->conn);
        new_smc->use_fallback = true;
        if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
-               rc = smc_clc_send_decline(new_smc, reason_code, 0);
+               rc = smc_clc_send_decline(new_smc, reason_code);
                if (rc < sizeof(struct smc_clc_msg_decline))
                        goto out_err;
        }
index 6e44313e4467d01fbdc77f07ee7d14c50ab92ce2..0ccd6fa387ad7074cf0010cb3966f38d6c3b20cb 100644 (file)
@@ -149,7 +149,7 @@ struct smc_connection {
        atomic_t                sndbuf_space;   /* remaining space in sndbuf */
        u16                     tx_cdc_seq;     /* sequence # for CDC send */
        spinlock_t              send_lock;      /* protect wr_sends */
-       struct work_struct      tx_work;        /* retry of smc_cdc_msg_send */
+       struct delayed_work     tx_work;        /* retry of smc_cdc_msg_send */
 
        struct smc_host_cdc_msg local_rx_ctrl;  /* filled during event_handl.
                                                 * .prod cf. TCP rcv_nxt
index 3934913ab835cc791362a6a2ea816e871ce2cee1..b7dd2743fb5c0cfa11228b6c36b3a7fe3c53498e 100644 (file)
@@ -95,9 +95,10 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
        }
        if (clcm->type == SMC_CLC_DECLINE) {
                reason_code = SMC_CLC_DECL_REPLY;
-               if (ntohl(((struct smc_clc_msg_decline *)buf)->peer_diagnosis)
-                       == SMC_CLC_DECL_SYNCERR)
+               if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
                        smc->conn.lgr->sync_err = true;
+                       smc_lgr_terminate(smc->conn.lgr);
+               }
        }
 
 out:
@@ -105,8 +106,7 @@ out:
 }
 
 /* send CLC DECLINE message across internal TCP socket */
-int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info,
-                        u8 out_of_sync)
+int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
 {
        struct smc_clc_msg_decline dclc;
        struct msghdr msg;
@@ -118,7 +118,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info,
        dclc.hdr.type = SMC_CLC_DECLINE;
        dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
        dclc.hdr.version = SMC_CLC_V1;
-       dclc.hdr.flag = out_of_sync ? 1 : 0;
+       dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
        memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid));
        dclc.peer_diagnosis = htonl(peer_diag_info);
        memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
index 13db8ce177c91dd913fac15fdebb5e05875e0ec9..1c55414041d49b560dfdd89fe9c1ed122a073cea 100644 (file)
@@ -106,8 +106,7 @@ struct smc_ib_device;
 
 int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
                     u8 expected_type);
-int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info,
-                        u8 out_of_sync);
+int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info);
 int smc_clc_send_proposal(struct smc_sock *smc, struct smc_ib_device *smcibdev,
                          u8 ibport);
 int smc_clc_send_confirm(struct smc_sock *smc);
index 3c2e166b5d222f4c932179ae442cbd88969efc92..f0d16fb825f7a545104232baacca32520cb14e08 100644 (file)
@@ -174,15 +174,15 @@ int smc_close_active(struct smc_sock *smc)
 {
        struct smc_cdc_conn_state_flags *txflags =
                &smc->conn.local_tx_ctrl.conn_state_flags;
-       long timeout = SMC_MAX_STREAM_WAIT_TIMEOUT;
        struct smc_connection *conn = &smc->conn;
        struct sock *sk = &smc->sk;
        int old_state;
+       long timeout;
        int rc = 0;
 
-       if (sock_flag(sk, SOCK_LINGER) &&
-           !(current->flags & PF_EXITING))
-               timeout = sk->sk_lingertime;
+       timeout = current->flags & PF_EXITING ?
+                 0 : sock_flag(sk, SOCK_LINGER) ?
+                     sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;
 
 again:
        old_state = sk->sk_state;
@@ -208,7 +208,7 @@ again:
        case SMC_ACTIVE:
                smc_close_stream_wait(smc, timeout);
                release_sock(sk);
-               cancel_work_sync(&conn->tx_work);
+               cancel_delayed_work_sync(&conn->tx_work);
                lock_sock(sk);
                if (sk->sk_state == SMC_ACTIVE) {
                        /* send close request */
@@ -234,7 +234,7 @@ again:
                if (!smc_cdc_rxed_any_close(conn))
                        smc_close_stream_wait(smc, timeout);
                release_sock(sk);
-               cancel_work_sync(&conn->tx_work);
+               cancel_delayed_work_sync(&conn->tx_work);
                lock_sock(sk);
                if (sk->sk_err != ECONNABORTED) {
                        /* confirm close from peer */
@@ -263,7 +263,9 @@ again:
                /* peer sending PeerConnectionClosed will cause transition */
                break;
        case SMC_PROCESSABORT:
-               cancel_work_sync(&conn->tx_work);
+               release_sock(sk);
+               cancel_delayed_work_sync(&conn->tx_work);
+               lock_sock(sk);
                smc_close_abort(conn);
                sk->sk_state = SMC_CLOSED;
                smc_close_wait_tx_pends(smc);
@@ -411,13 +413,14 @@ void smc_close_sock_put_work(struct work_struct *work)
 int smc_close_shutdown_write(struct smc_sock *smc)
 {
        struct smc_connection *conn = &smc->conn;
-       long timeout = SMC_MAX_STREAM_WAIT_TIMEOUT;
        struct sock *sk = &smc->sk;
        int old_state;
+       long timeout;
        int rc = 0;
 
-       if (sock_flag(sk, SOCK_LINGER))
-               timeout = sk->sk_lingertime;
+       timeout = current->flags & PF_EXITING ?
+                 0 : sock_flag(sk, SOCK_LINGER) ?
+                     sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;
 
 again:
        old_state = sk->sk_state;
@@ -425,7 +428,7 @@ again:
        case SMC_ACTIVE:
                smc_close_stream_wait(smc, timeout);
                release_sock(sk);
-               cancel_work_sync(&conn->tx_work);
+               cancel_delayed_work_sync(&conn->tx_work);
                lock_sock(sk);
                /* send close wr request */
                rc = smc_close_wr(conn);
@@ -439,7 +442,7 @@ again:
                if (!smc_cdc_rxed_any_close(conn))
                        smc_close_stream_wait(smc, timeout);
                release_sock(sk);
-               cancel_work_sync(&conn->tx_work);
+               cancel_delayed_work_sync(&conn->tx_work);
                lock_sock(sk);
                /* confirm close from peer */
                rc = smc_close_wr(conn);
index 1a16d51e2330bb43de8bdd67c18f8b03c1e3ee92..20b66e79c5d6305dddf120f09249654732842c00 100644 (file)
@@ -25,8 +25,9 @@
 #include "smc_cdc.h"
 #include "smc_close.h"
 
-#define SMC_LGR_NUM_INCR       256
-#define SMC_LGR_FREE_DELAY     (600 * HZ)
+#define SMC_LGR_NUM_INCR               256
+#define SMC_LGR_FREE_DELAY_SERV                (600 * HZ)
+#define SMC_LGR_FREE_DELAY_CLNT                (SMC_LGR_FREE_DELAY_SERV + 10)
 
 static u32 smc_lgr_num;                        /* unique link group number */
 
@@ -107,8 +108,15 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
                __smc_lgr_unregister_conn(conn);
        }
        write_unlock_bh(&lgr->conns_lock);
-       if (reduced && !lgr->conns_num)
-               schedule_delayed_work(&lgr->free_work, SMC_LGR_FREE_DELAY);
+       if (!reduced || lgr->conns_num)
+               return;
+       /* client link group creation always follows the server link group
+        * creation. For client use a somewhat higher removal delay time,
+        * otherwise there is a risk of out-of-sync link groups.
+        */
+       mod_delayed_work(system_wq, &lgr->free_work,
+                        lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT :
+                                                SMC_LGR_FREE_DELAY_SERV);
 }
 
 static void smc_lgr_free_work(struct work_struct *work)
index 547e0e113b17b19dc93e2606987a3dc0ff4c9c47..0b5852299158a71dea6c4f38b16482d80c078f56 100644 (file)
@@ -380,6 +380,7 @@ static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport)
        ndev = smcibdev->ibdev->get_netdev(smcibdev->ibdev, ibport);
        if (ndev) {
                memcpy(&smcibdev->mac, ndev->dev_addr, ETH_ALEN);
+               dev_put(ndev);
        } else if (!rc) {
                memcpy(&smcibdev->mac[ibport - 1][0],
                       &smcibdev->gid[ibport - 1].raw[8], 3);
index 78f7af28ae4f25d71469d54d44f98ef4e2df94eb..31f8453c25c5854340e7f701baf0990d0002376e 100644 (file)
@@ -181,8 +181,10 @@ static int smc_pnet_enter(struct smc_pnetentry *new_pnetelem)
                             sizeof(new_pnetelem->ndev->name)) ||
                    smc_pnet_same_ibname(pnetelem,
                                         new_pnetelem->smcibdev->ibdev->name,
-                                        new_pnetelem->ib_port))
+                                        new_pnetelem->ib_port)) {
+                       dev_put(pnetelem->ndev);
                        goto found;
+               }
        }
        list_add_tail(&new_pnetelem->list, &smc_pnettable.pnetlist);
        rc = 0;
index b17a333e9bb0733893d9e44d92fc7ddf2f75ffbf..3e631ae4b6b65f1ac9f32e5a22d6fdc5b294519f 100644 (file)
@@ -148,6 +148,8 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
                                read_done = sock_intr_errno(timeo);
                                break;
                        }
+                       if (!timeo)
+                               return -EAGAIN;
                }
 
                if (!atomic_read(&conn->bytes_to_rcv)) {
index 3c656beb8820cb16a28f379ee32f2f39a8ce6631..3866573288ddaf67f14943122ed6dbd941e6df3b 100644 (file)
@@ -24,6 +24,8 @@
 #include "smc_cdc.h"
 #include "smc_tx.h"
 
+#define SMC_TX_WORK_DELAY      HZ
+
 /***************************** sndbuf producer *******************************/
 
 /* callback implementation for sk.sk_write_space()
@@ -406,7 +408,8 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
                                goto out_unlock;
                        }
                        rc = 0;
-                       schedule_work(&conn->tx_work);
+                       schedule_delayed_work(&conn->tx_work,
+                                             SMC_TX_WORK_DELAY);
                }
                goto out_unlock;
        }
@@ -430,7 +433,7 @@ out_unlock:
  */
 static void smc_tx_work(struct work_struct *work)
 {
-       struct smc_connection *conn = container_of(work,
+       struct smc_connection *conn = container_of(to_delayed_work(work),
                                                   struct smc_connection,
                                                   tx_work);
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
@@ -468,7 +471,8 @@ void smc_tx_consumer_update(struct smc_connection *conn)
                if (!rc)
                        rc = smc_cdc_msg_send(conn, wr_buf, pend);
                if (rc < 0) {
-                       schedule_work(&conn->tx_work);
+                       schedule_delayed_work(&conn->tx_work,
+                                             SMC_TX_WORK_DELAY);
                        return;
                }
                smc_curs_write(&conn->rx_curs_confirmed,
@@ -487,6 +491,6 @@ void smc_tx_consumer_update(struct smc_connection *conn)
 void smc_tx_init(struct smc_sock *smc)
 {
        smc->sk.sk_write_space = smc_tx_write_space;
-       INIT_WORK(&smc->conn.tx_work, smc_tx_work);
+       INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
        spin_lock_init(&smc->conn.send_lock);
 }
index ab56bda667837c3944eebfdfe921a2397fa52414..525d91e0d57e2fb3fa00ac9435df8a43bf0ea719 100644 (file)
@@ -244,7 +244,7 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
        int rc;
 
        ib_req_notify_cq(link->smcibdev->roce_cq_send,
-                        IB_CQ_SOLICITED_MASK | IB_CQ_REPORT_MISSED_EVENTS);
+                        IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
        pend = container_of(priv, struct smc_wr_tx_pend, priv);
        rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx],
                          &failed_wr);
index d4ea46a5f233b42d56d2884bf1532ca7d9374a94..c5fda15ba3193f811151043ac3675a2ebfb15c38 100644 (file)
@@ -49,7 +49,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
 {
        /* Unrecoverable error in receive */
 
-       del_timer(&strp->msg_timer);
+       cancel_delayed_work(&strp->msg_timer_work);
 
        if (strp->stopped)
                return;
@@ -68,7 +68,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
 static void strp_start_timer(struct strparser *strp, long timeo)
 {
        if (timeo)
-               mod_timer(&strp->msg_timer, timeo);
+               mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo);
 }
 
 /* Lower lock held */
@@ -319,7 +319,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                eaten += (cand_len - extra);
 
                /* Hurray, we have a new message! */
-               del_timer(&strp->msg_timer);
+               cancel_delayed_work(&strp->msg_timer_work);
                strp->skb_head = NULL;
                STRP_STATS_INCR(strp->stats.msgs);
 
@@ -450,9 +450,10 @@ static void strp_work(struct work_struct *w)
        do_strp_work(container_of(w, struct strparser, work));
 }
 
-static void strp_msg_timeout(unsigned long arg)
+static void strp_msg_timeout(struct work_struct *w)
 {
-       struct strparser *strp = (struct strparser *)arg;
+       struct strparser *strp = container_of(w, struct strparser,
+                                             msg_timer_work.work);
 
        /* Message assembly timed out */
        STRP_STATS_INCR(strp->stats.msg_timeouts);
@@ -505,9 +506,7 @@ int strp_init(struct strparser *strp, struct sock *sk,
        strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done;
        strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp;
 
-       setup_timer(&strp->msg_timer, strp_msg_timeout,
-                   (unsigned long)strp);
-
+       INIT_DELAYED_WORK(&strp->msg_timer_work, strp_msg_timeout);
        INIT_WORK(&strp->work, strp_work);
 
        return 0;
@@ -532,7 +531,7 @@ void strp_done(struct strparser *strp)
 {
        WARN_ON(!strp->stopped);
 
-       del_timer_sync(&strp->msg_timer);
+       cancel_delayed_work_sync(&strp->msg_timer_work);
        cancel_work_sync(&strp->work);
 
        if (strp->skb_head) {
index e741ec2b4d8e6ea5d08a8e942e3021e5a20e4c6a..898485e3ece4e82a93d7759af2d77fd509a8676d 100644 (file)
@@ -1333,7 +1333,7 @@ void xprt_release(struct rpc_task *task)
                rpc_count_iostats(task, task->tk_client->cl_metrics);
        spin_lock(&xprt->recv_lock);
        if (!list_empty(&req->rq_list)) {
-               list_del(&req->rq_list);
+               list_del_init(&req->rq_list);
                xprt_wait_on_pinned_rqst(req);
        }
        spin_unlock(&xprt->recv_lock);
@@ -1445,6 +1445,23 @@ out:
        return xprt;
 }
 
+static void xprt_destroy_cb(struct work_struct *work)
+{
+       struct rpc_xprt *xprt =
+               container_of(work, struct rpc_xprt, task_cleanup);
+
+       rpc_xprt_debugfs_unregister(xprt);
+       rpc_destroy_wait_queue(&xprt->binding);
+       rpc_destroy_wait_queue(&xprt->pending);
+       rpc_destroy_wait_queue(&xprt->sending);
+       rpc_destroy_wait_queue(&xprt->backlog);
+       kfree(xprt->servername);
+       /*
+        * Tear down transport state and free the rpc_xprt
+        */
+       xprt->ops->destroy(xprt);
+}
+
 /**
  * xprt_destroy - destroy an RPC transport, killing off all requests.
  * @xprt: transport to destroy
@@ -1454,22 +1471,19 @@ static void xprt_destroy(struct rpc_xprt *xprt)
 {
        dprintk("RPC:       destroying transport %p\n", xprt);
 
-       /* Exclude transport connect/disconnect handlers */
+       /*
+        * Exclude transport connect/disconnect handlers and autoclose
+        */
        wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
 
        del_timer_sync(&xprt->timer);
 
-       rpc_xprt_debugfs_unregister(xprt);
-       rpc_destroy_wait_queue(&xprt->binding);
-       rpc_destroy_wait_queue(&xprt->pending);
-       rpc_destroy_wait_queue(&xprt->sending);
-       rpc_destroy_wait_queue(&xprt->backlog);
-       cancel_work_sync(&xprt->task_cleanup);
-       kfree(xprt->servername);
        /*
-        * Tear down transport state and free the rpc_xprt
+        * Destroy sockets etc from the system workqueue so they can
+        * safely flush receive work running on rpciod.
         */
-       xprt->ops->destroy(xprt);
+       INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
+       schedule_work(&xprt->task_cleanup);
 }
 
 static void xprt_destroy_kref(struct kref *kref)
index 5a936a6a31a3245cc6ab0f6e9804d268bc198261..df062e086bdbbda8eb9ec724754937ea42134505 100644 (file)
@@ -401,7 +401,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
        if (unlikely(n != mw->mw_nents))
                goto out_mapmr_err;
 
-       dprintk("RPC:       %s: Using frmr %p to map %u segments (%u bytes)\n",
+       dprintk("RPC:       %s: Using frmr %p to map %u segments (%llu bytes)\n",
                __func__, frmr, mw->mw_nents, mr->length);
 
        key = (u8)(mr->rkey & 0x000000FF);
index 9b5de31aa42939cbc3f6aa4a0a94d7a28f711e84..c1841f234a717fdcf46413e25cb1e265912a2dfb 100644 (file)
@@ -2203,7 +2203,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
        struct sock_xprt *transport =
                container_of(work, struct sock_xprt, connect_worker.work);
        struct rpc_xprt *xprt = &transport->xprt;
-       struct socket *sock = transport->sock;
+       struct socket *sock;
        int status = -EIO;
 
        sock = xs_create_sock(xprt, transport,
index 7d99029df342f15b28c1f8e49d43bc90c709d1b0..a140dd4a84afc44a2574b2fac25969ceba28aaf9 100644 (file)
@@ -233,7 +233,7 @@ static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
        struct sk_buff_head xmitq;
        int rc = 0;
 
-       __skb_queue_head_init(&xmitq);
+       skb_queue_head_init(&xmitq);
        tipc_bcast_lock(net);
        if (tipc_link_bc_peers(l))
                rc = tipc_link_xmit(l, pkts, &xmitq);
@@ -263,7 +263,7 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
        u32 dst, selector;
 
        selector = msg_link_selector(buf_msg(skb_peek(pkts)));
-       __skb_queue_head_init(&_pkts);
+       skb_queue_head_init(&_pkts);
 
        list_for_each_entry_safe(n, tmp, &dests->list, list) {
                dst = n->value;
index 6ef379f004ac6da5ef908911368149e10186b5c4..17146c16ee2df5bdefcb949ecb40dd81b00f9940 100644 (file)
@@ -551,7 +551,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
                return false;
        if (msg_errcode(msg))
                return false;
-       *err = -TIPC_ERR_NO_NAME;
+       *err = TIPC_ERR_NO_NAME;
        if (skb_linearize(skb))
                return false;
        msg = buf_msg(skb);
@@ -568,6 +568,14 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
        msg_set_destnode(msg, dnode);
        msg_set_destport(msg, dport);
        *err = TIPC_OK;
+
+       if (!skb_cloned(skb))
+               return true;
+
+       /* Unclone buffer in case it was bundled */
+       if (pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
+               return false;
+
        return true;
 }
 
index 4d9679701a6df5113df3e24a1c9b86e2a63b3710..384c84e83462e51d24e469515f4b52f8dcf55877 100644 (file)
@@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
        err = -ENOENT;
        if (sk == NULL)
                goto out_nosk;
+       if (!net_eq(sock_net(sk), net))
+               goto out;
 
        err = sock_diag_check_cookie(sk, req->udiag_cookie);
        if (err)
index 14ed5a344cdf302ba3f2d8e9dec4fb7c66fdd239..e21991fe883a7c5f03a3d6eefb52518ef9a67d6c 100644 (file)
@@ -310,11 +310,15 @@ static void hvs_close_connection(struct vmbus_channel *chan)
        struct sock *sk = get_per_channel_state(chan);
        struct vsock_sock *vsk = vsock_sk(sk);
 
+       lock_sock(sk);
+
        sk->sk_state = SS_UNCONNECTED;
        sock_set_flag(sk, SOCK_DONE);
        vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;
 
        sk->sk_state_change(sk);
+
+       release_sock(sk);
 }
 
 static void hvs_open_connection(struct vmbus_channel *chan)
@@ -344,6 +348,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
        if (!sk)
                return;
 
+       lock_sock(sk);
+
        if ((conn_from_host && sk->sk_state != VSOCK_SS_LISTEN) ||
            (!conn_from_host && sk->sk_state != SS_CONNECTING))
                goto out;
@@ -395,9 +401,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
 
                vsock_insert_connected(vnew);
 
-               lock_sock(sk);
                vsock_enqueue_accept(sk, new);
-               release_sock(sk);
        } else {
                sk->sk_state = SS_CONNECTED;
                sk->sk_socket->state = SS_CONNECTED;
@@ -410,6 +414,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
 out:
        /* Release refcnt obtained when we called vsock_find_bound_socket() */
        sock_put(sk);
+
+       release_sock(sk);
 }
 
 static u32 hvs_get_local_cid(void)
@@ -476,13 +482,21 @@ out:
 
 static void hvs_release(struct vsock_sock *vsk)
 {
+       struct sock *sk = sk_vsock(vsk);
        struct hvsock *hvs = vsk->trans;
-       struct vmbus_channel *chan = hvs->chan;
+       struct vmbus_channel *chan;
 
+       lock_sock(sk);
+
+       sk->sk_state = SS_DISCONNECTING;
+       vsock_remove_sock(vsk);
+
+       release_sock(sk);
+
+       chan = hvs->chan;
        if (chan)
                hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN);
 
-       vsock_remove_sock(vsk);
 }
 
 static void hvs_destruct(struct vsock_sock *vsk)
index 0df8023f480bb69feb4cc2d3e0df6ea95f384e1a..d396cb61a280d24b6c4bd4733885ff9693f1c673 100644 (file)
@@ -549,6 +549,14 @@ nl80211_nan_srf_policy[NL80211_NAN_SRF_ATTR_MAX + 1] = {
        [NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED },
 };
 
+/* policy for packet pattern attributes */
+static const struct nla_policy
+nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
+       [NL80211_PKTPAT_MASK] = { .type = NLA_BINARY, },
+       [NL80211_PKTPAT_PATTERN] = { .type = NLA_BINARY, },
+       [NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 },
+};
+
 static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
                                     struct netlink_callback *cb,
                                     struct cfg80211_registered_device **rdev,
@@ -9987,6 +9995,9 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
                if (err)
                        return err;
 
+               if (!setup.chandef.chan)
+                       return -EINVAL;
+
                err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band,
                                              &setup.beacon_rate);
                if (err)
@@ -10529,7 +10540,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                        u8 *mask_pat;
 
                        nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
-                                        NULL, info->extack);
+                                        nl80211_packet_pattern_policy,
+                                        info->extack);
                        err = -EINVAL;
                        if (!pat_tb[NL80211_PKTPAT_MASK] ||
                            !pat_tb[NL80211_PKTPAT_PATTERN])
@@ -10778,7 +10790,8 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
                            rem) {
                u8 *mask_pat;
 
-               nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, NULL, NULL);
+               nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+                                nl80211_packet_pattern_policy, NULL);
                if (!pat_tb[NL80211_PKTPAT_MASK] ||
                    !pat_tb[NL80211_PKTPAT_PATTERN])
                        return -EINVAL;
@@ -10903,6 +10916,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
        if (err)
                return err;
 
+       if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
+           !tb[NL80211_REKEY_DATA_KCK])
+               return -EINVAL;
        if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
                return -ERANGE;
        if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
index 0a49b88070d0b2d6a6fbcafe6e8816fded775261..b6533ecbf5b1903510b1bf4318b8dee4c4839896 100644 (file)
@@ -522,11 +522,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
                return -EOPNOTSUPP;
 
        if (wdev->current_bss) {
-               if (!prev_bssid)
-                       return -EALREADY;
-               if (prev_bssid &&
-                   !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
-                       return -ENOTCONN;
                cfg80211_unhold_bss(wdev->current_bss);
                cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
                wdev->current_bss = NULL;
@@ -1063,11 +1058,35 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
 
        ASSERT_WDEV_LOCK(wdev);
 
-       if (WARN_ON(wdev->connect_keys)) {
-               kzfree(wdev->connect_keys);
-               wdev->connect_keys = NULL;
+       /*
+        * If we have an ssid_len, we're trying to connect or are
+        * already connected, so reject a new SSID unless it's the
+        * same (which is the case for re-association.)
+        */
+       if (wdev->ssid_len &&
+           (wdev->ssid_len != connect->ssid_len ||
+            memcmp(wdev->ssid, connect->ssid, wdev->ssid_len)))
+               return -EALREADY;
+
+       /*
+        * If connected, reject (re-)association unless prev_bssid
+        * matches the current BSSID.
+        */
+       if (wdev->current_bss) {
+               if (!prev_bssid)
+                       return -EALREADY;
+               if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
+                       return -ENOTCONN;
        }
 
+       /*
+        * Reject if we're in the process of connecting with WEP,
+        * this case isn't very interesting and trying to handle
+        * it would make the code much more complex.
+        */
+       if (wdev->connect_keys)
+               return -EINPROGRESS;
+
        cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
                                  rdev->wiphy.ht_capa_mod_mask);
 
@@ -1118,7 +1137,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
 
        if (err) {
                wdev->connect_keys = NULL;
-               wdev->ssid_len = 0;
+               /*
+                * This could be reassoc getting refused, don't clear
+                * ssid_len in that case.
+                */
+               if (!wdev->current_bss)
+                       wdev->ssid_len = 0;
                return err;
        }
 
@@ -1145,6 +1169,14 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
        else if (wdev->ssid_len)
                err = rdev_disconnect(rdev, dev, reason);
 
+       /*
+        * Clear ssid_len unless we actually were fully connected,
+        * in which case cfg80211_disconnected() will take care of
+        * this later.
+        */
+       if (!wdev->current_bss)
+               wdev->ssid_len = 0;
+
        return err;
 }
 
index acf00104ef312b563be9f3aa9698fa2a9dbdfea7..30e5746085b8fcfc5aa8abc7a8a23753c510a630 100644 (file)
@@ -91,6 +91,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
        }
 
        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
+               xso->dev = NULL;
                dev_put(dev);
                return 0;
        }
index 2515cd2bc5db1a56915856aa5252f2e01592c9e7..8ac9d32fb79d8bbb9e6d6765c9d7eb8b13ebedb5 100644 (file)
@@ -429,7 +429,8 @@ resume:
        nf_reset(skb);
 
        if (decaps) {
-               skb->sp->olen = 0;
+               if (skb->sp)
+                       skb->sp->olen = 0;
                skb_dst_drop(skb);
                gro_cells_receive(&gro_cells, skb);
                return 0;
@@ -440,7 +441,8 @@ resume:
 
                err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async);
                if (xfrm_gro) {
-                       skb->sp->olen = 0;
+                       if (skb->sp)
+                               skb->sp->olen = 0;
                        skb_dst_drop(skb);
                        gro_cells_receive(&gro_cells, skb);
                        return err;
index f06253969972aa3489e557faf1ef76f54b1eb3d3..2746b62a8944e436d177892153326bb45fc462fa 100644 (file)
@@ -1573,6 +1573,14 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                        goto put_states;
                }
 
+               if (!dst_prev)
+                       dst0 = dst1;
+               else
+                       /* Ref count is taken during xfrm_alloc_dst()
+                        * No need to do dst_clone() on dst1
+                        */
+                       dst_prev->child = dst1;
+
                if (xfrm[i]->sel.family == AF_UNSPEC) {
                        inner_mode = xfrm_ip2inner_mode(xfrm[i],
                                                        xfrm_af2proto(family));
@@ -1584,14 +1592,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                } else
                        inner_mode = xfrm[i]->inner_mode;
 
-               if (!dst_prev)
-                       dst0 = dst1;
-               else
-                       /* Ref count is taken during xfrm_alloc_dst()
-                        * No need to do dst_clone() on dst1
-                        */
-                       dst_prev->child = dst1;
-
                xdst->route = dst;
                dst_copy_metrics(dst1, dst);
 
index 0dab1cd79ce4d1afe84ba9422a740689a9ebdf71..12213477cd3ad90af9dc2e1bed236e461621115b 100644 (file)
@@ -732,12 +732,12 @@ restart:
                        }
                }
        }
+out:
+       spin_unlock_bh(&net->xfrm.xfrm_state_lock);
        if (cnt) {
                err = 0;
                xfrm_policy_cache_flush();
        }
-out:
-       spin_unlock_bh(&net->xfrm.xfrm_state_lock);
        return err;
 }
 EXPORT_SYMBOL(xfrm_state_flush);
index 2bfbd9121e3b21b0eb793d2d3a685bd4cebde22b..e44a0fed48dd088ac95a0726a5f2b58b0f07c5bb 100644 (file)
@@ -657,6 +657,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        if (err < 0) {
                x->km.state = XFRM_STATE_DEAD;
+               xfrm_dev_state_delete(x);
                __xfrm_state_put(x);
                goto out;
        }
@@ -1692,32 +1693,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
 
 static int xfrm_dump_policy_done(struct netlink_callback *cb)
 {
-       struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+       struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
        struct net *net = sock_net(cb->skb->sk);
 
        xfrm_policy_walk_done(walk, net);
        return 0;
 }
 
+static int xfrm_dump_policy_start(struct netlink_callback *cb)
+{
+       struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
+
+       BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
+
+       xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
+       return 0;
+}
+
 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
-       struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+       struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
        struct xfrm_dump_info info;
 
-       BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
-                    sizeof(cb->args) - sizeof(cb->args[0]));
-
        info.in_skb = cb->skb;
        info.out_skb = skb;
        info.nlmsg_seq = cb->nlh->nlmsg_seq;
        info.nlmsg_flags = NLM_F_MULTI;
 
-       if (!cb->args[0]) {
-               cb->args[0] = 1;
-               xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
-       }
-
        (void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
 
        return skb->len;
@@ -2473,6 +2476,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
 
 static const struct xfrm_link {
        int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
+       int (*start)(struct netlink_callback *);
        int (*dump)(struct sk_buff *, struct netlink_callback *);
        int (*done)(struct netlink_callback *);
        const struct nla_policy *nla_pol;
@@ -2486,6 +2490,7 @@ static const struct xfrm_link {
        [XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
        [XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy    },
        [XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
+                                                  .start = xfrm_dump_policy_start,
                                                   .dump = xfrm_dump_policy,
                                                   .done = xfrm_dump_policy_done },
        [XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
@@ -2538,6 +2543,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 
                {
                        struct netlink_dump_control c = {
+                               .start = link->start,
                                .dump = link->dump,
                                .done = link->done,
                        };
index f9b38ef82dc2449e56094b9c8c0c805673c0a7b0..52b0053274f425a6a07eb70dfbce89ca9c32d4fb 100644 (file)
@@ -62,7 +62,7 @@ int bpf_prog2(struct __sk_buff *skb)
                ret = 1;
 
        bpf_printk("sockmap: %d -> %d @ %d\n", lport, bpf_ntohl(rport), ret);
-       return bpf_sk_redirect_map(&sock_map, ret, 0);
+       return bpf_sk_redirect_map(skb, &sock_map, ret, 0);
 }
 
 SEC("sockops")
index bc7fcf010a5b4ccb7c25dcfd651e53c61ce80b1a..5522692100ba0d01d3cd669b7ea1bbdf84b0b72e 100644 (file)
@@ -78,29 +78,37 @@ static int simple_thread_fn(void *arg)
 }
 
 static DEFINE_MUTEX(thread_mutex);
+static int simple_thread_cnt;
 
 int foo_bar_reg(void)
 {
+       mutex_lock(&thread_mutex);
+       if (simple_thread_cnt++)
+               goto out;
+
        pr_info("Starting thread for foo_bar_fn\n");
        /*
         * We shouldn't be able to start a trace when the module is
         * unloading (there's other locks to prevent that). But
         * for consistency sake, we still take the thread_mutex.
         */
-       mutex_lock(&thread_mutex);
        simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
+ out:
        mutex_unlock(&thread_mutex);
        return 0;
 }
 
 void foo_bar_unreg(void)
 {
-       pr_info("Killing thread for foo_bar_fn\n");
-       /* protect against module unloading */
        mutex_lock(&thread_mutex);
+       if (--simple_thread_cnt)
+               goto out;
+
+       pr_info("Killing thread for foo_bar_fn\n");
        if (simple_tsk_fn)
                kthread_stop(simple_tsk_fn);
        simple_tsk_fn = NULL;
+ out:
        mutex_unlock(&thread_mutex);
 }
 
index 2e3a10e79ca9ed645eaedbf0f8242332e96ba400..061d0c3a420ad58084df157a143e76133f304dc2 100644 (file)
@@ -265,6 +265,8 @@ objtool_args += --no-fp
 endif
 ifdef CONFIG_GCOV_KERNEL
 objtool_args += --no-unreachable
+else
+objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
 endif
 
 # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
index 16923ba4b5b1005158508b4e0c9d2b3d947c119c..756d14f0d76384274f9dd04ac1045a0d00f87e2c 100644 (file)
@@ -97,7 +97,6 @@ vmlinux.o: FORCE
        $(call cmd,kernel-mod)
 
 # Declare generated files as targets for modpost
-$(symverfile):         __modpost ;
 $(modules:.ko=.mod.c): __modpost ;
 
 
index dd2c262aebbf2a43283f38badcf445702c59c9e9..8b80bac055e490219f97d913f33bce165960fe92 100755 (executable)
@@ -6390,7 +6390,7 @@ sub process {
                exit(0);
        }
 
-       if (!$is_patch && $file !~ /cover-letter\.patch$/) {
+       if (!$is_patch && $filename !~ /cover-letter\.patch$/) {
                ERROR("NOT_UNIFIED_DIFF",
                      "Does not appear to be a unified-diff format patch\n");
        }
index f9a3d8d23c644a536ce0c9aaeed2c04b580af2f4..8c4fbad2055e59e973a82ce8aa6c00a58047e315 100755 (executable)
@@ -86,6 +86,7 @@ eod
 compile_to_dts() {
 
        dtx="$1"
+       dtc_include="$2"
 
        if [ -d "${dtx}" ] ; then
 
@@ -113,7 +114,7 @@ compile_to_dts() {
                # -----  input is DTS (source)
 
                if ( cpp ${cpp_flags} -x assembler-with-cpp ${dtx} \
-                       | ${DTC} -I dts ) ; then
+                       | ${DTC} ${dtc_include} -I dts ) ; then
                        return
                fi
 
@@ -320,18 +321,13 @@ fi
 
 cpp_flags="\
        -nostdinc                                  \
-       -I${srctree}/arch/${ARCH}/boot/dts         \
        -I${srctree}/scripts/dtc/include-prefixes  \
-       -I${srctree}/drivers/of/testcase-data      \
        -undef -D__DTS__"
 
-dtc_flags="\
-       -i ${srctree}/arch/${ARCH}/boot/dts/ \
-       -i ${srctree}/kernel/dts             \
-       ${dtx_path_1_dtc_include}            \
-       ${dtx_path_2_dtc_include}"
-
-DTC="${DTC} ${dtc_flags} -O dts -qq -f ${dtc_sort} -o -"
+DTC="\
+       ${DTC}                                     \
+       -i ${srctree}/scripts/dtc/include-prefixes \
+       -O dts -qq -f ${dtc_sort} -o -"
 
 
 # -----  do the diff or decompile
@@ -339,11 +335,11 @@ DTC="${DTC} ${dtc_flags} -O dts -qq -f ${dtc_sort} -o -"
 if (( ${cmd_diff} )) ; then
 
        diff ${diff_flags} --label "${dtx_file_1}" --label "${dtx_file_2}" \
-               <(compile_to_dts "${dtx_file_1}") \
-               <(compile_to_dts "${dtx_file_2}")
+               <(compile_to_dts "${dtx_file_1}" "${dtx_path_1_dtc_include}") \
+               <(compile_to_dts "${dtx_file_2}" "${dtx_path_2_dtc_include}")
 
 else
 
-       compile_to_dts "${dtx_file_1}"
+       compile_to_dts "${dtx_file_1}" "${dtx_path_1_dtc_include}"
 
 fi
index 29df825d375c6eb4b5f2ac4d15b5f3df90ae38c4..2f6ce802397db2833e26e255f133076518f79046 100755 (executable)
@@ -103,11 +103,12 @@ __faddr2line() {
 
        # Go through each of the object's symbols which match the func name.
        # In rare cases there might be duplicates.
+       file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}')
        while read symbol; do
                local fields=($symbol)
                local sym_base=0x${fields[0]}
                local sym_type=${fields[1]}
-               local sym_end=0x${fields[3]}
+               local sym_end=${fields[3]}
 
                # calculate the size
                local sym_size=$(($sym_end - $sym_base))
@@ -157,7 +158,7 @@ __faddr2line() {
                addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
                DONE=1
 
-       done < <(nm -n $objfile | awk -v fn=$func '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, $1 }')
+       done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
 }
 
 [[ $# -lt 2 ]] && usage
index 5d554419170b7d54ec82ddb1d31093d3eab0aa7d..9ee9bf7fd1a2113bfb869cf53b6d26d2f0852455 100644 (file)
@@ -158,7 +158,7 @@ static int read_symbol(FILE *in, struct sym_entry *s)
        else if (str[0] == '$')
                return -1;
        /* exclude debugging symbols */
-       else if (stype == 'N')
+       else if (stype == 'N' || stype == 'n')
                return -1;
 
        /* include the type field in the symbol name, so that it gets
diff --git a/scripts/mkversion b/scripts/mkversion
deleted file mode 100644 (file)
index c12addc..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-if [ ! -f .version ]
-then
-    echo 1
-else
-    expr 0`cat .version` + 1
-fi
index 71b4a8af9d4dcdf1dcec434b6be98636f468b2dd..73f9f3192b9fbf8770e6ef632723d23e8efb5b63 100644 (file)
@@ -50,8 +50,6 @@ rpm-pkg rpm: FORCE
        $(MAKE) clean
        $(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec
        $(call cmd,src_tar,$(KERNELPATH),kernel.spec)
-       $(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
-       mv -f $(objtree)/.tmp_version $(objtree)/.version
        rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz
        rm $(KERNELPATH).tar.gz kernel.spec
 
@@ -60,9 +58,6 @@ rpm-pkg rpm: FORCE
 binrpm-pkg: FORCE
        $(MAKE) KBUILD_SRC=
        $(CONFIG_SHELL) $(MKSPEC) prebuilt > $(objtree)/binkernel.spec
-       $(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
-       mv -f $(objtree)/.tmp_version $(objtree)/.version
-
        rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \
                $(UTS_MACHINE) -bb $(objtree)/binkernel.spec
        rm binkernel.spec
index aad67000e4dd76796894cbd1975d087c559ecc5c..0bc87473f68f817b81640591f75f18c3b068351c 100755 (executable)
@@ -92,12 +92,10 @@ else
 fi
 sourcename=$KDEB_SOURCENAME
 tmpdir="$objtree/debian/tmp"
-fwdir="$objtree/debian/fwtmp"
 kernel_headers_dir="$objtree/debian/hdrtmp"
 libc_headers_dir="$objtree/debian/headertmp"
 dbg_dir="$objtree/debian/dbgtmp"
 packagename=linux-image-$version
-fwpackagename=linux-firmware-image-$version
 kernel_headers_packagename=linux-headers-$version
 libc_headers_packagename=linux-libc-dev
 dbg_packagename=$packagename-dbg
@@ -126,10 +124,9 @@ esac
 BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)"
 
 # Setup the directory structure
-rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files
+rm -rf "$tmpdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files
 mkdir -m 755 -p "$tmpdir/DEBIAN"
 mkdir -p "$tmpdir/lib" "$tmpdir/boot"
-mkdir -p "$fwdir/lib/firmware/$version/"
 mkdir -p "$kernel_headers_dir/lib/modules/$version/"
 
 # Build and install the kernel
@@ -306,7 +303,6 @@ else
        cat <<EOF >> debian/control
 
 Package: $packagename
-Suggests: $fwpackagename
 Architecture: any
 Description: Linux kernel, version $version
  This package contains the Linux kernel, modules and corresponding other
@@ -345,22 +341,6 @@ Description: Linux kernel headers for $KERNELRELEASE on \${kernel:debarch}
  This is useful for people who need to build external modules
 EOF
 
-# Do we have firmware? Move it out of the way and build it into a package.
-if [ -e "$tmpdir/lib/firmware" ]; then
-       mv "$tmpdir/lib/firmware"/* "$fwdir/lib/firmware/$version/"
-       rmdir "$tmpdir/lib/firmware"
-
-       cat <<EOF >> debian/control
-
-Package: $fwpackagename
-Architecture: all
-Description: Linux kernel firmware, version $version
- This package contains firmware from the Linux kernel, version $version.
-EOF
-
-       create_package "$fwpackagename" "$fwdir"
-fi
-
 cat <<EOF >> debian/control
 
 Package: $libc_headers_packagename
index bb43f153fd8e70fd48c32a2f92fa6699242501f7..f47f17aae135188eae37db2178db6c9911a80023 100755 (executable)
@@ -27,9 +27,7 @@ __KERNELRELEASE=`echo $KERNELRELEASE | sed -e "s/-/_/g"`
 echo "Name: kernel"
 echo "Summary: The Linux Kernel"
 echo "Version: $__KERNELRELEASE"
-# we need to determine the NEXT version number so that uname and
-# rpm -q will agree
-echo "Release: `. $srctree/scripts/mkversion`"
+echo "Release: $(cat .version 2>/dev/null || echo 1)"
 echo "License: GPL"
 echo "Group: System Environment/Kernel"
 echo "Vendor: The Linux Community"
@@ -77,7 +75,7 @@ fi
 echo "%build"
 
 if ! $PREBUILT; then
-echo "make clean && make %{?_smp_mflags}"
+echo "make clean && make %{?_smp_mflags} KBUILD_BUILD_VERSION=%{release}"
 echo ""
 fi
 
@@ -88,11 +86,8 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot/efi $RPM_BUILD_ROOT/lib/modules'
 echo "%else"
 echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
 echo "%endif"
-echo 'mkdir -p $RPM_BUILD_ROOT'"/lib/firmware/$KERNELRELEASE"
 
-echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= mod-fw= modules_install'
-echo 'INSTALL_FW_PATH=$RPM_BUILD_ROOT'"/lib/firmware/$KERNELRELEASE"
-echo 'make INSTALL_FW_PATH=$INSTALL_FW_PATH' firmware_install
+echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
 echo "%ifarch ia64"
 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
@@ -119,7 +114,7 @@ if ! $PREBUILT; then
 echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/build"
 echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/source"
 echo "mkdir -p "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE"
-echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude=firmware --exclude .config.old --exclude .missing-syscalls.d\""
+echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude .config.old --exclude .missing-syscalls.d\""
 echo "tar "'$EXCLUDES'" -cf- . | (cd "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE;tar xvf -)"
 echo 'cd $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE"
 echo "ln -sf /usr/src/kernels/$KERNELRELEASE build"
@@ -154,7 +149,6 @@ echo '%defattr (-, root, root)'
 echo "/lib/modules/$KERNELRELEASE"
 echo "%exclude /lib/modules/$KERNELRELEASE/build"
 echo "%exclude /lib/modules/$KERNELRELEASE/source"
-echo "/lib/firmware/$KERNELRELEASE"
 echo "/boot/*"
 echo ""
 echo "%files headers"
index 400ef35169c540426acba1ffbfaf42e44fe95c1a..aa0cc49ad1adc5d07ee4e0f9608809ce2e2de11a 100644 (file)
@@ -53,6 +53,7 @@ acumulator||accumulator
 adapater||adapter
 addional||additional
 additionaly||additionally
+additonal||additional
 addres||address
 adddress||address
 addreses||addresses
@@ -67,6 +68,8 @@ adviced||advised
 afecting||affecting
 againt||against
 agaist||against
+aggreataon||aggregation
+aggreation||aggregation
 albumns||albums
 alegorical||allegorical
 algined||aligned
@@ -80,6 +83,8 @@ aligment||alignment
 alignement||alignment
 allign||align
 alligned||aligned
+alllocate||allocate
+alloated||allocated
 allocatote||allocate
 allocatrd||allocated
 allocte||allocate
@@ -171,6 +176,7 @@ availale||available
 availavility||availability
 availble||available
 availiable||available
+availible||available
 avalable||available
 avaliable||available
 aysnc||async
@@ -203,6 +209,7 @@ broadcat||broadcast
 cacluated||calculated
 caculation||calculation
 calender||calendar
+calescing||coalescing
 calle||called
 callibration||calibration
 calucate||calculate
@@ -210,6 +217,7 @@ calulate||calculate
 cancelation||cancellation
 cancle||cancel
 capabilites||capabilities
+capabilty||capability
 capabitilies||capabilities
 capatibilities||capabilities
 capapbilities||capabilities
@@ -302,6 +310,7 @@ containts||contains
 contaisn||contains
 contant||contact
 contence||contents
+continious||continuous
 continous||continuous
 continously||continuously
 continueing||continuing
@@ -393,6 +402,7 @@ differrence||difference
 diffrent||different
 diffrentiate||differentiate
 difinition||definition
+dimesions||dimensions
 diplay||display
 direectly||directly
 disassocation||disassociation
@@ -449,6 +459,7 @@ equiped||equipped
 equivelant||equivalent
 equivilant||equivalent
 eror||error
+errorr||error
 estbalishment||establishment
 etsablishment||establishment
 etsbalishment||establishment
@@ -481,6 +492,7 @@ failied||failed
 faillure||failure
 failue||failure
 failuer||failure
+failng||failing
 faireness||fairness
 falied||failed
 faliure||failure
@@ -493,6 +505,7 @@ fetaure||feature
 fetaures||features
 fileystem||filesystem
 fimware||firmware
+firware||firmware
 finanize||finalize
 findn||find
 finilizes||finalizes
@@ -502,6 +515,7 @@ folloing||following
 followign||following
 followings||following
 follwing||following
+fonud||found
 forseeable||foreseeable
 forse||force
 fortan||fortran
@@ -532,6 +546,7 @@ grabing||grabbing
 grahical||graphical
 grahpical||graphical
 grapic||graphic
+grranted||granted
 guage||gauge
 guarenteed||guaranteed
 guarentee||guarantee
@@ -543,6 +558,7 @@ happend||happened
 harware||hardware
 heirarchically||hierarchically
 helpfull||helpful
+hybernate||hibernate
 hierachy||hierarchy
 hierarchie||hierarchy
 howver||however
@@ -565,16 +581,19 @@ implemenation||implementation
 implementaiton||implementation
 implementated||implemented
 implemention||implementation
+implementd||implemented
 implemetation||implementation
 implemntation||implementation
 implentation||implementation
 implmentation||implementation
 implmenting||implementing
+incative||inactive
 incomming||incoming
 incompatabilities||incompatibilities
 incompatable||incompatible
 inconsistant||inconsistent
 increas||increase
+incremeted||incremented
 incrment||increment
 indendation||indentation
 indended||intended
@@ -619,6 +638,7 @@ interger||integer
 intermittant||intermittent
 internel||internal
 interoprability||interoperability
+interuupt||interrupt
 interrface||interface
 interrrupt||interrupt
 interrup||interrupt
@@ -638,8 +658,10 @@ intrrupt||interrupt
 intterrupt||interrupt
 intuative||intuitive
 invaid||invalid
+invald||invalid
 invalde||invalid
 invalide||invalid
+invalidiate||invalidate
 invalud||invalid
 invididual||individual
 invokation||invocation
@@ -713,6 +735,7 @@ misformed||malformed
 mispelled||misspelled
 mispelt||misspelt
 mising||missing
+mismactch||mismatch
 missmanaged||mismanaged
 missmatch||mismatch
 miximum||maximum
@@ -731,6 +754,7 @@ multidimensionnal||multidimensional
 multple||multiple
 mumber||number
 muticast||multicast
+mutilcast||multicast
 mutiple||multiple
 mutli||multi
 nams||names
@@ -834,6 +858,7 @@ posible||possible
 positon||position
 possibilites||possibilities
 powerfull||powerful
+preample||preamble
 preapre||prepare
 preceeded||preceded
 preceeding||preceding
@@ -1059,6 +1084,7 @@ sturcture||structure
 subdirectoires||subdirectories
 suble||subtle
 substract||subtract
+submition||submission
 succesfully||successfully
 succesful||successful
 successed||succeeded
@@ -1078,6 +1104,7 @@ suppoted||supported
 suppported||supported
 suppport||support
 supress||suppress
+surpressed||suppressed
 surpresses||suppresses
 susbsystem||subsystem
 suspeneded||suspended
@@ -1091,6 +1118,7 @@ swithced||switched
 swithcing||switching
 swithed||switched
 swithing||switching
+swtich||switch
 symetric||symmetric
 synax||syntax
 synchonized||synchronized
@@ -1111,7 +1139,9 @@ therfore||therefore
 thier||their
 threds||threads
 threshhold||threshold
+thresold||threshold
 throught||through
+troughput||throughput
 thses||these
 tiggered||triggered
 tipically||typically
@@ -1120,6 +1150,7 @@ tmis||this
 torerable||tolerable
 tramsmitted||transmitted
 tramsmit||transmit
+tranasction||transaction
 tranfer||transfer
 transciever||transceiver
 transferd||transferred
@@ -1133,6 +1164,7 @@ trasmission||transmission
 treshold||threshold
 trigerring||triggering
 trun||turn
+tunning||tuning
 ture||true
 tyep||type
 udpate||update
@@ -1199,6 +1231,7 @@ visiters||visitors
 vitual||virtual
 wakeus||wakeups
 wating||waiting
+wiat||wait
 wether||whether
 whataver||whatever
 whcih||which
index a16b195274dec2c48d3ee41e3cdad1b6460138c1..81a34426d0240f92187e19dde633d3e8e5b365ef 100644 (file)
@@ -4,7 +4,7 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
 
 apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \
               path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
-              resource.o secid.o file.o policy_ns.o label.o
+              resource.o secid.o file.o policy_ns.o label.o mount.o
 apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
 
 clean-files := capability_names.h rlim_names.h
index 853c2ec8e0c9521e53fe2c6edecb0b7913cfad26..caaf51dda64812067e07d1c9079c7750c4296e11 100644 (file)
@@ -32,6 +32,7 @@
 #include "include/audit.h"
 #include "include/context.h"
 #include "include/crypto.h"
+#include "include/ipc.h"
 #include "include/policy_ns.h"
 #include "include/label.h"
 #include "include/policy.h"
@@ -248,8 +249,10 @@ static struct dentry *aafs_create(const char *name, umode_t mode,
 
        inode_lock(dir);
        dentry = lookup_one_len(name, parent, strlen(name));
-       if (IS_ERR(dentry))
+       if (IS_ERR(dentry)) {
+               error = PTR_ERR(dentry);
                goto fail_lock;
+       }
 
        if (d_really_is_positive(dentry)) {
                error = -EEXIST;
@@ -1443,6 +1446,10 @@ void __aafs_profile_migrate_dents(struct aa_profile *old,
 {
        int i;
 
+       AA_BUG(!old);
+       AA_BUG(!new);
+       AA_BUG(!mutex_is_locked(&profiles_ns(old)->lock));
+
        for (i = 0; i < AAFS_PROF_SIZEOF; i++) {
                new->dents[i] = old->dents[i];
                if (new->dents[i])
@@ -1506,6 +1513,9 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
        struct dentry *dent = NULL, *dir;
        int error;
 
+       AA_BUG(!profile);
+       AA_BUG(!mutex_is_locked(&profiles_ns(profile)->lock));
+
        if (!parent) {
                struct aa_profile *p;
                p = aa_deref_parent(profile);
@@ -1731,6 +1741,7 @@ void __aafs_ns_rmdir(struct aa_ns *ns)
 
        if (!ns)
                return;
+       AA_BUG(!mutex_is_locked(&ns->lock));
 
        list_for_each_entry(child, &ns->base.profiles, base.list)
                __aafs_profile_rmdir(child);
@@ -1903,6 +1914,10 @@ static struct aa_ns *__next_ns(struct aa_ns *root, struct aa_ns *ns)
 {
        struct aa_ns *parent, *next;
 
+       AA_BUG(!root);
+       AA_BUG(!ns);
+       AA_BUG(ns != root && !mutex_is_locked(&ns->parent->lock));
+
        /* is next namespace a child */
        if (!list_empty(&ns->sub_ns)) {
                next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list);
@@ -1937,6 +1952,9 @@ static struct aa_ns *__next_ns(struct aa_ns *root, struct aa_ns *ns)
 static struct aa_profile *__first_profile(struct aa_ns *root,
                                          struct aa_ns *ns)
 {
+       AA_BUG(!root);
+       AA_BUG(ns && !mutex_is_locked(&ns->lock));
+
        for (; ns; ns = __next_ns(root, ns)) {
                if (!list_empty(&ns->base.profiles))
                        return list_first_entry(&ns->base.profiles,
@@ -1959,6 +1977,8 @@ static struct aa_profile *__next_profile(struct aa_profile *p)
        struct aa_profile *parent;
        struct aa_ns *ns = p->ns;
 
+       AA_BUG(!mutex_is_locked(&profiles_ns(p)->lock));
+
        /* is next profile a child */
        if (!list_empty(&p->base.profiles))
                return list_first_entry(&p->base.profiles, typeof(*p),
@@ -2127,6 +2147,11 @@ static struct aa_sfs_entry aa_sfs_entry_ptrace[] = {
        { }
 };
 
+static struct aa_sfs_entry aa_sfs_entry_signal[] = {
+       AA_SFS_FILE_STRING("mask", AA_SFS_SIG_MASK),
+       { }
+};
+
 static struct aa_sfs_entry aa_sfs_entry_domain[] = {
        AA_SFS_FILE_BOOLEAN("change_hat",       1),
        AA_SFS_FILE_BOOLEAN("change_hatv",      1),
@@ -2151,9 +2176,14 @@ static struct aa_sfs_entry aa_sfs_entry_policy[] = {
        { }
 };
 
+static struct aa_sfs_entry aa_sfs_entry_mount[] = {
+       AA_SFS_FILE_STRING("mask", "mount umount pivot_root"),
+       { }
+};
+
 static struct aa_sfs_entry aa_sfs_entry_ns[] = {
        AA_SFS_FILE_BOOLEAN("profile",          1),
-       AA_SFS_FILE_BOOLEAN("pivot_root",       1),
+       AA_SFS_FILE_BOOLEAN("pivot_root",       0),
        { }
 };
 
@@ -2172,22 +2202,24 @@ static struct aa_sfs_entry aa_sfs_entry_features[] = {
        AA_SFS_DIR("policy",                    aa_sfs_entry_policy),
        AA_SFS_DIR("domain",                    aa_sfs_entry_domain),
        AA_SFS_DIR("file",                      aa_sfs_entry_file),
+       AA_SFS_DIR("mount",                     aa_sfs_entry_mount),
        AA_SFS_DIR("namespaces",                aa_sfs_entry_ns),
        AA_SFS_FILE_U64("capability",           VFS_CAP_FLAGS_MASK),
        AA_SFS_DIR("rlimit",                    aa_sfs_entry_rlimit),
        AA_SFS_DIR("caps",                      aa_sfs_entry_caps),
        AA_SFS_DIR("ptrace",                    aa_sfs_entry_ptrace),
+       AA_SFS_DIR("signal",                    aa_sfs_entry_signal),
        AA_SFS_DIR("query",                     aa_sfs_entry_query),
        { }
 };
 
 static struct aa_sfs_entry aa_sfs_entry_apparmor[] = {
-       AA_SFS_FILE_FOPS(".access", 0640, &aa_sfs_access),
+       AA_SFS_FILE_FOPS(".access", 0666, &aa_sfs_access),
        AA_SFS_FILE_FOPS(".stacked", 0444, &seq_ns_stacked_fops),
        AA_SFS_FILE_FOPS(".ns_stacked", 0444, &seq_ns_nsstacked_fops),
-       AA_SFS_FILE_FOPS(".ns_level", 0666, &seq_ns_level_fops),
-       AA_SFS_FILE_FOPS(".ns_name", 0640, &seq_ns_name_fops),
-       AA_SFS_FILE_FOPS("profiles", 0440, &aa_sfs_profiles_fops),
+       AA_SFS_FILE_FOPS(".ns_level", 0444, &seq_ns_level_fops),
+       AA_SFS_FILE_FOPS(".ns_name", 0444, &seq_ns_name_fops),
+       AA_SFS_FILE_FOPS("profiles", 0444, &aa_sfs_profiles_fops),
        AA_SFS_DIR("features", aa_sfs_entry_features),
        { }
 };
index 17a601c67b62c93f26246389b0ee8805850bee05..dd754b7850a82b4d129c11de0c55603de19268ba 100644 (file)
@@ -374,8 +374,8 @@ static const char *next_name(int xtype, const char *name)
  *
  * Returns: refcounted label, or NULL on failure (MAYBE NULL)
  */
-static struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
-                                      const char **name)
+struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
+                               const char **name)
 {
        struct aa_label *label = NULL;
        u32 xtype = xindex & AA_X_TYPE_MASK;
index aaf893f4e4f50c8f35f2e74c18a264f5ee583a39..829082c35faac7a932a568745068a2a3a979c124 100644 (file)
@@ -27,7 +27,9 @@
 #define AA_CLASS_NET           4
 #define AA_CLASS_RLIMITS       5
 #define AA_CLASS_DOMAIN                6
+#define AA_CLASS_MOUNT         7
 #define AA_CLASS_PTRACE                9
+#define AA_CLASS_SIGNAL                10
 #define AA_CLASS_LABEL         16
 
 #define AA_CLASS_LAST          AA_CLASS_LABEL
index c68839a44351e9b184951a6535fa574293c55876..620e811696592ddec6fb221e2bec07d436f258b4 100644 (file)
@@ -71,6 +71,10 @@ enum audit_type {
 #define OP_FMPROT "file_mprotect"
 #define OP_INHERIT "file_inherit"
 
+#define OP_PIVOTROOT "pivotroot"
+#define OP_MOUNT "mount"
+#define OP_UMOUNT "umount"
+
 #define OP_CREATE "create"
 #define OP_POST_CREATE "post_create"
 #define OP_BIND "bind"
@@ -86,6 +90,7 @@ enum audit_type {
 #define OP_SHUTDOWN "socket_shutdown"
 
 #define OP_PTRACE "ptrace"
+#define OP_SIGNAL "signal"
 
 #define OP_EXEC "exec"
 
@@ -122,14 +127,22 @@ struct apparmor_audit_data {
                        } fs;
                };
                struct {
-                       const char *name;
-                       long pos;
+                       struct aa_profile *profile;
                        const char *ns;
+                       long pos;
                } iface;
+               int signal;
                struct {
                        int rlim;
                        unsigned long max;
                } rlim;
+               struct {
+                       const char *src_name;
+                       const char *type;
+                       const char *trans;
+                       const char *data;
+                       unsigned long flags;
+               } mnt;
        };
 };
 
index 24c5976d6143fee013882a92b56bc96ec8105429..ac9862ff7cdf5d7bed9a365a30e8d62c1cfc0c1f 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/binfmts.h>
 #include <linux/types.h>
 
+#include "label.h"
+
 #ifndef __AA_DOMAIN_H
 #define __AA_DOMAIN_H
 
@@ -29,6 +31,9 @@ struct aa_domain {
 #define AA_CHANGE_ONEXEC  4
 #define AA_CHANGE_STACK 8
 
+struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
+                               const char **name);
+
 int apparmor_bprm_set_creds(struct linux_binprm *bprm);
 
 void aa_free_domain_entries(struct aa_domain *domain);
index 656fdb81c8a0a08836b12ed614334222c0648212..5ffc218d1e748aebb6aa777ad129976e87752ddd 100644 (file)
@@ -27,8 +27,14 @@ struct aa_profile;
 
 #define AA_PTRACE_PERM_MASK (AA_PTRACE_READ | AA_PTRACE_TRACE | \
                             AA_MAY_BE_READ | AA_MAY_BE_TRACED)
+#define AA_SIGNAL_PERM_MASK (MAY_READ | MAY_WRITE)
+
+#define AA_SFS_SIG_MASK "hup int quit ill trap abrt bus fpe kill usr1 " \
+       "segv usr2 pipe alrm term stkflt chld cont stop stp ttin ttou urg " \
+       "xcpu xfsz vtalrm prof winch io pwr sys emt lost"
 
 int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
                  u32 request);
+int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig);
 
 #endif /* __AA_IPC_H */
index 9a283b722755786a2e213e591323d101d7b30816..af22dcbbcb8aa165d16d1769aba7e92eb5965ada 100644 (file)
@@ -310,6 +310,7 @@ bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp);
 #define FLAG_SHOW_MODE 1
 #define FLAG_VIEW_SUBNS 2
 #define FLAG_HIDDEN_UNCONFINED 4
+#define FLAG_ABS_ROOT 8
 int aa_label_snxprint(char *str, size_t size, struct aa_ns *view,
                      struct aa_label *label, int flags);
 int aa_label_asxprint(char **strp, struct aa_ns *ns, struct aa_label *label,
diff --git a/security/apparmor/include/mount.h b/security/apparmor/include/mount.h
new file mode 100644 (file)
index 0000000..25d6067
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor file mediation function definitions.
+ *
+ * Copyright 2017 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_MOUNT_H
+#define __AA_MOUNT_H
+
+#include <linux/fs.h>
+#include <linux/path.h>
+
+#include "domain.h"
+#include "policy.h"
+
+/* mount perms */
+#define AA_MAY_PIVOTROOT       0x01
+#define AA_MAY_MOUNT           0x02
+#define AA_MAY_UMOUNT          0x04
+#define AA_AUDIT_DATA          0x40
+#define AA_MNT_CONT_MATCH      0x40
+
+#define AA_MS_IGNORE_MASK (MS_KERNMOUNT | MS_NOSEC | MS_ACTIVE | MS_BORN)
+
+int aa_remount(struct aa_label *label, const struct path *path,
+              unsigned long flags, void *data);
+
+int aa_bind_mount(struct aa_label *label, const struct path *path,
+                 const char *old_name, unsigned long flags);
+
+
+int aa_mount_change_type(struct aa_label *label, const struct path *path,
+                        unsigned long flags);
+
+int aa_move_mount(struct aa_label *label, const struct path *path,
+                 const char *old_name);
+
+int aa_new_mount(struct aa_label *label, const char *dev_name,
+                const struct path *path, const char *type, unsigned long flags,
+                void *data);
+
+int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags);
+
+int aa_pivotroot(struct aa_label *label, const struct path *old_path,
+                const struct path *new_path);
+
+#endif /* __AA_MOUNT_H */
diff --git a/security/apparmor/include/sig_names.h b/security/apparmor/include/sig_names.h
new file mode 100644 (file)
index 0000000..92e62fe
--- /dev/null
@@ -0,0 +1,98 @@
+#include <linux/signal.h>
+
+#define SIGUNKNOWN 0
+#define MAXMAPPED_SIG 35
+/* provide a mapping of arch signal to internal signal # for mediation
+ * those that are always an alias SIGCLD for SIGCLHD and SIGPOLL for SIGIO
+ * map to the same entry those that may/or may not get a separate entry
+ */
+static const int sig_map[MAXMAPPED_SIG] = {
+       [0] = MAXMAPPED_SIG,    /* existence test */
+       [SIGHUP] = 1,
+       [SIGINT] = 2,
+       [SIGQUIT] = 3,
+       [SIGILL] = 4,
+       [SIGTRAP] = 5,          /* -, 5, - */
+       [SIGABRT] = 6,          /*  SIGIOT: -, 6, - */
+       [SIGBUS] = 7,           /* 10, 7, 10 */
+       [SIGFPE] = 8,
+       [SIGKILL] = 9,
+       [SIGUSR1] = 10,         /* 30, 10, 16 */
+       [SIGSEGV] = 11,
+       [SIGUSR2] = 12,         /* 31, 12, 17 */
+       [SIGPIPE] = 13,
+       [SIGALRM] = 14,
+       [SIGTERM] = 15,
+#ifdef SIGSTKFLT
+       [SIGSTKFLT] = 16,       /* -, 16, - */
+#endif
+       [SIGCHLD] = 17,         /* 20, 17, 18.  SIGCHLD -, -, 18 */
+       [SIGCONT] = 18,         /* 19, 18, 25 */
+       [SIGSTOP] = 19,         /* 17, 19, 23 */
+       [SIGTSTP] = 20,         /* 18, 20, 24 */
+       [SIGTTIN] = 21,         /* 21, 21, 26 */
+       [SIGTTOU] = 22,         /* 22, 22, 27 */
+       [SIGURG] = 23,          /* 16, 23, 21 */
+       [SIGXCPU] = 24,         /* 24, 24, 30 */
+       [SIGXFSZ] = 25,         /* 25, 25, 31 */
+       [SIGVTALRM] = 26,       /* 26, 26, 28 */
+       [SIGPROF] = 27,         /* 27, 27, 29 */
+       [SIGWINCH] = 28,        /* 28, 28, 20 */
+       [SIGIO] = 29,           /* SIGPOLL: 23, 29, 22 */
+       [SIGPWR] = 30,          /* 29, 30, 19.  SIGINFO 29, -, - */
+#ifdef SIGSYS
+       [SIGSYS] = 31,          /* 12, 31, 12. often SIG LOST/UNUSED */
+#endif
+#ifdef SIGEMT
+       [SIGEMT] = 32,          /* 7, - , 7 */
+#endif
+#if defined(SIGLOST) && SIGPWR != SIGLOST              /* sparc */
+       [SIGLOST] = 33,         /* unused on Linux */
+#endif
+#if defined(SIGUNUSED) && \
+    defined(SIGLOST) && defined(SIGSYS) && SIGLOST != SIGSYS
+       [SIGUNUSED] = 34,       /* -, 31, - */
+#endif
+};
+
+/* this table is ordered post sig_map[sig] mapping */
+static const char *const sig_names[MAXMAPPED_SIG + 1] = {
+       "unknown",
+       "hup",
+       "int",
+       "quit",
+       "ill",
+       "trap",
+       "abrt",
+       "bus",
+       "fpe",
+       "kill",
+       "usr1",
+       "segv",
+       "usr2",
+       "pipe",
+       "alrm",
+       "term",
+       "stkflt",
+       "chld",
+       "cont",
+       "stop",
+       "stp",
+       "ttin",
+       "ttou",
+       "urg",
+       "xcpu",
+       "xfsz",
+       "vtalrm",
+       "prof",
+       "winch",
+       "io",
+       "pwr",
+       "sys",
+       "emt",
+       "lost",
+       "unused",
+
+       "exists",       /* always last existence test mapped to MAXMAPPED_SIG */
+};
+
index 11e66b5bbc4266c7c9b0f4fa47d13afdf709b47f..66fb9ede9447adc71e3fd27a0b90d306b095c0fe 100644 (file)
@@ -20,6 +20,7 @@
 #include "include/context.h"
 #include "include/policy.h"
 #include "include/ipc.h"
+#include "include/sig_names.h"
 
 /**
  * audit_ptrace_mask - convert mask to permission string
@@ -121,3 +122,101 @@ int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
 }
 
 
+static inline int map_signal_num(int sig)
+{
+       if (sig > SIGRTMAX)
+               return SIGUNKNOWN;
+       else if (sig >= SIGRTMIN)
+               return sig - SIGRTMIN + 128;    /* rt sigs mapped to 128 */
+       else if (sig <= MAXMAPPED_SIG)
+               return sig_map[sig];
+       return SIGUNKNOWN;
+}
+
+/**
+ * audit_file_mask - convert mask to permission string
+ * @buffer: buffer to write string to (NOT NULL)
+ * @mask: permission mask to convert
+ */
+static void audit_signal_mask(struct audit_buffer *ab, u32 mask)
+{
+       if (mask & MAY_READ)
+               audit_log_string(ab, "receive");
+       if (mask & MAY_WRITE)
+               audit_log_string(ab, "send");
+}
+
+/**
+ * audit_cb - call back for signal specific audit fields
+ * @ab: audit_buffer  (NOT NULL)
+ * @va: audit struct to audit values of  (NOT NULL)
+ */
+static void audit_signal_cb(struct audit_buffer *ab, void *va)
+{
+       struct common_audit_data *sa = va;
+
+       if (aad(sa)->request & AA_SIGNAL_PERM_MASK) {
+               audit_log_format(ab, " requested_mask=");
+               audit_signal_mask(ab, aad(sa)->request);
+               if (aad(sa)->denied & AA_SIGNAL_PERM_MASK) {
+                       audit_log_format(ab, " denied_mask=");
+                       audit_signal_mask(ab, aad(sa)->denied);
+               }
+       }
+       if (aad(sa)->signal <= MAXMAPPED_SIG)
+               audit_log_format(ab, " signal=%s", sig_names[aad(sa)->signal]);
+       else
+               audit_log_format(ab, " signal=rtmin+%d",
+                                aad(sa)->signal - 128);
+       audit_log_format(ab, " peer=");
+       aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+                       FLAGS_NONE, GFP_ATOMIC);
+}
+
+/* TODO: update to handle compound name&name2, conditionals */
+static void profile_match_signal(struct aa_profile *profile, const char *label,
+                                int signal, struct aa_perms *perms)
+{
+       unsigned int state;
+
+       /* TODO: secondary cache check <profile, profile, perm> */
+       state = aa_dfa_next(profile->policy.dfa,
+                           profile->policy.start[AA_CLASS_SIGNAL],
+                           signal);
+       state = aa_dfa_match(profile->policy.dfa, state, label);
+       aa_compute_perms(profile->policy.dfa, state, perms);
+}
+
+static int profile_signal_perm(struct aa_profile *profile,
+                              struct aa_profile *peer, u32 request,
+                              struct common_audit_data *sa)
+{
+       struct aa_perms perms;
+
+       if (profile_unconfined(profile) ||
+           !PROFILE_MEDIATES(profile, AA_CLASS_SIGNAL))
+               return 0;
+
+       aad(sa)->peer = &peer->label;
+       profile_match_signal(profile, peer->base.hname, aad(sa)->signal,
+                            &perms);
+       aa_apply_modes_to_perms(profile, &perms);
+       return aa_check_perms(profile, &perms, request, sa, audit_signal_cb);
+}
+
+static int aa_signal_cross_perm(struct aa_profile *sender,
+                               struct aa_profile *target,
+                               struct common_audit_data *sa)
+{
+       return xcheck(profile_signal_perm(sender, target, MAY_WRITE, sa),
+                     profile_signal_perm(target, sender, MAY_READ, sa));
+}
+
+int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig)
+{
+       DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_SIGNAL);
+
+       aad(&sa)->signal = map_signal_num(sig);
+       return xcheck_labels_profiles(sender, target, aa_signal_cross_perm,
+                                     &sa);
+}
index e052eaba1cf6ccb003c1e3ae364798bc44b1d292..c5b99b954580c940d3cc3659a48c10b22752c1f0 100644 (file)
@@ -49,7 +49,7 @@ static void free_proxy(struct aa_proxy *proxy)
                /* p->label will not updated any more as p is dead */
                aa_put_label(rcu_dereference_protected(proxy->label, true));
                memset(proxy, 0, sizeof(*proxy));
-               proxy->label = (struct aa_label *) PROXY_POISON;
+               RCU_INIT_POINTER(proxy->label, (struct aa_label *)PROXY_POISON);
                kfree(proxy);
        }
 }
@@ -1450,9 +1450,11 @@ bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp)
  * cached label name is present and visible
  * @label->hname only exists if label is namespace hierachical
  */
-static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label)
+static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label,
+                                  int flags)
 {
-       if (label->hname && labels_ns(label) == ns)
+       if (label->hname && (!ns || labels_ns(label) == ns) &&
+           !(flags & ~FLAG_SHOW_MODE))
                return true;
 
        return false;
@@ -1495,7 +1497,7 @@ static int aa_profile_snxprint(char *str, size_t size, struct aa_ns *view,
                view = profiles_ns(profile);
 
        if (view != profile->ns &&
-           (!prev_ns || (prev_ns && *prev_ns != profile->ns))) {
+           (!prev_ns || (*prev_ns != profile->ns))) {
                if (prev_ns)
                        *prev_ns = profile->ns;
                ns_name = aa_ns_name(view, profile->ns,
@@ -1605,8 +1607,13 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
        AA_BUG(!str && size != 0);
        AA_BUG(!label);
 
-       if (!ns)
+       if (flags & FLAG_ABS_ROOT) {
+               ns = root_ns;
+               len = snprintf(str, size, "=");
+               update_for_len(total, len, size, str);
+       } else if (!ns) {
                ns = labels_ns(label);
+       }
 
        label_for_each(i, label, profile) {
                if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) {
@@ -1710,10 +1717,8 @@ void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns,
        AA_BUG(!ab);
        AA_BUG(!label);
 
-       if (!ns)
-               ns = labels_ns(label);
-
-       if (!use_label_hname(ns, label) || display_mode(ns, label, flags)) {
+       if (!use_label_hname(ns, label, flags) ||
+           display_mode(ns, label, flags)) {
                len  = aa_label_asxprint(&name, ns, label, flags, gfp);
                if (len == -1) {
                        AA_DEBUG("label print error");
@@ -1738,10 +1743,7 @@ void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
        AA_BUG(!f);
        AA_BUG(!label);
 
-       if (!ns)
-               ns = labels_ns(label);
-
-       if (!use_label_hname(ns, label)) {
+       if (!use_label_hname(ns, label, flags)) {
                char *str;
                int len;
 
@@ -1764,10 +1766,7 @@ void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
 {
        AA_BUG(!label);
 
-       if (!ns)
-               ns = labels_ns(label);
-
-       if (!use_label_hname(ns, label)) {
+       if (!use_label_hname(ns, label, flags)) {
                char *str;
                int len;
 
@@ -1874,6 +1873,9 @@ struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
                if (*str == '&')
                        str++;
        }
+       if (*str == '=')
+               base = &root_ns->unconfined->label;
+
        error = vec_setup(profile, vec, len, gfp);
        if (error)
                return ERR_PTR(error);
index 7a82c0f61452d3fe3e2821bbcbda29a643572500..1346ee5be04f13e4bb64658755605852b8789ab5 100644 (file)
@@ -38,6 +38,7 @@
 #include "include/policy.h"
 #include "include/policy_ns.h"
 #include "include/procattr.h"
+#include "include/mount.h"
 
 /* Flag indicating whether initialization completed */
 int apparmor_initialized;
@@ -511,6 +512,65 @@ static int apparmor_file_mprotect(struct vm_area_struct *vma,
                           !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0);
 }
 
+static int apparmor_sb_mount(const char *dev_name, const struct path *path,
+                            const char *type, unsigned long flags, void *data)
+{
+       struct aa_label *label;
+       int error = 0;
+
+       /* Discard magic */
+       if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
+               flags &= ~MS_MGC_MSK;
+
+       flags &= ~AA_MS_IGNORE_MASK;
+
+       label = __begin_current_label_crit_section();
+       if (!unconfined(label)) {
+               if (flags & MS_REMOUNT)
+                       error = aa_remount(label, path, flags, data);
+               else if (flags & MS_BIND)
+                       error = aa_bind_mount(label, path, dev_name, flags);
+               else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE |
+                                 MS_UNBINDABLE))
+                       error = aa_mount_change_type(label, path, flags);
+               else if (flags & MS_MOVE)
+                       error = aa_move_mount(label, path, dev_name);
+               else
+                       error = aa_new_mount(label, dev_name, path, type,
+                                            flags, data);
+       }
+       __end_current_label_crit_section(label);
+
+       return error;
+}
+
+static int apparmor_sb_umount(struct vfsmount *mnt, int flags)
+{
+       struct aa_label *label;
+       int error = 0;
+
+       label = __begin_current_label_crit_section();
+       if (!unconfined(label))
+               error = aa_umount(label, mnt, flags);
+       __end_current_label_crit_section(label);
+
+       return error;
+}
+
+static int apparmor_sb_pivotroot(const struct path *old_path,
+                                const struct path *new_path)
+{
+       struct aa_label *label;
+       int error = 0;
+
+       label = aa_get_current_label();
+       if (!unconfined(label))
+               error = aa_pivotroot(label, old_path, new_path);
+       aa_put_label(label);
+
+       return error;
+}
+
 static int apparmor_getprocattr(struct task_struct *task, char *name,
                                char **value)
 {
@@ -656,12 +716,36 @@ static int apparmor_task_setrlimit(struct task_struct *task,
        return error;
 }
 
+static int apparmor_task_kill(struct task_struct *target, struct siginfo *info,
+                             int sig, u32 secid)
+{
+       struct aa_label *cl, *tl;
+       int error;
+
+       if (secid)
+               /* TODO: after secid to label mapping is done.
+                *  Dealing with USB IO specific behavior
+                */
+               return 0;
+       cl = __begin_current_label_crit_section();
+       tl = aa_get_task_label(target);
+       error = aa_may_signal(cl, tl, sig);
+       aa_put_label(tl);
+       __end_current_label_crit_section(cl);
+
+       return error;
+}
+
 static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
        LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
        LSM_HOOK_INIT(capget, apparmor_capget),
        LSM_HOOK_INIT(capable, apparmor_capable),
 
+       LSM_HOOK_INIT(sb_mount, apparmor_sb_mount),
+       LSM_HOOK_INIT(sb_umount, apparmor_sb_umount),
+       LSM_HOOK_INIT(sb_pivotroot, apparmor_sb_pivotroot),
+
        LSM_HOOK_INIT(path_link, apparmor_path_link),
        LSM_HOOK_INIT(path_unlink, apparmor_path_unlink),
        LSM_HOOK_INIT(path_symlink, apparmor_path_symlink),
@@ -696,6 +780,7 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(bprm_committed_creds, apparmor_bprm_committed_creds),
 
        LSM_HOOK_INIT(task_setrlimit, apparmor_task_setrlimit),
+       LSM_HOOK_INIT(task_kill, apparmor_task_kill),
 };
 
 /*
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
new file mode 100644 (file)
index 0000000..82a64b5
--- /dev/null
@@ -0,0 +1,696 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor mediation of mount operations
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2017 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+
+#include "include/apparmor.h"
+#include "include/audit.h"
+#include "include/context.h"
+#include "include/domain.h"
+#include "include/file.h"
+#include "include/match.h"
+#include "include/mount.h"
+#include "include/path.h"
+#include "include/policy.h"
+
+
+static void audit_mnt_flags(struct audit_buffer *ab, unsigned long flags)
+{
+       if (flags & MS_RDONLY)
+               audit_log_format(ab, "ro");
+       else
+               audit_log_format(ab, "rw");
+       if (flags & MS_NOSUID)
+               audit_log_format(ab, ", nosuid");
+       if (flags & MS_NODEV)
+               audit_log_format(ab, ", nodev");
+       if (flags & MS_NOEXEC)
+               audit_log_format(ab, ", noexec");
+       if (flags & MS_SYNCHRONOUS)
+               audit_log_format(ab, ", sync");
+       if (flags & MS_REMOUNT)
+               audit_log_format(ab, ", remount");
+       if (flags & MS_MANDLOCK)
+               audit_log_format(ab, ", mand");
+       if (flags & MS_DIRSYNC)
+               audit_log_format(ab, ", dirsync");
+       if (flags & MS_NOATIME)
+               audit_log_format(ab, ", noatime");
+       if (flags & MS_NODIRATIME)
+               audit_log_format(ab, ", nodiratime");
+       if (flags & MS_BIND)
+               audit_log_format(ab, flags & MS_REC ? ", rbind" : ", bind");
+       if (flags & MS_MOVE)
+               audit_log_format(ab, ", move");
+       if (flags & MS_SILENT)
+               audit_log_format(ab, ", silent");
+       if (flags & MS_POSIXACL)
+               audit_log_format(ab, ", acl");
+       if (flags & MS_UNBINDABLE)
+               audit_log_format(ab, flags & MS_REC ? ", runbindable" :
+                                ", unbindable");
+       if (flags & MS_PRIVATE)
+               audit_log_format(ab, flags & MS_REC ? ", rprivate" :
+                                ", private");
+       if (flags & MS_SLAVE)
+               audit_log_format(ab, flags & MS_REC ? ", rslave" :
+                                ", slave");
+       if (flags & MS_SHARED)
+               audit_log_format(ab, flags & MS_REC ? ", rshared" :
+                                ", shared");
+       if (flags & MS_RELATIME)
+               audit_log_format(ab, ", relatime");
+       if (flags & MS_I_VERSION)
+               audit_log_format(ab, ", iversion");
+       if (flags & MS_STRICTATIME)
+               audit_log_format(ab, ", strictatime");
+       if (flags & MS_NOUSER)
+               audit_log_format(ab, ", nouser");
+}
+
+/**
+ * audit_cb - call back for mount specific audit fields
+ * @ab: audit_buffer  (NOT NULL)
+ * @va: audit struct to audit values of  (NOT NULL)
+ */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+       struct common_audit_data *sa = va;
+
+       if (aad(sa)->mnt.type) {
+               audit_log_format(ab, " fstype=");
+               audit_log_untrustedstring(ab, aad(sa)->mnt.type);
+       }
+       if (aad(sa)->mnt.src_name) {
+               audit_log_format(ab, " srcname=");
+               audit_log_untrustedstring(ab, aad(sa)->mnt.src_name);
+       }
+       if (aad(sa)->mnt.trans) {
+               audit_log_format(ab, " trans=");
+               audit_log_untrustedstring(ab, aad(sa)->mnt.trans);
+       }
+       if (aad(sa)->mnt.flags) {
+               audit_log_format(ab, " flags=\"");
+               audit_mnt_flags(ab, aad(sa)->mnt.flags);
+               audit_log_format(ab, "\"");
+       }
+       if (aad(sa)->mnt.data) {
+               audit_log_format(ab, " options=");
+               audit_log_untrustedstring(ab, aad(sa)->mnt.data);
+       }
+}
+
+/**
+ * audit_mount - handle the auditing of mount operations
+ * @profile: the profile being enforced  (NOT NULL)
+ * @op: operation being mediated (NOT NULL)
+ * @name: name of object being mediated (MAYBE NULL)
+ * @src_name: src_name of object being mediated (MAYBE_NULL)
+ * @type: type of filesystem (MAYBE_NULL)
+ * @trans: name of trans (MAYBE NULL)
+ * @flags: filesystem independent mount flags
+ * @data: filesystem specific mount data (MAYBE NULL)
+ * @request: permissions requested
+ * @perms: the permissions computed for the request (NOT NULL)
+ * @info: extra information message (MAYBE NULL)
+ * @error: 0 if operation allowed else failure error code
+ *
+ * Returns: %0 or error on failure
+ */
+static int audit_mount(struct aa_profile *profile, const char *op,
+                      const char *name, const char *src_name,
+                      const char *type, const char *trans,
+                      unsigned long flags, const void *data, u32 request,
+                      struct aa_perms *perms, const char *info, int error)
+{
+       int audit_type = AUDIT_APPARMOR_AUTO;
+       DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, op);
+
+       if (likely(!error)) {
+               u32 mask = perms->audit;
+
+               if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
+                       mask = 0xffff;
+
+               /* mask off perms that are not being force audited */
+               request &= mask;
+
+               if (likely(!request))
+                       return 0;
+               audit_type = AUDIT_APPARMOR_AUDIT;
+       } else {
+               /* only report permissions that were denied */
+               request = request & ~perms->allow;
+
+               if (request & perms->kill)
+                       audit_type = AUDIT_APPARMOR_KILL;
+
+               /* quiet known rejects, assumes quiet and kill do not overlap */
+               if ((request & perms->quiet) &&
+                   AUDIT_MODE(profile) != AUDIT_NOQUIET &&
+                   AUDIT_MODE(profile) != AUDIT_ALL)
+                       request &= ~perms->quiet;
+
+               if (!request)
+                       return error;
+       }
+
+       aad(&sa)->name = name;
+       aad(&sa)->mnt.src_name = src_name;
+       aad(&sa)->mnt.type = type;
+       aad(&sa)->mnt.trans = trans;
+       aad(&sa)->mnt.flags = flags;
+       if (data && (perms->audit & AA_AUDIT_DATA))
+               aad(&sa)->mnt.data = data;
+       aad(&sa)->info = info;
+       aad(&sa)->error = error;
+
+       return aa_audit(audit_type, profile, &sa, audit_cb);
+}
+
+/**
+ * match_mnt_flags - Do an ordered match on mount flags
+ * @dfa: dfa to match against
+ * @state: state to start in
+ * @flags: mount flags to match against
+ *
+ * Mount flags are encoded as an ordered match. This is done instead of
+ * checking against a simple bitmask, to allow for logical operations
+ * on the flags.
+ *
+ * Returns: next state after flags match
+ */
+static unsigned int match_mnt_flags(struct aa_dfa *dfa, unsigned int state,
+                                   unsigned long flags)
+{
+       unsigned int i;
+
+       for (i = 0; i <= 31 ; ++i) {
+               if ((1 << i) & flags)
+                       state = aa_dfa_next(dfa, state, i + 1);
+       }
+
+       return state;
+}
+
+/**
+ * compute_mnt_perms - compute mount permission associated with @state
+ * @dfa: dfa to match against (NOT NULL)
+ * @state: state match finished in
+ *
+ * Returns: mount permissions
+ */
+static struct aa_perms compute_mnt_perms(struct aa_dfa *dfa,
+                                          unsigned int state)
+{
+       struct aa_perms perms;
+
+       perms.kill = 0;
+       perms.allow = dfa_user_allow(dfa, state);
+       perms.audit = dfa_user_audit(dfa, state);
+       perms.quiet = dfa_user_quiet(dfa, state);
+       perms.xindex = dfa_user_xindex(dfa, state);
+
+       return perms;
+}
+
+static const char * const mnt_info_table[] = {
+       "match succeeded",
+       "failed mntpnt match",
+       "failed srcname match",
+       "failed type match",
+       "failed flags match",
+       "failed data match"
+};
+
+/*
+ * Returns 0 on success else element that match failed in, this is the
+ * index into the mnt_info_table above
+ */
+static int do_match_mnt(struct aa_dfa *dfa, unsigned int start,
+                       const char *mntpnt, const char *devname,
+                       const char *type, unsigned long flags,
+                       void *data, bool binary, struct aa_perms *perms)
+{
+       unsigned int state;
+
+       AA_BUG(!dfa);
+       AA_BUG(!perms);
+
+       state = aa_dfa_match(dfa, start, mntpnt);
+       state = aa_dfa_null_transition(dfa, state);
+       if (!state)
+               return 1;
+
+       if (devname)
+               state = aa_dfa_match(dfa, state, devname);
+       state = aa_dfa_null_transition(dfa, state);
+       if (!state)
+               return 2;
+
+       if (type)
+               state = aa_dfa_match(dfa, state, type);
+       state = aa_dfa_null_transition(dfa, state);
+       if (!state)
+               return 3;
+
+       state = match_mnt_flags(dfa, state, flags);
+       if (!state)
+               return 4;
+       *perms = compute_mnt_perms(dfa, state);
+       if (perms->allow & AA_MAY_MOUNT)
+               return 0;
+
+       /* only match data if not binary and the DFA flags data is expected */
+       if (data && !binary && (perms->allow & AA_MNT_CONT_MATCH)) {
+               state = aa_dfa_null_transition(dfa, state);
+               if (!state)
+                       return 4;
+
+               state = aa_dfa_match(dfa, state, data);
+               if (!state)
+                       return 5;
+               *perms = compute_mnt_perms(dfa, state);
+               if (perms->allow & AA_MAY_MOUNT)
+                       return 0;
+       }
+
+       /* failed at end of flags match */
+       return 4;
+}
+
+
+static int path_flags(struct aa_profile *profile, const struct path *path)
+{
+       AA_BUG(!profile);
+       AA_BUG(!path);
+
+       return profile->path_flags |
+               (S_ISDIR(path->dentry->d_inode->i_mode) ? PATH_IS_DIR : 0);
+}
+
+/**
+ * match_mnt_path_str - handle path matching for mount
+ * @profile: the confining profile
+ * @mntpath: for the mntpnt (NOT NULL)
+ * @buffer: buffer to be used to lookup mntpath
+ * @devname: string for the devname/src_name (MAY BE NULL OR ERRPTR)
+ * @type: string for the dev type (MAYBE NULL)
+ * @flags: mount flags to match
+ * @data: fs mount data (MAYBE NULL)
+ * @binary: whether @data is binary
+ * @devinfo: error str if (IS_ERR(@devname))
+ *
+ * Returns: 0 on success else error
+ */
+static int match_mnt_path_str(struct aa_profile *profile,
+                             const struct path *mntpath, char *buffer,
+                             const char *devname, const char *type,
+                             unsigned long flags, void *data, bool binary,
+                             const char *devinfo)
+{
+       struct aa_perms perms = { };
+       const char *mntpnt = NULL, *info = NULL;
+       int pos, error;
+
+       AA_BUG(!profile);
+       AA_BUG(!mntpath);
+       AA_BUG(!buffer);
+
+       error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer,
+                            &mntpnt, &info, profile->disconnected);
+       if (error)
+               goto audit;
+       if (IS_ERR(devname)) {
+               error = PTR_ERR(devname);
+               devname = NULL;
+               info = devinfo;
+               goto audit;
+       }
+
+       error = -EACCES;
+       pos = do_match_mnt(profile->policy.dfa,
+                          profile->policy.start[AA_CLASS_MOUNT],
+                          mntpnt, devname, type, flags, data, binary, &perms);
+       if (pos) {
+               info = mnt_info_table[pos];
+               goto audit;
+       }
+       error = 0;
+
+audit:
+       return audit_mount(profile, OP_MOUNT, mntpnt, devname, type, NULL,
+                          flags, data, AA_MAY_MOUNT, &perms, info, error);
+}
+
+/**
+ * match_mnt - handle path matching for mount
+ * @profile: the confining profile
+ * @path: path of the mount point (NOT NULL)
+ * @buffer: buffer to be used to lookup mntpath
+ * @devpath: path devname/src_name (MAYBE NULL)
+ * @devbuffer: buffer to be used to lookup devname/src_name
+ * @type: string for the dev type (MAYBE NULL)
+ * @flags: mount flags to match
+ * @data: fs mount data (MAYBE NULL)
+ * @binary: whether @data is binary
+ *
+ * Returns: 0 on success else error
+ */
+static int match_mnt(struct aa_profile *profile, const struct path *path,
+                    char *buffer, struct path *devpath, char *devbuffer,
+                    const char *type, unsigned long flags, void *data,
+                    bool binary)
+{
+       const char *devname = NULL, *info = NULL;
+       int error = -EACCES;
+
+       AA_BUG(!profile);
+       AA_BUG(devpath && !devbuffer);
+
+       if (devpath) {
+               error = aa_path_name(devpath, path_flags(profile, devpath),
+                                    devbuffer, &devname, &info,
+                                    profile->disconnected);
+               if (error)
+                       devname = ERR_PTR(error);
+       }
+
+       return match_mnt_path_str(profile, path, buffer, devname, type, flags,
+                                 data, binary, info);
+}
+
+int aa_remount(struct aa_label *label, const struct path *path,
+              unsigned long flags, void *data)
+{
+       struct aa_profile *profile;
+       char *buffer = NULL;
+       bool binary;
+       int error;
+
+       AA_BUG(!label);
+       AA_BUG(!path);
+
+       binary = path->dentry->d_sb->s_type->fs_flags & FS_BINARY_MOUNTDATA;
+
+       get_buffers(buffer);
+       error = fn_for_each_confined(label, profile,
+                       match_mnt(profile, path, buffer, NULL, NULL, NULL,
+                                 flags, data, binary));
+       put_buffers(buffer);
+
+       return error;
+}
+
+int aa_bind_mount(struct aa_label *label, const struct path *path,
+                 const char *dev_name, unsigned long flags)
+{
+       struct aa_profile *profile;
+       char *buffer = NULL, *old_buffer = NULL;
+       struct path old_path;
+       int error;
+
+       AA_BUG(!label);
+       AA_BUG(!path);
+
+       if (!dev_name || !*dev_name)
+               return -EINVAL;
+
+       flags &= MS_REC | MS_BIND;
+
+       error = kern_path(dev_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
+       if (error)
+               return error;
+
+       get_buffers(buffer, old_buffer);
+       error = fn_for_each_confined(label, profile,
+                       match_mnt(profile, path, buffer, &old_path, old_buffer,
+                                 NULL, flags, NULL, false));
+       put_buffers(buffer, old_buffer);
+       path_put(&old_path);
+
+       return error;
+}
+
+int aa_mount_change_type(struct aa_label *label, const struct path *path,
+                        unsigned long flags)
+{
+       struct aa_profile *profile;
+       char *buffer = NULL;
+       int error;
+
+       AA_BUG(!label);
+       AA_BUG(!path);
+
+       /* These are the flags allowed by do_change_type() */
+       flags &= (MS_REC | MS_SILENT | MS_SHARED | MS_PRIVATE | MS_SLAVE |
+                 MS_UNBINDABLE);
+
+       get_buffers(buffer);
+       error = fn_for_each_confined(label, profile,
+                       match_mnt(profile, path, buffer, NULL, NULL, NULL,
+                                 flags, NULL, false));
+       put_buffers(buffer);
+
+       return error;
+}
+
+int aa_move_mount(struct aa_label *label, const struct path *path,
+                 const char *orig_name)
+{
+       struct aa_profile *profile;
+       char *buffer = NULL, *old_buffer = NULL;
+       struct path old_path;
+       int error;
+
+       AA_BUG(!label);
+       AA_BUG(!path);
+
+       if (!orig_name || !*orig_name)
+               return -EINVAL;
+
+       error = kern_path(orig_name, LOOKUP_FOLLOW, &old_path);
+       if (error)
+               return error;
+
+       get_buffers(buffer, old_buffer);
+       error = fn_for_each_confined(label, profile,
+                       match_mnt(profile, path, buffer, &old_path, old_buffer,
+                                 NULL, MS_MOVE, NULL, false));
+       put_buffers(buffer, old_buffer);
+       path_put(&old_path);
+
+       return error;
+}
+
+int aa_new_mount(struct aa_label *label, const char *dev_name,
+                const struct path *path, const char *type, unsigned long flags,
+                void *data)
+{
+       struct aa_profile *profile;
+       char *buffer = NULL, *dev_buffer = NULL;
+       bool binary = true;
+       int error;
+       int requires_dev = 0;
+       struct path tmp_path, *dev_path = NULL;
+
+       AA_BUG(!label);
+       AA_BUG(!path);
+
+       if (type) {
+               struct file_system_type *fstype;
+
+               fstype = get_fs_type(type);
+               if (!fstype)
+                       return -ENODEV;
+               binary = fstype->fs_flags & FS_BINARY_MOUNTDATA;
+               requires_dev = fstype->fs_flags & FS_REQUIRES_DEV;
+               put_filesystem(fstype);
+
+               if (requires_dev) {
+                       if (!dev_name || !*dev_name)
+                               return -ENOENT;
+
+                       error = kern_path(dev_name, LOOKUP_FOLLOW, &tmp_path);
+                       if (error)
+                               return error;
+                       dev_path = &tmp_path;
+               }
+       }
+
+       get_buffers(buffer, dev_buffer);
+       if (dev_path) {
+               error = fn_for_each_confined(label, profile,
+                       match_mnt(profile, path, buffer, dev_path, dev_buffer,
+                                 type, flags, data, binary));
+       } else {
+               error = fn_for_each_confined(label, profile,
+                       match_mnt_path_str(profile, path, buffer, dev_name,
+                                          type, flags, data, binary, NULL));
+       }
+       put_buffers(buffer, dev_buffer);
+       if (dev_path)
+               path_put(dev_path);
+
+       return error;
+}
+
+static int profile_umount(struct aa_profile *profile, struct path *path,
+                         char *buffer)
+{
+       struct aa_perms perms = { };
+       const char *name = NULL, *info = NULL;
+       unsigned int state;
+       int error;
+
+       AA_BUG(!profile);
+       AA_BUG(!path);
+
+       error = aa_path_name(path, path_flags(profile, path), buffer, &name,
+                            &info, profile->disconnected);
+       if (error)
+               goto audit;
+
+       state = aa_dfa_match(profile->policy.dfa,
+                            profile->policy.start[AA_CLASS_MOUNT],
+                            name);
+       perms = compute_mnt_perms(profile->policy.dfa, state);
+       if (AA_MAY_UMOUNT & ~perms.allow)
+               error = -EACCES;
+
+audit:
+       return audit_mount(profile, OP_UMOUNT, name, NULL, NULL, NULL, 0, NULL,
+                          AA_MAY_UMOUNT, &perms, info, error);
+}
+
+int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
+{
+       struct aa_profile *profile;
+       char *buffer = NULL;
+       int error;
+       struct path path = { .mnt = mnt, .dentry = mnt->mnt_root };
+
+       AA_BUG(!label);
+       AA_BUG(!mnt);
+
+       get_buffers(buffer);
+       error = fn_for_each_confined(label, profile,
+                       profile_umount(profile, &path, buffer));
+       put_buffers(buffer);
+
+       return error;
+}
+
+/* helper fn for transition on pivotroot
+ *
+ * Returns: label for transition or ERR_PTR. Does not return NULL
+ */
+static struct aa_label *build_pivotroot(struct aa_profile *profile,
+                                       const struct path *new_path,
+                                       char *new_buffer,
+                                       const struct path *old_path,
+                                       char *old_buffer)
+{
+       const char *old_name, *new_name = NULL, *info = NULL;
+       const char *trans_name = NULL;
+       struct aa_perms perms = { };
+       unsigned int state;
+       int error;
+
+       AA_BUG(!profile);
+       AA_BUG(!new_path);
+       AA_BUG(!old_path);
+
+       if (profile_unconfined(profile))
+               return aa_get_newest_label(&profile->label);
+
+       error = aa_path_name(old_path, path_flags(profile, old_path),
+                            old_buffer, &old_name, &info,
+                            profile->disconnected);
+       if (error)
+               goto audit;
+       error = aa_path_name(new_path, path_flags(profile, new_path),
+                            new_buffer, &new_name, &info,
+                            profile->disconnected);
+       if (error)
+               goto audit;
+
+       error = -EACCES;
+       state = aa_dfa_match(profile->policy.dfa,
+                            profile->policy.start[AA_CLASS_MOUNT],
+                            new_name);
+       state = aa_dfa_null_transition(profile->policy.dfa, state);
+       state = aa_dfa_match(profile->policy.dfa, state, old_name);
+       perms = compute_mnt_perms(profile->policy.dfa, state);
+
+       if (AA_MAY_PIVOTROOT & perms.allow)
+               error = 0;
+
+audit:
+       error = audit_mount(profile, OP_PIVOTROOT, new_name, old_name,
+                           NULL, trans_name, 0, NULL, AA_MAY_PIVOTROOT,
+                           &perms, info, error);
+       if (error)
+               return ERR_PTR(error);
+
+       return aa_get_newest_label(&profile->label);
+}
+
+int aa_pivotroot(struct aa_label *label, const struct path *old_path,
+                const struct path *new_path)
+{
+       struct aa_profile *profile;
+       struct aa_label *target = NULL;
+       char *old_buffer = NULL, *new_buffer = NULL, *info = NULL;
+       int error;
+
+       AA_BUG(!label);
+       AA_BUG(!old_path);
+       AA_BUG(!new_path);
+
+       get_buffers(old_buffer, new_buffer);
+       target = fn_label_build(label, profile, GFP_ATOMIC,
+                       build_pivotroot(profile, new_path, new_buffer,
+                                       old_path, old_buffer));
+       if (!target) {
+               info = "label build failed";
+               error = -ENOMEM;
+               goto fail;
+       } else if (!IS_ERR(target)) {
+               error = aa_replace_current_label(target);
+               if (error) {
+                       /* TODO: audit target */
+                       aa_put_label(target);
+                       goto out;
+               }
+       } else
+               /* already audited error */
+               error = PTR_ERR(target);
+out:
+       put_buffers(old_buffer, new_buffer);
+
+       return error;
+
+fail:
+       /* TODO: add back in auditing of new_name and old_name */
+       error = fn_for_each(label, profile,
+                       audit_mount(profile, OP_PIVOTROOT, NULL /*new_name */,
+                                   NULL /* old_name */,
+                                   NULL, NULL,
+                                   0, NULL, AA_MAY_PIVOTROOT, &nullperms, info,
+                                   error));
+       goto out;
+}
index 244ea4a4a8f0f73fdfacccd8c49cd55e1f7c695e..4243b0c3f0e4acc6d66c70ea878f32d548bebdd4 100644 (file)
@@ -289,85 +289,6 @@ fail:
        return NULL;
 }
 
-/**
- * aa_new_null_profile - create or find a null-X learning profile
- * @parent: profile that caused this profile to be created (NOT NULL)
- * @hat: true if the null- learning profile is a hat
- * @base: name to base the null profile off of
- * @gfp: type of allocation
- *
- * Find/Create a null- complain mode profile used in learning mode.  The
- * name of the profile is unique and follows the format of parent//null-XXX.
- * where XXX is based on the @name or if that fails or is not supplied
- * a unique number
- *
- * null profiles are added to the profile list but the list does not
- * hold a count on them so that they are automatically released when
- * not in use.
- *
- * Returns: new refcounted profile else NULL on failure
- */
-struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat,
-                                      const char *base, gfp_t gfp)
-{
-       struct aa_profile *profile;
-       char *name;
-
-       AA_BUG(!parent);
-
-       if (base) {
-               name = kmalloc(strlen(parent->base.hname) + 8 + strlen(base),
-                              gfp);
-               if (name) {
-                       sprintf(name, "%s//null-%s", parent->base.hname, base);
-                       goto name;
-               }
-               /* fall through to try shorter uniq */
-       }
-
-       name = kmalloc(strlen(parent->base.hname) + 2 + 7 + 8, gfp);
-       if (!name)
-               return NULL;
-       sprintf(name, "%s//null-%x", parent->base.hname,
-               atomic_inc_return(&parent->ns->uniq_null));
-
-name:
-       /* lookup to see if this is a dup creation */
-       profile = aa_find_child(parent, basename(name));
-       if (profile)
-               goto out;
-
-       profile = aa_alloc_profile(name, NULL, gfp);
-       if (!profile)
-               goto fail;
-
-       profile->mode = APPARMOR_COMPLAIN;
-       profile->label.flags |= FLAG_NULL;
-       if (hat)
-               profile->label.flags |= FLAG_HAT;
-       profile->path_flags = parent->path_flags;
-
-       /* released on free_profile */
-       rcu_assign_pointer(profile->parent, aa_get_profile(parent));
-       profile->ns = aa_get_ns(parent->ns);
-       profile->file.dfa = aa_get_dfa(nulldfa);
-       profile->policy.dfa = aa_get_dfa(nulldfa);
-
-       mutex_lock(&profile->ns->lock);
-       __add_profile(&parent->base.profiles, profile);
-       mutex_unlock(&profile->ns->lock);
-
-       /* refcount released by caller */
-out:
-       kfree(name);
-
-       return profile;
-
-fail:
-       aa_free_profile(profile);
-       return NULL;
-}
-
 /* TODO: profile accounting - setup in remove */
 
 /**
@@ -558,6 +479,93 @@ struct aa_profile *aa_fqlookupn_profile(struct aa_label *base,
        return profile;
 }
 
+/**
+ * aa_new_null_profile - create or find a null-X learning profile
+ * @parent: profile that caused this profile to be created (NOT NULL)
+ * @hat: true if the null- learning profile is a hat
+ * @base: name to base the null profile off of
+ * @gfp: type of allocation
+ *
+ * Find/Create a null- complain mode profile used in learning mode.  The
+ * name of the profile is unique and follows the format of parent//null-XXX.
+ * where XXX is based on the @name or if that fails or is not supplied
+ * a unique number
+ *
+ * null profiles are added to the profile list but the list does not
+ * hold a count on them so that they are automatically released when
+ * not in use.
+ *
+ * Returns: new refcounted profile else NULL on failure
+ */
+struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat,
+                                      const char *base, gfp_t gfp)
+{
+       struct aa_profile *p, *profile;
+       const char *bname;
+       char *name;
+
+       AA_BUG(!parent);
+
+       if (base) {
+               name = kmalloc(strlen(parent->base.hname) + 8 + strlen(base),
+                              gfp);
+               if (name) {
+                       sprintf(name, "%s//null-%s", parent->base.hname, base);
+                       goto name;
+               }
+               /* fall through to try shorter uniq */
+       }
+
+       name = kmalloc(strlen(parent->base.hname) + 2 + 7 + 8, gfp);
+       if (!name)
+               return NULL;
+       sprintf(name, "%s//null-%x", parent->base.hname,
+               atomic_inc_return(&parent->ns->uniq_null));
+
+name:
+       /* lookup to see if this is a dup creation */
+       bname = basename(name);
+       profile = aa_find_child(parent, bname);
+       if (profile)
+               goto out;
+
+       profile = aa_alloc_profile(name, NULL, gfp);
+       if (!profile)
+               goto fail;
+
+       profile->mode = APPARMOR_COMPLAIN;
+       profile->label.flags |= FLAG_NULL;
+       if (hat)
+               profile->label.flags |= FLAG_HAT;
+       profile->path_flags = parent->path_flags;
+
+       /* released on free_profile */
+       rcu_assign_pointer(profile->parent, aa_get_profile(parent));
+       profile->ns = aa_get_ns(parent->ns);
+       profile->file.dfa = aa_get_dfa(nulldfa);
+       profile->policy.dfa = aa_get_dfa(nulldfa);
+
+       mutex_lock(&profile->ns->lock);
+       p = __find_child(&parent->base.profiles, bname);
+       if (p) {
+               aa_free_profile(profile);
+               profile = aa_get_profile(p);
+       } else {
+               __add_profile(&parent->base.profiles, profile);
+       }
+       mutex_unlock(&profile->ns->lock);
+
+       /* refcount released by caller */
+out:
+       kfree(name);
+
+       return profile;
+
+fail:
+       aa_free_profile(profile);
+       return NULL;
+}
+
 /**
  * replacement_allowed - test to see if replacement is allowed
  * @profile: profile to test if it can be replaced  (MAYBE NULL)
index 351d3bab3a3d2565aa970fe7011453c8fb721ea4..62a3589c62ab624156c0c01eaa8cadd72276866a 100644 (file)
@@ -112,6 +112,8 @@ static struct aa_ns *alloc_ns(const char *prefix, const char *name)
        ns->unconfined->label.flags |= FLAG_IX_ON_NAME_ERROR |
                FLAG_IMMUTIBLE | FLAG_NS_COUNT | FLAG_UNCONFINED;
        ns->unconfined->mode = APPARMOR_UNCONFINED;
+       ns->unconfined->file.dfa = aa_get_dfa(nulldfa);
+       ns->unconfined->policy.dfa = aa_get_dfa(nulldfa);
 
        /* ns and ns->unconfined share ns->unconfined refcount */
        ns->unconfined->ns = ns;
index c600f4dd1783252a1fc429379cb1cca8b9728c9b..4ede87c30f8b890a63e1aaae0a75a9ed6ba1045c 100644 (file)
@@ -85,9 +85,9 @@ static void audit_cb(struct audit_buffer *ab, void *va)
                audit_log_format(ab, " ns=");
                audit_log_untrustedstring(ab, aad(sa)->iface.ns);
        }
-       if (aad(sa)->iface.name) {
+       if (aad(sa)->name) {
                audit_log_format(ab, " name=");
-               audit_log_untrustedstring(ab, aad(sa)->iface.name);
+               audit_log_untrustedstring(ab, aad(sa)->name);
        }
        if (aad(sa)->iface.pos)
                audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
@@ -114,9 +114,9 @@ static int audit_iface(struct aa_profile *new, const char *ns_name,
                aad(&sa)->iface.pos = e->pos - e->start;
        aad(&sa)->iface.ns = ns_name;
        if (new)
-               aad(&sa)->iface.name = new->base.hname;
+               aad(&sa)->name = new->base.hname;
        else
-               aad(&sa)->iface.name = name;
+               aad(&sa)->name = name;
        aad(&sa)->info = info;
        aad(&sa)->error = error;
 
@@ -448,7 +448,7 @@ fail:
  */
 static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
 {
-       void *pos = e->pos;
+       void *saved_pos = e->pos;
 
        /* exec table is optional */
        if (unpack_nameX(e, AA_STRUCT, "xtable")) {
@@ -511,7 +511,7 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
 
 fail:
        aa_free_domain_entries(&profile->file.trans);
-       e->pos = pos;
+       e->pos = saved_pos;
        return 0;
 }
 
@@ -583,6 +583,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
 {
        struct aa_profile *profile = NULL;
        const char *tmpname, *tmpns = NULL, *name = NULL;
+       const char *info = "failed to unpack profile";
        size_t ns_len;
        struct rhashtable_params params = { 0 };
        char *key = NULL;
@@ -604,8 +605,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
        tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
        if (tmpns) {
                *ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
-               if (!*ns_name)
+               if (!*ns_name) {
+                       info = "out of memory";
                        goto fail;
+               }
                name = tmpname;
        }
 
@@ -624,12 +627,15 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
        if (IS_ERR(profile->xmatch)) {
                error = PTR_ERR(profile->xmatch);
                profile->xmatch = NULL;
+               info = "bad xmatch";
                goto fail;
        }
        /* xmatch_len is not optional if xmatch is set */
        if (profile->xmatch) {
-               if (!unpack_u32(e, &tmp, NULL))
+               if (!unpack_u32(e, &tmp, NULL)) {
+                       info = "missing xmatch len";
                        goto fail;
+               }
                profile->xmatch_len = tmp;
        }
 
@@ -637,8 +643,11 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
        (void) unpack_str(e, &profile->disconnected, "disconnected");
 
        /* per profile debug flags (complain, audit) */
-       if (!unpack_nameX(e, AA_STRUCT, "flags"))
+       if (!unpack_nameX(e, AA_STRUCT, "flags")) {
+               info = "profile missing flags";
                goto fail;
+       }
+       info = "failed to unpack profile flags";
        if (!unpack_u32(e, &tmp, NULL))
                goto fail;
        if (tmp & PACKED_FLAG_HAT)
@@ -667,6 +676,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
                /* set a default value if path_flags field is not present */
                profile->path_flags = PATH_MEDIATE_DELETED;
 
+       info = "failed to unpack profile capabilities";
        if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
                goto fail;
        if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
@@ -676,6 +686,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
        if (!unpack_u32(e, &tmpcap.cap[0], NULL))
                goto fail;
 
+       info = "failed to unpack upper profile capabilities";
        if (unpack_nameX(e, AA_STRUCT, "caps64")) {
                /* optional upper half of 64 bit caps */
                if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
@@ -690,6 +701,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
                        goto fail;
        }
 
+       info = "failed to unpack extended profile capabilities";
        if (unpack_nameX(e, AA_STRUCT, "capsx")) {
                /* optional extended caps mediation mask */
                if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
@@ -700,11 +712,14 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
                        goto fail;
        }
 
-       if (!unpack_rlimits(e, profile))
+       if (!unpack_rlimits(e, profile)) {
+               info = "failed to unpack profile rlimits";
                goto fail;
+       }
 
        if (unpack_nameX(e, AA_STRUCT, "policydb")) {
                /* generic policy dfa - optional and may be NULL */
+               info = "failed to unpack policydb";
                profile->policy.dfa = unpack_dfa(e);
                if (IS_ERR(profile->policy.dfa)) {
                        error = PTR_ERR(profile->policy.dfa);
@@ -734,6 +749,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
        if (IS_ERR(profile->file.dfa)) {
                error = PTR_ERR(profile->file.dfa);
                profile->file.dfa = NULL;
+               info = "failed to unpack profile file rules";
                goto fail;
        } else if (profile->file.dfa) {
                if (!unpack_u32(e, &profile->file.start, "dfa_start"))
@@ -746,10 +762,13 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
        } else
                profile->file.dfa = aa_get_dfa(nulldfa);
 
-       if (!unpack_trans_table(e, profile))
+       if (!unpack_trans_table(e, profile)) {
+               info = "failed to unpack profile transition table";
                goto fail;
+       }
 
        if (unpack_nameX(e, AA_STRUCT, "data")) {
+               info = "out of memory";
                profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
                if (!profile->data)
                        goto fail;
@@ -761,8 +780,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
                params.hashfn = strhash;
                params.obj_cmpfn = datacmp;
 
-               if (rhashtable_init(profile->data, &params))
+               if (rhashtable_init(profile->data, &params)) {
+                       info = "failed to init key, value hash table";
                        goto fail;
+               }
 
                while (unpack_strdup(e, &key, NULL)) {
                        data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -784,12 +805,16 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
                                               profile->data->p);
                }
 
-               if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+               if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
+                       info = "failed to unpack end of key, value data table";
                        goto fail;
+               }
        }
 
-       if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+       if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
+               info = "failed to unpack end of profile";
                goto fail;
+       }
 
        return profile;
 
@@ -798,8 +823,7 @@ fail:
                name = NULL;
        else if (!name)
                name = "unknown";
-       audit_iface(profile, NULL, name, "failed to unpack profile", e,
-                   error);
+       audit_iface(profile, NULL, name, info, e, error);
        aa_free_profile(profile);
 
        return ERR_PTR(error);
@@ -832,7 +856,7 @@ static int verify_header(struct aa_ext *e, int required, const char **ns)
         * if not specified use previous version
         * Mask off everything that is not kernel abi version
         */
-       if (VERSION_LT(e->version, v5) && VERSION_GT(e->version, v7)) {
+       if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
                audit_iface(NULL, NULL, NULL, "unsupported interface version",
                            e, error);
                return error;
index 6bf72b175b49caf336e7ef8f3d83cf256c955120..fc46f5b85251049265a53929e37d5822240a64e3 100644 (file)
@@ -294,10 +294,10 @@ int cap_capset(struct cred *new,
  *
  * Determine if an inode having a change applied that's marked ATTR_KILL_PRIV
  * affects the security markings on that inode, and if it is, should
- * inode_killpriv() be invoked or the change rejected?
+ * inode_killpriv() be invoked or the change rejected.
  *
- * Returns 0 if granted; +ve if granted, but inode_killpriv() is required; and
- * -ve to deny the change.
+ * Returns 1 if security.capability has a value, meaning inode_killpriv()
+ * is required, 0 otherwise, meaning inode_killpriv() is not required.
  */
 int cap_inode_need_killpriv(struct dentry *dentry)
 {
@@ -585,13 +585,14 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
        struct vfs_ns_cap_data data, *nscaps = &data;
        struct vfs_cap_data *caps = (struct vfs_cap_data *) &data;
        kuid_t rootkuid;
-       struct user_namespace *fs_ns = inode->i_sb->s_user_ns;
+       struct user_namespace *fs_ns;
 
        memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));
 
        if (!inode)
                return -ENODATA;
 
+       fs_ns = inode->i_sb->s_user_ns;
        size = __vfs_getxattr((struct dentry *)dentry, inode,
                              XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ);
        if (size == -ENODATA || size == -EOPNOTSUPP)
index a7a23b5541f85a4994e0cc83d7d132e8a5681938..6462e6654ccf46df67db56959f5405b2f7d4e4c2 100644 (file)
@@ -45,10 +45,9 @@ config BIG_KEYS
        bool "Large payload keys"
        depends on KEYS
        depends on TMPFS
-       depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y)
+       select CRYPTO
        select CRYPTO_AES
-       select CRYPTO_ECB
-       select CRYPTO_RNG
+       select CRYPTO_GCM
        help
          This option provides support for holding large keys within the kernel
          (for example Kerberos ticket caches).  The data may be stored out to
index 6acb00f6f22cdcd223426756a49deebf53b86595..929e14978c421b227e592e937d9157ca2235b2f7 100644 (file)
@@ -1,5 +1,6 @@
 /* Large capacity key type
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
  * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
 #include <linux/shmem_fs.h>
 #include <linux/err.h>
 #include <linux/scatterlist.h>
+#include <linux/random.h>
 #include <keys/user-type.h>
 #include <keys/big_key-type.h>
-#include <crypto/rng.h>
-#include <crypto/skcipher.h>
+#include <crypto/aead.h>
 
 /*
  * Layout of key payload words.
@@ -49,7 +50,12 @@ enum big_key_op {
 /*
  * Key size for big_key data encryption
  */
-#define ENC_KEY_SIZE   16
+#define ENC_KEY_SIZE 32
+
+/*
+ * Authentication tag length
+ */
+#define ENC_AUTHTAG_SIZE 16
 
 /*
  * big_key defined keys take an arbitrary string as the description and an
@@ -64,57 +70,62 @@ struct key_type key_type_big_key = {
        .destroy                = big_key_destroy,
        .describe               = big_key_describe,
        .read                   = big_key_read,
+       /* no ->update(); don't add it without changing big_key_crypt() nonce */
 };
 
 /*
- * Crypto names for big_key data encryption
+ * Crypto names for big_key data authenticated encryption
  */
-static const char big_key_rng_name[] = "stdrng";
-static const char big_key_alg_name[] = "ecb(aes)";
+static const char big_key_alg_name[] = "gcm(aes)";
 
 /*
- * Crypto algorithms for big_key data encryption
+ * Crypto algorithms for big_key data authenticated encryption
  */
-static struct crypto_rng *big_key_rng;
-static struct crypto_skcipher *big_key_skcipher;
+static struct crypto_aead *big_key_aead;
 
 /*
- * Generate random key to encrypt big_key data
+ * Since changing the key affects the entire object, we need a mutex.
  */
-static inline int big_key_gen_enckey(u8 *key)
-{
-       return crypto_rng_get_bytes(big_key_rng, key, ENC_KEY_SIZE);
-}
+static DEFINE_MUTEX(big_key_aead_lock);
 
 /*
  * Encrypt/decrypt big_key data
  */
 static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
 {
-       int ret = -EINVAL;
+       int ret;
        struct scatterlist sgio;
-       SKCIPHER_REQUEST_ON_STACK(req, big_key_skcipher);
-
-       if (crypto_skcipher_setkey(big_key_skcipher, key, ENC_KEY_SIZE)) {
+       struct aead_request *aead_req;
+       /* We always use a zero nonce. The reason we can get away with this is
+        * because we're using a different randomly generated key for every
+        * different encryption. Notably, too, key_type_big_key doesn't define
+        * an .update function, so there's no chance we'll wind up reusing the
+        * key to encrypt updated data. Simply put: one key, one encryption.
+        */
+       u8 zero_nonce[crypto_aead_ivsize(big_key_aead)];
+
+       aead_req = aead_request_alloc(big_key_aead, GFP_KERNEL);
+       if (!aead_req)
+               return -ENOMEM;
+
+       memset(zero_nonce, 0, sizeof(zero_nonce));
+       sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? ENC_AUTHTAG_SIZE : 0));
+       aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce);
+       aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+       aead_request_set_ad(aead_req, 0);
+
+       mutex_lock(&big_key_aead_lock);
+       if (crypto_aead_setkey(big_key_aead, key, ENC_KEY_SIZE)) {
                ret = -EAGAIN;
                goto error;
        }
-
-       skcipher_request_set_tfm(req, big_key_skcipher);
-       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
-                                     NULL, NULL);
-
-       sg_init_one(&sgio, data, datalen);
-       skcipher_request_set_crypt(req, &sgio, &sgio, datalen, NULL);
-
        if (op == BIG_KEY_ENC)
-               ret = crypto_skcipher_encrypt(req);
+               ret = crypto_aead_encrypt(aead_req);
        else
-               ret = crypto_skcipher_decrypt(req);
-
-       skcipher_request_zero(req);
-
+               ret = crypto_aead_decrypt(aead_req);
 error:
+       mutex_unlock(&big_key_aead_lock);
+       aead_request_free(aead_req);
        return ret;
 }
 
@@ -146,16 +157,13 @@ int big_key_preparse(struct key_preparsed_payload *prep)
                 *
                 * File content is stored encrypted with randomly generated key.
                 */
-               size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));
+               size_t enclen = datalen + ENC_AUTHTAG_SIZE;
                loff_t pos = 0;
 
-               /* prepare aligned data to encrypt */
                data = kmalloc(enclen, GFP_KERNEL);
                if (!data)
                        return -ENOMEM;
-
                memcpy(data, prep->data, datalen);
-               memset(data + datalen, 0x00, enclen - datalen);
 
                /* generate random key */
                enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
@@ -163,13 +171,12 @@ int big_key_preparse(struct key_preparsed_payload *prep)
                        ret = -ENOMEM;
                        goto error;
                }
-
-               ret = big_key_gen_enckey(enckey);
-               if (ret)
+               ret = get_random_bytes_wait(enckey, ENC_KEY_SIZE);
+               if (unlikely(ret))
                        goto err_enckey;
 
                /* encrypt aligned data */
-               ret = big_key_crypt(BIG_KEY_ENC, data, enclen, enckey);
+               ret = big_key_crypt(BIG_KEY_ENC, data, datalen, enckey);
                if (ret)
                        goto err_enckey;
 
@@ -195,7 +202,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
                *path = file->f_path;
                path_get(path);
                fput(file);
-               kfree(data);
+               kzfree(data);
        } else {
                /* Just store the data in a buffer */
                void *data = kmalloc(datalen, GFP_KERNEL);
@@ -211,9 +218,9 @@ int big_key_preparse(struct key_preparsed_payload *prep)
 err_fput:
        fput(file);
 err_enckey:
-       kfree(enckey);
+       kzfree(enckey);
 error:
-       kfree(data);
+       kzfree(data);
        return ret;
 }
 
@@ -227,7 +234,7 @@ void big_key_free_preparse(struct key_preparsed_payload *prep)
 
                path_put(path);
        }
-       kfree(prep->payload.data[big_key_data]);
+       kzfree(prep->payload.data[big_key_data]);
 }
 
 /*
@@ -240,7 +247,7 @@ void big_key_revoke(struct key *key)
 
        /* clear the quota */
        key_payload_reserve(key, 0);
-       if (key_is_instantiated(key) &&
+       if (key_is_positive(key) &&
            (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
                vfs_truncate(path, 0);
 }
@@ -259,7 +266,7 @@ void big_key_destroy(struct key *key)
                path->mnt = NULL;
                path->dentry = NULL;
        }
-       kfree(key->payload.data[big_key_data]);
+       kzfree(key->payload.data[big_key_data]);
        key->payload.data[big_key_data] = NULL;
 }
 
@@ -272,7 +279,7 @@ void big_key_describe(const struct key *key, struct seq_file *m)
 
        seq_puts(m, key->description);
 
-       if (key_is_instantiated(key))
+       if (key_is_positive(key))
                seq_printf(m, ": %zu [%s]",
                           datalen,
                           datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
@@ -295,7 +302,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
                struct file *file;
                u8 *data;
                u8 *enckey = (u8 *)key->payload.data[big_key_data];
-               size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));
+               size_t enclen = datalen + ENC_AUTHTAG_SIZE;
                loff_t pos = 0;
 
                data = kmalloc(enclen, GFP_KERNEL);
@@ -328,7 +335,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
 err_fput:
                fput(file);
 error:
-               kfree(data);
+               kzfree(data);
        } else {
                ret = datalen;
                if (copy_to_user(buffer, key->payload.data[big_key_data],
@@ -344,47 +351,31 @@ error:
  */
 static int __init big_key_init(void)
 {
-       struct crypto_skcipher *cipher;
-       struct crypto_rng *rng;
        int ret;
 
-       rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
-       if (IS_ERR(rng)) {
-               pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
-               return PTR_ERR(rng);
-       }
-
-       big_key_rng = rng;
-
-       /* seed RNG */
-       ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
-       if (ret) {
-               pr_err("Can't reset rng: %d\n", ret);
-               goto error_rng;
-       }
-
        /* init block cipher */
-       cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(cipher)) {
-               ret = PTR_ERR(cipher);
+       big_key_aead = crypto_alloc_aead(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(big_key_aead)) {
+               ret = PTR_ERR(big_key_aead);
                pr_err("Can't alloc crypto: %d\n", ret);
-               goto error_rng;
+               return ret;
+       }
+       ret = crypto_aead_setauthsize(big_key_aead, ENC_AUTHTAG_SIZE);
+       if (ret < 0) {
+               pr_err("Can't set crypto auth tag len: %d\n", ret);
+               goto free_aead;
        }
-
-       big_key_skcipher = cipher;
 
        ret = register_key_type(&key_type_big_key);
        if (ret < 0) {
                pr_err("Can't register type: %d\n", ret);
-               goto error_cipher;
+               goto free_aead;
        }
 
        return 0;
 
-error_cipher:
-       crypto_free_skcipher(big_key_skcipher);
-error_rng:
-       crypto_free_rng(big_key_rng);
+free_aead:
+       crypto_free_aead(big_key_aead);
        return ret;
 }
 
index 69855ba0d3b3fcba2190844ae5665ead90dd38f9..d92cbf9687c33f090865f6d0caa99d9936f49e3a 100644 (file)
@@ -309,6 +309,13 @@ static struct key *request_user_key(const char *master_desc, const u8 **master_k
 
        down_read(&ukey->sem);
        upayload = user_key_payload_locked(ukey);
+       if (!upayload) {
+               /* key was revoked before we acquired its semaphore */
+               up_read(&ukey->sem);
+               key_put(ukey);
+               ukey = ERR_PTR(-EKEYREVOKED);
+               goto error;
+       }
        *master_key = upayload->data;
        *master_keylen = upayload->datalen;
 error:
@@ -847,7 +854,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
        size_t datalen = prep->datalen;
        int ret = 0;
 
-       if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+       if (key_is_negative(key))
                return -ENOKEY;
        if (datalen <= 0 || datalen > 32767 || !prep->data)
                return -EINVAL;
index 87cb260e4890f3ac464e8d3f3244077653510b74..f01d48cb3de1aac09266b59bcfa732597c6534b4 100644 (file)
@@ -129,15 +129,15 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
        while (!list_empty(keys)) {
                struct key *key =
                        list_entry(keys->next, struct key, graveyard_link);
+               short state = key->state;
+
                list_del(&key->graveyard_link);
 
                kdebug("- %u", key->serial);
                key_check(key);
 
                /* Throw away the key data if the key is instantiated */
-               if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
-                   !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
-                   key->type->destroy)
+               if (state == KEY_IS_POSITIVE && key->type->destroy)
                        key->type->destroy(key);
 
                security_key_free(key);
@@ -151,7 +151,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
                }
 
                atomic_dec(&key->user->nkeys);
-               if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+               if (state != KEY_IS_UNINSTANTIATED)
                        atomic_dec(&key->user->nikeys);
 
                key_user_put(key->user);
index 1c02c65470384aa076e2de6885ecba356eb44342..503adbae7b0dd0b096aa7fd672fddb0d3513d115 100644 (file)
@@ -141,7 +141,7 @@ extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
 extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
 extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
 
-extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
+extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
 
 extern int install_user_keyrings(void);
 extern int install_thread_keyring_to_cred(struct cred *);
index 83da68d98b40b452a1c8b37121a6ca270387d4f6..83bf4b4afd49d24ba80209a9bff71a558bd01b6c 100644 (file)
@@ -54,10 +54,10 @@ void __key_check(const struct key *key)
 struct key_user *key_user_lookup(kuid_t uid)
 {
        struct key_user *candidate = NULL, *user;
-       struct rb_node *parent = NULL;
-       struct rb_node **p;
+       struct rb_node *parent, **p;
 
 try_again:
+       parent = NULL;
        p = &key_user_tree.rb_node;
        spin_lock(&key_user_lock);
 
@@ -302,6 +302,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
                key->flags |= 1 << KEY_FLAG_IN_QUOTA;
        if (flags & KEY_ALLOC_BUILT_IN)
                key->flags |= 1 << KEY_FLAG_BUILTIN;
+       if (flags & KEY_ALLOC_UID_KEYRING)
+               key->flags |= 1 << KEY_FLAG_UID_KEYRING;
 
 #ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC;
@@ -399,6 +401,18 @@ int key_payload_reserve(struct key *key, size_t datalen)
 }
 EXPORT_SYMBOL(key_payload_reserve);
 
+/*
+ * Change the key state to being instantiated.
+ */
+static void mark_key_instantiated(struct key *key, int reject_error)
+{
+       /* Commit the payload before setting the state; barrier versus
+        * key_read_state().
+        */
+       smp_store_release(&key->state,
+                         (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
+}
+
 /*
  * Instantiate a key and link it into the target keyring atomically.  Must be
  * called with the target keyring's semaphore writelocked.  The target key's
@@ -422,14 +436,14 @@ static int __key_instantiate_and_link(struct key *key,
        mutex_lock(&key_construction_mutex);
 
        /* can't instantiate twice */
-       if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+       if (key->state == KEY_IS_UNINSTANTIATED) {
                /* instantiate the key */
                ret = key->type->instantiate(key, prep);
 
                if (ret == 0) {
                        /* mark the key as being instantiated */
                        atomic_inc(&key->user->nikeys);
-                       set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
+                       mark_key_instantiated(key, 0);
 
                        if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                                awaken = 1;
@@ -575,13 +589,10 @@ int key_reject_and_link(struct key *key,
        mutex_lock(&key_construction_mutex);
 
        /* can't instantiate twice */
-       if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+       if (key->state == KEY_IS_UNINSTANTIATED) {
                /* mark the key as being negatively instantiated */
                atomic_inc(&key->user->nikeys);
-               key->reject_error = -error;
-               smp_wmb();
-               set_bit(KEY_FLAG_NEGATIVE, &key->flags);
-               set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
+               mark_key_instantiated(key, -error);
                now = current_kernel_time();
                key->expiry = now.tv_sec + timeout;
                key_schedule_gc(key->expiry + key_gc_delay);
@@ -750,8 +761,8 @@ static inline key_ref_t __key_update(key_ref_t key_ref,
 
        ret = key->type->update(key, prep);
        if (ret == 0)
-               /* updating a negative key instantiates it */
-               clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+               /* Updating a negative key positively instantiates it */
+               mark_key_instantiated(key, 0);
 
        up_write(&key->sem);
 
@@ -934,6 +945,16 @@ error:
         */
        __key_link_end(keyring, &index_key, edit);
 
+       key = key_ref_to_ptr(key_ref);
+       if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
+               ret = wait_for_key_construction(key, true);
+               if (ret < 0) {
+                       key_ref_put(key_ref);
+                       key_ref = ERR_PTR(ret);
+                       goto error_free_prep;
+               }
+       }
+
        key_ref = __key_update(key_ref, &prep);
        goto error_free_prep;
 }
@@ -984,8 +1005,8 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
 
        ret = key->type->update(key, &prep);
        if (ret == 0)
-               /* updating a negative key instantiates it */
-               clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+               /* Updating a negative key positively instantiates it */
+               mark_key_instantiated(key, 0);
 
        up_write(&key->sem);
 
index ab0b337c84b4c02e4856719398edb94d1caf101a..76d22f726ae49d7e112c648c59e9c0d3124063f1 100644 (file)
@@ -766,12 +766,16 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
 
        key = key_ref_to_ptr(key_ref);
 
+       ret = key_read_state(key);
+       if (ret < 0)
+               goto error2; /* Negatively instantiated */
+
        /* see if we can read it directly */
        ret = key_permission(key_ref, KEY_NEED_READ);
        if (ret == 0)
                goto can_read_key;
        if (ret != -EACCES)
-               goto error;
+               goto error2;
 
        /* we can't; see if it's searchable from this process's keyrings
         * - we automatically take account of the fact that it may be
@@ -896,7 +900,7 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
                atomic_dec(&key->user->nkeys);
                atomic_inc(&newowner->nkeys);
 
-               if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+               if (key->state != KEY_IS_UNINSTANTIATED) {
                        atomic_dec(&key->user->nikeys);
                        atomic_inc(&newowner->nikeys);
                }
@@ -1406,11 +1410,9 @@ long keyctl_assume_authority(key_serial_t id)
        }
 
        ret = keyctl_change_reqkey_auth(authkey);
-       if (ret < 0)
-               goto error;
+       if (ret == 0)
+               ret = authkey->serial;
        key_put(authkey);
-
-       ret = authkey->serial;
 error:
        return ret;
 }
index de81793f9920787101dec77eca28ddfbe91ebbc7..a7e51f7938671c76fa917ea66a7a1d6524f21364 100644 (file)
@@ -414,7 +414,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
        else
                seq_puts(m, "[anon]");
 
-       if (key_is_instantiated(keyring)) {
+       if (key_is_positive(keyring)) {
                if (keyring->keys.nr_leaves_on_tree != 0)
                        seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
                else
@@ -423,7 +423,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
 }
 
 struct keyring_read_iterator_context {
-       size_t                  qty;
+       size_t                  buflen;
        size_t                  count;
        key_serial_t __user     *buffer;
 };
@@ -435,9 +435,9 @@ static int keyring_read_iterator(const void *object, void *data)
        int ret;
 
        kenter("{%s,%d},,{%zu/%zu}",
-              key->type->name, key->serial, ctx->count, ctx->qty);
+              key->type->name, key->serial, ctx->count, ctx->buflen);
 
-       if (ctx->count >= ctx->qty)
+       if (ctx->count >= ctx->buflen)
                return 1;
 
        ret = put_user(key->serial, ctx->buffer);
@@ -472,16 +472,12 @@ static long keyring_read(const struct key *keyring,
                return 0;
 
        /* Calculate how much data we could return */
-       ctx.qty = nr_keys * sizeof(key_serial_t);
-
        if (!buffer || !buflen)
-               return ctx.qty;
-
-       if (buflen > ctx.qty)
-               ctx.qty = buflen;
+               return nr_keys * sizeof(key_serial_t);
 
        /* Copy the IDs of the subscribed keys into the buffer */
        ctx.buffer = (key_serial_t __user *)buffer;
+       ctx.buflen = buflen;
        ctx.count = 0;
        ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
        if (ret < 0) {
@@ -557,7 +553,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
 {
        struct keyring_search_context *ctx = iterator_data;
        const struct key *key = keyring_ptr_to_key(object);
-       unsigned long kflags = key->flags;
+       unsigned long kflags = READ_ONCE(key->flags);
+       short state = READ_ONCE(key->state);
 
        kenter("{%d}", key->serial);
 
@@ -569,6 +566,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
 
        /* skip invalidated, revoked and expired keys */
        if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+               time_t expiry = READ_ONCE(key->expiry);
+
                if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
                              (1 << KEY_FLAG_REVOKED))) {
                        ctx->result = ERR_PTR(-EKEYREVOKED);
@@ -576,7 +575,7 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
                        goto skipped;
                }
 
-               if (key->expiry && ctx->now.tv_sec >= key->expiry) {
+               if (expiry && ctx->now.tv_sec >= expiry) {
                        if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
                                ctx->result = ERR_PTR(-EKEYEXPIRED);
                        kleave(" = %d [expire]", ctx->skipped_ret);
@@ -601,9 +600,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
 
        if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
                /* we set a different error code if we pass a negative key */
-               if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
-                       smp_rmb();
-                       ctx->result = ERR_PTR(key->reject_error);
+               if (state < 0) {
+                       ctx->result = ERR_PTR(state);
                        kleave(" = %d [neg]", ctx->skipped_ret);
                        goto skipped;
                }
@@ -1101,15 +1099,15 @@ found:
 /*
  * Find a keyring with the specified name.
  *
- * All named keyrings in the current user namespace are searched, provided they
- * grant Search permission directly to the caller (unless this check is
- * skipped).  Keyrings whose usage points have reached zero or who have been
- * revoked are skipped.
+ * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
+ * user in the current user namespace are considered.  If @uid_keyring is %true,
+ * the keyring additionally must have been allocated as a user or user session
+ * keyring; otherwise, it must grant Search permission directly to the caller.
  *
  * Returns a pointer to the keyring with the keyring's refcount having being
  * incremented on success.  -ENOKEY is returned if a key could not be found.
  */
-struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
+struct key *find_keyring_by_name(const char *name, bool uid_keyring)
 {
        struct key *keyring;
        int bucket;
@@ -1137,10 +1135,15 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
                        if (strcmp(keyring->description, name) != 0)
                                continue;
 
-                       if (!skip_perm_check &&
-                           key_permission(make_key_ref(keyring, 0),
-                                          KEY_NEED_SEARCH) < 0)
-                               continue;
+                       if (uid_keyring) {
+                               if (!test_bit(KEY_FLAG_UID_KEYRING,
+                                             &keyring->flags))
+                                       continue;
+                       } else {
+                               if (key_permission(make_key_ref(keyring, 0),
+                                                  KEY_NEED_SEARCH) < 0)
+                                       continue;
+                       }
 
                        /* we've got a match but we might end up racing with
                         * key_cleanup() if the keyring is currently 'dead'
index 732cc0beffdfc74b74eddbf173ee30bd9d0455db..a72b4dd70c8abfab5d05503415c4a5fcef7b6df3 100644 (file)
@@ -88,7 +88,8 @@ EXPORT_SYMBOL(key_task_permission);
  */
 int key_validate(const struct key *key)
 {
-       unsigned long flags = key->flags;
+       unsigned long flags = READ_ONCE(key->flags);
+       time_t expiry = READ_ONCE(key->expiry);
 
        if (flags & (1 << KEY_FLAG_INVALIDATED))
                return -ENOKEY;
@@ -99,9 +100,9 @@ int key_validate(const struct key *key)
                return -EKEYREVOKED;
 
        /* check it hasn't expired */
-       if (key->expiry) {
+       if (expiry) {
                struct timespec now = current_kernel_time();
-               if (now.tv_sec >= key->expiry)
+               if (now.tv_sec >= expiry)
                        return -EKEYEXPIRED;
        }
 
index bf08d02b6646ae2077b15903471bfb4575b04f5a..6d1fcbba1e0961927c9b3a6027fcd8084a21f80a 100644 (file)
@@ -179,15 +179,18 @@ static int proc_keys_show(struct seq_file *m, void *v)
        struct rb_node *_p = v;
        struct key *key = rb_entry(_p, struct key, serial_node);
        struct timespec now;
+       time_t expiry;
        unsigned long timo;
+       unsigned long flags;
        key_ref_t key_ref, skey_ref;
        char xbuf[16];
+       short state;
        int rc;
 
        struct keyring_search_context ctx = {
                .index_key.type         = key->type,
                .index_key.description  = key->description,
-               .cred                   = current_cred(),
+               .cred                   = m->file->f_cred,
                .match_data.cmp         = lookup_user_key_possessed,
                .match_data.raw_data    = key,
                .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
@@ -207,11 +210,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
                }
        }
 
-       /* check whether the current task is allowed to view the key (assuming
-        * non-possession)
-        * - the caller holds a spinlock, and thus the RCU read lock, making our
-        *   access to __current_cred() safe
-        */
+       /* check whether the current task is allowed to view the key */
        rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW);
        if (rc < 0)
                return 0;
@@ -221,12 +220,13 @@ static int proc_keys_show(struct seq_file *m, void *v)
        rcu_read_lock();
 
        /* come up with a suitable timeout value */
-       if (key->expiry == 0) {
+       expiry = READ_ONCE(key->expiry);
+       if (expiry == 0) {
                memcpy(xbuf, "perm", 5);
-       } else if (now.tv_sec >= key->expiry) {
+       } else if (now.tv_sec >= expiry) {
                memcpy(xbuf, "expd", 5);
        } else {
-               timo = key->expiry - now.tv_sec;
+               timo = expiry - now.tv_sec;
 
                if (timo < 60)
                        sprintf(xbuf, "%lus", timo);
@@ -240,18 +240,21 @@ static int proc_keys_show(struct seq_file *m, void *v)
                        sprintf(xbuf, "%luw", timo / (60*60*24*7));
        }
 
-#define showflag(KEY, LETTER, FLAG) \
-       (test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')
+       state = key_read_state(key);
+
+#define showflag(FLAGS, LETTER, FLAG) \
+       ((FLAGS & (1 << FLAG)) ? LETTER : '-')
 
+       flags = READ_ONCE(key->flags);
        seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
                   key->serial,
-                  showflag(key, 'I', KEY_FLAG_INSTANTIATED),
-                  showflag(key, 'R', KEY_FLAG_REVOKED),
-                  showflag(key, 'D', KEY_FLAG_DEAD),
-                  showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
-                  showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
-                  showflag(key, 'N', KEY_FLAG_NEGATIVE),
-                  showflag(key, 'i', KEY_FLAG_INVALIDATED),
+                  state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
+                  showflag(flags, 'R', KEY_FLAG_REVOKED),
+                  showflag(flags, 'D', KEY_FLAG_DEAD),
+                  showflag(flags, 'Q', KEY_FLAG_IN_QUOTA),
+                  showflag(flags, 'U', KEY_FLAG_USER_CONSTRUCT),
+                  state < 0 ? 'N' : '-',
+                  showflag(flags, 'i', KEY_FLAG_INVALIDATED),
                   refcount_read(&key->usage),
                   xbuf,
                   key->perm,
index 86bced9fdbdf22eb60170584d87730e1179a2744..740affd65ee98464e19fc32f830f86a1ad3f24f4 100644 (file)
@@ -77,7 +77,8 @@ int install_user_keyrings(void)
                if (IS_ERR(uid_keyring)) {
                        uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
                                                    cred, user_keyring_perm,
-                                                   KEY_ALLOC_IN_QUOTA,
+                                                   KEY_ALLOC_UID_KEYRING |
+                                                       KEY_ALLOC_IN_QUOTA,
                                                    NULL, NULL);
                        if (IS_ERR(uid_keyring)) {
                                ret = PTR_ERR(uid_keyring);
@@ -94,7 +95,8 @@ int install_user_keyrings(void)
                        session_keyring =
                                keyring_alloc(buf, user->uid, INVALID_GID,
                                              cred, user_keyring_perm,
-                                             KEY_ALLOC_IN_QUOTA,
+                                             KEY_ALLOC_UID_KEYRING |
+                                                 KEY_ALLOC_IN_QUOTA,
                                              NULL, NULL);
                        if (IS_ERR(session_keyring)) {
                                ret = PTR_ERR(session_keyring);
@@ -728,7 +730,7 @@ try_again:
 
        ret = -EIO;
        if (!(lflags & KEY_LOOKUP_PARTIAL) &&
-           !test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+           key_read_state(key) == KEY_IS_UNINSTANTIATED)
                goto invalid_key;
 
        /* check the permissions */
index 63e63a42db3c0aa4b1ed069e03e8e1f8e9139a32..e8036cd0ad5430a87ec2e2ea1496e921ae941b3d 100644 (file)
@@ -595,10 +595,9 @@ int wait_for_key_construction(struct key *key, bool intr)
                          intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
        if (ret)
                return -ERESTARTSYS;
-       if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
-               smp_rmb();
-               return key->reject_error;
-       }
+       ret = key_read_state(key);
+       if (ret < 0)
+               return ret;
        return key_validate(key);
 }
 EXPORT_SYMBOL(wait_for_key_construction);
index afe9d22ab3611f2dc621db0092e1b102e005be78..424e1d90412ea0c40e4e20453f47b566b0b25834 100644 (file)
@@ -73,7 +73,7 @@ static void request_key_auth_describe(const struct key *key,
 
        seq_puts(m, "key:");
        seq_puts(m, key->description);
-       if (key_is_instantiated(key))
+       if (key_is_positive(key))
                seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
 }
 
@@ -120,6 +120,18 @@ static void request_key_auth_revoke(struct key *key)
        }
 }
 
+static void free_request_key_auth(struct request_key_auth *rka)
+{
+       if (!rka)
+               return;
+       key_put(rka->target_key);
+       key_put(rka->dest_keyring);
+       if (rka->cred)
+               put_cred(rka->cred);
+       kfree(rka->callout_info);
+       kfree(rka);
+}
+
 /*
  * Destroy an instantiation authorisation token key.
  */
@@ -129,15 +141,7 @@ static void request_key_auth_destroy(struct key *key)
 
        kenter("{%d}", key->serial);
 
-       if (rka->cred) {
-               put_cred(rka->cred);
-               rka->cred = NULL;
-       }
-
-       key_put(rka->target_key);
-       key_put(rka->dest_keyring);
-       kfree(rka->callout_info);
-       kfree(rka);
+       free_request_key_auth(rka);
 }
 
 /*
@@ -151,22 +155,18 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
        const struct cred *cred = current->cred;
        struct key *authkey = NULL;
        char desc[20];
-       int ret;
+       int ret = -ENOMEM;
 
        kenter("%d,", target->serial);
 
        /* allocate a auth record */
-       rka = kmalloc(sizeof(*rka), GFP_KERNEL);
-       if (!rka) {
-               kleave(" = -ENOMEM");
-               return ERR_PTR(-ENOMEM);
-       }
-       rka->callout_info = kmalloc(callout_len, GFP_KERNEL);
-       if (!rka->callout_info) {
-               kleave(" = -ENOMEM");
-               kfree(rka);
-               return ERR_PTR(-ENOMEM);
-       }
+       rka = kzalloc(sizeof(*rka), GFP_KERNEL);
+       if (!rka)
+               goto error;
+       rka->callout_info = kmemdup(callout_info, callout_len, GFP_KERNEL);
+       if (!rka->callout_info)
+               goto error_free_rka;
+       rka->callout_len = callout_len;
 
        /* see if the calling process is already servicing the key request of
         * another process */
@@ -176,8 +176,12 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
 
                /* if the auth key has been revoked, then the key we're
                 * servicing is already instantiated */
-               if (test_bit(KEY_FLAG_REVOKED, &cred->request_key_auth->flags))
-                       goto auth_key_revoked;
+               if (test_bit(KEY_FLAG_REVOKED,
+                            &cred->request_key_auth->flags)) {
+                       up_read(&cred->request_key_auth->sem);
+                       ret = -EKEYREVOKED;
+                       goto error_free_rka;
+               }
 
                irka = cred->request_key_auth->payload.data[0];
                rka->cred = get_cred(irka->cred);
@@ -193,8 +197,6 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
 
        rka->target_key = key_get(target);
        rka->dest_keyring = key_get(dest_keyring);
-       memcpy(rka->callout_info, callout_info, callout_len);
-       rka->callout_len = callout_len;
 
        /* allocate the auth key */
        sprintf(desc, "%x", target->serial);
@@ -205,32 +207,22 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
                            KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL);
        if (IS_ERR(authkey)) {
                ret = PTR_ERR(authkey);
-               goto error_alloc;
+               goto error_free_rka;
        }
 
        /* construct the auth key */
        ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL);
        if (ret < 0)
-               goto error_inst;
+               goto error_put_authkey;
 
        kleave(" = {%d,%d}", authkey->serial, refcount_read(&authkey->usage));
        return authkey;
 
-auth_key_revoked:
-       up_read(&cred->request_key_auth->sem);
-       kfree(rka->callout_info);
-       kfree(rka);
-       kleave("= -EKEYREVOKED");
-       return ERR_PTR(-EKEYREVOKED);
-
-error_inst:
-       key_revoke(authkey);
+error_put_authkey:
        key_put(authkey);
-error_alloc:
-       key_put(rka->target_key);
-       key_put(rka->dest_keyring);
-       kfree(rka->callout_info);
-       kfree(rka);
+error_free_rka:
+       free_request_key_auth(rka);
+error:
        kleave("= %d", ret);
        return ERR_PTR(ret);
 }
index ddfaebf60fc8649cf6159d2cba18e916931a3b4e..bd85315cbfeb3e1bad3b01e7d9a6538bbb5d3577 100644 (file)
@@ -1066,7 +1066,7 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
        char *datablob;
        int ret = 0;
 
-       if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+       if (key_is_negative(key))
                return -ENOKEY;
        p = key->payload.data[0];
        if (!p->migratable)
index 3d8c68eba5160286fa7af79c8da1ead6e6b05236..9f558bedba23a338da5980ab11dd1b716c7b0b67 100644 (file)
@@ -114,7 +114,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
 
        /* attach the new data, displacing the old */
        key->expiry = prep->expiry;
-       if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+       if (key_is_positive(key))
                zap = dereference_key_locked(key);
        rcu_assign_keypointer(key, prep->payload.data[0]);
        prep->payload.data[0] = NULL;
@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(user_destroy);
 void user_describe(const struct key *key, struct seq_file *m)
 {
        seq_puts(m, key->description);
-       if (key_is_instantiated(key))
+       if (key_is_positive(key))
                seq_printf(m, ": %u", key->datalen);
 }
 
index 319add31b4a4ee7b5464d2a15782289ecb110ce7..286171a16ed255e20c396fca595600488c059c2d 100644 (file)
@@ -1473,7 +1473,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
  * @inode: the object
  * @name: attribute name
  * @buffer: where to put the result
- * @alloc: unused
+ * @alloc: duplicate memory
  *
  * Returns the size of the attribute or an error code
  */
@@ -1486,43 +1486,38 @@ static int smack_inode_getsecurity(struct inode *inode,
        struct super_block *sbp;
        struct inode *ip = (struct inode *)inode;
        struct smack_known *isp;
-       int ilen;
-       int rc = 0;
 
-       if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
+       if (strcmp(name, XATTR_SMACK_SUFFIX) == 0)
                isp = smk_of_inode(inode);
-               ilen = strlen(isp->smk_known);
-               *buffer = isp->smk_known;
-               return ilen;
-       }
+       else {
+               /*
+                * The rest of the Smack xattrs are only on sockets.
+                */
+               sbp = ip->i_sb;
+               if (sbp->s_magic != SOCKFS_MAGIC)
+                       return -EOPNOTSUPP;
 
-       /*
-        * The rest of the Smack xattrs are only on sockets.
-        */
-       sbp = ip->i_sb;
-       if (sbp->s_magic != SOCKFS_MAGIC)
-               return -EOPNOTSUPP;
+               sock = SOCKET_I(ip);
+               if (sock == NULL || sock->sk == NULL)
+                       return -EOPNOTSUPP;
 
-       sock = SOCKET_I(ip);
-       if (sock == NULL || sock->sk == NULL)
-               return -EOPNOTSUPP;
-
-       ssp = sock->sk->sk_security;
+               ssp = sock->sk->sk_security;
 
-       if (strcmp(name, XATTR_SMACK_IPIN) == 0)
-               isp = ssp->smk_in;
-       else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
-               isp = ssp->smk_out;
-       else
-               return -EOPNOTSUPP;
+               if (strcmp(name, XATTR_SMACK_IPIN) == 0)
+                       isp = ssp->smk_in;
+               else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
+                       isp = ssp->smk_out;
+               else
+                       return -EOPNOTSUPP;
+       }
 
-       ilen = strlen(isp->smk_known);
-       if (rc == 0) {
-               *buffer = isp->smk_known;
-               rc = ilen;
+       if (alloc) {
+               *buffer = kstrdup(isp->smk_known, GFP_KERNEL);
+               if (*buffer == NULL)
+                       return -ENOMEM;
        }
 
-       return rc;
+       return strlen(isp->smk_known);
 }
 
 
index fec1dfdb14adfa7c7edc9f7aa2aba35c9e0cb2a5..4490a699030b10725015e323a18a09286ea02db0 100644 (file)
@@ -948,14 +948,13 @@ static const struct file_operations snd_compr_file_ops = {
 static int snd_compress_dev_register(struct snd_device *device)
 {
        int ret = -EINVAL;
-       char str[16];
        struct snd_compr *compr;
 
        if (snd_BUG_ON(!device || !device->device_data))
                return -EBADFD;
        compr = device->device_data;
 
-       pr_debug("reg %s for device %s, direction %d\n", str, compr->name,
+       pr_debug("reg device %s, direction %d\n", compr->name,
                        compr->direction);
        /* register compressed device */
        ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
index 3a1cc7b97e468bc19fad2fadfd9fb68053e6bc6d..b719d0bd833ecb6d7560380db5eb3b78a9b040c4 100644 (file)
@@ -547,6 +547,7 @@ struct snd_pcm_mmap_status_x32 {
        u32 pad2; /* alignment */
        struct timespec tstamp;
        s32 suspended_state;
+       s32 pad3;
        struct timespec audio_tstamp;
 } __packed;
 
index ea2d0ae85bd367d5ea70068ee74d925a349789c3..6c9cba2166d95b3b9175cc9eb39ca14e3a14f0f5 100644 (file)
@@ -1259,6 +1259,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
        struct snd_seq_port_info *info = arg;
        struct snd_seq_client_port *port;
        struct snd_seq_port_callback *callback;
+       int port_idx;
 
        /* it is not allowed to create the port for an another client */
        if (info->addr.client != client->number)
@@ -1269,7 +1270,9 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
                return -ENOMEM;
 
        if (client->type == USER_CLIENT && info->kernel) {
-               snd_seq_delete_port(client, port->addr.port);
+               port_idx = port->addr.port;
+               snd_seq_port_unlock(port);
+               snd_seq_delete_port(client, port_idx);
                return -EINVAL;
        }
        if (client->type == KERNEL_CLIENT) {
@@ -1290,6 +1293,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
 
        snd_seq_set_port_info(port, info);
        snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
+       snd_seq_port_unlock(port);
 
        return 0;
 }
index 0ff7926a5a69ad8dfb2f18a768bf4d9a391c23ee..cda64b489e4200563de5e0bd2f43ea692b8abbce 100644 (file)
@@ -23,8 +23,6 @@
 #include <sound/core.h>
 #include "seq_lock.h"
 
-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
-
 /* wait until all locks are released */
 void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
 {
@@ -41,5 +39,3 @@ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
        }
 }
 EXPORT_SYMBOL(snd_use_lock_sync_helper);
-
-#endif
index 54044bc2c9ef24892a7e6747dcd67e92fb01b0f1..ac38031c370e681984da86f2b8b6981aad1e9ccc 100644 (file)
@@ -3,8 +3,6 @@
 
 #include <linux/sched.h>
 
-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
-
 typedef atomic_t snd_use_lock_t;
 
 /* initialize lock */
@@ -20,14 +18,4 @@ typedef atomic_t snd_use_lock_t;
 void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
 #define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)
 
-#else /* SMP || CONFIG_SND_DEBUG */
-
-typedef spinlock_t snd_use_lock_t;     /* dummy */
-#define snd_use_lock_init(lockp) /**/
-#define snd_use_lock_use(lockp) /**/
-#define snd_use_lock_free(lockp) /**/
-#define snd_use_lock_sync(lockp) /**/
-
-#endif /* SMP || CONFIG_SND_DEBUG */
-
 #endif /* __SND_SEQ_LOCK_H */
index 0a7020c82bfc76ac295d084fa2d98abe5dc5f647..d21ece9f8d7365e5e621156f1e44b85aa11eb1a1 100644 (file)
@@ -122,7 +122,9 @@ static void port_subs_info_init(struct snd_seq_port_subs_info *grp)
 }
 
 
-/* create a port, port number is returned (-1 on failure) */
+/* create a port, port number is returned (-1 on failure);
+ * the caller needs to unref the port via snd_seq_port_unlock() appropriately
+ */
 struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
                                                int port)
 {
@@ -151,6 +153,7 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
        snd_use_lock_init(&new_port->use_lock);
        port_subs_info_init(&new_port->c_src);
        port_subs_info_init(&new_port->c_dest);
+       snd_use_lock_use(&new_port->use_lock);
 
        num = port >= 0 ? port : 0;
        mutex_lock(&client->ports_mutex);
@@ -165,9 +168,9 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
        list_add_tail(&new_port->list, &p->list);
        client->num_ports++;
        new_port->addr.port = num;      /* store the port number in the port */
+       sprintf(new_port->name, "port-%d", num);
        write_unlock_irqrestore(&client->ports_lock, flags);
        mutex_unlock(&client->ports_mutex);
-       sprintf(new_port->name, "port-%d", num);
 
        return new_port;
 }
index 8d93a4021c78ab58cab236c3b9d170bde8a81cb1..f48a4cd24ffce2d50639e177822bb96b31a93d02 100644 (file)
@@ -77,13 +77,17 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
  * decode input event and put to read buffer of each opened file
  */
 static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
-                                        struct snd_seq_event *ev)
+                                        struct snd_seq_event *ev,
+                                        bool atomic)
 {
        struct snd_virmidi *vmidi;
        unsigned char msg[4];
        int len;
 
-       read_lock(&rdev->filelist_lock);
+       if (atomic)
+               read_lock(&rdev->filelist_lock);
+       else
+               down_read(&rdev->filelist_sem);
        list_for_each_entry(vmidi, &rdev->filelist, list) {
                if (!vmidi->trigger)
                        continue;
@@ -97,7 +101,10 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
                                snd_rawmidi_receive(vmidi->substream, msg, len);
                }
        }
-       read_unlock(&rdev->filelist_lock);
+       if (atomic)
+               read_unlock(&rdev->filelist_lock);
+       else
+               up_read(&rdev->filelist_sem);
 
        return 0;
 }
@@ -115,7 +122,7 @@ int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev)
        struct snd_virmidi_dev *rdev;
 
        rdev = rmidi->private_data;
-       return snd_virmidi_dev_receive_event(rdev, ev);
+       return snd_virmidi_dev_receive_event(rdev, ev, true);
 }
 #endif  /*  0  */
 
@@ -130,7 +137,7 @@ static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct,
        rdev = private_data;
        if (!(rdev->flags & SNDRV_VIRMIDI_USE))
                return 0; /* ignored */
-       return snd_virmidi_dev_receive_event(rdev, ev);
+       return snd_virmidi_dev_receive_event(rdev, ev, atomic);
 }
 
 /*
@@ -209,7 +216,6 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
        struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
        struct snd_virmidi *vmidi;
-       unsigned long flags;
 
        vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
        if (vmidi == NULL)
@@ -223,9 +229,11 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
        vmidi->client = rdev->client;
        vmidi->port = rdev->port;       
        runtime->private_data = vmidi;
-       write_lock_irqsave(&rdev->filelist_lock, flags);
+       down_write(&rdev->filelist_sem);
+       write_lock_irq(&rdev->filelist_lock);
        list_add_tail(&vmidi->list, &rdev->filelist);
-       write_unlock_irqrestore(&rdev->filelist_lock, flags);
+       write_unlock_irq(&rdev->filelist_lock);
+       up_write(&rdev->filelist_sem);
        vmidi->rdev = rdev;
        return 0;
 }
@@ -264,9 +272,11 @@ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
        struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
        struct snd_virmidi *vmidi = substream->runtime->private_data;
 
+       down_write(&rdev->filelist_sem);
        write_lock_irq(&rdev->filelist_lock);
        list_del(&vmidi->list);
        write_unlock_irq(&rdev->filelist_lock);
+       up_write(&rdev->filelist_sem);
        snd_midi_event_free(vmidi->parser);
        substream->runtime->private_data = NULL;
        kfree(vmidi);
@@ -520,6 +530,7 @@ int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmi
        rdev->rmidi = rmidi;
        rdev->device = device;
        rdev->client = -1;
+       init_rwsem(&rdev->filelist_sem);
        rwlock_init(&rdev->filelist_lock);
        INIT_LIST_HEAD(&rdev->filelist);
        rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH;
index 6c58e6f73a013bd33e47cc9a7ea8c1b46b7f17df..e43af18d43836367e263356eb2377cc4e08e8368 100644 (file)
@@ -484,3 +484,34 @@ void snd_ctl_sync_vmaster(struct snd_kcontrol *kcontrol, bool hook_only)
                master->hook(master->hook_private_data, master->val);
 }
 EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster);
+
+/**
+ * snd_ctl_apply_vmaster_slaves - Apply function to each vmaster slave
+ * @kctl: vmaster kctl element
+ * @func: function to apply
+ * @arg: optional function argument
+ *
+ * Apply the function @func to each slave kctl of the given vmaster kctl.
+ * Returns 0 if successful, or a negative error code.
+ */
+int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
+                                int (*func)(struct snd_kcontrol *, void *),
+                                void *arg)
+{
+       struct link_master *master;
+       struct link_slave *slave;
+       int err;
+
+       master = snd_kcontrol_chip(kctl);
+       err = master_init(master);
+       if (err < 0)
+               return err;
+       list_for_each_entry(slave, &master->slaves, list) {
+               err = func(&slave->slave, arg);
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_apply_vmaster_slaves);
index 978dc1801b3aceb8e2245b819097954c76595d77..f6d2985b2520cf1130a9a6c7d05781b6eafd44fb 100644 (file)
@@ -284,6 +284,11 @@ int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus)
                dev_dbg(bus->dev, "HDA capability ID: 0x%x\n",
                        (cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF);
 
+               if (cur_cap == -1) {
+                       dev_dbg(bus->dev, "Invalid capability reg read\n");
+                       break;
+               }
+
                switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) {
                case AZX_ML_CAP_ID:
                        dev_dbg(bus->dev, "Found ML capability\n");
index 7e3aa50b21f9d2d2f5ca49f3f9a779ab1276ee4a..5badd08e1d69cc12657410359255ab691eff62f0 100644 (file)
@@ -103,6 +103,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        void __user *puhr;
        union hpi_message_buffer_v1 *hm;
        union hpi_response_buffer_v1 *hr;
+       u16 msg_size;
        u16 res_max_size;
        u32 uncopied_bytes;
        int err = 0;
@@ -127,22 +128,25 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        }
 
        /* Now read the message size and data from user space.  */
-       if (get_user(hm->h.size, (u16 __user *)puhm)) {
+       if (get_user(msg_size, (u16 __user *)puhm)) {
                err = -EFAULT;
                goto out;
        }
-       if (hm->h.size > sizeof(*hm))
-               hm->h.size = sizeof(*hm);
+       if (msg_size > sizeof(*hm))
+               msg_size = sizeof(*hm);
 
        /* printk(KERN_INFO "message size %d\n", hm->h.wSize); */
 
-       uncopied_bytes = copy_from_user(hm, puhm, hm->h.size);
+       uncopied_bytes = copy_from_user(hm, puhm, msg_size);
        if (uncopied_bytes) {
                HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes);
                err = -EFAULT;
                goto out;
        }
 
+       /* Override h.size in case it is changed between two userspace fetches */
+       hm->h.size = msg_size;
+
        if (get_user(res_max_size, (u16 __user *)puhr)) {
                err = -EFAULT;
                goto out;
index 7326695bca3371d192904d6d351dd4a87144e720..d68f99e076a879def2241365e0dd78b3aeaa5611 100644 (file)
@@ -1272,11 +1272,11 @@ static int snd_echo_mixer_info(struct snd_kcontrol *kcontrol,
 
        chip = snd_kcontrol_chip(kcontrol);
        uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+       uinfo->count = 1;
        uinfo->value.integer.min = ECHOGAIN_MINOUT;
        uinfo->value.integer.max = ECHOGAIN_MAXOUT;
        uinfo->dimen.d[0] = num_busses_out(chip);
        uinfo->dimen.d[1] = num_busses_in(chip);
-       uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
        return 0;
 }
 
@@ -1344,11 +1344,11 @@ static int snd_echo_vmixer_info(struct snd_kcontrol *kcontrol,
 
        chip = snd_kcontrol_chip(kcontrol);
        uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+       uinfo->count = 1;
        uinfo->value.integer.min = ECHOGAIN_MINOUT;
        uinfo->value.integer.max = ECHOGAIN_MAXOUT;
        uinfo->dimen.d[0] = num_busses_out(chip);
        uinfo->dimen.d[1] = num_pipes_out(chip);
-       uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
        return 0;
 }
 
@@ -1728,6 +1728,7 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
                                  struct snd_ctl_elem_info *uinfo)
 {
        uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+       uinfo->count = 96;
        uinfo->value.integer.min = ECHOGAIN_MINOUT;
        uinfo->value.integer.max = 0;
 #ifdef ECHOCARD_HAS_VMIXER
@@ -1737,7 +1738,6 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
 #endif
        uinfo->dimen.d[1] = 16; /* 16 channels */
        uinfo->dimen.d[2] = 2;  /* 0=level, 1=peak */
-       uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1] * uinfo->dimen.d[2];
        return 0;
 }
 
index 3db26c451837cf0e2893d1071c1a699465d89e1b..a0989d231fd00fa94259ccea2ddc397336d633b4 100644 (file)
@@ -1803,36 +1803,6 @@ static int check_slave_present(struct hda_codec *codec,
        return 1;
 }
 
-/* guess the value corresponding to 0dB */
-static int get_kctl_0dB_offset(struct hda_codec *codec,
-                              struct snd_kcontrol *kctl, int *step_to_check)
-{
-       int _tlv[4];
-       const int *tlv = NULL;
-       int val = -1;
-
-       if ((kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
-           kctl->tlv.c == snd_hda_mixer_amp_tlv) {
-               get_ctl_amp_tlv(kctl, _tlv);
-               tlv = _tlv;
-       } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
-               tlv = kctl->tlv.p;
-       if (tlv && tlv[0] == SNDRV_CTL_TLVT_DB_SCALE) {
-               int step = tlv[3];
-               step &= ~TLV_DB_SCALE_MUTE;
-               if (!step)
-                       return -1;
-               if (*step_to_check && *step_to_check != step) {
-                       codec_err(codec, "Mismatching dB step for vmaster slave (%d!=%d)\n",
--                                 *step_to_check, step);
-                       return -1;
-               }
-               *step_to_check = step;
-               val = -tlv[2] / step;
-       }
-       return val;
-}
-
 /* call kctl->put with the given value(s) */
 static int put_kctl_with_value(struct snd_kcontrol *kctl, int val)
 {
@@ -1847,19 +1817,58 @@ static int put_kctl_with_value(struct snd_kcontrol *kctl, int val)
        return 0;
 }
 
-/* initialize the slave volume with 0dB */
-static int init_slave_0dB(struct hda_codec *codec,
-                         void *data, struct snd_kcontrol *slave)
+struct slave_init_arg {
+       struct hda_codec *codec;
+       int step;
+};
+
+/* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */
+static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
 {
-       int offset = get_kctl_0dB_offset(codec, slave, data);
-       if (offset > 0)
-               put_kctl_with_value(slave, offset);
+       struct slave_init_arg *arg = _arg;
+       int _tlv[4];
+       const int *tlv = NULL;
+       int step;
+       int val;
+
+       if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
+               if (kctl->tlv.c != snd_hda_mixer_amp_tlv) {
+                       codec_err(arg->codec,
+                                 "Unexpected TLV callback for slave %s:%d\n",
+                                 kctl->id.name, kctl->id.index);
+                       return 0; /* ignore */
+               }
+               get_ctl_amp_tlv(kctl, _tlv);
+               tlv = _tlv;
+       } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
+               tlv = kctl->tlv.p;
+
+       if (!tlv || tlv[0] != SNDRV_CTL_TLVT_DB_SCALE)
+               return 0;
+
+       step = tlv[3];
+       step &= ~TLV_DB_SCALE_MUTE;
+       if (!step)
+               return 0;
+       if (arg->step && arg->step != step) {
+               codec_err(arg->codec,
+                         "Mismatching dB step for vmaster slave (%d!=%d)\n",
+                         arg->step, step);
+               return 0;
+       }
+
+       arg->step = step;
+       val = -tlv[2] / step;
+       if (val > 0) {
+               put_kctl_with_value(kctl, val);
+               return val;
+       }
+
        return 0;
 }
 
-/* unmute the slave */
-static int init_slave_unmute(struct hda_codec *codec,
-                            void *data, struct snd_kcontrol *slave)
+/* unmute the slave via snd_ctl_apply_vmaster_slaves() */
+static int init_slave_unmute(struct snd_kcontrol *slave, void *_arg)
 {
        return put_kctl_with_value(slave, 1);
 }
@@ -1919,9 +1928,13 @@ int __snd_hda_add_vmaster(struct hda_codec *codec, char *name,
        /* init with master mute & zero volume */
        put_kctl_with_value(kctl, 0);
        if (init_slave_vol) {
-               int step = 0;
-               map_slaves(codec, slaves, suffix,
-                          tlv ? init_slave_0dB : init_slave_unmute, &step);
+               struct slave_init_arg arg = {
+                       .codec = codec,
+                       .step = 0,
+               };
+               snd_ctl_apply_vmaster_slaves(kctl,
+                                            tlv ? init_slave_0dB : init_slave_unmute,
+                                            &arg);
        }
 
        if (ctl_ret)
index 2b64fabd5faa5f1e85681592910fd61ad2aa89f9..c19c81d230bd7423b4153d2266a45e09333f8714 100644 (file)
@@ -906,6 +906,7 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
                              hda_nid_t pin_nid, u32 stream_tag, int format)
 {
        struct hdmi_spec *spec = codec->spec;
+       unsigned int param;
        int err;
 
        err = spec->ops.pin_hbr_setup(codec, pin_nid, is_hbr_format(format));
@@ -915,6 +916,26 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
                return err;
        }
 
+       if (is_haswell_plus(codec)) {
+
+               /*
+                * on recent platforms IEC Coding Type is required for HBR
+                * support, read current Digital Converter settings and set
+                * ICT bitfield if needed.
+                */
+               param = snd_hda_codec_read(codec, cvt_nid, 0,
+                                          AC_VERB_GET_DIGI_CONVERT_1, 0);
+
+               param = (param >> 16) & ~(AC_DIG3_ICT);
+
+               /* on recent platforms ICT mode is required for HBR support */
+               if (is_hbr_format(format))
+                       param |= 0x1;
+
+               snd_hda_codec_write(codec, cvt_nid, 0,
+                                   AC_VERB_SET_DIGI_CONVERT_3, param);
+       }
+
        snd_hda_codec_setup_stream(codec, cvt_nid, stream_tag, 0, format);
        return 0;
 }
index 0ce71111b4e3ec0fd58940b27a0638cc9d483fd1..546d515f3c1fb810641375e5fcf548ef4e360807 100644 (file)
@@ -327,6 +327,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0215:
        case 0x10ec0225:
        case 0x10ec0233:
+       case 0x10ec0236:
        case 0x10ec0255:
        case 0x10ec0256:
        case 0x10ec0282:
@@ -911,6 +912,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
        { 0x10ec0275, 0x1028, 0, "ALC3260" },
        { 0x10ec0899, 0x1028, 0, "ALC3861" },
        { 0x10ec0298, 0x1028, 0, "ALC3266" },
+       { 0x10ec0236, 0x1028, 0, "ALC3204" },
        { 0x10ec0256, 0x1028, 0, "ALC3246" },
        { 0x10ec0225, 0x1028, 0, "ALC3253" },
        { 0x10ec0295, 0x1028, 0, "ALC3254" },
@@ -3930,6 +3932,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
                alc_process_coef_fw(codec, coef0255_1);
                alc_process_coef_fw(codec, coef0255);
                break;
+       case 0x10ec0236:
        case 0x10ec0256:
                alc_process_coef_fw(codec, coef0256);
                alc_process_coef_fw(codec, coef0255);
@@ -4028,6 +4031,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
        };
 
        switch (codec->core.vendor_id) {
+       case 0x10ec0236:
        case 0x10ec0255:
        case 0x10ec0256:
                alc_write_coef_idx(codec, 0x45, 0xc489);
@@ -4160,6 +4164,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
                alc_process_coef_fw(codec, alc225_pre_hsmode);
                alc_process_coef_fw(codec, coef0225);
                break;
+       case 0x10ec0236:
        case 0x10ec0255:
        case 0x10ec0256:
                alc_process_coef_fw(codec, coef0255);
@@ -4256,6 +4261,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
        case 0x10ec0255:
                alc_process_coef_fw(codec, coef0255);
                break;
+       case 0x10ec0236:
        case 0x10ec0256:
                alc_process_coef_fw(codec, coef0256);
                break;
@@ -4366,6 +4372,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
        case 0x10ec0255:
                alc_process_coef_fw(codec, coef0255);
                break;
+       case 0x10ec0236:
        case 0x10ec0256:
                alc_process_coef_fw(codec, coef0256);
                break;
@@ -4451,6 +4458,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
        };
 
        switch (codec->core.vendor_id) {
+       case 0x10ec0236:
        case 0x10ec0255:
        case 0x10ec0256:
                alc_process_coef_fw(codec, coef0255);
@@ -4705,6 +4713,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
        case 0x10ec0255:
                alc_process_coef_fw(codec, alc255fw);
                break;
+       case 0x10ec0236:
        case 0x10ec0256:
                alc_process_coef_fw(codec, alc256fw);
                break;
@@ -6419,6 +6428,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                ALC225_STANDARD_PINS,
                {0x12, 0xb7a60130},
                {0x1b, 0x90170110}),
+       SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60140},
+               {0x14, 0x90170110},
+               {0x21, 0x02211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60140},
+               {0x14, 0x90170150},
+               {0x21, 0x02211020}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
                {0x14, 0x90170110},
                {0x21, 0x02211020}),
@@ -6806,6 +6823,7 @@ static int patch_alc269(struct hda_codec *codec)
        case 0x10ec0255:
                spec->codec_variant = ALC269_TYPE_ALC255;
                break;
+       case 0x10ec0236:
        case 0x10ec0256:
                spec->codec_variant = ALC269_TYPE_ALC256;
                spec->shutup = alc256_shutup;
@@ -7857,6 +7875,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),
index 2c1bd27638648796dd89e117613578655429232d..6758f789b712cd32af6940ef501619a5af6b8502 100644 (file)
@@ -90,6 +90,27 @@ static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
        return 0;
 }
 
+static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+       struct adau *adau = snd_soc_codec_get_drvdata(codec);
+
+       /*
+        * If we are capturing, toggle the ADOSR bit in Converter Control 0 to
+        * avoid losing SNR (workaround from ADI). This must be done after
+        * the ADC(s) have been enabled. According to the data sheet, it is
+        * normally illegal to set this bit when the sampling rate is 96 kHz,
+        * but according to ADI it is acceptable for this workaround.
+        */
+       regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
+               ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR);
+       regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
+               ADAU17X1_CONVERTER0_ADOSR, 0);
+
+       return 0;
+}
+
 static const char * const adau17x1_mono_stereo_text[] = {
        "Stereo",
        "Mono Left Channel (L+R)",
@@ -121,7 +142,8 @@ static const struct snd_soc_dapm_widget adau17x1_dapm_widgets[] = {
        SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0,
                &adau17x1_dac_mode_mux),
 
-       SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0),
+       SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0,
+                          adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0),
        SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0),
        SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0),
index bf04b7efee4084cf77fa934eadf6ab7a3506b1ad..db350035fad7c5e477b4106bcfba9d1090b543c0 100644 (file)
@@ -129,5 +129,7 @@ bool adau17x1_has_dsp(struct adau *adau);
 
 #define ADAU17X1_CONVERTER0_CONVSR_MASK                0x7
 
+#define ADAU17X1_CONVERTER0_ADOSR              BIT(3)
+
 
 #endif
index ed6e5373916c390fb15dfc9efd943c9c55f7d819..12f2ecf3a4feeb3a6133012b33b5b10cad33999f 100644 (file)
@@ -145,9 +145,8 @@ done:
        mutex_unlock(&rt5514_dsp->dma_lock);
 }
 
-static irqreturn_t rt5514_spi_irq(int irq, void *data)
+static void rt5514_schedule_copy(struct rt5514_dsp *rt5514_dsp)
 {
-       struct rt5514_dsp *rt5514_dsp = data;
        u8 buf[8];
 
        rt5514_dsp->get_size = 0;
@@ -180,6 +179,13 @@ static irqreturn_t rt5514_spi_irq(int irq, void *data)
        if (rt5514_dsp->buf_base && rt5514_dsp->buf_limit &&
                rt5514_dsp->buf_rp && rt5514_dsp->buf_size)
                schedule_delayed_work(&rt5514_dsp->copy_work, 0);
+}
+
+static irqreturn_t rt5514_spi_irq(int irq, void *data)
+{
+       struct rt5514_dsp *rt5514_dsp = data;
+
+       rt5514_schedule_copy(rt5514_dsp);
 
        return IRQ_HANDLED;
 }
@@ -199,12 +205,19 @@ static int rt5514_spi_hw_params(struct snd_pcm_substream *substream,
        struct rt5514_dsp *rt5514_dsp =
                        snd_soc_platform_get_drvdata(rtd->platform);
        int ret;
+       u8 buf[8];
 
        mutex_lock(&rt5514_dsp->dma_lock);
        ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
                        params_buffer_bytes(hw_params));
        rt5514_dsp->substream = substream;
        rt5514_dsp->dma_offset = 0;
+
+       /* Read IRQ status and schedule copy accordingly. */
+       rt5514_spi_burst_read(RT5514_IRQ_CTRL, (u8 *)&buf, sizeof(buf));
+       if (buf[0] & RT5514_IRQ_STATUS_BIT)
+               rt5514_schedule_copy(rt5514_dsp);
+
        mutex_unlock(&rt5514_dsp->dma_lock);
 
        return ret;
index a6434ee6ff037c11c945a3358a90070de316b09c..c1a36647c1197f925985a623ffe52336fd167c68 100644 (file)
@@ -20,6 +20,9 @@
 #define RT5514_BUFFER_VOICE_BASE       0x18000200
 #define RT5514_BUFFER_VOICE_LIMIT      0x18000204
 #define RT5514_BUFFER_VOICE_WP         0x1800020c
+#define RT5514_IRQ_CTRL                        0x18002094
+
+#define RT5514_IRQ_STATUS_BIT          (0x1 << 5)
 
 /* SPI Command */
 enum {
index 0945d212b8dca9d34d19b273d9075977075964e1..d7956ababd11775b0b9d04552faa68f2fadd2545 100644 (file)
@@ -338,39 +338,6 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
                                fw = NULL;
                        }
 
-                       if (rt5514->model_buf && rt5514->model_len) {
-#if IS_ENABLED(CONFIG_SND_SOC_RT5514_SPI)
-                               int ret;
-
-                               ret = rt5514_spi_burst_write(0x4ff80000,
-                                       rt5514->model_buf,
-                                       ((rt5514->model_len / 8) + 1) * 8);
-                               if (ret) {
-                                       dev_err(codec->dev,
-                                               "Model load failed %d\n", ret);
-                                       return ret;
-                               }
-#else
-                               dev_err(codec->dev,
-                                       "No SPI driver for loading firmware\n");
-#endif
-                       } else {
-                               request_firmware(&fw, RT5514_FIRMWARE3,
-                                                codec->dev);
-                               if (fw) {
-#if IS_ENABLED(CONFIG_SND_SOC_RT5514_SPI)
-                                       rt5514_spi_burst_write(0x4ff80000,
-                                               fw->data,
-                                               ((fw->size/8)+1)*8);
-#else
-                                       dev_err(codec->dev,
-                                               "No SPI driver to load fw\n");
-#endif
-                                       release_firmware(fw);
-                                       fw = NULL;
-                               }
-                       }
-
                        /* DSP run */
                        regmap_write(rt5514->i2c_regmap, 0x18002f00,
                                0x00055148);
@@ -385,34 +352,6 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
        return 0;
 }
 
-static int rt5514_hotword_model_put(struct snd_kcontrol *kcontrol,
-               const unsigned int __user *bytes, unsigned int size)
-{
-       struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
-       struct rt5514_priv *rt5514 = snd_soc_component_get_drvdata(component);
-       struct snd_soc_codec *codec = rt5514->codec;
-       int ret = 0;
-
-       if (rt5514->model_buf || rt5514->model_len < size) {
-               if (rt5514->model_buf)
-                       devm_kfree(codec->dev, rt5514->model_buf);
-               rt5514->model_buf = devm_kmalloc(codec->dev, size, GFP_KERNEL);
-               if (!rt5514->model_buf) {
-                       ret = -ENOMEM;
-                       goto done;
-               }
-       }
-
-       /* Skips the TLV header. */
-       bytes += 2;
-
-       if (copy_from_user(rt5514->model_buf, bytes, size))
-               ret = -EFAULT;
-done:
-       rt5514->model_len = (ret ? 0 : size);
-       return ret;
-}
-
 static const struct snd_kcontrol_new rt5514_snd_controls[] = {
        SOC_DOUBLE_TLV("MIC Boost Volume", RT5514_ANA_CTRL_MICBST,
                RT5514_SEL_BSTL_SFT, RT5514_SEL_BSTR_SFT, 8, 0, bst_tlv),
@@ -424,8 +363,6 @@ static const struct snd_kcontrol_new rt5514_snd_controls[] = {
                adc_vol_tlv),
        SOC_SINGLE_EXT("DSP Voice Wake Up", SND_SOC_NOPM, 0, 1, 0,
                rt5514_dsp_voice_wake_up_get, rt5514_dsp_voice_wake_up_put),
-       SND_SOC_BYTES_TLV("Hotword Model", 0x8504,
-               NULL, rt5514_hotword_model_put),
 };
 
 /* ADC Mixer*/
index 803311cb7e2a075aa405847a673b31cac66afc6a..2dc40e6d8b3f69835ded69ac71012cb765ac0a36 100644 (file)
 
 #define RT5514_FIRMWARE1       "rt5514_dsp_fw1.bin"
 #define RT5514_FIRMWARE2       "rt5514_dsp_fw2.bin"
-#define RT5514_FIRMWARE3       "rt5514_dsp_fw3.bin"
 
 /* System Clock Source */
 enum {
@@ -282,8 +281,6 @@ struct rt5514_priv {
        int pll_in;
        int pll_out;
        int dsp_enabled;
-       u8 *model_buf;
-       unsigned int model_len;
 };
 
 #endif /* __RT5514_H__ */
index c94e94fe8297877c75f929498628b5d5adf4b654..0e5f54a9bc7efdf1db73307030310b21472c862c 100644 (file)
@@ -98,7 +98,7 @@ static const struct reg_default rt5616_reg[] = {
        { 0x8e, 0x0004 },
        { 0x8f, 0x1100 },
        { 0x90, 0x0000 },
-       { 0x91, 0x0000 },
+       { 0x91, 0x0c00 },
        { 0x92, 0x0000 },
        { 0x93, 0x2000 },
        { 0x94, 0x0200 },
index 71216db15eab8811775275575b573754b85889b5..fa66b11df8d447d1a41af835d085d76afca2ba5e 100644 (file)
@@ -2744,7 +2744,8 @@ static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
                SND_SOC_DAPM_PRE_PMU),
        SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0, rt5659_hp_event,
                SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
-       SND_SOC_DAPM_PGA("LOUT Amp", SND_SOC_NOPM, 0, 0, NULL, 0),
+       SND_SOC_DAPM_PGA_S("LOUT Amp", 1,  RT5659_PWR_ANLG_1, RT5659_PWR_LM_BIT,
+               0,  NULL, 0),
 
        SND_SOC_DAPM_SUPPLY("Charge Pump", SND_SOC_NOPM, 0, 0,
                rt5659_charge_pump_event, SND_SOC_DAPM_PRE_PMU |
@@ -3208,6 +3209,7 @@ static const struct snd_soc_dapm_route rt5659_dapm_routes[] = {
        { "LOUT R MIX", "OUTVOL R Switch", "OUTVOL R" },
        { "LOUT Amp", NULL, "LOUT L MIX" },
        { "LOUT Amp", NULL, "LOUT R MIX" },
+       { "LOUT Amp", NULL, "Charge Pump" },
        { "LOUT Amp", NULL, "SYS CLK DET" },
        { "LOUT L Playback", "Switch", "LOUT Amp" },
        { "LOUT R Playback", "Switch", "LOUT Amp" },
index dd471d2c02665631535fb446dcd5fca40e522f8e..01a50413c66f6612762791b2d1d905b077eb87a2 100644 (file)
@@ -1301,7 +1301,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
                /* validate kcontrol */
                if (strnlen(ec->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
                            SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
-                       return NULL;
+                       goto err;
 
                se = kzalloc(sizeof(*se), GFP_KERNEL);
                if (se == NULL)
@@ -1378,6 +1378,9 @@ err_se:
        for (; i >= 0; i--) {
                /* free values and texts */
                se = (struct soc_enum *)kc[i].private_value;
+               if (!se)
+                       continue;
+
                kfree(se->dobj.control.dvalues);
                for (j = 0; j < ec->items; j++)
                        kfree(se->dobj.control.dtexts[j]);
index 0fb6b1b7926170030661c6724e1be6260da6cfa6..d8409d9ae55b63fc86e7971f8b94b593bd681de6 100644 (file)
@@ -469,10 +469,12 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
 
        err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0);
        if (err)
-               return err;
+               goto err_kill_urb;
 
-       if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ))
-               return -ENODEV;
+       if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) {
+               err = -ENODEV;
+               goto err_kill_urb;
+       }
 
        usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
                   cdev->vendor_name, CAIAQ_USB_STR_LEN);
@@ -507,6 +509,10 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
 
        setup_card(cdev);
        return 0;
+
+ err_kill_urb:
+       usb_kill_urb(&cdev->ep1_in_urb);
+       return err;
 }
 
 static int snd_probe(struct usb_interface *intf,
index 3dc36d91355020e14ba0db0a1372a689ed66d84c..23d1d23aefec375c2c857f090f4920894e4bc7f8 100644 (file)
@@ -221,6 +221,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
        struct usb_interface_descriptor *altsd;
        void *control_header;
        int i, protocol;
+       int rest_bytes;
 
        /* find audiocontrol interface */
        host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0];
@@ -235,6 +236,15 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
                return -EINVAL;
        }
 
+       rest_bytes = (void *)(host_iface->extra + host_iface->extralen) -
+               control_header;
+
+       /* just to be sure -- this shouldn't hit at all */
+       if (rest_bytes <= 0) {
+               dev_err(&dev->dev, "invalid control header\n");
+               return -EINVAL;
+       }
+
        switch (protocol) {
        default:
                dev_warn(&dev->dev,
@@ -245,11 +255,21 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
        case UAC_VERSION_1: {
                struct uac1_ac_header_descriptor *h1 = control_header;
 
+               if (rest_bytes < sizeof(*h1)) {
+                       dev_err(&dev->dev, "too short v1 buffer descriptor\n");
+                       return -EINVAL;
+               }
+
                if (!h1->bInCollection) {
                        dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
                        return -EINVAL;
                }
 
+               if (rest_bytes < h1->bLength) {
+                       dev_err(&dev->dev, "invalid buffer length (v1)\n");
+                       return -EINVAL;
+               }
+
                if (h1->bLength < sizeof(*h1) + h1->bInCollection) {
                        dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n");
                        return -EINVAL;
index 0ff5a7d2e19fe1cc584a6420461841be26781b9b..c8f723c3a0336905d02afc8d33a51031985430e6 100644 (file)
@@ -779,9 +779,10 @@ int line6_probe(struct usb_interface *interface,
        return 0;
 
  error:
-       if (line6->disconnect)
-               line6->disconnect(line6);
-       snd_card_free(card);
+       /* we can call disconnect callback here because no close-sync is
+        * needed yet at this point
+        */
+       line6_disconnect(interface);
        return ret;
 }
 EXPORT_SYMBOL_GPL(line6_probe);
index 956f847a96e45fdfbd171d81c525fd869acad1f8..451007c2774344be753e38fd6863b03eaa284955 100644 (file)
@@ -301,7 +301,8 @@ static void podhd_disconnect(struct usb_line6 *line6)
 
                intf = usb_ifnum_to_if(line6->usbdev,
                                        pod->line6.properties->ctrl_if);
-               usb_driver_release_interface(&podhd_driver, intf);
+               if (intf)
+                       usb_driver_release_interface(&podhd_driver, intf);
        }
 }
 
@@ -317,6 +318,9 @@ static int podhd_init(struct usb_line6 *line6,
 
        line6->disconnect = podhd_disconnect;
 
+       init_timer(&pod->startup_timer);
+       INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
+
        if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
                /* claim the data interface */
                intf = usb_ifnum_to_if(line6->usbdev,
@@ -358,8 +362,6 @@ static int podhd_init(struct usb_line6 *line6,
        }
 
        /* init device and delay registering */
-       init_timer(&pod->startup_timer);
-       INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
        podhd_startup(pod);
        return 0;
 }
index 9732edf77f860dbb668ea14206097914e2fcbdf5..91bc8f18791e46bc80adfb8503e310e2fdc7a298 100644 (file)
@@ -2234,6 +2234,9 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
 
 static void snd_usb_mixer_free(struct usb_mixer_interface *mixer)
 {
+       /* kill pending URBs */
+       snd_usb_mixer_disconnect(mixer);
+
        kfree(mixer->id_elems);
        if (mixer->urb) {
                kfree(mixer->urb->transfer_buffer);
@@ -2584,8 +2587,13 @@ _error:
 
 void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer)
 {
-       usb_kill_urb(mixer->urb);
-       usb_kill_urb(mixer->rc_urb);
+       if (mixer->disconnected)
+               return;
+       if (mixer->urb)
+               usb_kill_urb(mixer->urb);
+       if (mixer->rc_urb)
+               usb_kill_urb(mixer->rc_urb);
+       mixer->disconnected = true;
 }
 
 #ifdef CONFIG_PM
index 2b4b067646ab099653fe7ea79d9af1570e2971f6..545d99b09706b37cee252e08b75e43ced50e93d3 100644 (file)
@@ -22,6 +22,8 @@ struct usb_mixer_interface {
        struct urb *rc_urb;
        struct usb_ctrlrequest *rc_setup_packet;
        u8 rc_buffer[6];
+
+       bool disconnected;
 };
 
 #define MAX_CHANNELS   16      /* max logical channels */
index 9135520782854233bb246f976d1044f7b8762aa4..4f5f18f22974ef8e2e7b95e4c0ea261e7a04a388 100644 (file)
@@ -1137,6 +1137,9 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */
        case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
        case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
+       case USB_ID(0x047F, 0xC022): /* Plantronics C310 */
+       case USB_ID(0x047F, 0xC02F): /* Plantronics P610 */
+       case USB_ID(0x047F, 0xC036): /* Plantronics C520-M */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
        case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
        case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
@@ -1351,6 +1354,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
        case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
        case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
+       case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
                if (fp->altsetting == 2)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
index 4dab490807009a96b4d43ba86dc53bcaf52b94e1..e229abd216526cae577aafd3dd2f2f3ed3abc5ba 100644 (file)
@@ -191,7 +191,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
        }
 
        pg = get_order(read_size);
-       sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
+       sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
+                                         __GFP_NOWARN, pg);
        if (!sk->s) {
                snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
                goto out;
@@ -211,7 +212,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
        pg = get_order(write_size);
 
        sk->write_page =
-               (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
+               (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
+                                        __GFP_NOWARN, pg);
        if (!sk->write_page) {
                snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
                usb_stream_free(sk);
index 69d09c39bbcd00858121ebf9f6bb1f7a57ff20e1..cd7359e23d869465d1cb70b637c64c85283e37ac 100644 (file)
@@ -88,6 +88,12 @@ struct kvm_s390_io_adapter_req {
 /* kvm attributes for KVM_S390_VM_TOD */
 #define KVM_S390_VM_TOD_LOW            0
 #define KVM_S390_VM_TOD_HIGH           1
+#define KVM_S390_VM_TOD_EXT            2
+
+struct kvm_s390_vm_tod_clock {
+       __u8  epoch_idx;
+       __u64 tod;
+};
 
 /* kvm attributes for KVM_S390_VM_CPU_MODEL */
 /* processor related attributes are r/w */
index 8ea315a11fe0d4461abe7aeea2738be97a79b189..2519c6c801c917d7a30b3826c86679edf18616d5 100644 (file)
 
 #define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+#define X86_FEATURE_SME                ( 7*32+10) /* AMD Secure Memory Encryption */
 
 #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_INTEL_PT   ( 7*32+15) /* Intel Processor Trace */
 #define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
 #define X86_FEATURE_AVIC       (15*32+13) /* Virtual Interrupt Controller */
 #define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
+#define X86_FEATURE_VGIF       (15*32+16) /* Virtual GIF */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
 #define X86_FEATURE_AVX512VBMI  (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
index 5dff775af7cd6456f7177d9ce5888ae78dc6bc10..c10c9128f54e6b7296014a74e7a253a1eedaacd9 100644 (file)
 # define DISABLE_K6_MTRR       (1<<(X86_FEATURE_K6_MTRR & 31))
 # define DISABLE_CYRIX_ARR     (1<<(X86_FEATURE_CYRIX_ARR & 31))
 # define DISABLE_CENTAUR_MCR   (1<<(X86_FEATURE_CENTAUR_MCR & 31))
+# define DISABLE_PCID          0
 #else
 # define DISABLE_VME           0
 # define DISABLE_K6_MTRR       0
 # define DISABLE_CYRIX_ARR     0
 # define DISABLE_CENTAUR_MCR   0
+# define DISABLE_PCID          (1<<(X86_FEATURE_PCID & 31))
 #endif /* CONFIG_X86_64 */
 
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -49,7 +51,7 @@
 #define DISABLED_MASK1 0
 #define DISABLED_MASK2 0
 #define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
-#define DISABLED_MASK4 0
+#define DISABLED_MASK4 (DISABLE_PCID)
 #define DISABLED_MASK5 0
 #define DISABLED_MASK6 0
 #define DISABLED_MASK7 0
diff --git a/tools/include/asm-generic/hugetlb_encode.h b/tools/include/asm-generic/hugetlb_encode.h
new file mode 100644 (file)
index 0000000..e4732d3
--- /dev/null
@@ -0,0 +1,34 @@
+#ifndef _ASM_GENERIC_HUGETLB_ENCODE_H_
+#define _ASM_GENERIC_HUGETLB_ENCODE_H_
+
+/*
+ * Several system calls take a flag to request "hugetlb" huge pages.
+ * Without further specification, these system calls will use the
+ * system's default huge page size.  If a system supports multiple
+ * huge page sizes, the desired huge page size can be specified in
+ * bits [26:31] of the flag arguments.  The value in these 6 bits
+ * will encode the log2 of the huge page size.
+ *
+ * The following definitions are associated with this huge page size
+ * encoding in flag arguments.  System call specific header files
+ * that use this encoding should include this file.  They can then
+ * provide definitions based on these with their own specific prefix.
+ * for example:
+ * #define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
+ */
+
+#define HUGETLB_FLAG_ENCODE_SHIFT      26
+#define HUGETLB_FLAG_ENCODE_MASK       0x3f
+
+#define HUGETLB_FLAG_ENCODE_64KB       (16 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512KB      (19 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1MB                (20 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2MB                (21 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_8MB                (23 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16MB       (24 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_256MB      (28 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1GB                (30 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2GB                (31 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16GB       (34 << HUGETLB_FLAG_ENCODE_SHIFT)
+
+#endif /* _ASM_GENERIC_HUGETLB_ENCODE_H_ */
index 8c27db0c5c08ce84fc7299bd9bff2cee571b9397..203268f9231e155d72307995989feab4857defe7 100644 (file)
                                           overrides the coredump filter bits */
 #define MADV_DODUMP    17              /* Clear the MADV_DONTDUMP flag */
 
+#define MADV_WIPEONFORK 18             /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19             /* Undo MADV_WIPEONFORK */
+
 /* compatibility flags */
 #define MAP_FILE       0
 
-/*
- * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
- * This gives us 6 bits, which is enough until someone invents 128 bit address
- * spaces.
- *
- * Assume these are all power of twos.
- * When 0 use the default page size.
- */
-#define MAP_HUGE_SHIFT 26
-#define MAP_HUGE_MASK  0x3f
-
 #define PKEY_DISABLE_ACCESS    0x1
 #define PKEY_DISABLE_WRITE     0x2
 #define PKEY_ACCESS_MASK       (PKEY_DISABLE_ACCESS |\
index 101593ab10ac795808db33848092eb62ac542560..97677cd6964db099689f96f11b9731c748bebcfa 100644 (file)
@@ -700,6 +700,7 @@ struct drm_prime_handle {
 
 struct drm_syncobj_create {
        __u32 handle;
+#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
        __u32 flags;
 };
 
@@ -718,6 +719,24 @@ struct drm_syncobj_handle {
        __u32 pad;
 };
 
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
+struct drm_syncobj_wait {
+       __u64 handles;
+       /* absolute timeout */
+       __s64 timeout_nsec;
+       __u32 count_handles;
+       __u32 flags;
+       __u32 first_signaled; /* only valid when not waiting all */
+       __u32 pad;
+};
+
+struct drm_syncobj_array {
+       __u64 handles;
+       __u32 count_handles;
+       __u32 pad;
+};
+
 #if defined(__cplusplus)
 }
 #endif
@@ -840,6 +859,9 @@ extern "C" {
 #define DRM_IOCTL_SYNCOBJ_DESTROY      DRM_IOWR(0xC0, struct drm_syncobj_destroy)
 #define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle)
 #define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle)
+#define DRM_IOCTL_SYNCOBJ_WAIT         DRM_IOWR(0xC3, struct drm_syncobj_wait)
+#define DRM_IOCTL_SYNCOBJ_RESET                DRM_IOWR(0xC4, struct drm_syncobj_array)
+#define DRM_IOCTL_SYNCOBJ_SIGNAL       DRM_IOWR(0xC5, struct drm_syncobj_array)
 
 /**
  * Device specific ioctls should only be in their respective headers
index 7ccbd6a2bbe07b387b43bb8b4ac5eba0f52d1cd0..6598fb76d2c27741d2c916f914b70c5f472911f9 100644 (file)
@@ -260,6 +260,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_CONTEXT_GETPARAM  0x34
 #define DRM_I915_GEM_CONTEXT_SETPARAM  0x35
 #define DRM_I915_PERF_OPEN             0x36
+#define DRM_I915_PERF_ADD_CONFIG       0x37
+#define DRM_I915_PERF_REMOVE_CONFIG    0x38
 
 #define DRM_IOCTL_I915_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH           DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -315,6 +317,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM    DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
 #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM    DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
 #define DRM_IOCTL_I915_PERF_OPEN       DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
+#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
+#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG      DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -431,6 +435,11 @@ typedef struct drm_i915_irq_wait {
  */
 #define I915_PARAM_HAS_EXEC_BATCH_FIRST         48
 
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
+ * drm_i915_gem_exec_fence structures.  See I915_EXEC_FENCE_ARRAY.
+ */
+#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49
+
 typedef struct drm_i915_getparam {
        __s32 param;
        /*
@@ -812,6 +821,17 @@ struct drm_i915_gem_exec_object2 {
        __u64 rsvd2;
 };
 
+struct drm_i915_gem_exec_fence {
+       /**
+        * User's handle for a drm_syncobj to wait on or signal.
+        */
+       __u32 handle;
+
+#define I915_EXEC_FENCE_WAIT            (1<<0)
+#define I915_EXEC_FENCE_SIGNAL          (1<<1)
+       __u32 flags;
+};
+
 struct drm_i915_gem_execbuffer2 {
        /**
         * List of gem_exec_object2 structs
@@ -826,7 +846,11 @@ struct drm_i915_gem_execbuffer2 {
        __u32 DR1;
        __u32 DR4;
        __u32 num_cliprects;
-       /** This is a struct drm_clip_rect *cliprects */
+       /**
+        * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
+        * is not set.  If I915_EXEC_FENCE_ARRAY is set, then this is a
+        * struct drm_i915_gem_exec_fence *fences.
+        */
        __u64 cliprects_ptr;
 #define I915_EXEC_RING_MASK              (7<<0)
 #define I915_EXEC_DEFAULT                (0<<0)
@@ -927,7 +951,14 @@ struct drm_i915_gem_execbuffer2 {
  * element).
  */
 #define I915_EXEC_BATCH_FIRST          (1<<18)
-#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_BATCH_FIRST<<1))
+
+/* Setting I915_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
+ * define an array of i915_gem_exec_fence structures which specify a set of
+ * dma fences to wait upon or signal.
+ */
+#define I915_EXEC_FENCE_ARRAY   (1<<19)
+
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
 
 #define I915_EXEC_CONTEXT_ID_MASK      (0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1467,6 +1498,22 @@ enum drm_i915_perf_record_type {
        DRM_I915_PERF_RECORD_MAX /* non-ABI */
 };
 
+/**
+ * Structure to upload perf dynamic configuration into the kernel.
+ */
+struct drm_i915_perf_oa_config {
+       /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
+       char uuid[36];
+
+       __u32 n_mux_regs;
+       __u32 n_boolean_regs;
+       __u32 n_flex_regs;
+
+       __u64 __user mux_regs_ptr;
+       __u64 __user boolean_regs_ptr;
+       __u64 __user flex_regs_ptr;
+};
+
 #if defined(__cplusplus)
 }
 #endif
index 461811e5714092f17619902593b9ac47e8572a6d..c174971afbe657d2523503b94a3a2c960a6cf9e1 100644 (file)
@@ -143,12 +143,6 @@ enum bpf_attach_type {
 
 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
 
-enum bpf_sockmap_flags {
-       BPF_SOCKMAP_UNSPEC,
-       BPF_SOCKMAP_STRPARSER,
-       __MAX_BPF_SOCKMAP_FLAG
-};
-
 /* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command
  * to the given target_fd cgroup the descendent cgroup will be able to
  * override effective bpf program that was inherited from this cgroup
@@ -318,7 +312,7 @@ union bpf_attr {
  *     jump into another BPF program
  *     @ctx: context pointer passed to next program
  *     @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
- *     @index: index inside array that selects specific program to run
+ *     @index: 32-bit index inside array that selects specific program to run
  *     Return: 0 on success or negative error
  *
  * int bpf_clone_redirect(skb, ifindex, flags)
@@ -368,9 +362,20 @@ union bpf_attr {
  * int bpf_redirect(ifindex, flags)
  *     redirect to another netdev
  *     @ifindex: ifindex of the net device
- *     @flags: bit 0 - if set, redirect to ingress instead of egress
- *             other bits - reserved
- *     Return: TC_ACT_REDIRECT
+ *     @flags:
+ *       cls_bpf:
+ *          bit 0 - if set, redirect to ingress instead of egress
+ *          other bits - reserved
+ *       xdp_bpf:
+ *         all bits - reserved
+ *     Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
+ *            xdp_bfp: XDP_REDIRECT on success or XDP_ABORT on error
+ * int bpf_redirect_map(map, key, flags)
+ *     redirect to endpoint in map
+ *     @map: pointer to dev map
+ *     @key: index in map to lookup
+ *     @flags: --
+ *     Return: XDP_REDIRECT on success or XDP_ABORT on error
  *
  * u32 bpf_get_route_realm(skb)
  *     retrieve a dst's tclassid
@@ -564,9 +569,10 @@ union bpf_attr {
  *     @flags: reserved for future use
  *     Return: 0 on success or negative error code
  *
- * int bpf_sk_redirect_map(map, key, flags)
+ * int bpf_sk_redirect_map(skb, map, key, flags)
  *     Redirect skb to a sock in map using key as a lookup key for the
  *     sock in map.
+ *     @skb: pointer to skb
  *     @map: pointer to sockmap
  *     @key: key to lookup sock in map
  *     @flags: reserved for future use
@@ -632,7 +638,7 @@ union bpf_attr {
        FN(skb_adjust_room),            \
        FN(redirect_map),               \
        FN(sk_redirect_map),            \
-       FN(sock_map_update),
+       FN(sock_map_update),            \
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
@@ -753,20 +759,23 @@ struct bpf_sock {
        __u32 family;
        __u32 type;
        __u32 protocol;
+       __u32 mark;
+       __u32 priority;
 };
 
 #define XDP_PACKET_HEADROOM 256
 
 /* User return codes for XDP prog type.
  * A valid XDP program must return one of these defined values. All other
- * return codes are reserved for future use. Unknown return codes will result
- * in packet drop.
+ * return codes are reserved for future use. Unknown return codes will
+ * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
  */
 enum xdp_action {
        XDP_ABORTED = 0,
        XDP_DROP,
        XDP_PASS,
        XDP_TX,
+       XDP_REDIRECT,
 };
 
 /* user accessible metadata for XDP packet hook
@@ -778,8 +787,8 @@ struct xdp_md {
 };
 
 enum sk_action {
-       SK_ABORTED = 0,
-       SK_DROP,
+       SK_DROP = 0,
+       SK_PASS,
        SK_REDIRECT,
 };
 
index 6cd63c18708ae1d23dbc280ed49aed55f817a2f5..83888758741184f969f3b8fd1738e38bdc24da78 100644 (file)
@@ -711,7 +711,8 @@ struct kvm_ppc_one_seg_page_size {
 struct kvm_ppc_smmu_info {
        __u64 flags;
        __u32 slb_size;
-       __u32 pad;
+       __u16 data_keys;        /* # storage keys supported for data */
+       __u16 instr_keys;       /* # storage keys supported for instructions */
        struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
 };
 
index 81d8edf11789aaae96a25da00b8a8f3ec0ed83aa..a937480d7cd345f4e1b12c2e409f58c0489e8a4e 100644 (file)
@@ -1,7 +1,8 @@
 #ifndef _UAPI_LINUX_MMAN_H
 #define _UAPI_LINUX_MMAN_H
 
-#include <uapi/asm/mman.h>
+#include <asm/mman.h>
+#include <asm-generic/hugetlb_encode.h>
 
 #define MREMAP_MAYMOVE 1
 #define MREMAP_FIXED   2
 #define OVERCOMMIT_ALWAYS              1
 #define OVERCOMMIT_NEVER               2
 
+/*
+ * Huge page size encoding when MAP_HUGETLB is specified, and a huge page
+ * size other than the default is desired.  See hugetlb_encode.h.
+ * All known huge page size encodings are provided here.  It is the
+ * responsibility of the application to know which sizes are supported on
+ * the running system.  See mmap(2) man page for details.
+ */
+#define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
+#define MAP_HUGE_MASK  HUGETLB_FLAG_ENCODE_MASK
+
+#define MAP_HUGE_64KB  HUGETLB_FLAG_ENCODE_64KB
+#define MAP_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB
+#define MAP_HUGE_1MB   HUGETLB_FLAG_ENCODE_1MB
+#define MAP_HUGE_2MB   HUGETLB_FLAG_ENCODE_2MB
+#define MAP_HUGE_8MB   HUGETLB_FLAG_ENCODE_8MB
+#define MAP_HUGE_16MB  HUGETLB_FLAG_ENCODE_16MB
+#define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MAP_HUGE_1GB   HUGETLB_FLAG_ENCODE_1GB
+#define MAP_HUGE_2GB   HUGETLB_FLAG_ENCODE_2GB
+#define MAP_HUGE_16GB  HUGETLB_FLAG_ENCODE_16GB
+
 #endif /* _UAPI_LINUX_MMAN_H */
index 6a1af43862df759239b8848921b0492e23b95e6b..3995735a878fe796f36513b2bb22b20deaf642d1 100644 (file)
@@ -194,10 +194,10 @@ they mean, and suggestions for how to fix them.
    If it's a GCC-compiled .c file, the error may be because the function
    uses an inline asm() statement which has a "call" instruction.  An
    asm() statement with a call instruction must declare the use of the
-   stack pointer in its output operand.  For example, on x86_64:
+   stack pointer in its output operand.  On x86_64, this means adding
+   the ASM_CALL_CONSTRAINT as an output constraint:
 
-     register void *__sp asm("rsp");
-     asm volatile("call func" : "+r" (__sp));
+     asm volatile("call func" : ASM_CALL_CONSTRAINT);
 
    Otherwise the stack frame may not get created before the call.
 
index 0e8c8ec4fd4e6ac1d101b2514d43d870762c293b..34a579f806e390337bdee738ae507364c02e7ad7 100644 (file)
@@ -208,14 +208,14 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
                break;
 
        case 0x89:
-               if (rex == 0x48 && modrm == 0xe5) {
+               if (rex_w && !rex_r && modrm_mod == 3 && modrm_reg == 4) {
 
-                       /* mov %rsp, %rbp */
+                       /* mov %rsp, reg */
                        *type = INSN_STACK;
                        op->src.type = OP_SRC_REG;
                        op->src.reg = CFI_SP;
                        op->dest.type = OP_DEST_REG;
-                       op->dest.reg = CFI_BP;
+                       op->dest.reg = op_to_cfi_reg[modrm_rm][rex_b];
                        break;
                }
 
@@ -284,11 +284,16 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
        case 0x8d:
                if (sib == 0x24 && rex_w && !rex_b && !rex_x) {
 
-                       /* lea disp(%rsp), reg */
                        *type = INSN_STACK;
-                       op->src.type = OP_SRC_ADD;
+                       if (!insn.displacement.value) {
+                               /* lea (%rsp), reg */
+                               op->src.type = OP_SRC_REG;
+                       } else {
+                               /* lea disp(%rsp), reg */
+                               op->src.type = OP_SRC_ADD;
+                               op->src.offset = insn.displacement.value;
+                       }
                        op->src.reg = CFI_SP;
-                       op->src.offset = insn.displacement.value;
                        op->dest.type = OP_DEST_REG;
                        op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
 
index f744617c9946d7eaaa293b3aba5b63dddf19278b..c0e26ad1fa7e3dbd212b3a4fd02fbcc4d8afc74c 100644 (file)
@@ -267,12 +267,13 @@ static int decode_instructions(struct objtool_file *file)
                                                      &insn->immediate,
                                                      &insn->stack_op);
                        if (ret)
-                               return ret;
+                               goto err;
 
                        if (!insn->type || insn->type > INSN_LAST) {
                                WARN_FUNC("invalid instruction type %d",
                                          insn->sec, insn->offset, insn->type);
-                               return -1;
+                               ret = -1;
+                               goto err;
                        }
 
                        hash_add(file->insn_hash, &insn->hash, insn->offset);
@@ -296,6 +297,10 @@ static int decode_instructions(struct objtool_file *file)
        }
 
        return 0;
+
+err:
+       free(insn);
+       return ret;
 }
 
 /*
@@ -1203,24 +1208,39 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
                switch (op->src.type) {
 
                case OP_SRC_REG:
-                       if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP) {
+                       if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
+                           cfa->base == CFI_SP &&
+                           regs[CFI_BP].base == CFI_CFA &&
+                           regs[CFI_BP].offset == -cfa->offset) {
+
+                               /* mov %rsp, %rbp */
+                               cfa->base = op->dest.reg;
+                               state->bp_scratch = false;
+                       }
 
-                               if (cfa->base == CFI_SP &&
-                                   regs[CFI_BP].base == CFI_CFA &&
-                                   regs[CFI_BP].offset == -cfa->offset) {
+                       else if (op->src.reg == CFI_SP &&
+                                op->dest.reg == CFI_BP && state->drap) {
 
-                                       /* mov %rsp, %rbp */
-                                       cfa->base = op->dest.reg;
-                                       state->bp_scratch = false;
-                               }
+                               /* drap: mov %rsp, %rbp */
+                               regs[CFI_BP].base = CFI_BP;
+                               regs[CFI_BP].offset = -state->stack_size;
+                               state->bp_scratch = false;
+                       }
 
-                               else if (state->drap) {
+                       else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
 
-                                       /* drap: mov %rsp, %rbp */
-                                       regs[CFI_BP].base = CFI_BP;
-                                       regs[CFI_BP].offset = -state->stack_size;
-                                       state->bp_scratch = false;
-                               }
+                               /*
+                                * mov %rsp, %reg
+                                *
+                                * This is needed for the rare case where GCC
+                                * does:
+                                *
+                                *   mov    %rsp, %rax
+                                *   ...
+                                *   mov    %rax, %rsp
+                                */
+                               state->vals[op->dest.reg].base = CFI_CFA;
+                               state->vals[op->dest.reg].offset = -state->stack_size;
                        }
 
                        else if (op->dest.reg == cfa->base) {
index 6e9f980a7d26fdc4384fdb15d96baaca09846117..24460155c82c9b2305a2484d037be9017a00becb 100644 (file)
@@ -175,19 +175,20 @@ static int read_sections(struct elf *elf)
                        return -1;
                }
 
-               sec->data = elf_getdata(s, NULL);
-               if (!sec->data) {
-                       WARN_ELF("elf_getdata");
-                       return -1;
-               }
-
-               if (sec->data->d_off != 0 ||
-                   sec->data->d_size != sec->sh.sh_size) {
-                       WARN("unexpected data attributes for %s", sec->name);
-                       return -1;
+               if (sec->sh.sh_size != 0) {
+                       sec->data = elf_getdata(s, NULL);
+                       if (!sec->data) {
+                               WARN_ELF("elf_getdata");
+                               return -1;
+                       }
+                       if (sec->data->d_off != 0 ||
+                           sec->data->d_size != sec->sh.sh_size) {
+                               WARN("unexpected data attributes for %s",
+                                    sec->name);
+                               return -1;
+                       }
                }
-
-               sec->len = sec->data->d_size;
+               sec->len = sec->sh.sh_size;
        }
 
        /* sanity check, one more call to elf_nextscn() should return NULL */
@@ -508,6 +509,7 @@ struct section *elf_create_rela_section(struct elf *elf, struct section *base)
        strcat(relaname, base->name);
 
        sec = elf_create_section(elf, relaname, sizeof(GElf_Rela), 0);
+       free(relaname);
        if (!sec)
                return NULL;
 
@@ -561,6 +563,7 @@ int elf_write(struct elf *elf)
        struct section *sec;
        Elf_Scn *s;
 
+       /* Update section headers for changed sections: */
        list_for_each_entry(sec, &elf->sections, list) {
                if (sec->changed) {
                        s = elf_getscn(elf->elf, sec->idx);
@@ -568,13 +571,17 @@ int elf_write(struct elf *elf)
                                WARN_ELF("elf_getscn");
                                return -1;
                        }
-                       if (!gelf_update_shdr (s, &sec->sh)) {
+                       if (!gelf_update_shdr(s, &sec->sh)) {
                                WARN_ELF("gelf_update_shdr");
                                return -1;
                        }
                }
        }
 
+       /* Make sure the new section header entries get updated properly. */
+       elf_flagelf(elf->elf, ELF_C_SET, ELF_F_DIRTY);
+
+       /* Write all changes to the file. */
        if (elf_update(elf->elf, ELF_C_WRITE) < 0) {
                WARN_ELF("elf_update");
                return -1;
index e397453e5a465513af8d84103fd3feccb510d793..63526f4416ea4fb81292fafcfe6fdd55d9955639 100644 (file)
@@ -8,8 +8,8 @@ perf-record - Run a command and record its profile into perf.data
 SYNOPSIS
 --------
 [verse]
-'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] <command>
-'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] -- <command> [<options>]
+'perf record' [-e <EVENT> | --event=EVENT] [-a] <command>
+'perf record' [-e <EVENT> | --event=EVENT] [-a] -- <command> [<options>]
 
 DESCRIPTION
 -----------
index 62072822dc85d986671b6b15587229f81cfe19ec..627b7cada1442b65dbbcc600661f35caf8429cdc 100644 (file)
@@ -1,34 +1,8 @@
 tools/perf
-tools/arch/alpha/include/asm/barrier.h
-tools/arch/arm/include/asm/barrier.h
-tools/arch/arm64/include/asm/barrier.h
-tools/arch/ia64/include/asm/barrier.h
-tools/arch/mips/include/asm/barrier.h
-tools/arch/powerpc/include/asm/barrier.h
-tools/arch/s390/include/asm/barrier.h
-tools/arch/sh/include/asm/barrier.h
-tools/arch/sparc/include/asm/barrier.h
-tools/arch/sparc/include/asm/barrier_32.h
-tools/arch/sparc/include/asm/barrier_64.h
-tools/arch/tile/include/asm/barrier.h
-tools/arch/x86/include/asm/barrier.h
-tools/arch/x86/include/asm/cmpxchg.h
-tools/arch/x86/include/asm/cpufeatures.h
-tools/arch/x86/include/asm/disabled-features.h
-tools/arch/x86/include/asm/required-features.h
-tools/arch/x86/include/uapi/asm/svm.h
-tools/arch/x86/include/uapi/asm/vmx.h
-tools/arch/x86/include/uapi/asm/kvm.h
-tools/arch/x86/include/uapi/asm/kvm_perf.h
-tools/arch/x86/lib/memcpy_64.S
-tools/arch/x86/lib/memset_64.S
-tools/arch/s390/include/uapi/asm/kvm_perf.h
-tools/arch/s390/include/uapi/asm/sie.h
-tools/arch/xtensa/include/asm/barrier.h
+tools/arch
 tools/scripts
 tools/build
-tools/arch/x86/include/asm/atomic.h
-tools/arch/x86/include/asm/rmwcc.h
+tools/include
 tools/lib/traceevent
 tools/lib/api
 tools/lib/bpf
@@ -42,60 +16,3 @@ tools/lib/find_bit.c
 tools/lib/bitmap.c
 tools/lib/str_error_r.c
 tools/lib/vsprintf.c
-tools/include/asm/alternative-asm.h
-tools/include/asm/atomic.h
-tools/include/asm/barrier.h
-tools/include/asm/bug.h
-tools/include/asm-generic/atomic-gcc.h
-tools/include/asm-generic/barrier.h
-tools/include/asm-generic/bitops/arch_hweight.h
-tools/include/asm-generic/bitops/atomic.h
-tools/include/asm-generic/bitops/const_hweight.h
-tools/include/asm-generic/bitops/__ffs.h
-tools/include/asm-generic/bitops/__ffz.h
-tools/include/asm-generic/bitops/__fls.h
-tools/include/asm-generic/bitops/find.h
-tools/include/asm-generic/bitops/fls64.h
-tools/include/asm-generic/bitops/fls.h
-tools/include/asm-generic/bitops/hweight.h
-tools/include/asm-generic/bitops.h
-tools/include/linux/atomic.h
-tools/include/linux/bitops.h
-tools/include/linux/compiler.h
-tools/include/linux/compiler-gcc.h
-tools/include/linux/coresight-pmu.h
-tools/include/linux/bug.h
-tools/include/linux/filter.h
-tools/include/linux/hash.h
-tools/include/linux/kernel.h
-tools/include/linux/list.h
-tools/include/linux/log2.h
-tools/include/uapi/asm-generic/fcntl.h
-tools/include/uapi/asm-generic/ioctls.h
-tools/include/uapi/asm-generic/mman-common.h
-tools/include/uapi/asm-generic/mman.h
-tools/include/uapi/drm/drm.h
-tools/include/uapi/drm/i915_drm.h
-tools/include/uapi/linux/bpf.h
-tools/include/uapi/linux/bpf_common.h
-tools/include/uapi/linux/fcntl.h
-tools/include/uapi/linux/hw_breakpoint.h
-tools/include/uapi/linux/kvm.h
-tools/include/uapi/linux/mman.h
-tools/include/uapi/linux/perf_event.h
-tools/include/uapi/linux/sched.h
-tools/include/uapi/linux/stat.h
-tools/include/uapi/linux/vhost.h
-tools/include/uapi/sound/asound.h
-tools/include/linux/poison.h
-tools/include/linux/rbtree.h
-tools/include/linux/rbtree_augmented.h
-tools/include/linux/refcount.h
-tools/include/linux/string.h
-tools/include/linux/stringify.h
-tools/include/linux/types.h
-tools/include/linux/err.h
-tools/include/linux/bitmap.h
-tools/include/linux/time64.h
-tools/arch/*/include/uapi/asm/mman.h
-tools/arch/*/include/uapi/asm/perf_regs.h
index bd518b623d7a203e576653705866f910409fa144..5bd7b9260cc0858c36730ee367aecc56df6c91bb 100644 (file)
@@ -1,5 +1,4 @@
 libperf-y += header.o
-libperf-y += sym-handling.o
 libperf-y += kvm-stat.o
 
 libperf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/s390/util/sym-handling.c b/tools/perf/arch/s390/util/sym-handling.c
deleted file mode 100644 (file)
index e103f6e..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Architecture specific ELF symbol handling and relocation mapping.
- *
- * Copyright 2017 IBM Corp.
- * Author(s): Thomas Richter <tmricht@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- */
-
-#include "symbol.h"
-
-#ifdef HAVE_LIBELF_SUPPORT
-bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
-{
-       if (ehdr.e_type == ET_EXEC)
-               return false;
-       return ehdr.e_type == ET_REL || ehdr.e_type == ET_DYN;
-}
-
-void arch__adjust_sym_map_offset(GElf_Sym *sym,
-                                GElf_Shdr *shdr __maybe_unused,
-                                struct map *map)
-{
-       if (map->type == MAP__FUNCTION)
-               sym->st_value += map->start;
-}
-#endif
index 3d4c3b5e186832d82336ae346865f697564253c2..0c977b6e0f8b32856be122400462f673f8d54a3c 100644 (file)
@@ -586,7 +586,7 @@ static void print_sample_brstack(struct perf_sample *sample,
                        thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
                }
 
-               printf("0x%"PRIx64, from);
+               printf(" 0x%"PRIx64, from);
                if (PRINT_FIELD(DSO)) {
                        printf("(");
                        map__fprintf_dsoname(alf.map, stdout);
@@ -681,7 +681,7 @@ static void print_sample_brstackoff(struct perf_sample *sample,
                if (alt.map && !alt.map->dso->adjust_symbols)
                        to = map__map_ip(alt.map, to);
 
-               printf("0x%"PRIx64, from);
+               printf(" 0x%"PRIx64, from);
                if (PRINT_FIELD(DSO)) {
                        printf("(");
                        map__fprintf_dsoname(alf.map, stdout);
index 462fc755092e0b95cdf9ffd2ecf2317f4a4cf5df..7a84d73324e3c1209781296dcc54260745150b53 100755 (executable)
@@ -10,6 +10,9 @@
 
 . $(dirname $0)/lib/probe.sh
 
+ld=$(realpath /lib64/ld*.so.* | uniq)
+libc=$(echo $ld | sed 's/ld/libc/g')
+
 trace_libc_inet_pton_backtrace() {
        idx=0
        expected[0]="PING.*bytes"
@@ -18,8 +21,8 @@ trace_libc_inet_pton_backtrace() {
        expected[3]=".*packets transmitted.*"
        expected[4]="rtt min.*"
        expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
-       expected[6]=".*inet_pton[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$"
-       expected[7]="getaddrinfo[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$"
+       expected[6]=".*inet_pton[[:space:]]\($libc\)$"
+       expected[7]="getaddrinfo[[:space:]]\($libc\)$"
        expected[8]=".*\(.*/bin/ping.*\)$"
 
        perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
@@ -35,7 +38,7 @@ trace_libc_inet_pton_backtrace() {
 }
 
 skip_if_no_perf_probe && \
-perf probe -q /lib64/libc-*.so inet_pton && \
+perf probe -q $libc inet_pton && \
 trace_libc_inet_pton_backtrace
 err=$?
 rm -f ${file}
index ddb2c6fbdf919e8124ffb40eee47c801b43bd6c6..db79017a6e56fc1b836fbbeb178e9eb7c57a7a89 100644 (file)
@@ -532,7 +532,7 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
 
 void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
 {
-       list_del(&format->list);
+       list_del_init(&format->list);
 }
 
 void perf_hpp__cancel_cumulate(void)
@@ -606,6 +606,13 @@ next:
 
 static void fmt_free(struct perf_hpp_fmt *fmt)
 {
+       /*
+        * At this point fmt should be completely
+        * unhooked, if not it's a bug.
+        */
+       BUG_ON(!list_empty(&fmt->list));
+       BUG_ON(!list_empty(&fmt->sort_list));
+
        if (fmt->free)
                fmt->free(fmt);
 }
index 510b513e0f01fe96e110b9eab45db527b8c86104..a971caf3759dc2fc9645856c9296c3ad0aee1445 100644 (file)
@@ -65,8 +65,6 @@ static int parse_callchain_mode(const char *value)
                callchain_param.mode = CHAIN_FOLDED;
                return 0;
        }
-
-       pr_err("Invalid callchain mode: %s\n", value);
        return -1;
 }
 
@@ -82,8 +80,6 @@ static int parse_callchain_order(const char *value)
                callchain_param.order_set = true;
                return 0;
        }
-
-       pr_err("Invalid callchain order: %s\n", value);
        return -1;
 }
 
@@ -105,8 +101,6 @@ static int parse_callchain_sort_key(const char *value)
                callchain_param.branch_callstack = 1;
                return 0;
        }
-
-       pr_err("Invalid callchain sort key: %s\n", value);
        return -1;
 }
 
@@ -124,8 +118,6 @@ static int parse_callchain_value(const char *value)
                callchain_param.value = CCVAL_COUNT;
                return 0;
        }
-
-       pr_err("Invalid callchain config key: %s\n", value);
        return -1;
 }
 
@@ -319,12 +311,27 @@ int perf_callchain_config(const char *var, const char *value)
 
                return ret;
        }
-       if (!strcmp(var, "print-type"))
-               return parse_callchain_mode(value);
-       if (!strcmp(var, "order"))
-               return parse_callchain_order(value);
-       if (!strcmp(var, "sort-key"))
-               return parse_callchain_sort_key(value);
+       if (!strcmp(var, "print-type")){
+               int ret;
+               ret = parse_callchain_mode(value);
+               if (ret == -1)
+                       pr_err("Invalid callchain mode: %s\n", value);
+               return ret;
+       }
+       if (!strcmp(var, "order")){
+               int ret;
+               ret = parse_callchain_order(value);
+               if (ret == -1)
+                       pr_err("Invalid callchain order: %s\n", value);
+               return ret;
+       }
+       if (!strcmp(var, "sort-key")){
+               int ret;
+               ret = parse_callchain_sort_key(value);
+               if (ret == -1)
+                       pr_err("Invalid callchain sort key: %s\n", value);
+               return ret;
+       }
        if (!strcmp(var, "threshold")) {
                callchain_param.min_percent = strtod(value, &endptr);
                if (value == endptr) {
@@ -678,6 +685,8 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
 {
        struct symbol *sym = node->sym;
        u64 left, right;
+       struct dso *left_dso = NULL;
+       struct dso *right_dso = NULL;
 
        if (callchain_param.key == CCKEY_SRCLINE) {
                enum match_result match = match_chain_srcline(node, cnode);
@@ -689,12 +698,14 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
        if (cnode->ms.sym && sym && callchain_param.key == CCKEY_FUNCTION) {
                left = cnode->ms.sym->start;
                right = sym->start;
+               left_dso = cnode->ms.map->dso;
+               right_dso = node->map->dso;
        } else {
                left = cnode->ip;
                right = node->ip;
        }
 
-       if (left == right) {
+       if (left == right && left_dso == right_dso) {
                if (node->branch) {
                        cnode->branch_count++;
 
index 4bb89373eb52893e7a3c7206da85af1e72afdfc3..0dccdb89572cdb455724a6a48a86d96821fc53e2 100644 (file)
@@ -271,12 +271,17 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
        return evsel;
 }
 
+static bool perf_event_can_profile_kernel(void)
+{
+       return geteuid() == 0 || perf_event_paranoid() == -1;
+}
+
 struct perf_evsel *perf_evsel__new_cycles(bool precise)
 {
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
-               .exclude_kernel = geteuid() != 0,
+               .exclude_kernel = !perf_event_can_profile_kernel(),
        };
        struct perf_evsel *evsel;
 
index f6257fb4f08ceedde23f790b1aae9255f9565d13..39b15968eab1d4e32745317c18e53dfcef84322d 100644 (file)
@@ -309,10 +309,11 @@ static char *get_config_name(struct list_head *head_terms)
 static struct perf_evsel *
 __add_event(struct list_head *list, int *idx,
            struct perf_event_attr *attr,
-           char *name, struct cpu_map *cpus,
+           char *name, struct perf_pmu *pmu,
            struct list_head *config_terms, bool auto_merge_stats)
 {
        struct perf_evsel *evsel;
+       struct cpu_map *cpus = pmu ? pmu->cpus : NULL;
 
        event_attr_init(attr);
 
@@ -323,7 +324,7 @@ __add_event(struct list_head *list, int *idx,
        (*idx)++;
        evsel->cpus        = cpu_map__get(cpus);
        evsel->own_cpus    = cpu_map__get(cpus);
-       evsel->system_wide = !!cpus;
+       evsel->system_wide = pmu ? pmu->is_uncore : false;
        evsel->auto_merge_stats = auto_merge_stats;
 
        if (name)
@@ -1233,7 +1234,7 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
 
        if (!head_config) {
                attr.type = pmu->type;
-               evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu->cpus, NULL, auto_merge_stats);
+               evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats);
                return evsel ? 0 : -ENOMEM;
        }
 
@@ -1254,7 +1255,7 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
                return -EINVAL;
 
        evsel = __add_event(list, &parse_state->idx, &attr,
-                           get_config_name(head_config), pmu->cpus,
+                           get_config_name(head_config), pmu,
                            &config_terms, auto_merge_stats);
        if (evsel) {
                evsel->unit = info.unit;
index c42edeac451fc809c2581f492cdc2e1741774bf7..dcfdafdc2f1c2f91b0090fc1f573fbdafde236de 100644 (file)
@@ -8,6 +8,9 @@
 
 %{
 #include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
 #include "../perf.h"
 #include "parse-events.h"
 #include "parse-events-bison.h"
@@ -53,9 +56,8 @@ static int str(yyscan_t scanner, int token)
        return token;
 }
 
-static bool isbpf(yyscan_t scanner)
+static bool isbpf_suffix(char *text)
 {
-       char *text = parse_events_get_text(scanner);
        int len = strlen(text);
 
        if (len < 2)
@@ -68,6 +70,17 @@ static bool isbpf(yyscan_t scanner)
        return false;
 }
 
+static bool isbpf(yyscan_t scanner)
+{
+       char *text = parse_events_get_text(scanner);
+       struct stat st;
+
+       if (!isbpf_suffix(text))
+               return false;
+
+       return stat(text, &st) == 0;
+}
+
 /*
  * This function is called when the parser gets two kind of input:
  *
index ac16a9db1fb566ca14d272e33bc34e5166ea2b81..1c4d7b4e4fb5725e9b952ef3b0fc69c5a63c2f26 100644 (file)
@@ -470,17 +470,36 @@ static void pmu_read_sysfs(void)
        closedir(dir);
 }
 
+static struct cpu_map *__pmu_cpumask(const char *path)
+{
+       FILE *file;
+       struct cpu_map *cpus;
+
+       file = fopen(path, "r");
+       if (!file)
+               return NULL;
+
+       cpus = cpu_map__read(file);
+       fclose(file);
+       return cpus;
+}
+
+/*
+ * Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64)
+ * may have a "cpus" file.
+ */
+#define CPUS_TEMPLATE_UNCORE   "%s/bus/event_source/devices/%s/cpumask"
+#define CPUS_TEMPLATE_CPU      "%s/bus/event_source/devices/%s/cpus"
+
 static struct cpu_map *pmu_cpumask(const char *name)
 {
-       struct stat st;
        char path[PATH_MAX];
-       FILE *file;
        struct cpu_map *cpus;
        const char *sysfs = sysfs__mountpoint();
        const char *templates[] = {
-                "%s/bus/event_source/devices/%s/cpumask",
-                "%s/bus/event_source/devices/%s/cpus",
-                NULL
+               CPUS_TEMPLATE_UNCORE,
+               CPUS_TEMPLATE_CPU,
+               NULL
        };
        const char **template;
 
@@ -489,20 +508,25 @@ static struct cpu_map *pmu_cpumask(const char *name)
 
        for (template = templates; *template; template++) {
                snprintf(path, PATH_MAX, *template, sysfs, name);
-               if (stat(path, &st) == 0)
-                       break;
+               cpus = __pmu_cpumask(path);
+               if (cpus)
+                       return cpus;
        }
 
-       if (!*template)
-               return NULL;
+       return NULL;
+}
 
-       file = fopen(path, "r");
-       if (!file)
-               return NULL;
+static bool pmu_is_uncore(const char *name)
+{
+       char path[PATH_MAX];
+       struct cpu_map *cpus;
+       const char *sysfs = sysfs__mountpoint();
 
-       cpus = cpu_map__read(file);
-       fclose(file);
-       return cpus;
+       snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name);
+       cpus = __pmu_cpumask(path);
+       cpu_map__put(cpus);
+
+       return !!cpus;
 }
 
 /*
@@ -617,6 +641,8 @@ static struct perf_pmu *pmu_lookup(const char *name)
 
        pmu->cpus = pmu_cpumask(name);
 
+       pmu->is_uncore = pmu_is_uncore(name);
+
        INIT_LIST_HEAD(&pmu->format);
        INIT_LIST_HEAD(&pmu->aliases);
        list_splice(&format, &pmu->format);
index 389e9729331f45c464e74b2a20538aabb6f0bb6d..fe0de0502ce26b66fc2caa94a99ae1325807ab50 100644 (file)
@@ -22,6 +22,7 @@ struct perf_pmu {
        char *name;
        __u32 type;
        bool selectable;
+       bool is_uncore;
        struct perf_event_attr *default_config;
        struct cpu_map *cpus;
        struct list_head format;  /* HEAD struct perf_pmu_format -> list */
index a7ebd9fe8e40ee56f79a108c247d528571d8ff64..76ab0709a20cbd2e6fd9b4d566188b37329d4163 100644 (file)
@@ -374,6 +374,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
+       if (tool->namespaces == NULL)
+               tool->namespaces = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
index 5c39f420111e5f679fa4a486383174d9c10d68b5..9cf781f0d8a2d797c88bf0bef5cab8759a6cbf96 100644 (file)
@@ -810,12 +810,6 @@ static u64 ref_reloc(struct kmap *kmap)
 void __weak arch__sym_update(struct symbol *s __maybe_unused,
                GElf_Sym *sym __maybe_unused) { }
 
-void __weak arch__adjust_sym_map_offset(GElf_Sym *sym, GElf_Shdr *shdr,
-                                      struct map *map __maybe_unused)
-{
-       sym->st_value -= shdr->sh_addr - shdr->sh_offset;
-}
-
 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
                  struct symsrc *runtime_ss, int kmodule)
 {
@@ -996,7 +990,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
 
                        /* Adjust symbol to map to file offset */
                        if (adjust_kernel_syms)
-                               arch__adjust_sym_map_offset(&sym, &shdr, map);
+                               sym.st_value -= shdr.sh_addr - shdr.sh_offset;
 
                        if (strcmp(section_name,
                                   (curr_dso->short_name +
index 2bd6a1f01a1cc5b5bb15ee0aae59ae921ebe2a86..aad99e7e179bbb8f95bd39cf0d659d136ed2ef6d 100644 (file)
@@ -344,9 +344,6 @@ int setup_intlist(struct intlist **list, const char *list_str,
 #ifdef HAVE_LIBELF_SUPPORT
 bool elf__needs_adjust_symbols(GElf_Ehdr ehdr);
 void arch__sym_update(struct symbol *s, GElf_Sym *sym);
-void arch__adjust_sym_map_offset(GElf_Sym *sym,
-                                GElf_Shdr *shdr __maybe_unused,
-                                struct map *map __maybe_unused);
 #endif
 
 #define SYMBOL_A 0
index 19e5db90394c2bf03ec1c08810c270cb965f65c8..6eea7cff3d4e96473644fcd7c034a8a32abfcb10 100644 (file)
@@ -15,9 +15,9 @@
 
 #include "syscalltbl.h"
 #include <stdlib.h>
+#include <linux/compiler.h>
 
 #ifdef HAVE_SYSCALL_TABLE
-#include <linux/compiler.h>
 #include <string.h>
 #include "string2.h"
 #include "util.h"
index 4ba726c9087072184b445ddc4e788d7000ade01d..54af604621304a6eaed9dc67a6c43a17c902df65 100644 (file)
@@ -23,12 +23,12 @@ static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
 
 static inline int xyarray__max_y(struct xyarray *xy)
 {
-       return xy->max_x;
+       return xy->max_y;
 }
 
 static inline int xyarray__max_x(struct xyarray *xy)
 {
-       return xy->max_y;
+       return xy->max_x;
 }
 
 #endif /* _PERF_XYARRAY_H_ */
index 4c5a481a850c6d4490db28ae0d6448197e4b4517..d6e1c02ddcfead4532cdc73f2d81207f62a8db5f 100644 (file)
@@ -26,7 +26,7 @@ endif
 
 ifneq ($(OUTPUT),)
 # check that the output directory actually exists
-OUTDIR := $(realpath $(OUTPUT))
+OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
 $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
 endif
 
index 0dafba2c1e7d28c4eda6904274baf7537ee3062c..bd9c6b31a504df654e16a9389abbb9028cfa2bb7 100644 (file)
@@ -92,7 +92,6 @@ unsigned int do_ring_perf_limit_reasons;
 unsigned int crystal_hz;
 unsigned long long tsc_hz;
 int base_cpu;
-int do_migrate;
 double discover_bclk(unsigned int family, unsigned int model);
 unsigned int has_hwp;  /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
                        /* IA32_HWP_REQUEST, IA32_HWP_STATUS */
@@ -303,9 +302,6 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
 
 int cpu_migrate(int cpu)
 {
-       if (!do_migrate)
-               return 0;
-
        CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
        CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
        if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
@@ -5007,7 +5003,6 @@ void cmdline(int argc, char **argv)
                {"hide",        required_argument,      0, 'H'},        // meh, -h taken by --help
                {"Joules",      no_argument,            0, 'J'},
                {"list",        no_argument,            0, 'l'},
-               {"migrate",     no_argument,            0, 'm'},
                {"out",         required_argument,      0, 'o'},
                {"quiet",       no_argument,            0, 'q'},
                {"show",        required_argument,      0, 's'},
@@ -5019,7 +5014,7 @@ void cmdline(int argc, char **argv)
 
        progname = argv[0];
 
-       while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:Jmo:qST:v",
+       while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:qST:v",
                                long_options, &option_index)) != -1) {
                switch (opt) {
                case 'a':
@@ -5062,9 +5057,6 @@ void cmdline(int argc, char **argv)
                        list_header_only++;
                        quiet++;
                        break;
-               case 'm':
-                       do_migrate = 1;
-                       break;
                case 'o':
                        outf = fopen_or_die(optarg, "w");
                        break;
index 9dc8f078a83c87e361883569e06fd86889a9164e..1e8b6116ba3c4ee03e137b737b84aaa26d133b94 100644 (file)
@@ -1,7 +1,7 @@
 ifneq ($(O),)
 ifeq ($(origin O), command line)
-       ABSOLUTE_O := $(realpath $(O))
-       dummy := $(if $(ABSOLUTE_O),,$(error O=$(O) does not exist))
+       dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
+       ABSOLUTE_O := $(shell cd $(O) ; pwd)
        OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/)
        COMMAND_O := O=$(ABSOLUTE_O)
 ifeq ($(objtree),)
@@ -12,7 +12,7 @@ endif
 
 # check that the output directory actually exists
 ifneq ($(OUTPUT),)
-OUTDIR := $(realpath $(OUTPUT))
+OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
 $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
 endif
 
index d20791c3f4990bdced0308989205db61f07087a0..bef419d4266df97a207d6aa3033ae3998ff7dc16 100644 (file)
@@ -1527,9 +1527,6 @@ static void nfit_test1_setup(struct nfit_test *t)
        set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
        set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
        set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
-       set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
-       set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
-       set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
 }
 
 static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
index 26ce4f7168be534de2eef4be6650de9617b03b33..ff805643b5f723a1eb5420f50f88819e529b7299 100644 (file)
@@ -52,6 +52,10 @@ override LDFLAGS =
 override MAKEFLAGS =
 endif
 
+ifneq ($(KBUILD_SRC),)
+override LDFLAGS =
+endif
+
 BUILD := $(O)
 ifndef BUILD
   BUILD := $(KBUILD_OUTPUT)
@@ -62,32 +66,32 @@ endif
 
 export BUILD
 all:
-       for TARGET in $(TARGETS); do            \
+       @for TARGET in $(TARGETS); do           \
                BUILD_TARGET=$$BUILD/$$TARGET;  \
                mkdir $$BUILD_TARGET  -p;       \
                make OUTPUT=$$BUILD_TARGET -C $$TARGET;\
        done;
 
 run_tests: all
-       for TARGET in $(TARGETS); do \
+       @for TARGET in $(TARGETS); do \
                BUILD_TARGET=$$BUILD/$$TARGET;  \
                make OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\
        done;
 
 hotplug:
-       for TARGET in $(TARGETS_HOTPLUG); do \
+       @for TARGET in $(TARGETS_HOTPLUG); do \
                BUILD_TARGET=$$BUILD/$$TARGET;  \
                make OUTPUT=$$BUILD_TARGET -C $$TARGET;\
        done;
 
 run_hotplug: hotplug
-       for TARGET in $(TARGETS_HOTPLUG); do \
+       @for TARGET in $(TARGETS_HOTPLUG); do \
                BUILD_TARGET=$$BUILD/$$TARGET;  \
                make OUTPUT=$$BUILD_TARGET -C $$TARGET run_full_test;\
        done;
 
 clean_hotplug:
-       for TARGET in $(TARGETS_HOTPLUG); do \
+       @for TARGET in $(TARGETS_HOTPLUG); do \
                BUILD_TARGET=$$BUILD/$$TARGET;  \
                make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
        done;
@@ -103,7 +107,7 @@ install:
 ifdef INSTALL_PATH
        @# Ask all targets to install their files
        mkdir -p $(INSTALL_PATH)
-       for TARGET in $(TARGETS); do \
+       @for TARGET in $(TARGETS); do \
                BUILD_TARGET=$$BUILD/$$TARGET;  \
                make OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \
        done;
@@ -128,7 +132,7 @@ else
 endif
 
 clean:
-       for TARGET in $(TARGETS); do \
+       @for TARGET in $(TARGETS); do \
                BUILD_TARGET=$$BUILD/$$TARGET;  \
                make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
        done;
index 36fb9161b34acf8c14a585f4778905db9185b163..b2e02bdcd098f6380c80bc190e3986acb1007188 100644 (file)
@@ -65,7 +65,7 @@ static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
 static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
                             int optlen) =
        (void *) BPF_FUNC_setsockopt;
-static int (*bpf_sk_redirect_map)(void *map, int key, int flags) =
+static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
        (void *) BPF_FUNC_sk_redirect_map;
 static int (*bpf_sock_map_update)(void *map, void *key, void *value,
                                  unsigned long long flags) =
index 20ecbaa0d85d72b860678caf49c3e421105802b8..6c53a8906eff4c4fe6d98863050132d928de50f8 100644 (file)
@@ -12,6 +12,7 @@ static inline unsigned int bpf_num_possible_cpus(void)
        unsigned int start, end, possible_cpus = 0;
        char buff[128];
        FILE *fp;
+       int n;
 
        fp = fopen(fcpu, "r");
        if (!fp) {
@@ -20,17 +21,17 @@ static inline unsigned int bpf_num_possible_cpus(void)
        }
 
        while (fgets(buff, sizeof(buff), fp)) {
-               if (sscanf(buff, "%u-%u", &start, &end) == 2) {
-                       possible_cpus = start == 0 ? end + 1 : 0;
-                       break;
+               n = sscanf(buff, "%u-%u", &start, &end);
+               if (n == 0) {
+                       printf("Failed to retrieve # possible CPUs!\n");
+                       exit(1);
+               } else if (n == 1) {
+                       end = start;
                }
+               possible_cpus = start == 0 ? end + 1 : 0;
+               break;
        }
-
        fclose(fp);
-       if (!possible_cpus) {
-               printf("Failed to retrieve # possible CPUs!\n");
-               exit(1);
-       }
 
        return possible_cpus;
 }
index 9b99bd10807d8990a82ba1a390e9f1304e04d214..2cd2d552938b19211da94c4b51ea7895408d35b0 100644 (file)
@@ -61,8 +61,8 @@ int bpf_prog2(struct __sk_buff *skb)
        bpf_printk("verdict: data[0] = redir(%u:%u)\n", map, sk);
 
        if (!map)
-               return bpf_sk_redirect_map(&sock_map_rx, sk, 0);
-       return bpf_sk_redirect_map(&sock_map_tx, sk, 0);
+               return bpf_sk_redirect_map(skb, &sock_map_rx, sk, 0);
+       return bpf_sk_redirect_map(skb, &sock_map_tx, sk, 0);
 }
 
 char _license[] SEC("license") = "GPL";
index fe3a443a110228efb99bc893d2f0820cd03e6763..50ce52d2013d6feddadcb6a2f61cdc4667b3d78d 100644 (file)
@@ -466,7 +466,7 @@ static void test_sockmap(int tasks, void *data)
        int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc;
        struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break;
        int ports[] = {50200, 50201, 50202, 50204};
-       int err, i, fd, sfd[6] = {0xdeadbeef};
+       int err, i, fd, udp, sfd[6] = {0xdeadbeef};
        u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0};
        int parse_prog, verdict_prog;
        struct sockaddr_in addr;
@@ -548,6 +548,16 @@ static void test_sockmap(int tasks, void *data)
                goto out_sockmap;
        }
 
+       /* Test update with unsupported UDP socket */
+       udp = socket(AF_INET, SOCK_DGRAM, 0);
+       i = 0;
+       err = bpf_map_update_elem(fd, &i, &udp, BPF_ANY);
+       if (!err) {
+               printf("Failed socket SOCK_DGRAM allowed '%i:%i'\n",
+                      i, udp);
+               goto out_sockmap;
+       }
+
        /* Test update without programs */
        for (i = 0; i < 6; i++) {
                err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
index 26f3250bdcd2546c4f316eed8caffd9eb583e0d9..64ae21f644896a74bb624d6f873463b5b2d07312 100644 (file)
@@ -1130,15 +1130,27 @@ static struct bpf_test tests[] = {
                .errstr = "invalid bpf_context access",
        },
        {
-               "check skb->mark is writeable by SK_SKB",
+               "invalid access of skb->mark for SK_SKB",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_EXIT_INSN(),
+               },
+               .result =  REJECT,
+               .prog_type = BPF_PROG_TYPE_SK_SKB,
+               .errstr = "invalid bpf_context access",
+       },
+       {
+               "check skb->mark is not writeable by SK_SKB",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
                                    offsetof(struct __sk_buff, mark)),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
+               .result =  REJECT,
                .prog_type = BPF_PROG_TYPE_SK_SKB,
+               .errstr = "invalid bpf_context access",
        },
        {
                "check skb->tc_index is writeable by SK_SKB",
@@ -6645,6 +6657,500 @@ static struct bpf_test tests[] = {
                .errstr = "BPF_END uses reserved fields",
                .result = REJECT,
        },
+       {
+               "arithmetic ops make PTR_TO_CTX unusable",
+               .insns = {
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+                                     offsetof(struct __sk_buff, data) -
+                                     offsetof(struct __sk_buff, mark)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "XDP pkt read, pkt_end mangling, bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end mangling, bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' > pkt_end, good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' > pkt_end, bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_data' > pkt_end, bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end > pkt_data', good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_end > pkt_data', bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end > pkt_data', bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' < pkt_end, good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_data' < pkt_end, bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' < pkt_end, bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end < pkt_data', good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end < pkt_data', bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_end < pkt_data', bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' >= pkt_end, good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_end >= pkt_data', good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end >= pkt_data', bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_end >= pkt_data', bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' <= pkt_end, good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end <= pkt_data', good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_end <= pkt_data', bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end <= pkt_data', bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
index 6b214b7b10fb7c1c7ad6330c3769fbd1c46838d7..247b0a1899d70f55a5a02e2aac35a3136854652d 100644 (file)
@@ -2,14 +2,14 @@
 uname_M := $(shell uname -m 2>/dev/null || echo not)
 ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
 
+TEST_GEN_PROGS := step_after_suspend_test
+
 ifeq ($(ARCH),x86)
-TEST_GEN_PROGS := breakpoint_test
+TEST_GEN_PROGS += breakpoint_test
 endif
 ifneq (,$(filter $(ARCH),aarch64 arm64))
-TEST_GEN_PROGS := breakpoint_test_arm64
+TEST_GEN_PROGS += breakpoint_test_arm64
 endif
 
-TEST_GEN_PROGS += step_after_suspend_test
-
 include ../lib.mk
 
index 2a1cb990874613c5ee9da2d371b95899d8eaa81a..a4fd4c851a5b7f27520801858d8f1f24e34ba56e 100644 (file)
@@ -1,6 +1,8 @@
 #!/bin/sh
 # description: Register/unregister many kprobe events
 
+[ -f kprobe_events ] || exit_unsupported # this is configurable
+
 # ftrace fentry skip size depends on the machine architecture.
 # Currently HAVE_KPROBES_ON_FTRACE defined on x86 and powerpc64le
 case `uname -m` in
index 7c647f619d63faf3eff4c135c1b302565b7f78d5..f0c0369ccb7972d6deffb12534d1c64cc913a382 100644 (file)
@@ -7,14 +7,17 @@ TEST_PROGS := run.sh
 include ../lib.mk
 
 all:
-       for DIR in $(SUBDIRS); do               \
+       @for DIR in $(SUBDIRS); do              \
                BUILD_TARGET=$(OUTPUT)/$$DIR;   \
                mkdir $$BUILD_TARGET  -p;       \
                make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
+               if [ -e $$DIR/$(TEST_PROGS) ]; then
+                       rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/;
+               fi
        done
 
 override define RUN_TESTS
-       $(OUTPUT)/run.sh
+       @cd $(OUTPUT); ./run.sh
 endef
 
 override define INSTALL_RULE
@@ -33,7 +36,7 @@ override define EMIT_TESTS
 endef
 
 override define CLEAN
-       for DIR in $(SUBDIRS); do               \
+       @for DIR in $(SUBDIRS); do              \
                BUILD_TARGET=$(OUTPUT)/$$DIR;   \
                mkdir $$BUILD_TARGET  -p;       \
                make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
index 849a90ffe8dd2d311b9b66bed9058fefdf28ba7d..a97e24edde39ed9d9d51441b01547856641b9606 100644 (file)
@@ -1,7 +1,9 @@
 CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE
 LDLIBS := $(LDLIBS) -lm
 
+ifeq (,$(filter $(ARCH),x86))
 TEST_GEN_FILES := msr aperf
+endif
 
 TEST_PROGS := run.sh
 
index 7868c106b8b1b80c41a8af739c8def48fd378bd8..d3ab48f91cd6a6ef1742653256e15418933de2cf 100755 (executable)
 
 EVALUATE_ONLY=0
 
-max_cpus=$(($(nproc)-1))
+if ! uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ | grep -q x86; then
+       echo "$0 # Skipped: Test can only run on x86 architectures."
+       exit 0
+fi
 
-# compile programs
-gcc aperf.c -Wall -D_GNU_SOURCE -o aperf  -lm
-[ $? -ne 0 ] && echo "Problem compiling aperf.c." && exit 1
-gcc -o msr msr.c -lm
-[ $? -ne 0 ] && echo "Problem compiling msr.c." && exit 1
+max_cpus=$(($(nproc)-1))
 
 function run_test () {
 
index 693616651da5cbc27a779868e64697df80582ef3..f65886af7c0cac60e4502e8973b4a602fe67c7b6 100644 (file)
@@ -6,7 +6,14 @@ ifeq (0,$(MAKELEVEL))
 OUTPUT := $(shell pwd)
 endif
 
+# The following are built by lib.mk common compile rules.
+# TEST_CUSTOM_PROGS should be used by tests that require
+# custom build rule and prevent common build rule use.
+# TEST_PROGS are for test shell scripts.
+# TEST_CUSTOM_PROGS and TEST_PROGS will be run by common run_tests
+# and install targets. Common clean doesn't touch them.
 TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
+TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
 TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
 
 all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
@@ -20,17 +27,28 @@ define RUN_TESTS
                test_num=`echo $$test_num+1 | bc`;      \
                echo "selftests: $$BASENAME_TEST";      \
                echo "========================================";        \
-               if [ ! -x $$BASENAME_TEST ]; then       \
+               if [ ! -x $$TEST ]; then        \
                        echo "selftests: Warning: file $$BASENAME_TEST is not executable, correct this.";\
                        echo "not ok 1..$$test_num selftests: $$BASENAME_TEST [FAIL]"; \
                else                                    \
-                       cd `dirname $$TEST` > /dev/null; (./$$BASENAME_TEST && echo "ok 1..$$test_num selftests: $$BASENAME_TEST [PASS]") || echo "not ok 1..$$test_num selftests:  $$BASENAME_TEST [FAIL]"; cd - > /dev/null;\
+                       cd `dirname $$TEST` > /dev/null; (./$$BASENAME_TEST > /tmp/$$BASENAME_TEST 2>&1 && echo "ok 1..$$test_num selftests: $$BASENAME_TEST [PASS]") || echo "not ok 1..$$test_num selftests:  $$BASENAME_TEST [FAIL]"; cd - > /dev/null;\
                fi;                                     \
        done;
 endef
 
 run_tests: all
-       $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_PROGS))
+ifneq ($(KBUILD_SRC),)
+       @if [ "X$(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)" != "X" ]; then
+               @rsync -aq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT)
+       fi
+       @if [ "X$(TEST_PROGS)" != "X" ]; then
+               $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(OUTPUT)/$(TEST_PROGS))
+       else
+               $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS))
+       fi
+else
+       $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS))
+endif
 
 define INSTALL_RULE
        @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then                                        \
@@ -38,10 +56,10 @@ define INSTALL_RULE
                echo "rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/";    \
                rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/;           \
        fi
-       @if [ "X$(TEST_GEN_PROGS)$(TEST_GEN_PROGS_EXTENDED)$(TEST_GEN_FILES)" != "X" ]; then                                    \
+       @if [ "X$(TEST_GEN_PROGS)$(TEST_CUSTOM_PROGS)$(TEST_GEN_PROGS_EXTENDED)$(TEST_GEN_FILES)" != "X" ]; then                                        \
                mkdir -p ${INSTALL_PATH};                                                                               \
-               echo "rsync -a $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/";        \
-               rsync -a $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/;               \
+               echo "rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/";   \
+               rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/;          \
        fi
 endef
 
@@ -53,15 +71,20 @@ else
 endif
 
 define EMIT_TESTS
-       @for TEST in $(TEST_GEN_PROGS) $(TEST_PROGS); do \
+       @for TEST in $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS); do \
                BASENAME_TEST=`basename $$TEST`;        \
-               echo "(./$$BASENAME_TEST && echo \"selftests: $$BASENAME_TEST [PASS]\") || echo \"selftests: $$BASENAME_TEST [FAIL]\""; \
+               echo "(./$$BASENAME_TEST > /tmp/$$BASENAME_TEST 2>&1 && echo \"selftests: $$BASENAME_TEST [PASS]\") || echo \"selftests: $$BASENAME_TEST [FAIL]\""; \
        done;
 endef
 
 emit_tests:
        $(EMIT_TESTS)
 
+# define if isn't already. It is undefined in make O= case.
+ifeq ($(RM),)
+RM := rm -f
+endif
+
 define CLEAN
        $(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
 endef
@@ -69,6 +92,15 @@ endef
 clean:
        $(CLEAN)
 
+# When make O= with kselftest target from main level
+# the following aren't defined.
+#
+ifneq ($(KBUILD_SRC),)
+LINK.c = $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH)
+COMPILE.S = $(CC) $(ASFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c
+LINK.S = $(CC) $(ASFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH)
+endif
+
 $(OUTPUT)/%:%.c
        $(LINK.c) $^ $(LDLIBS) -o $@
 
old mode 100644 (file)
new mode 100755 (executable)
index 79a664aeb8d76509a2f8e46aadf742571765a26a..152823b6cb2152d1661e0fc9dd287ed23831ef75 100644 (file)
@@ -5,8 +5,8 @@ TEST_GEN_PROGS := mq_open_tests mq_perf_tests
 include ../lib.mk
 
 override define RUN_TESTS
-       @./mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]"
-       @./mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]"
+       @$(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]"
+       @$(OUTPUT)/mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]"
 endef
 
 override define EMIT_TESTS
index 9801253e48021035b650a88e3d3d0c2affea8675..c612d6e38c6282cad7b56396e1d3e2c1c9d3a46c 100644 (file)
@@ -6,3 +6,4 @@ reuseport_bpf
 reuseport_bpf_cpu
 reuseport_bpf_numa
 reuseport_dualstack
+reuseaddr_conflict
index de1f5772b878ee1f4aee9e1452d1f0e83696af62..d86bca991f456a70206c790f733b0032bc7ad224 100644 (file)
@@ -5,9 +5,9 @@ CFLAGS += -I../../../../usr/include/
 
 TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
 TEST_GEN_FILES =  socket
-TEST_GEN_FILES += psock_fanout psock_tpacket
-TEST_GEN_FILES += reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
-TEST_GEN_FILES += reuseport_dualstack msg_zerocopy
+TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
+TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
+TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict
 
 include ../lib.mk
 
index 40232af5b023ee251bfc369fd73860d912809891..3ab6ec4039059cf127345652bccf6c34d5c4d273 100644 (file)
@@ -55,7 +55,7 @@
 #include <unistd.h>
 
 #ifndef SO_EE_ORIGIN_ZEROCOPY
-#define SO_EE_ORIGIN_ZEROCOPY          SO_EE_ORIGIN_UPAGE
+#define SO_EE_ORIGIN_ZEROCOPY          5
 #endif
 
 #ifndef SO_ZEROCOPY
index 4e00568d70c2c398651675ad763f646453b45b3b..90cb903c33815bcecc3a64a9f153c779476a6848 100755 (executable)
@@ -178,7 +178,7 @@ if [ "$(id -u)" -ne 0 ];then
        exit 0
 fi
 
-ip -Version 2>/dev/null >/dev/null
+ip link show 2>/dev/null >/dev/null
 if [ $? -ne 0 ];then
        echo "SKIP: Could not run test without the ip tool"
        exit 0
diff --git a/tools/testing/selftests/net/reuseaddr_conflict.c b/tools/testing/selftests/net/reuseaddr_conflict.c
new file mode 100644 (file)
index 0000000..7c5b126
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Test for the regression introduced by
+ *
+ * b9470c27607b ("inet: kill smallest_size and smallest_port")
+ *
+ * If we open an ipv4 socket on a port with reuseaddr we shouldn't reset the tb
+ * when we open the ipv6 counterpart, which is what was happening previously.
+ */
+#include <errno.h>
+#include <error.h>
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#define PORT 9999
+
+int open_port(int ipv6, int any)
+{
+       int fd = -1;
+       int reuseaddr = 1;
+       int v6only = 1;
+       int addrlen;
+       int ret = -1;
+       struct sockaddr *addr;
+       int family = ipv6 ? AF_INET6 : AF_INET;
+
+       struct sockaddr_in6 addr6 = {
+               .sin6_family = AF_INET6,
+               .sin6_port = htons(PORT),
+               .sin6_addr = in6addr_any
+       };
+       struct sockaddr_in addr4 = {
+               .sin_family = AF_INET,
+               .sin_port = htons(PORT),
+               .sin_addr.s_addr = any ? htonl(INADDR_ANY) : inet_addr("127.0.0.1"),
+       };
+
+
+       if (ipv6) {
+               addr = (struct sockaddr*)&addr6;
+               addrlen = sizeof(addr6);
+       } else {
+               addr = (struct sockaddr*)&addr4;
+               addrlen = sizeof(addr4);
+       }
+
+       if ((fd = socket(family, SOCK_STREAM, IPPROTO_TCP)) < 0) {
+               perror("socket");
+               goto out;
+       }
+
+       if (ipv6 && setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&v6only,
+                              sizeof(v6only)) < 0) {
+               perror("setsockopt IPV6_V6ONLY");
+               goto out;
+       }
+
+       if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr,
+                      sizeof(reuseaddr)) < 0) {
+               perror("setsockopt SO_REUSEADDR");
+               goto out;
+       }
+
+       if (bind(fd, addr, addrlen) < 0) {
+               perror("bind");
+               goto out;
+       }
+
+       if (any)
+               return fd;
+
+       if (listen(fd, 1) < 0) {
+               perror("listen");
+               goto out;
+       }
+       return fd;
+out:
+       close(fd);
+       return ret;
+}
+
+int main(void)
+{
+       int listenfd;
+       int fd1, fd2;
+
+       fprintf(stderr, "Opening 127.0.0.1:%d\n", PORT);
+       listenfd = open_port(0, 0);
+       if (listenfd < 0)
+               error(1, errno, "Couldn't open listen socket");
+       fprintf(stderr, "Opening INADDR_ANY:%d\n", PORT);
+       fd1 = open_port(0, 1);
+       if (fd1 >= 0)
+               error(1, 0, "Was allowed to create an ipv4 reuseport on a already bound non-reuseport socket");
+       fprintf(stderr, "Opening in6addr_any:%d\n", PORT);
+       fd1 = open_port(1, 1);
+       if (fd1 < 0)
+               error(1, errno, "Couldn't open ipv6 reuseport");
+       fprintf(stderr, "Opening INADDR_ANY:%d\n", PORT);
+       fd2 = open_port(0, 1);
+       if (fd2 >= 0)
+               error(1, 0, "Was allowed to create an ipv4 reuseport on a already bound non-reuseport socket");
+       close(fd1);
+       fprintf(stderr, "Opening INADDR_ANY:%d after closing ipv6 socket\n", PORT);
+       fd1 = open_port(0, 1);
+       if (fd1 >= 0)
+               error(1, 0, "Was allowed to create an ipv4 reuseport on an already bound non-reuseport socket with no ipv6");
+       fprintf(stderr, "Success");
+       return 0;
+}
index 00f286661dcd2c0459941e03dcccf0af4d17fd37..dd4162fc0419bca7a7dccd17db8b99b61fb7f4a3 100644 (file)
@@ -341,7 +341,7 @@ int main(int argc, char **argv)
                        return 0;
                case 'n':
                        t = atoi(optarg);
-                       if (t > ARRAY_SIZE(test_cases))
+                       if (t >= ARRAY_SIZE(test_cases))
                                error(1, 0, "Invalid test case: %d", t);
                        all_tests = false;
                        test_cases[t].enabled = true;
index aeb0c805f3ca0f9471ae0f211e5633aa9d7a52f8..553d870b4ca983252a19dbe3a52ddb23e492a06a 100644 (file)
@@ -1,8 +1,16 @@
-TEST_GEN_PROGS := seccomp_bpf
-CFLAGS += -Wl,-no-as-needed -Wall
-LDFLAGS += -lpthread
+all:
 
 include ../lib.mk
 
-$(TEST_GEN_PROGS): seccomp_bpf.c ../kselftest_harness.h
-       $(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+.PHONY: all clean
+
+BINARIES := seccomp_bpf seccomp_benchmark
+CFLAGS += -Wl,-no-as-needed -Wall
+
+seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h
+       $(CC) $(CFLAGS) $(LDFLAGS) -lpthread $< -o $@
+
+TEST_PROGS += $(BINARIES)
+EXTRA_CLEAN := $(BINARIES)
+
+all: $(BINARIES)
diff --git a/tools/testing/selftests/seccomp/seccomp_benchmark.c b/tools/testing/selftests/seccomp/seccomp_benchmark.c
new file mode 100644 (file)
index 0000000..5838c86
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * Strictly speaking, this is not a test. But it can report during test
+ * runs so relative performace can be measured.
+ */
+#define _GNU_SOURCE
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <unistd.h>
+#include <linux/filter.h>
+#include <linux/seccomp.h>
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+
+#define ARRAY_SIZE(a)    (sizeof(a) / sizeof(a[0]))
+
+unsigned long long timing(clockid_t clk_id, unsigned long long samples)
+{
+       pid_t pid, ret;
+       unsigned long long i;
+       struct timespec start, finish;
+
+       pid = getpid();
+       assert(clock_gettime(clk_id, &start) == 0);
+       for (i = 0; i < samples; i++) {
+               ret = syscall(__NR_getpid);
+               assert(pid == ret);
+       }
+       assert(clock_gettime(clk_id, &finish) == 0);
+
+       i = finish.tv_sec - start.tv_sec;
+       i *= 1000000000;
+       i += finish.tv_nsec - start.tv_nsec;
+
+       printf("%lu.%09lu - %lu.%09lu = %llu\n",
+               finish.tv_sec, finish.tv_nsec,
+               start.tv_sec, start.tv_nsec,
+               i);
+
+       return i;
+}
+
+unsigned long long calibrate(void)
+{
+       unsigned long long i;
+
+       printf("Calibrating reasonable sample size...\n");
+
+       for (i = 5; ; i++) {
+               unsigned long long samples = 1 << i;
+
+               /* Find something that takes more than 5 seconds to run. */
+               if (timing(CLOCK_REALTIME, samples) / 1000000000ULL > 5)
+                       return samples;
+       }
+}
+
+int main(int argc, char *argv[])
+{
+       struct sock_filter filter[] = {
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+       };
+       struct sock_fprog prog = {
+               .len = (unsigned short)ARRAY_SIZE(filter),
+               .filter = filter,
+       };
+       long ret;
+       unsigned long long samples;
+       unsigned long long native, filtered;
+
+       if (argc > 1)
+               samples = strtoull(argv[1], NULL, 0);
+       else
+               samples = calibrate();
+
+       printf("Benchmarking %llu samples...\n", samples);
+
+       native = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
+       printf("getpid native: %llu ns\n", native);
+
+       ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+       assert(ret == 0);
+
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
+       assert(ret == 0);
+
+       filtered = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
+       printf("getpid RET_ALLOW: %llu ns\n", filtered);
+
+       printf("Estimated seccomp overhead per syscall: %llu ns\n",
+               filtered - native);
+
+       if (filtered == native)
+               printf("Trying running again with more samples.\n");
+
+       return 0;
+}
index 4d6f92a9df6b4aaa0bec897b108bf7cafcae41ef..24dbf634e2dd8c869a7201e7d89efe4102fa6373 100644 (file)
@@ -6,10 +6,18 @@
  */
 
 #include <sys/types.h>
-#include <asm/siginfo.h>
-#define __have_siginfo_t 1
-#define __have_sigval_t 1
-#define __have_sigevent_t 1
+
+/*
+ * glibc 2.26 and later have SIGSYS in siginfo_t. Before that,
+ * we need to use the kernel's siginfo.h file and trick glibc
+ * into accepting it.
+ */
+#if !__GLIBC_PREREQ(2, 26)
+# include <asm/siginfo.h>
+# define __have_siginfo_t 1
+# define __have_sigval_t 1
+# define __have_sigevent_t 1
+#endif
 
 #include <errno.h>
 #include <linux/filter.h>
 #define SECCOMP_MODE_FILTER 2
 #endif
 
-#ifndef SECCOMP_RET_KILL
-#define SECCOMP_RET_KILL        0x00000000U /* kill the task immediately */
-#define SECCOMP_RET_TRAP        0x00030000U /* disallow and force a SIGSYS */
-#define SECCOMP_RET_ERRNO       0x00050000U /* returns an errno */
-#define SECCOMP_RET_TRACE       0x7ff00000U /* pass to a tracer or disallow */
-#define SECCOMP_RET_ALLOW       0x7fff0000U /* allow */
-
-/* Masks for the return value sections. */
-#define SECCOMP_RET_ACTION      0x7fff0000U
-#define SECCOMP_RET_DATA        0x0000ffffU
-
+#ifndef SECCOMP_RET_ALLOW
 struct seccomp_data {
        int nr;
        __u32 arch;
@@ -87,6 +85,70 @@ struct seccomp_data {
 };
 #endif
 
+#ifndef SECCOMP_RET_KILL_PROCESS
+#define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */
+#define SECCOMP_RET_KILL_THREAD         0x00000000U /* kill the thread */
+#endif
+#ifndef SECCOMP_RET_KILL
+#define SECCOMP_RET_KILL        SECCOMP_RET_KILL_THREAD
+#define SECCOMP_RET_TRAP        0x00030000U /* disallow and force a SIGSYS */
+#define SECCOMP_RET_ERRNO       0x00050000U /* returns an errno */
+#define SECCOMP_RET_TRACE       0x7ff00000U /* pass to a tracer or disallow */
+#define SECCOMP_RET_ALLOW       0x7fff0000U /* allow */
+#endif
+#ifndef SECCOMP_RET_LOG
+#define SECCOMP_RET_LOG                 0x7ffc0000U /* allow after logging */
+#endif
+
+#ifndef __NR_seccomp
+# if defined(__i386__)
+#  define __NR_seccomp 354
+# elif defined(__x86_64__)
+#  define __NR_seccomp 317
+# elif defined(__arm__)
+#  define __NR_seccomp 383
+# elif defined(__aarch64__)
+#  define __NR_seccomp 277
+# elif defined(__hppa__)
+#  define __NR_seccomp 338
+# elif defined(__powerpc__)
+#  define __NR_seccomp 358
+# elif defined(__s390__)
+#  define __NR_seccomp 348
+# else
+#  warning "seccomp syscall number unknown for this architecture"
+#  define __NR_seccomp 0xffff
+# endif
+#endif
+
+#ifndef SECCOMP_SET_MODE_STRICT
+#define SECCOMP_SET_MODE_STRICT 0
+#endif
+
+#ifndef SECCOMP_SET_MODE_FILTER
+#define SECCOMP_SET_MODE_FILTER 1
+#endif
+
+#ifndef SECCOMP_GET_ACTION_AVAIL
+#define SECCOMP_GET_ACTION_AVAIL 2
+#endif
+
+#ifndef SECCOMP_FILTER_FLAG_TSYNC
+#define SECCOMP_FILTER_FLAG_TSYNC 1
+#endif
+
+#ifndef SECCOMP_FILTER_FLAG_LOG
+#define SECCOMP_FILTER_FLAG_LOG 2
+#endif
+
+#ifndef seccomp
+int seccomp(unsigned int op, unsigned int flags, void *args)
+{
+       errno = 0;
+       return syscall(__NR_seccomp, op, flags, args);
+}
+#endif
+
 #if __BYTE_ORDER == __LITTLE_ENDIAN
 #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
 #elif __BYTE_ORDER == __BIG_ENDIAN
@@ -136,7 +198,7 @@ TEST(no_new_privs_support)
        }
 }
 
-/* Tests kernel support by checking for a copy_from_user() fault on NULL. */
+/* Tests kernel support by checking for a copy_from_user() fault on NULL. */
 TEST(mode_filter_support)
 {
        long ret;
@@ -342,6 +404,28 @@ TEST(empty_prog)
        EXPECT_EQ(EINVAL, errno);
 }
 
+TEST(log_all)
+{
+       struct sock_filter filter[] = {
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
+       };
+       struct sock_fprog prog = {
+               .len = (unsigned short)ARRAY_SIZE(filter),
+               .filter = filter,
+       };
+       long ret;
+       pid_t parent = getppid();
+
+       ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+       ASSERT_EQ(0, ret);
+
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
+       ASSERT_EQ(0, ret);
+
+       /* getppid() should succeed and be logged (no check for logging) */
+       EXPECT_EQ(parent, syscall(__NR_getppid));
+}
+
 TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS)
 {
        struct sock_filter filter[] = {
@@ -520,6 +604,117 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
        close(fd);
 }
 
+/* This is a thread task to die via seccomp filter violation. */
+void *kill_thread(void *data)
+{
+       bool die = (bool)data;
+
+       if (die) {
+               prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
+               return (void *)SIBLING_EXIT_FAILURE;
+       }
+
+       return (void *)SIBLING_EXIT_UNKILLED;
+}
+
+/* Prepare a thread that will kill itself or both of us. */
+void kill_thread_or_group(struct __test_metadata *_metadata, bool kill_process)
+{
+       pthread_t thread;
+       void *status;
+       /* Kill only when calling __NR_prctl. */
+       struct sock_filter filter_thread[] = {
+               BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
+                       offsetof(struct seccomp_data, nr)),
+               BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD),
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+       };
+       struct sock_fprog prog_thread = {
+               .len = (unsigned short)ARRAY_SIZE(filter_thread),
+               .filter = filter_thread,
+       };
+       struct sock_filter filter_process[] = {
+               BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
+                       offsetof(struct seccomp_data, nr)),
+               BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_PROCESS),
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+       };
+       struct sock_fprog prog_process = {
+               .len = (unsigned short)ARRAY_SIZE(filter_process),
+               .filter = filter_process,
+       };
+
+       ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
+               TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+       }
+
+       ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0,
+                            kill_process ? &prog_process : &prog_thread));
+
+       /*
+        * Add the KILL_THREAD rule again to make sure that the KILL_PROCESS
+        * flag cannot be downgraded by a new filter.
+        */
+       ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog_thread));
+
+       /* Start a thread that will exit immediately. */
+       ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)false));
+       ASSERT_EQ(0, pthread_join(thread, &status));
+       ASSERT_EQ(SIBLING_EXIT_UNKILLED, (unsigned long)status);
+
+       /* Start a thread that will die immediately. */
+       ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)true));
+       ASSERT_EQ(0, pthread_join(thread, &status));
+       ASSERT_NE(SIBLING_EXIT_FAILURE, (unsigned long)status);
+
+       /*
+        * If we get here, only the spawned thread died. Let the parent know
+        * the whole process didn't die (i.e. this thread, the spawner,
+        * stayed running).
+        */
+       exit(42);
+}
+
+TEST(KILL_thread)
+{
+       int status;
+       pid_t child_pid;
+
+       child_pid = fork();
+       ASSERT_LE(0, child_pid);
+       if (child_pid == 0) {
+               kill_thread_or_group(_metadata, false);
+               _exit(38);
+       }
+
+       ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
+
+       /* If only the thread was killed, we'll see exit 42. */
+       ASSERT_TRUE(WIFEXITED(status));
+       ASSERT_EQ(42, WEXITSTATUS(status));
+}
+
+TEST(KILL_process)
+{
+       int status;
+       pid_t child_pid;
+
+       child_pid = fork();
+       ASSERT_LE(0, child_pid);
+       if (child_pid == 0) {
+               kill_thread_or_group(_metadata, true);
+               _exit(38);
+       }
+
+       ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
+
+       /* If the entire process was killed, we'll see SIGSYS. */
+       ASSERT_TRUE(WIFSIGNALED(status));
+       ASSERT_EQ(SIGSYS, WTERMSIG(status));
+}
+
 /* TODO(wad) add 64-bit versus 32-bit arg tests. */
 TEST(arg_out_of_range)
 {
@@ -541,26 +736,30 @@ TEST(arg_out_of_range)
        EXPECT_EQ(EINVAL, errno);
 }
 
+#define ERRNO_FILTER(name, errno)                                      \
+       struct sock_filter _read_filter_##name[] = {                    \
+               BPF_STMT(BPF_LD|BPF_W|BPF_ABS,                          \
+                       offsetof(struct seccomp_data, nr)),             \
+               BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),       \
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | errno),     \
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),             \
+       };                                                              \
+       struct sock_fprog prog_##name = {                               \
+               .len = (unsigned short)ARRAY_SIZE(_read_filter_##name), \
+               .filter = _read_filter_##name,                          \
+       }
+
+/* Make sure basic errno values are correctly passed through a filter. */
 TEST(ERRNO_valid)
 {
-       struct sock_filter filter[] = {
-               BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
-                       offsetof(struct seccomp_data, nr)),
-               BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
-               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | E2BIG),
-               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
-       };
-       struct sock_fprog prog = {
-               .len = (unsigned short)ARRAY_SIZE(filter),
-               .filter = filter,
-       };
+       ERRNO_FILTER(valid, E2BIG);
        long ret;
        pid_t parent = getppid();
 
        ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
        ASSERT_EQ(0, ret);
 
-       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_valid);
        ASSERT_EQ(0, ret);
 
        EXPECT_EQ(parent, syscall(__NR_getppid));
@@ -568,26 +767,17 @@ TEST(ERRNO_valid)
        EXPECT_EQ(E2BIG, errno);
 }
 
+/* Make sure an errno of zero is correctly handled by the arch code. */
 TEST(ERRNO_zero)
 {
-       struct sock_filter filter[] = {
-               BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
-                       offsetof(struct seccomp_data, nr)),
-               BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
-               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 0),
-               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
-       };
-       struct sock_fprog prog = {
-               .len = (unsigned short)ARRAY_SIZE(filter),
-               .filter = filter,
-       };
+       ERRNO_FILTER(zero, 0);
        long ret;
        pid_t parent = getppid();
 
        ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
        ASSERT_EQ(0, ret);
 
-       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_zero);
        ASSERT_EQ(0, ret);
 
        EXPECT_EQ(parent, syscall(__NR_getppid));
@@ -595,26 +785,21 @@ TEST(ERRNO_zero)
        EXPECT_EQ(0, read(0, NULL, 0));
 }
 
+/*
+ * The SECCOMP_RET_DATA mask is 16 bits wide, but errno is smaller.
+ * This tests that the errno value gets capped correctly, fixed by
+ * 580c57f10768 ("seccomp: cap SECCOMP_RET_ERRNO data to MAX_ERRNO").
+ */
 TEST(ERRNO_capped)
 {
-       struct sock_filter filter[] = {
-               BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
-                       offsetof(struct seccomp_data, nr)),
-               BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
-               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 4096),
-               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
-       };
-       struct sock_fprog prog = {
-               .len = (unsigned short)ARRAY_SIZE(filter),
-               .filter = filter,
-       };
+       ERRNO_FILTER(capped, 4096);
        long ret;
        pid_t parent = getppid();
 
        ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
        ASSERT_EQ(0, ret);
 
-       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_capped);
        ASSERT_EQ(0, ret);
 
        EXPECT_EQ(parent, syscall(__NR_getppid));
@@ -622,6 +807,37 @@ TEST(ERRNO_capped)
        EXPECT_EQ(4095, errno);
 }
 
+/*
+ * Filters are processed in reverse order: last applied is executed first.
+ * Since only the SECCOMP_RET_ACTION mask is tested for return values, the
+ * SECCOMP_RET_DATA mask results will follow the most recently applied
+ * matching filter return (and not the lowest or highest value).
+ */
+TEST(ERRNO_order)
+{
+       ERRNO_FILTER(first,  11);
+       ERRNO_FILTER(second, 13);
+       ERRNO_FILTER(third,  12);
+       long ret;
+       pid_t parent = getppid();
+
+       ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+       ASSERT_EQ(0, ret);
+
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_first);
+       ASSERT_EQ(0, ret);
+
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_second);
+       ASSERT_EQ(0, ret);
+
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_third);
+       ASSERT_EQ(0, ret);
+
+       EXPECT_EQ(parent, syscall(__NR_getppid));
+       EXPECT_EQ(-1, read(0, NULL, 0));
+       EXPECT_EQ(12, errno);
+}
+
 FIXTURE_DATA(TRAP) {
        struct sock_fprog prog;
 };
@@ -676,7 +892,7 @@ TEST_F_SIGNAL(TRAP, ign, SIGSYS)
        syscall(__NR_getpid);
 }
 
-static struct siginfo TRAP_info;
+static siginfo_t TRAP_info;
 static volatile int TRAP_nr;
 static void TRAP_action(int nr, siginfo_t *info, void *void_context)
 {
@@ -735,6 +951,7 @@ TEST_F(TRAP, handler)
 
 FIXTURE_DATA(precedence) {
        struct sock_fprog allow;
+       struct sock_fprog log;
        struct sock_fprog trace;
        struct sock_fprog error;
        struct sock_fprog trap;
@@ -746,6 +963,13 @@ FIXTURE_SETUP(precedence)
        struct sock_filter allow_insns[] = {
                BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
        };
+       struct sock_filter log_insns[] = {
+               BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
+                       offsetof(struct seccomp_data, nr)),
+               BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
+       };
        struct sock_filter trace_insns[] = {
                BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
                        offsetof(struct seccomp_data, nr)),
@@ -782,6 +1006,7 @@ FIXTURE_SETUP(precedence)
        memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \
        self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns)
        FILTER_ALLOC(allow);
+       FILTER_ALLOC(log);
        FILTER_ALLOC(trace);
        FILTER_ALLOC(error);
        FILTER_ALLOC(trap);
@@ -792,6 +1017,7 @@ FIXTURE_TEARDOWN(precedence)
 {
 #define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter)
        FILTER_FREE(allow);
+       FILTER_FREE(log);
        FILTER_FREE(trace);
        FILTER_FREE(error);
        FILTER_FREE(trap);
@@ -809,6 +1035,8 @@ TEST_F(precedence, allow_ok)
 
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
        ASSERT_EQ(0, ret);
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
+       ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
        ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
@@ -833,6 +1061,8 @@ TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS)
 
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
        ASSERT_EQ(0, ret);
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
+       ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
        ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
@@ -864,6 +1094,8 @@ TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS)
        ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
        ASSERT_EQ(0, ret);
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
+       ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
        ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
@@ -885,6 +1117,8 @@ TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS)
 
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
        ASSERT_EQ(0, ret);
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
+       ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
        ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
@@ -910,6 +1144,8 @@ TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS)
        ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
        ASSERT_EQ(0, ret);
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
+       ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
        ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
@@ -931,6 +1167,8 @@ TEST_F(precedence, errno_is_third)
 
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
        ASSERT_EQ(0, ret);
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
+       ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
        ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
@@ -949,6 +1187,8 @@ TEST_F(precedence, errno_is_third_in_any_order)
        ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
        ASSERT_EQ(0, ret);
 
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
+       ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
        ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
@@ -971,6 +1211,8 @@ TEST_F(precedence, trace_is_fourth)
 
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
        ASSERT_EQ(0, ret);
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
+       ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
        ASSERT_EQ(0, ret);
        /* Should work just fine. */
@@ -992,12 +1234,54 @@ TEST_F(precedence, trace_is_fourth_in_any_order)
        ASSERT_EQ(0, ret);
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
        ASSERT_EQ(0, ret);
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
+       ASSERT_EQ(0, ret);
        /* Should work just fine. */
        EXPECT_EQ(parent, syscall(__NR_getppid));
        /* No ptracer */
        EXPECT_EQ(-1, syscall(__NR_getpid));
 }
 
+TEST_F(precedence, log_is_fifth)
+{
+       pid_t mypid, parent;
+       long ret;
+
+       mypid = getpid();
+       parent = getppid();
+       ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+       ASSERT_EQ(0, ret);
+
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
+       ASSERT_EQ(0, ret);
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
+       ASSERT_EQ(0, ret);
+       /* Should work just fine. */
+       EXPECT_EQ(parent, syscall(__NR_getppid));
+       /* Should also work just fine */
+       EXPECT_EQ(mypid, syscall(__NR_getpid));
+}
+
+TEST_F(precedence, log_is_fifth_in_any_order)
+{
+       pid_t mypid, parent;
+       long ret;
+
+       mypid = getpid();
+       parent = getppid();
+       ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+       ASSERT_EQ(0, ret);
+
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
+       ASSERT_EQ(0, ret);
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
+       ASSERT_EQ(0, ret);
+       /* Should work just fine. */
+       EXPECT_EQ(parent, syscall(__NR_getppid));
+       /* Should also work just fine */
+       EXPECT_EQ(mypid, syscall(__NR_getpid));
+}
+
 #ifndef PTRACE_O_TRACESECCOMP
 #define PTRACE_O_TRACESECCOMP  0x00000080
 #endif
@@ -1262,6 +1546,13 @@ TEST_F(TRACE_poke, getpid_runs_normally)
 # error "Do not know how to find your architecture's registers and syscalls"
 #endif
 
+/* When the syscall return can't be changed, stub out the tests for it. */
+#ifdef SYSCALL_NUM_RET_SHARE_REG
+# define EXPECT_SYSCALL_RETURN(val, action)    EXPECT_EQ(-1, action)
+#else
+# define EXPECT_SYSCALL_RETURN(val, action)    EXPECT_EQ(val, action)
+#endif
+
 /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
  * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux).
  */
@@ -1357,7 +1648,7 @@ void change_syscall(struct __test_metadata *_metadata,
 #ifdef SYSCALL_NUM_RET_SHARE_REG
                TH_LOG("Can't modify syscall return on this architecture");
 #else
-               regs.SYSCALL_RET = 1;
+               regs.SYSCALL_RET = EPERM;
 #endif
 
 #ifdef HAVE_GETREGS
@@ -1426,6 +1717,8 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
 
        if (nr == __NR_getpid)
                change_syscall(_metadata, tracee, __NR_getppid);
+       if (nr == __NR_open)
+               change_syscall(_metadata, tracee, -1);
 }
 
 FIXTURE_DATA(TRACE_syscall) {
@@ -1480,6 +1773,28 @@ FIXTURE_TEARDOWN(TRACE_syscall)
                free(self->prog.filter);
 }
 
+TEST_F(TRACE_syscall, ptrace_syscall_redirected)
+{
+       /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
+       teardown_trace_fixture(_metadata, self->tracer);
+       self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
+                                          true);
+
+       /* Tracer will redirect getpid to getppid. */
+       EXPECT_NE(self->mypid, syscall(__NR_getpid));
+}
+
+TEST_F(TRACE_syscall, ptrace_syscall_dropped)
+{
+       /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
+       teardown_trace_fixture(_metadata, self->tracer);
+       self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
+                                          true);
+
+       /* Tracer should skip the open syscall, resulting in EPERM. */
+       EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_open));
+}
+
 TEST_F(TRACE_syscall, syscall_allowed)
 {
        long ret;
@@ -1520,13 +1835,8 @@ TEST_F(TRACE_syscall, syscall_dropped)
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
        ASSERT_EQ(0, ret);
 
-#ifdef SYSCALL_NUM_RET_SHARE_REG
-       /* gettid has been skipped */
-       EXPECT_EQ(-1, syscall(__NR_gettid));
-#else
        /* gettid has been skipped and an altered return value stored. */
-       EXPECT_EQ(1, syscall(__NR_gettid));
-#endif
+       EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid));
        EXPECT_NE(self->mytid, syscall(__NR_gettid));
 }
 
@@ -1557,6 +1867,7 @@ TEST_F(TRACE_syscall, skip_after_RET_TRACE)
        ASSERT_EQ(0, ret);
 
        /* Tracer will redirect getpid to getppid, and we should see EPERM. */
+       errno = 0;
        EXPECT_EQ(-1, syscall(__NR_getpid));
        EXPECT_EQ(EPERM, errno);
 }
@@ -1654,47 +1965,6 @@ TEST_F_SIGNAL(TRACE_syscall, kill_after_ptrace, SIGSYS)
        EXPECT_NE(self->mypid, syscall(__NR_getpid));
 }
 
-#ifndef __NR_seccomp
-# if defined(__i386__)
-#  define __NR_seccomp 354
-# elif defined(__x86_64__)
-#  define __NR_seccomp 317
-# elif defined(__arm__)
-#  define __NR_seccomp 383
-# elif defined(__aarch64__)
-#  define __NR_seccomp 277
-# elif defined(__hppa__)
-#  define __NR_seccomp 338
-# elif defined(__powerpc__)
-#  define __NR_seccomp 358
-# elif defined(__s390__)
-#  define __NR_seccomp 348
-# else
-#  warning "seccomp syscall number unknown for this architecture"
-#  define __NR_seccomp 0xffff
-# endif
-#endif
-
-#ifndef SECCOMP_SET_MODE_STRICT
-#define SECCOMP_SET_MODE_STRICT 0
-#endif
-
-#ifndef SECCOMP_SET_MODE_FILTER
-#define SECCOMP_SET_MODE_FILTER 1
-#endif
-
-#ifndef SECCOMP_FILTER_FLAG_TSYNC
-#define SECCOMP_FILTER_FLAG_TSYNC 1
-#endif
-
-#ifndef seccomp
-int seccomp(unsigned int op, unsigned int flags, void *args)
-{
-       errno = 0;
-       return syscall(__NR_seccomp, op, flags, args);
-}
-#endif
-
 TEST(seccomp_syscall)
 {
        struct sock_filter filter[] = {
@@ -1783,6 +2053,67 @@ TEST(seccomp_syscall_mode_lock)
        }
 }
 
+/*
+ * Test detection of known and unknown filter flags. Userspace needs to be able
+ * to check if a filter flag is supported by the current kernel and a good way
+ * of doing that is by attempting to enter filter mode, with the flag bit in
+ * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates
+ * that the flag is valid and EINVAL indicates that the flag is invalid.
+ */
+TEST(detect_seccomp_filter_flags)
+{
+       unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
+                                SECCOMP_FILTER_FLAG_LOG };
+       unsigned int flag, all_flags;
+       int i;
+       long ret;
+
+       /* Test detection of known-good filter flags */
+       for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
+               flag = flags[i];
+               ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+               ASSERT_NE(ENOSYS, errno) {
+                       TH_LOG("Kernel does not support seccomp syscall!");
+               }
+               EXPECT_EQ(-1, ret);
+               EXPECT_EQ(EFAULT, errno) {
+                       TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!",
+                              flag);
+               }
+
+               all_flags |= flag;
+       }
+
+       /* Test detection of all known-good filter flags */
+       ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL);
+       EXPECT_EQ(-1, ret);
+       EXPECT_EQ(EFAULT, errno) {
+               TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
+                      all_flags);
+       }
+
+       /* Test detection of an unknown filter flag */
+       flag = -1;
+       ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+       EXPECT_EQ(-1, ret);
+       EXPECT_EQ(EINVAL, errno) {
+               TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!",
+                      flag);
+       }
+
+       /*
+        * Test detection of an unknown filter flag that may simply need to be
+        * added to this test
+        */
+       flag = flags[ARRAY_SIZE(flags) - 1] << 1;
+       ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+       EXPECT_EQ(-1, ret);
+       EXPECT_EQ(EINVAL, errno) {
+               TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?",
+                      flag);
+       }
+}
+
 TEST(TSYNC_first)
 {
        struct sock_filter filter[] = {
@@ -2421,6 +2752,99 @@ TEST(syscall_restart)
                _metadata->passed = 0;
 }
 
+TEST_SIGNAL(filter_flag_log, SIGSYS)
+{
+       struct sock_filter allow_filter[] = {
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+       };
+       struct sock_filter kill_filter[] = {
+               BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
+                       offsetof(struct seccomp_data, nr)),
+               BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+       };
+       struct sock_fprog allow_prog = {
+               .len = (unsigned short)ARRAY_SIZE(allow_filter),
+               .filter = allow_filter,
+       };
+       struct sock_fprog kill_prog = {
+               .len = (unsigned short)ARRAY_SIZE(kill_filter),
+               .filter = kill_filter,
+       };
+       long ret;
+       pid_t parent = getppid();
+
+       ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+       ASSERT_EQ(0, ret);
+
+       /* Verify that the FILTER_FLAG_LOG flag isn't accepted in strict mode */
+       ret = seccomp(SECCOMP_SET_MODE_STRICT, SECCOMP_FILTER_FLAG_LOG,
+                     &allow_prog);
+       ASSERT_NE(ENOSYS, errno) {
+               TH_LOG("Kernel does not support seccomp syscall!");
+       }
+       EXPECT_NE(0, ret) {
+               TH_LOG("Kernel accepted FILTER_FLAG_LOG flag in strict mode!");
+       }
+       EXPECT_EQ(EINVAL, errno) {
+               TH_LOG("Kernel returned unexpected errno for FILTER_FLAG_LOG flag in strict mode!");
+       }
+
+       /* Verify that a simple, permissive filter can be added with no flags */
+       ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &allow_prog);
+       EXPECT_EQ(0, ret);
+
+       /* See if the same filter can be added with the FILTER_FLAG_LOG flag */
+       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG,
+                     &allow_prog);
+       ASSERT_NE(EINVAL, errno) {
+               TH_LOG("Kernel does not support the FILTER_FLAG_LOG flag!");
+       }
+       EXPECT_EQ(0, ret);
+
+       /* Ensure that the kill filter works with the FILTER_FLAG_LOG flag */
+       ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG,
+                     &kill_prog);
+       EXPECT_EQ(0, ret);
+
+       EXPECT_EQ(parent, syscall(__NR_getppid));
+       /* getpid() should never return. */
+       EXPECT_EQ(0, syscall(__NR_getpid));
+}
+
+TEST(get_action_avail)
+{
+       __u32 actions[] = { SECCOMP_RET_KILL_THREAD, SECCOMP_RET_TRAP,
+                           SECCOMP_RET_ERRNO, SECCOMP_RET_TRACE,
+                           SECCOMP_RET_LOG,   SECCOMP_RET_ALLOW };
+       __u32 unknown_action = 0x10000000U;
+       int i;
+       long ret;
+
+       ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[0]);
+       ASSERT_NE(ENOSYS, errno) {
+               TH_LOG("Kernel does not support seccomp syscall!");
+       }
+       ASSERT_NE(EINVAL, errno) {
+               TH_LOG("Kernel does not support SECCOMP_GET_ACTION_AVAIL operation!");
+       }
+       EXPECT_EQ(ret, 0);
+
+       for (i = 0; i < ARRAY_SIZE(actions); i++) {
+               ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[i]);
+               EXPECT_EQ(ret, 0) {
+                       TH_LOG("Expected action (0x%X) not available!",
+                              actions[i]);
+               }
+       }
+
+       /* Check that an unknown action is handled properly (EOPNOTSUPP) */
+       ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &unknown_action);
+       EXPECT_EQ(ret, -1);
+       EXPECT_EQ(errno, EOPNOTSUPP);
+}
+
 /*
  * TODO:
  * - add microbenchmarks
@@ -2429,6 +2853,8 @@ TEST(syscall_restart)
  * - endianness checking when appropriate
  * - 64-bit arg prodding
  * - arch value testing (x86 modes especially)
+ * - verify that FILTER_FLAG_LOG filters generate log messages
+ * - verify that RET_LOG generates log messages
  * - ...
  */
 
index 7d406c3973ba4944a4baee8363f59c736a98b52e..97bb150837df02422fc8a005d683cebb6191878f 100644 (file)
@@ -39,7 +39,11 @@ void my_usr1(int sig, siginfo_t *si, void *u)
        stack_t stk;
        struct stk_data *p;
 
+#if __s390x__
+       register unsigned long sp asm("%15");
+#else
        register unsigned long sp asm("sp");
+#endif
 
        if (sp < (unsigned long)sstack ||
                        sp >= (unsigned long)sstack + SIGSTKSZ) {
index 4981c6b6d050e95b77fa1540640a4b71770179f2..8e04d0afcbd7f9f820af5c550a0fa4741fc62141 100644 (file)
@@ -2,12 +2,16 @@ CFLAGS += -O2 -g -std=gnu89 -pthread -Wall -Wextra
 CFLAGS += -I../../../../usr/include/
 LDFLAGS += -pthread
 
-TEST_PROGS = sync_test
-
-all: $(TEST_PROGS)
+.PHONY: all clean
 
 include ../lib.mk
 
+# lib.mk TEST_CUSTOM_PROGS var is for custom tests that need special
+# build rules. lib.mk will run and install them.
+
+TEST_CUSTOM_PROGS := $(OUTPUT)/sync_test
+all: $(TEST_CUSTOM_PROGS)
+
 OBJS = sync_test.o sync.o
 
 TESTS += sync_alloc.o
@@ -18,6 +22,16 @@ TESTS += sync_stress_parallelism.o
 TESTS += sync_stress_consumer.o
 TESTS += sync_stress_merge.o
 
-sync_test: $(OBJS) $(TESTS)
+OBJS := $(patsubst %,$(OUTPUT)/%,$(OBJS))
+TESTS := $(patsubst %,$(OUTPUT)/%,$(TESTS))
+
+$(TEST_CUSTOM_PROGS): $(TESTS) $(OBJS)
+       $(CC) -o $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS) $(CFLAGS) $(LDFLAGS)
+
+$(OBJS): $(OUTPUT)/%.o: %.c
+       $(CC) -c $^ -o $@
+
+$(TESTS): $(OUTPUT)/%.o: %.c
+       $(CC) -c $^ -o $@
 
-EXTRA_CLEAN := sync_test $(OBJS) $(TESTS)
+EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS)
index c727b96a59b0d009ed93246d0f624ff1bdef045c..5fa02d86b35f6c7b69ca28e8dfc27acdabd1e470 100644 (file)
         "teardown": [
             "$TC qdisc del dev $DEV1 ingress"
         ]
+    },
+    {
+        "id": "d052",
+        "name": "Add 1M filters with the same action",
+        "category": [
+            "filter",
+            "flower"
+        ],
+        "setup": [
+            "$TC qdisc add dev $DEV2 ingress",
+            "./tdc_batch.py $DEV2 $BATCH_FILE --share_action -n 1000000"
+        ],
+        "cmdUnderTest": "$TC -b $BATCH_FILE",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action gact",
+        "matchPattern": "action order 0: gact action drop.*index 1 ref 1000000 bind 1000000",
+        "matchCount": "1",
+        "teardown": [
+            "$TC qdisc del dev $DEV2 ingress",
+            "/bin/rm $BATCH_FILE"
+        ]
     }
-]
\ No newline at end of file
+]
index cd61b7844c0d48505385103b7fd1a9ded7843677..5f11f5d7456e7c1488a2dabe38a81a5a5f949eea 100755 (executable)
@@ -88,7 +88,7 @@ def prepare_env(cmdlist):
             exit(1)
 
 
-def test_runner(filtered_tests):
+def test_runner(filtered_tests, args):
     """
     Driver function for the unit tests.
 
@@ -105,6 +105,8 @@ def test_runner(filtered_tests):
     for tidx in testlist:
         result = True
         tresult = ""
+        if "flower" in tidx["category"] and args.device == None:
+            continue
         print("Test " + tidx["id"] + ": " + tidx["name"])
         prepare_env(tidx["setup"])
         (p, procout) = exec_cmd(tidx["cmdUnderTest"])
@@ -152,6 +154,10 @@ def ns_create():
         exec_cmd(cmd, False)
         cmd = 'ip -s $NS link set $DEV1 up'
         exec_cmd(cmd, False)
+        cmd = 'ip link set $DEV2 netns $NS'
+        exec_cmd(cmd, False)
+        cmd = 'ip -s $NS link set $DEV2 up'
+        exec_cmd(cmd, False)
 
 
 def ns_destroy():
@@ -211,7 +217,8 @@ def set_args(parser):
                         help='Execute the single test case with specified ID')
     parser.add_argument('-i', '--id', action='store_true', dest='gen_id',
                         help='Generate ID numbers for new test cases')
-    return parser
+    parser.add_argument('-d', '--device',
+                        help='Execute the test case in flower category')
     return parser
 
 
@@ -225,6 +232,8 @@ def check_default_settings(args):
 
     if args.path != None:
          NAMES['TC'] = args.path
+    if args.device != None:
+         NAMES['DEV2'] = args.device
     if not os.path.isfile(NAMES['TC']):
         print("The specified tc path " + NAMES['TC'] + " does not exist.")
         exit(1)
@@ -381,14 +390,17 @@ def set_operation_mode(args):
             if (len(alltests) == 0):
                 print("Cannot find a test case with ID matching " + target_id)
                 exit(1)
-        catresults = test_runner(alltests)
+        catresults = test_runner(alltests, args)
         print("All test results: " + "\n\n" + catresults)
     elif (len(target_category) > 0):
+        if (target_category == "flower") and args.device == None:
+            print("Please specify a NIC device (-d) to run category flower")
+            exit(1)
         if (target_category not in ucat):
             print("Specified category is not present in this file.")
             exit(1)
         else:
-            catresults = test_runner(testcases[target_category])
+            catresults = test_runner(testcases[target_category], args)
             print("Category " + target_category + "\n\n" + catresults)
 
     ns_destroy()
diff --git a/tools/testing/selftests/tc-testing/tdc_batch.py b/tools/testing/selftests/tc-testing/tdc_batch.py
new file mode 100755 (executable)
index 0000000..707c6bf
--- /dev/null
@@ -0,0 +1,62 @@
+#!/usr/bin/python3
+
+"""
+tdc_batch.py - a script to generate TC batch file
+
+Copyright (C) 2017 Chris Mi <chrism@mellanox.com>
+"""
+
+import argparse
+
+parser = argparse.ArgumentParser(description='TC batch file generator')
+parser.add_argument("device", help="device name")
+parser.add_argument("file", help="batch file name")
+parser.add_argument("-n", "--number", type=int,
+                    help="how many lines in batch file")
+parser.add_argument("-o", "--skip_sw",
+                    help="skip_sw (offload), by default skip_hw",
+                    action="store_true")
+parser.add_argument("-s", "--share_action",
+                    help="all filters share the same action",
+                    action="store_true")
+parser.add_argument("-p", "--prio",
+                    help="all filters have different prio",
+                    action="store_true")
+args = parser.parse_args()
+
+device = args.device
+file = open(args.file, 'w')
+
+number = 1
+if args.number:
+    number = args.number
+
+skip = "skip_hw"
+if args.skip_sw:
+    skip = "skip_sw"
+
+share_action = ""
+if args.share_action:
+    share_action = "index 1"
+
+prio = "prio 1"
+if args.prio:
+    prio = ""
+    if number > 0x4000:
+        number = 0x4000
+
+index = 0
+for i in range(0x100):
+    for j in range(0x100):
+        for k in range(0x100):
+            mac = ("%02x:%02x:%02x" % (i, j, k))
+            src_mac = "e4:11:00:" + mac
+            dst_mac = "e4:12:00:" + mac
+            cmd = ("filter add dev %s %s protocol ip parent ffff: flower %s "
+                   "src_mac %s dst_mac %s action drop %s" %
+                   (device, prio, skip, src_mac, dst_mac, share_action))
+            file.write("%s\n" % cmd)
+            index += 1
+            if index >= number:
+                file.close()
+                exit(0)
index 01087375a7c38eec335ab518c8dd285c7c0858a1..b6352515c1b5778a6bcd7d40f672be3bb2fbfb11 100644 (file)
@@ -12,6 +12,8 @@ NAMES = {
           # Name of veth devices to be created for the namespace
           'DEV0': 'v0p0',
           'DEV1': 'v0p1',
+          'DEV2': '',
+          'BATCH_FILE': './batch.txt',
           # Name of the namespace to use
           'NS': 'tcut'
         }
index 9c92b7bd56410a38a0da0370bbee202f22d0778c..50da45437daab0b5785a4925ac793644cb9d02fd 100644 (file)
@@ -143,7 +143,8 @@ int setup_timer(int clock_id, int flags, int interval, timer_t *tm1)
                        printf("%-22s %s missing CAP_WAKE_ALARM?    : [UNSUPPORTED]\n",
                                        clockstring(clock_id),
                                        flags ? "ABSTIME":"RELTIME");
-                       return 0;
+                       /* Indicate timer isn't set, so caller doesn't wait */
+                       return 1;
                }
                printf("%s - timer_create() failed\n", clockstring(clock_id));
                return -1;
@@ -213,8 +214,9 @@ int do_timer(int clock_id, int flags)
        int err;
 
        err = setup_timer(clock_id, flags, interval, &tm1);
+       /* Unsupported case - return 0 to not fail the test */
        if (err)
-               return err;
+               return err == 1 ? 0 : err;
 
        while (alarmcount < 5)
                sleep(1);
@@ -228,18 +230,17 @@ int do_timer_oneshot(int clock_id, int flags)
        timer_t tm1;
        const int interval = 0;
        struct timeval timeout;
-       fd_set fds;
        int err;
 
        err = setup_timer(clock_id, flags, interval, &tm1);
+       /* Unsupported case - return 0 to not fail the test */
        if (err)
-               return err;
+               return err == 1 ? 0 : err;
 
        memset(&timeout, 0, sizeof(timeout));
        timeout.tv_sec = 5;
-       FD_ZERO(&fds);
        do {
-               err = select(FD_SETSIZE, &fds, NULL, NULL, &timeout);
+               err = select(0, NULL, NULL, NULL, &timeout);
        } while (err == -1 && errno == EINTR);
 
        timer_delete(tm1);
index a2c53a3d223d336e29a2e54f2e108a1c51810dae..de2f9ec8a87fb342a7a595a13b009358d9eae000 100644 (file)
@@ -397,7 +397,7 @@ static void retry_copy_page(int ufd, struct uffdio_copy *uffdio_copy,
        }
 }
 
-static int copy_page(int ufd, unsigned long offset)
+static int __copy_page(int ufd, unsigned long offset, bool retry)
 {
        struct uffdio_copy uffdio_copy;
 
@@ -418,7 +418,7 @@ static int copy_page(int ufd, unsigned long offset)
                fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n",
                        uffdio_copy.copy), exit(1);
        } else {
-               if (test_uffdio_copy_eexist) {
+               if (test_uffdio_copy_eexist && retry) {
                        test_uffdio_copy_eexist = false;
                        retry_copy_page(ufd, &uffdio_copy, offset);
                }
@@ -427,6 +427,16 @@ static int copy_page(int ufd, unsigned long offset)
        return 0;
 }
 
+static int copy_page_retry(int ufd, unsigned long offset)
+{
+       return __copy_page(ufd, offset, true);
+}
+
+static int copy_page(int ufd, unsigned long offset)
+{
+       return __copy_page(ufd, offset, false);
+}
+
 static void *uffd_poll_thread(void *arg)
 {
        unsigned long cpu = (unsigned long) arg;
@@ -544,7 +554,7 @@ static void *background_thread(void *arg)
        for (page_nr = cpu * nr_pages_per_cpu;
             page_nr < (cpu+1) * nr_pages_per_cpu;
             page_nr++)
-               copy_page(uffd, page_nr * page_size);
+               copy_page_retry(uffd, page_nr * page_size);
 
        return NULL;
 }
@@ -779,7 +789,7 @@ static void retry_uffdio_zeropage(int ufd,
        }
 }
 
-static int uffdio_zeropage(int ufd, unsigned long offset)
+static int __uffdio_zeropage(int ufd, unsigned long offset, bool retry)
 {
        struct uffdio_zeropage uffdio_zeropage;
        int ret;
@@ -814,7 +824,7 @@ static int uffdio_zeropage(int ufd, unsigned long offset)
                        fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n",
                                uffdio_zeropage.zeropage), exit(1);
                } else {
-                       if (test_uffdio_zeropage_eexist) {
+                       if (test_uffdio_zeropage_eexist && retry) {
                                test_uffdio_zeropage_eexist = false;
                                retry_uffdio_zeropage(ufd, &uffdio_zeropage,
                                                      offset);
@@ -830,6 +840,11 @@ static int uffdio_zeropage(int ufd, unsigned long offset)
        return 0;
 }
 
+static int uffdio_zeropage(int ufd, unsigned long offset)
+{
+       return __uffdio_zeropage(ufd, offset, false);
+}
+
 /* exercise UFFDIO_ZEROPAGE */
 static int userfaultfd_zeropage_test(void)
 {
index f863c664e3d143be1dd439a6098d5e389a49e762..ee068511fd0bc68ba8cb8b54bbe8a5c9371c322e 100644 (file)
@@ -1,8 +1,3 @@
-TEST_PROGS := watchdog-test
-
-all: $(TEST_PROGS)
+TEST_GEN_PROGS := watchdog-test
 
 include ../lib.mk
-
-clean:
-       rm -fr $(TEST_PROGS)
index 97f187e2663f3adaf37a50674c36aafec11d291f..0a74a20ca32bae76629e956a862eb051ebcd0a32 100644 (file)
@@ -20,7 +20,7 @@ BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
 BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
 BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
 
-CFLAGS := -O2 -g -std=gnu99 -pthread -Wall
+CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie
 
 UNAME_M := $(shell uname -m)
 CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
index c608ab495282ddb63ace657ce0a6deeea18a2240..f2ac53ab82438f0b473ecd8ed91b1e2548af7ca2 100644 (file)
@@ -565,8 +565,6 @@ kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
        if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
                return -EINVAL;
-       if (args->gsi >= KVM_MAX_IRQ_ROUTES)
-               return -EINVAL;
 
        if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
                return kvm_irqfd_deassign(kvm, args);