Merge branch 'for-4.15/use-timer-setup' into for-linus
author Jiri Kosina <jkosina@suse.cz>
Wed, 15 Nov 2017 10:13:23 +0000 (11:13 +0100)
committer Jiri Kosina <jkosina@suse.cz>
Wed, 15 Nov 2017 10:13:23 +0000 (11:13 +0100)
- usbhid: conversion to timer_setup() and from_timer() from Kees Cook
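The conversion follows the new kernel timer API: the timer callback now
receives the struct timer_list pointer and recovers its containing
structure with from_timer(), instead of taking an unsigned long cookie
set up via setup_timer(). A minimal sketch of the pattern with a
hypothetical driver (illustrative only, not the actual usbhid change):

	#include <linux/timer.h>

	struct my_dev {				/* hypothetical driver state */
		struct timer_list io_retry;	/* embedded timer */
		int retries;
	};

	/* New-style callback: gets the timer itself, not an unsigned long. */
	static void my_dev_retry(struct timer_list *t)
	{
		struct my_dev *dev = from_timer(dev, t, io_retry);

		dev->retries++;
		/* ... resubmit I/O, possibly re-arm with mod_timer() ... */
	}

	static void my_dev_init(struct my_dev *dev)
	{
		/* replaces setup_timer(&dev->io_retry, fn, (unsigned long)dev) */
		timer_setup(&dev->io_retry, my_dev_retry, 0);
	}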

Signed-off-by: Jiri Kosina <jkosina@suse.cz>
250 files changed:
Documentation/device-mapper/dm-raid.txt
Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
Documentation/filesystems/overlayfs.txt
Documentation/i2c/busses/i2c-i801
Documentation/networking/bonding.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/boot/dts/hsdk.dts
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/configs/haps_hs_smp_defconfig
arch/arc/configs/hsdk_defconfig
arch/arc/configs/vdk_hs38_defconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/include/asm/arcregs.h
arch/arc/kernel/setup.c
arch/arc/plat-axs10x/axs10x.c
arch/arc/plat-hsdk/Kconfig
arch/arc/plat-hsdk/platform.c
arch/arm64/include/asm/memory.h
arch/arm64/kernel/armv8_deprecated.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/fpsimd.c
arch/arm64/mm/fault.c
arch/parisc/kernel/process.c
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/mce_power.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/watchdog.c
arch/powerpc/kvm/book3s_xive.c
arch/powerpc/kvm/book3s_xive.h
arch/powerpc/mm/pgtable_32.c
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/sysdev/xive/common.c
arch/powerpc/sysdev/xive/spapr.c
arch/sparc/Kconfig
arch/x86/events/intel/core.c
arch/x86/include/asm/kvm_para.h
arch/x86/kernel/kvm.c
arch/x86/kvm/Kconfig
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu.c
block/bio.c
block/blk-mq-debugfs.c
block/blk-throttle.c
block/bsg-lib.c
drivers/acpi/arm64/iort.c
drivers/block/Kconfig
drivers/block/nbd.c
drivers/clk/clk-bulk.c
drivers/clk/rockchip/clk-rk3128.c
drivers/clk/samsung/clk-exynos4.c
drivers/gpio/Kconfig
drivers/gpio/gpio-omap.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dpio_phy.c
drivers/gpu/drm/i915/intel_modes.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/hid/Kconfig
drivers/hid/hid-alps.c
drivers/hid/hid-asus.c
drivers/hid/hid-core.c
drivers/hid/hid-cp2112.c
drivers/hid/hid-elecom.c
drivers/hid/hid-hyperv.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-lg.c
drivers/hid/hid-lg4ff.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-rmi.c
drivers/hid/hid-sony.c
drivers/hid/hid-tmff.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_wac.h
drivers/hwmon/xgene-hwmon.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-sprd.c
drivers/i2c/busses/i2c-stm32f7.c
drivers/ide/ide-probe.c
drivers/ide/ide-scan-pci.c
drivers/ide/setup-pci.c
drivers/infiniband/core/iwpm_msg.c
drivers/infiniband/core/iwpm_util.c
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
drivers/infiniband/hw/i40iw/i40iw_p.h
drivers/infiniband/hw/i40iw/i40iw_puda.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/qedr/qedr.h
drivers/infiniband/hw/qedr/qedr_cm.c
drivers/md/bcache/closure.c
drivers/md/dm-core.h
drivers/md/dm-crypt.c
drivers/md/dm-ioctl.c
drivers/md/dm-raid.c
drivers/md/dm.c
drivers/misc/cxl/cxllib.c
drivers/mmc/core/block.c
drivers/mmc/core/mmc.c
drivers/mmc/core/queue.c
drivers/mmc/core/queue.h
drivers/mmc/host/cavium.c
drivers/mmc/host/meson-gx-mmc.c
drivers/mmc/host/pxamci.c
drivers/mmc/host/sdhci-xenon.c
drivers/mmc/host/sdhci-xenon.h
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ppp/ppp_generic.c
drivers/net/usb/cdc_ether.c
drivers/nvme/host/core.c
drivers/nvme/host/pci.c
drivers/pinctrl/Kconfig
drivers/pinctrl/bcm/pinctrl-bcm2835.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/remoteproc/Kconfig
drivers/remoteproc/imx_rproc.c
drivers/rpmsg/qcom_glink_native.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/libiscsi.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
fs/9p/vfs_addr.c
fs/btrfs/ctree.h
fs/btrfs/extent_io.c
fs/ceph/mds_client.c
fs/ceph/snap.c
fs/direct-io.c
fs/f2fs/f2fs.h
fs/f2fs/segment.c
fs/f2fs/super.c
fs/namespace.c
fs/nfs/client.c
fs/nfs/filelayout/filelayout.c
fs/nfs/nfs4idmap.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
fs/nfsd/nfs4proc.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/ovl_entry.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_reflink.c
include/linux/bpf.h
include/linux/hid.h
include/linux/mmc/host.h
include/linux/netfilter_bridge/ebtables.h
include/linux/nmi.h
include/linux/smpboot.h
include/scsi/scsi_device.h
include/scsi/scsi_devinfo.h
include/scsi/scsi_transport_iscsi.h
include/sound/hda_verbs.h
include/sound/seq_virmidi.h
include/uapi/linux/dm-ioctl.h
include/uapi/linux/netfilter/xt_bpf.h
kernel/bpf/inode.c
kernel/bpf/verifier.c
kernel/cpu.c
kernel/exit.c
kernel/power/suspend.c
kernel/seccomp.c
kernel/smpboot.c
kernel/sysctl.c
kernel/watchdog.c
kernel/watchdog_hld.c
net/bridge/netfilter/ebtable_broute.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/bridge/netfilter/ebtables.c
net/ipv4/gre_offload.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/route.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/addrconf.c
net/ipv6/ip6_offload.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/route.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipmark.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netnet.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_hash_netportnet.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_tables_api.c
net/netfilter/x_tables.c
net/netfilter/xt_bpf.c
net/netfilter/xt_socket.c
net/netlink/af_netlink.c
net/sunrpc/xprtsock.c
net/tipc/bcast.c
net/tipc/msg.c
net/wireless/nl80211.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
sound/core/compress_offload.c
sound/core/pcm_compat.c
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_ports.c
sound/core/seq/seq_virmidi.c
sound/pci/asihpi/hpioctl.c
sound/pci/echoaudio/echoaudio.c
sound/pci/hda/patch_hdmi.c
sound/usb/caiaq/device.c
sound/usb/card.c
sound/usb/line6/driver.c
sound/usb/line6/podhd.c
sound/usb/mixer.c
sound/usb/mixer.h
sound/usb/quirks.c
sound/usb/usx2y/usb_stream.c
tools/testing/selftests/mqueue/Makefile
tools/testing/selftests/networking/timestamping/rxtimestamp.c
tools/testing/selftests/x86/Makefile

index 4a0a7469fdd7bbcd93e26ea5ae9c11e4285f7c7f..32df07e29f6860d4c1de9e3c3c63d2b4d0fc454f 100644 (file)
@@ -344,3 +344,4 @@ Version History
        (wrong raid10_copies/raid10_format sequence)
 1.11.1  Add raid4/5/6 journal write-back support via journal_mode option
 1.12.1  fix for MD deadlock between mddev_suspend() and md_write_start() available
+1.13.0  Fix dev_health status at end of "recover" (was 'a', now 'A')
index b878a1e305af6e143a646b3473be522c82a62e8a..ed1456f5c94dda21ecaf2eebd86bcd7934d11376 100644 (file)
@@ -16,11 +16,13 @@ Required Properties:
 
 - clocks:
   Array of clocks required for SDHC.
-  Require at least input clock for Xenon IP core.
+  Require at least input clock for Xenon IP core. For Armada AP806 and
+  CP110, the AXI clock is also mandatory.
 
 - clock-names:
   Array of names corresponding to clocks property.
   The input clock for Xenon IP core should be named as "core".
+  The input clock for the AXI bus must be named as "axi".
 
 - reg:
   * For "marvell,armada-3700-sdhci", two register areas.
@@ -106,8 +108,8 @@ Example:
                compatible = "marvell,armada-ap806-sdhci";
                reg = <0xaa0000 0x1000>;
                interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>
-               clocks = <&emmc_clk>;
-               clock-names = "core";
+               clocks = <&emmc_clk>,<&axi_clk>;
+               clock-names = "core", "axi";
                bus-width = <4>;
                marvell,xenon-phy-slow-mode;
                marvell,xenon-tun-count = <11>;
@@ -126,8 +128,8 @@ Example:
                interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>
                vqmmc-supply = <&sd_vqmmc_regulator>;
                vmmc-supply = <&sd_vmmc_regulator>;
-               clocks = <&sdclk>;
-               clock-names = "core";
+               clocks = <&sdclk>, <&axi_clk>;
+               clock-names = "core", "axi";
                bus-width = <4>;
                marvell,xenon-tun-count = <9>;
        };
index 36f528a7fdd64d18c7ee7b3a50849b45bc046ef3..8caa60734647f70a777b8a568ba9f2dd99182fb8 100644 (file)
@@ -210,8 +210,11 @@ path as another overlay mount and it may use a lower layer path that is
 beneath or above the path of another overlay lower layer path.
 
 Using an upper layer path and/or a workdir path that are already used by
-another overlay mount is not allowed and will fail with EBUSY.  Using
+another overlay mount is not allowed and may fail with EBUSY.  Using
 partially overlapping paths is not allowed but will not fail with EBUSY.
+If files are accessed from two overlayfs mounts which share or overlap the
+upper layer and/or workdir path the behavior of the overlay is undefined,
+though it will not result in a crash or deadlock.
 
 Mounting an overlay using an upper layer path, where the upper layer path
 was previously used by another mounted overlay in combination with a
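The overlap rules above are easiest to see with two mounts that reuse the
same upperdir/workdir. A minimal userspace sketch with hypothetical paths
(not part of the patch); the second mount may be rejected with EBUSY, and
if it is not, the documentation above says the resulting behavior is
undefined:

	#include <sys/mount.h>

	int main(void)
	{
		/* first overlay owns /upper and /work */
		mount("overlay", "/merged1", "overlay", 0,
		      "lowerdir=/lower1,upperdir=/upper,workdir=/work");

		/* second overlay reuses the same upperdir/workdir */
		return mount("overlay", "/merged2", "overlay", 0,
			     "lowerdir=/lower2,upperdir=/upper,workdir=/work");
	}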
index 0500193434cb2957eec34124d621ccc176eac555..d477024569269ff741720537ef609cafb60166f2 100644 (file)
@@ -36,6 +36,7 @@ Supported adapters:
   * Intel Gemini Lake (SOC)
   * Intel Cannon Lake-H (PCH)
   * Intel Cannon Lake-LP (PCH)
+  * Intel Cedar Fork (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
index 57f52cdce32e42c9d170e8e475a02e8a08e1f454..9ba04c0bab8db6e1a74947770a028ebca43e1651 100644 (file)
@@ -2387,7 +2387,7 @@ broadcast: Like active-backup, there is not much advantage to this
        and packet type ID), so in a "gatewayed" configuration, all
        outgoing traffic will generally use the same device.  Incoming
        traffic may also end up on a single device, but that is
-       dependent upon the balancing policy of the peer's 8023.ad
+       dependent upon the balancing policy of the peer's 802.3ad
        implementation.  In a "local" configuration, traffic will be
        distributed across the devices in the bond.
 
index cc42c838ab4fce9499edfa2ddeef32ff60195448..ef23cf5f74c25c568194e613433994aa888a640e 100644 (file)
@@ -5259,7 +5259,8 @@ S:        Maintained
 F:     drivers/iommu/exynos-iommu.c
 
 EZchip NPS platform support
-M:     Noam Camus <noamc@ezchip.com>
+M:     Elad Kanfi <eladkan@mellanox.com>
+M:     Vineet Gupta <vgupta@synopsys.com>
 S:     Supported
 F:     arch/arc/plat-eznps
 F:     arch/arc/boot/dts/eznps.dts
@@ -5345,9 +5346,7 @@ M:        "J. Bruce Fields" <bfields@fieldses.org>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
 F:     include/linux/fcntl.h
-F:     include/linux/fs.h
 F:     include/uapi/linux/fcntl.h
-F:     include/uapi/linux/fs.h
 F:     fs/fcntl.c
 F:     fs/locks.c
 
@@ -5356,6 +5355,8 @@ M:        Alexander Viro <viro@zeniv.linux.org.uk>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
 F:     fs/*
+F:     include/linux/fs.h
+F:     include/uapi/linux/fs.h
 
 FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M:     Riku Voipio <riku.voipio@iki.fi>
@@ -9360,7 +9361,7 @@ NETWORK BLOCK DEVICE (NBD)
 M:     Josef Bacik <jbacik@fb.com>
 S:     Maintained
 L:     linux-block@vger.kernel.org
-L:     nbd-general@lists.sourceforge.net
+L:     nbd@other.debian.org
 F:     Documentation/blockdev/nbd.txt
 F:     drivers/block/nbd.c
 F:     include/uapi/linux/nbd.h
index cf007a31d575d6312f3f5a860e9591dc329a86a4..2835863bdd5a44d75e4b48820caedcfb5ee31ef2 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
index 1aafb4efbb51dfea8c5639f2bd68379c930825d4..d789a89cb32c450fa436bac9e4b99171e3c9cc5c 100644 (file)
@@ -937,9 +937,6 @@ config STRICT_MODULE_RWX
          and non-text memory will be made non-executable. This provides
          protection against certain security exploits (e.g. writing to text)
 
-config ARCH_WANT_RELAX_ORDER
-       bool
-
 config ARCH_HAS_REFCOUNT
        bool
        help
index a598641eed98ef4d60dc65a25a4279a70be7e134..c84e67fdea095cbed225edac052f4ce07007abb9 100644 (file)
@@ -24,7 +24,7 @@ config ARC
        select GENERIC_SMP_IDLE_THREAD
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_TRACEHOOK
-       select HAVE_FUTEX_CMPXCHG
+       select HAVE_FUTEX_CMPXCHG if FUTEX
        select HAVE_IOREMAP_PROT
        select HAVE_KPROBES
        select HAVE_KRETPROBES
index 3a4b52b7e09d61456daf12335e7e5b7c75ec7077..d37f49d6a27f40f65d3e34bd3e2df5343a97d1e4 100644 (file)
@@ -6,8 +6,6 @@
 # published by the Free Software Foundation.
 #
 
-UTS_MACHINE := arc
-
 ifeq ($(CROSS_COMPILE),)
 ifndef CONFIG_CPU_BIG_ENDIAN
 CROSS_COMPILE := arc-linux-
index 2367a67c5f10bd3e99ce757dafed25dfa95ecb35..e114000a84f56c9e07ddd3a2e623c4dfeb3df6a2 100644 (file)
 
                        mmcclk: mmcclk {
                                compatible = "fixed-clock";
-                               clock-frequency = <50000000>;
+                               /*
+                                * DW sdio controller has external ciu clock divider
+                                * controlled via register in SDIO IP. It divides
+                                * sdio_ref_clk (which comes from CGU) by 16 by
+                                * default. So the default mmcclk clock (which comes
+                                * to sdk_in) is 25000000 Hz.
+                                */
+                               clock-frequency = <25000000>;
                                #clock-cells = <0>;
                        };
 
index 229d13adbce426c1690684f7e8dcb344a001429d..8adde1b492f14e833279ff75f448b7c0bb350781 100644 (file)
@@ -12,6 +12,7 @@
 /dts-v1/;
 
 #include <dt-bindings/net/ti-dp83867.h>
+#include <dt-bindings/reset/snps,hsdk-reset.h>
 
 / {
        model = "snps,hsdk";
                };
        };
 
-       core_clk: core-clk {
+       input_clk: input-clk {
                #clock-cells = <0>;
                compatible = "fixed-clock";
-               clock-frequency = <500000000>;
+               clock-frequency = <33333333>;
        };
 
        cpu_intc: cpu-interrupt-controller {
 
                ranges = <0x00000000 0xf0000000 0x10000000>;
 
+               cgu_rst: reset-controller@8a0 {
+                       compatible = "snps,hsdk-reset";
+                       #reset-cells = <1>;
+                       reg = <0x8A0 0x4>, <0xFF0 0x4>;
+               };
+
+               core_clk: core-clk@0 {
+                       compatible = "snps,hsdk-core-pll-clock";
+                       reg = <0x00 0x10>, <0x14B8 0x4>;
+                       #clock-cells = <0>;
+                       clocks = <&input_clk>;
+               };
+
                serial: serial@5000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x5000 0x100>;
 
                mmcclk_ciu: mmcclk-ciu {
                        compatible = "fixed-clock";
-                       clock-frequency = <100000000>;
+                       /*
+                        * DW sdio controller has external ciu clock divider
+                        * controlled via register in SDIO IP. Due to its
+                        * unexpected default value (it should divide by 1
+                        * but it divides by 8) the SDIO IP uses the wrong clock and
+                        * works unstably (see STAR 9001204800).
+                        * So add a temporary fix and change the clock frequency
+                        * from 100000000 to 12500000 Hz until we fix the dw sdio
+                        * driver itself.
+                        */
+                       clock-frequency = <12500000>;
                        #clock-cells = <0>;
                };
 
                        clocks = <&gmacclk>;
                        clock-names = "stmmaceth";
                        phy-handle = <&phy0>;
+                       resets = <&cgu_rst HSDK_ETH_RESET>;
+                       reset-names = "stmmaceth";
 
                        mdio {
                                #address-cells = <1>;
index 6980b966a36403b6170130fa1389ea9fb66095b8..ec7c849a5c8e9887fbd60acf176f194cc0c4ba03 100644 (file)
@@ -105,7 +105,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
index 2233f5777a71f82cb5defb9ae99bbfc11772fdac..63d3cf69e0b02efe45fca035649951836fc1e948 100644 (file)
@@ -104,7 +104,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
index 30a3d4cf53d20af6f202a61e3f5a980e7f1e7c2e..f613ecac14a750e6008dc817d3a06727a4631989 100644 (file)
@@ -107,7 +107,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
index 821a2e562f3f12422844556630d7136ad63097a8..3507be2af6fe3684f4acca6de7009903da82b97e 100644 (file)
@@ -84,5 +84,5 @@ CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 # CONFIG_DEBUG_PREEMPT is not set
index 9a3fcf44638820d905895b2bf0ba5a6f18511f4c..15f0f6b5fec1ae6f8e0c1bfb4fa6c970915e1713 100644 (file)
@@ -63,6 +63,7 @@ CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_DW=y
 # CONFIG_IOMMU_SUPPORT is not set
+CONFIG_RESET_HSDK=y
 CONFIG_EXT3_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
@@ -72,7 +73,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
index c0d6a010751a9f80e4139fa6a9b833de4b24b6a8..4fcf4f2503f61a9e802b82e2cbb4aad9617725d8 100644 (file)
@@ -94,7 +94,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_SHIRQ=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
index 5c0971787acf4c18066c4bc537f36c1dae37232f..7b71464f6c2f2904e18893d8ba80c65471fc4ef5 100644 (file)
@@ -98,7 +98,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_SHIRQ=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
index ba8e802dba80bd0776508ff70945e6e8b54ea43e..b1c56d35f2a938e59c9677454499aeecb1da4f7e 100644 (file)
@@ -98,6 +98,7 @@
 
 /* Auxiliary registers */
 #define AUX_IDENTITY           4
+#define AUX_EXEC_CTRL          8
 #define AUX_INTR_VEC_BASE      0x25
 #define AUX_VOL                        0x5e
 
@@ -135,12 +136,12 @@ struct bcr_identity {
 #endif
 };
 
-struct bcr_isa {
+struct bcr_isa_arcv2 {
 #ifdef CONFIG_CPU_BIG_ENDIAN
        unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1,
-                    pad1:11, atomic1:1, ver:8;
+                    pad1:12, ver:8;
 #else
-       unsigned int ver:8, atomic1:1, pad1:11, be:1, atomic:1, unalign:1,
+       unsigned int ver:8, pad1:12, be:1, atomic:1, unalign:1,
                     ldd:1, pad2:4, div_rem:4;
 #endif
 };
@@ -263,13 +264,13 @@ struct cpuinfo_arc {
        struct cpuinfo_arc_mmu mmu;
        struct cpuinfo_arc_bpu bpu;
        struct bcr_identity core;
-       struct bcr_isa isa;
+       struct bcr_isa_arcv2 isa;
        const char *details, *name;
        unsigned int vec_base;
        struct cpuinfo_arc_ccm iccm, dccm;
        struct {
                unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
-                            fpu_sp:1, fpu_dp:1, pad2:6,
+                            fpu_sp:1, fpu_dp:1, dual_iss_enb:1, dual_iss_exist:1, pad2:4,
                             debug:1, ap:1, smart:1, rtt:1, pad3:4,
                             timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
        } extn;
index 877cec8f5ea21256e233f4f353882ff02097537a..fb83844daeea3550aacd27de2525711a6aa5fddc 100644 (file)
@@ -51,6 +51,7 @@ static const struct id_to_str arc_cpu_rel[] = {
        { 0x51, "R2.0" },
        { 0x52, "R2.1" },
        { 0x53, "R3.0" },
+       { 0x54, "R4.0" },
 #endif
        { 0x00, NULL   }
 };
@@ -62,6 +63,7 @@ static const struct id_to_str arc_cpu_nm[] = {
 #else
        { 0x40, "ARC EM"  },
        { 0x50, "ARC HS38"  },
+       { 0x54, "ARC HS48"  },
 #endif
        { 0x00, "Unknown"   }
 };
@@ -119,11 +121,11 @@ static void read_arc_build_cfg_regs(void)
        struct bcr_generic bcr;
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
        const struct id_to_str *tbl;
+       struct bcr_isa_arcv2 isa;
 
        FIX_PTR(cpu);
 
        READ_BCR(AUX_IDENTITY, cpu->core);
-       READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
 
        for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
                if (cpu->core.family == tbl->id) {
@@ -133,7 +135,7 @@ static void read_arc_build_cfg_regs(void)
        }
 
        for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
-               if ((cpu->core.family & 0xF0) == tbl->id)
+               if ((cpu->core.family & 0xF4) == tbl->id)
                        break;
        }
        cpu->name = tbl->str;
@@ -192,6 +194,14 @@ static void read_arc_build_cfg_regs(void)
                cpu->bpu.full = bpu.ft;
                cpu->bpu.num_cache = 256 << bpu.bce;
                cpu->bpu.num_pred = 2048 << bpu.pte;
+
+               if (cpu->core.family >= 0x54) {
+                       unsigned int exec_ctrl;
+
+                       READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+                       cpu->extn.dual_iss_exist = 1;
+                       cpu->extn.dual_iss_enb = exec_ctrl & 1;
+               }
        }
 
        READ_BCR(ARC_REG_AP_BCR, bcr);
@@ -205,18 +215,25 @@ static void read_arc_build_cfg_regs(void)
 
        cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
 
+       READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
+
        /* some hacks for lack of feature BCR info in old ARC700 cores */
        if (is_isa_arcompact()) {
-               if (!cpu->isa.ver)      /* ISA BCR absent, use Kconfig info */
+               if (!isa.ver)   /* ISA BCR absent, use Kconfig info */
                        cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
-               else
-                       cpu->isa.atomic = cpu->isa.atomic1;
+               else {
+                       /* ARC700_BUILD only has 2 bits of isa info */
+                       struct bcr_generic bcr = *(struct bcr_generic *)&isa;
+                       cpu->isa.atomic = bcr.info & 1;
+               }
 
                cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
 
                 /* there's no direct way to distinguish 750 vs. 770 */
                if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
                        cpu->name = "ARC750";
+       } else {
+               cpu->isa = isa;
        }
 }
 
@@ -232,10 +249,11 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                       "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
                       core->family, core->cpu_id, core->chip_id);
 
-       n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
+       n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
                       cpu_id, cpu->name, cpu->details,
                       is_isa_arcompact() ? "ARCompact" : "ARCv2",
-                      IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
+                      IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
+                      IS_AVAIL3(cpu->extn.dual_iss_exist, cpu->extn.dual_iss_enb, " Dual-Issue"));
 
        n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
                       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
index f1ac6790da5fe64782b59b720bf3ea80d999bff1..cf14ebc36916a2a0eca39728c0cc0f315d58bbeb 100644 (file)
@@ -111,6 +111,13 @@ static void __init axs10x_early_init(void)
 
        axs10x_enable_gpio_intc_wire();
 
+       /*
+        * Reset ethernet IP core.
+        * TODO: get rid of this quirk once the axs10x reset driver (or a
+        * simple reset driver) is available upstream.
+        */
+       iowrite32((1 << 5), (void __iomem *) CREG_MB_SW_RESET);
+
        scnprintf(mb, 32, "MainBoard v%d", mb_rev);
        axs10x_print_board_ver(CREG_MB_VER, mb);
 }
index 5a6ed5afb009a160e617a5e2fd81f1be6f56fcd3..bd08de4be75e7edc4ce8875c8af7f1723f0f5b01 100644 (file)
@@ -6,4 +6,5 @@
 #
 
 menuconfig ARC_SOC_HSDK
-       bool "ARC HS Development Kit SOC"
+       bool "ARC HS Development Kit SOC"
+       select CLK_HSDK
index a2e7fd17e36d36a4e0f29c183834ae349e5a8786..744e62e5878898b21f49c58c588ddb167523cc60 100644 (file)
@@ -38,6 +38,42 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
 #define CREG_PAE               (CREG_BASE + 0x180)
 #define CREG_PAE_UPDATE                (CREG_BASE + 0x194)
 
+#define CREG_CORE_IF_CLK_DIV   (CREG_BASE + 0x4B8)
+#define CREG_CORE_IF_CLK_DIV_2 0x1
+#define CGU_BASE               ARC_PERIPHERAL_BASE
+#define CGU_PLL_STATUS         (ARC_PERIPHERAL_BASE + 0x4)
+#define CGU_PLL_CTRL           (ARC_PERIPHERAL_BASE + 0x0)
+#define CGU_PLL_STATUS_LOCK    BIT(0)
+#define CGU_PLL_STATUS_ERR     BIT(1)
+#define CGU_PLL_CTRL_1GHZ      0x3A10
+#define HSDK_PLL_LOCK_TIMEOUT  500
+
+#define HSDK_PLL_LOCKED() \
+       !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK)
+
+#define HSDK_PLL_ERR() \
+       !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR)
+
+static void __init hsdk_set_cpu_freq_1ghz(void)
+{
+       u32 timeout = HSDK_PLL_LOCK_TIMEOUT;
+
+       /*
+        * As we set the cpu clock to a frequency exceeding 500MHz, the divider
+        * for the interface clock must be programmed to div-by-2.
+        */
+       iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV);
+
+       /* Set cpu clock to 1GHz */
+       iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL);
+
+       while (!HSDK_PLL_LOCKED() && timeout--)
+               cpu_relax();
+
+       if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR())
+               pr_err("Failed to setup CPU frequency to 1GHz!");
+}
+
 static void __init hsdk_init_early(void)
 {
        /*
@@ -52,6 +88,12 @@ static void __init hsdk_init_early(void)
 
        /* Really apply settings made above */
        writel(1, (void __iomem *) CREG_PAE_UPDATE);
+
+       /*
+        * Set up the CPU frequency to 1GHz.
+        * TODO: remove this once a smart hsdk pll driver is introduced.
+        */
+       hsdk_set_cpu_freq_1ghz();
 }
 
 static const char *hsdk_compat[] __initconst = {
index 3585a5e2615105162f82510f64917b094974a2a4..f7c4d2146aed09b715a59506f7e01ab0d167f3de 100644 (file)
 #define KERNEL_END        _end
 
 /*
- * The size of the KASAN shadow region. This should be 1/8th of the
- * size of the entire kernel virtual address space.
+ * KASAN requires 1/8th of the kernel virtual address space for the shadow
+ * region. KASAN can bloat the stack significantly, so double the (minimum)
+ * stack size when KASAN is in use.
  */
 #ifdef CONFIG_KASAN
 #define KASAN_SHADOW_SIZE      (UL(1) << (VA_BITS - 3))
+#define KASAN_THREAD_SHIFT     1
 #else
 #define KASAN_SHADOW_SIZE      (0)
+#define KASAN_THREAD_SHIFT     0
 #endif
 
-#define MIN_THREAD_SHIFT       14
+#define MIN_THREAD_SHIFT       (14 + KASAN_THREAD_SHIFT)
 
 /*
  * VMAP'd stacks are allocated at page granularity, so we must ensure that such
index f0e6d717885b1fcf3b22f64c10c38f19c25f809d..d06fbe4cd38d7423c900aff64b0e728f995478d3 100644 (file)
@@ -649,4 +649,4 @@ static int __init armv8_deprecated_init(void)
        return 0;
 }
 
-late_initcall(armv8_deprecated_init);
+core_initcall(armv8_deprecated_init);
index cd52d365d1f01aefae2f63e6a0207e65e0fdcb57..21e2c95d24e72a8e339406af601f02690675d3d8 100644 (file)
@@ -1307,4 +1307,4 @@ static int __init enable_mrs_emulation(void)
        return 0;
 }
 
-late_initcall(enable_mrs_emulation);
+core_initcall(enable_mrs_emulation);
index f444f374bd7b3ccea1b097cc13d9229db5c8f2c3..5d547deb6996c0091c64f14de18b5b8a75c88ae4 100644 (file)
@@ -444,4 +444,4 @@ static int __init fpsimd_init(void)
 
        return 0;
 }
-late_initcall(fpsimd_init);
+core_initcall(fpsimd_init);
index 2069e9bc0fca674ba1d6fe769be05e6f731ef93d..b64958b23a7fa193c23dd1a9039f69f4e9f0b863 100644 (file)
@@ -97,7 +97,7 @@ static void data_abort_decode(unsigned int esr)
                         (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
                         (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
        } else {
-               pr_alert("  ISV = 0, ISS = 0x%08lu\n", esr & ESR_ELx_ISS_MASK);
+               pr_alert("  ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
        }
 
        pr_alert("  CM = %lu, WnR = %lu\n",
index a45a67d526f8ca8001fd1d06625b3b233d5a3835..30f92391a93ef6d5a90970b81921a2133d5e2eb0 100644 (file)
@@ -146,7 +146,7 @@ void machine_power_off(void)
 
        /* prevent soft lockup/stalled CPU messages for endless loop. */
        rcu_sysrq_start();
-       lockup_detector_suspend();
+       lockup_detector_soft_poweroff();
        for (;;);
 }
 
index 1df770e8cbe03194f31576cff264fdedbb1aa517..7275fed271afa601711b2f43b0da2c2377c68eff 100644 (file)
@@ -102,10 +102,10 @@ static void cpufeatures_flush_tlb(void)
        case PVR_POWER8:
        case PVR_POWER8E:
        case PVR_POWER8NVL:
-               __flush_tlb_power8(POWER8_TLB_SETS);
+               __flush_tlb_power8(TLB_INVAL_SCOPE_GLOBAL);
                break;
        case PVR_POWER9:
-               __flush_tlb_power9(POWER9_TLB_SETS_HASH);
+               __flush_tlb_power9(TLB_INVAL_SCOPE_GLOBAL);
                break;
        default:
                pr_err("unknown CPU version for boot TLB flush\n");
index 48da0f5d2f7fe0a4745bce864795df16d612a89e..b82586c535604158986bc1d3c1abb481ff736dd6 100644 (file)
@@ -734,7 +734,29 @@ EXC_REAL(program_check, 0x700, 0x100)
 EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
 TRAMP_KVM(PACA_EXGEN, 0x700)
 EXC_COMMON_BEGIN(program_check_common)
-       EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+       /*
+        * It's possible to receive a TM Bad Thing type program check with
+        * userspace register values (in particular r1), but with SRR1 reporting
+        * that we came from the kernel. Normally that would confuse the bad
+        * stack logic, and we would report a bad kernel stack pointer. Instead
+        * we switch to the emergency stack if we're taking a TM Bad Thing from
+        * the kernel.
+        */
+       li      r10,MSR_PR              /* Build a mask of MSR_PR ..    */
+       oris    r10,r10,0x200000@h      /* .. and SRR1_PROGTM           */
+       and     r10,r10,r12             /* Mask SRR1 with that.         */
+       srdi    r10,r10,8               /* Shift it so we can compare   */
+       cmpldi  r10,(0x200000 >> 8)     /* .. with an immediate.        */
+       bne 1f                          /* If != go to normal path.     */
+
+       /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack  */
+       andi.   r10,r12,MSR_PR;         /* Set CR0 correctly for label  */
+                                       /* 3 in EXCEPTION_PROLOG_COMMON */
+       mr      r10,r1                  /* Save r1                      */
+       ld      r1,PACAEMERGSP(r13)     /* Use emergency stack          */
+       subi    r1,r1,INT_FRAME_SIZE    /* alloc stack frame            */
+       b 3f                            /* Jump into the macro !!       */
+1:     EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
        bl      save_nvgprs
        RECONCILE_IRQ_STATE(r10, r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
index b76ca198e09c186e2d90e9b3eb83c7765c6c1cdf..72f153c6f3facea2bda06b64e90b24a6fed253d5 100644 (file)
@@ -624,5 +624,18 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
 
 long __machine_check_early_realmode_p9(struct pt_regs *regs)
 {
+       /*
+        * On POWER9 DD2.1 and below, it's possible to get a machine check
+        * caused by a paste instruction where only DSISR bit 25 is set. This
+        * will result in the MCE handler seeing an unknown event and the kernel
+        * crashing. An MCE that occurs like this is spurious, so we don't need
+        * to do anything in terms of servicing it. If there is something that
+        * needs to be serviced, the CPU will raise the MCE again with the
+        * correct DSISR so that it can be serviced properly. So detect this
+        * case and mark it as handled.
+        */
+       if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
+               return 1;
+
        return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
 }
index 0ac741fae90ea5c1cffc227323fe40cbb5319188..2e3bc16d02b289b5d03906010518bc7005233452 100644 (file)
@@ -904,9 +904,6 @@ void __init setup_arch(char **cmdline_p)
 #endif
 #endif
 
-#ifdef CONFIG_PPC_64K_PAGES
-       init_mm.context.pte_frag = NULL;
-#endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
        mm_iommu_init(&init_mm);
 #endif
index c83c115858c1909218c5897a23f15f0bbd7b10ed..b2c002993d78d340db6ef882d5eeec6692577475 100644 (file)
@@ -452,9 +452,20 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
        if (MSR_TM_RESV(msr))
                return -EINVAL;
 
-       /* pull in MSR TM from user context */
+       /* pull in MSR TS bits from user context */
        regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
 
+       /*
+        * Ensure that TM is enabled in regs->msr before we leave the signal
+        * handler. It could be the case that (a) user disabled the TM bit
+        * through the manipulation of the MSR bits in uc_mcontext or (b) the
+        * TM bit was disabled because a sufficient number of context switches
+        * happened whilst in the signal handler and load_tm overflowed,
+        * disabling the TM bit. In either case we can end up with an illegal
+        * TM state leading to a TM Bad Thing when we return to userspace.
+        */
+       regs->msr |= MSR_TM;
+
        /* pull in MSR LE from user context */
        regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
 
index 2f6eadd9408d9490b171ca20f937c1af79096acd..c702a898145250aa4e4da3a88bd32a960ad4fb78 100644 (file)
@@ -310,9 +310,6 @@ static int start_wd_on_cpu(unsigned int cpu)
        if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
                return 0;
 
-       if (watchdog_suspended)
-               return 0;
-
        if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
                return 0;
 
@@ -358,36 +355,39 @@ static void watchdog_calc_timeouts(void)
        wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
 }
 
-void watchdog_nmi_reconfigure(void)
+void watchdog_nmi_stop(void)
 {
        int cpu;
 
-       watchdog_calc_timeouts();
-
        for_each_cpu(cpu, &wd_cpus_enabled)
                stop_wd_on_cpu(cpu);
+}
 
+void watchdog_nmi_start(void)
+{
+       int cpu;
+
+       watchdog_calc_timeouts();
        for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
                start_wd_on_cpu(cpu);
 }
 
 /*
- * This runs after lockup_detector_init() which sets up watchdog_cpumask.
+ * Invoked from core watchdog init.
  */
-static int __init powerpc_watchdog_init(void)
+int __init watchdog_nmi_probe(void)
 {
        int err;
 
-       watchdog_calc_timeouts();
-
-       err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online",
-                               start_wd_on_cpu, stop_wd_on_cpu);
-       if (err < 0)
+       err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+                                       "powerpc/watchdog:online",
+                                       start_wd_on_cpu, stop_wd_on_cpu);
+       if (err < 0) {
                pr_warn("Watchdog could not be initialized");
-
+               return err;
+       }
        return 0;
 }
-arch_initcall(powerpc_watchdog_init);
 
 static void handle_backtrace_ipi(struct pt_regs *regs)
 {
index 13304622ab1c78682fa18f06120f5eb0970f57fb..bf457843e03217b9aa02815d7791f0fce72aea2b 100644 (file)
@@ -622,7 +622,7 @@ int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                return -EINVAL;
        state = &sb->irq_state[idx];
        arch_spin_lock(&sb->lock);
-       *server = state->guest_server;
+       *server = state->act_server;
        *priority = state->guest_priority;
        arch_spin_unlock(&sb->lock);
 
@@ -1331,7 +1331,7 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
        xive->saved_src_count++;
 
        /* Convert saved state into something compatible with xics */
-       val = state->guest_server;
+       val = state->act_server;
        prio = state->saved_scan_prio;
 
        if (prio == MASKED) {
@@ -1507,7 +1507,6 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
        /* First convert prio and mark interrupt as untargetted */
        act_prio = xive_prio_from_guest(guest_prio);
        state->act_priority = MASKED;
-       state->guest_server = server;
 
        /*
         * We need to drop the lock due to the mutex below. Hopefully
index 5938f7644dc17587dc811132459cf8b71425856a..6ba63f8e8a614ed4aad2a5890236834f3e08c0a7 100644 (file)
@@ -35,7 +35,6 @@ struct kvmppc_xive_irq_state {
        struct xive_irq_data *pt_data;  /* XIVE Pass-through associated data */
 
        /* Targetting as set by guest */
-       u32 guest_server;               /* Current guest selected target */
        u8 guest_priority;              /* Guest set priority */
        u8 saved_priority;              /* Saved priority when masking */
 
index 65eda1997c3f8750c899294bc1d9bb15215d9426..f6c7f54c05157e226c5426798b6fa0f379f8becb 100644 (file)
@@ -361,9 +361,9 @@ static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
                        break;
        }
        wmb();
+       local_irq_restore(flags);
        flush_tlb_kernel_range((unsigned long)page_address(start),
                               (unsigned long)page_address(page));
-       local_irq_restore(flags);
        return err;
 }
 
index 897aa1400eb833e944fabbc65840904006267a1f..bbb73aa0eb8f041630110c795828215471cc5f2d 100644 (file)
@@ -272,7 +272,15 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 static unsigned long pnv_memory_block_size(void)
 {
-       return 256UL * 1024 * 1024;
+       /*
+        * We map the kernel linear region with 1GB large pages on radix. For
+        * memory hot unplug to work our memory block size must be at least
+        * this size.
+        */
+       if (radix_enabled())
+               return 1UL * 1024 * 1024 * 1024;
+       else
+               return 256UL * 1024 * 1024;
 }
 #endif
 
index f387318678b97abdfa8e57fe46eade5a707e566d..a3b8d7d1316eb1863f19ffa19ff34f0c761ada33 100644 (file)
@@ -1402,6 +1402,14 @@ void xive_teardown_cpu(void)
 
        if (xive_ops->teardown_cpu)
                xive_ops->teardown_cpu(cpu, xc);
+
+#ifdef CONFIG_SMP
+       /* Get rid of IPI */
+       xive_cleanup_cpu_ipi(cpu, xc);
+#endif
+
+       /* Disable and free the queues */
+       xive_cleanup_cpu_queues(cpu, xc);
 }
 
 void xive_kexec_teardown_cpu(int secondary)
index f24a70bc6855d54cf86a50d17fefebc9777e2ed0..d9c4c93660491849029044d37ab8e60160b0d7de 100644 (file)
@@ -431,7 +431,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
 
 static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
 {
+       if (!xc->hw_ipi)
+               return;
+
        xive_irq_bitmap_free(xc->hw_ipi);
+       xc->hw_ipi = 0;
 }
 #endif /* CONFIG_SMP */
 
index 0be3828752e5bc9ddf33ba74c9e5b9efd03691e4..4e83f950713e9f9837da01406586613a9c07cb3f 100644 (file)
@@ -44,7 +44,6 @@ config SPARC
        select ARCH_HAS_SG_CHAIN
        select CPU_NO_EFFICIENT_FFS
        select LOCKDEP_SMALL if LOCKDEP
-       select ARCH_WANT_RELAX_ORDER
 
 config SPARC32
        def_bool !64BIT
index 829e89cfcee2deff8b937806b16dd573d736a895..9fb9a1f1e47bd0d0db3f9be9d4722f956d48dcfe 100644 (file)
@@ -4409,10 +4409,9 @@ static __init int fixup_ht_bug(void)
                return 0;
        }
 
-       if (lockup_detector_suspend() != 0) {
-               pr_debug("failed to disable PMU erratum BJ122, BV98, HSD29 workaround\n");
-               return 0;
-       }
+       cpus_read_lock();
+
+       hardlockup_detector_perf_stop();
 
        x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
 
@@ -4420,9 +4419,7 @@ static __init int fixup_ht_bug(void)
        x86_pmu.commit_scheduling = NULL;
        x86_pmu.stop_scheduling = NULL;
 
-       lockup_detector_resume();
-
-       cpus_read_lock();
+       hardlockup_detector_perf_restart();
 
        for_each_online_cpu(c)
                free_excl_cntrs(c);
index bc62e7cbf1b1f883fc9acb0a145305384a3bf062..59ad3d132353280cae5a1ba4f3a8ae0efe996331 100644 (file)
@@ -88,7 +88,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 bool kvm_para_available(void);
 unsigned int kvm_arch_para_features(void);
 void __init kvm_guest_init(void);
-void kvm_async_pf_task_wait(u32 token);
+void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
 void kvm_async_pf_task_wake(u32 token);
 u32 kvm_read_and_reset_pf_reason(void);
 extern void kvm_disable_steal_time(void);
@@ -103,7 +103,7 @@ static inline void kvm_spinlock_init(void)
 
 #else /* CONFIG_KVM_GUEST */
 #define kvm_guest_init() do {} while (0)
-#define kvm_async_pf_task_wait(T) do {} while(0)
+#define kvm_async_pf_task_wait(T, I) do {} while(0)
 #define kvm_async_pf_task_wake(T) do {} while(0)
 
 static inline bool kvm_para_available(void)
index e675704fa6f7d89ed919631284156717e342979f..8bb9594d076166ee0f6bd4f70350fe3ecf8c3d8b 100644 (file)
@@ -117,7 +117,11 @@ static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
        return NULL;
 }
 
-void kvm_async_pf_task_wait(u32 token)
+/*
+ * @interrupt_kernel: Is this called from a routine which interrupts the kernel
+ *                   (other than user space)?
+ */
+void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
 {
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
@@ -140,8 +144,10 @@ void kvm_async_pf_task_wait(u32 token)
 
        n.token = token;
        n.cpu = smp_processor_id();
-       n.halted = is_idle_task(current) || preempt_count() > 1 ||
-                  rcu_preempt_depth();
+       n.halted = is_idle_task(current) ||
+                  (IS_ENABLED(CONFIG_PREEMPT_COUNT)
+                   ? preempt_count() > 1 || rcu_preempt_depth()
+                   : interrupt_kernel);
        init_swait_queue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        raw_spin_unlock(&b->lock);
@@ -269,7 +275,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                prev_state = exception_enter();
-               kvm_async_pf_task_wait((u32)read_cr2());
+               kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
index 3ea624452f9327d252dda724100bd6e97f8c6e0c..3c48bc8bf08c7bc6afe896a2dae21d4f22d8f58e 100644 (file)
@@ -23,6 +23,7 @@ config KVM
        depends on HIGH_RES_TIMERS
        # for TASKSTATS/TASK_DELAY_ACCT:
        depends on NET && MULTIUSER
+       depends on X86_LOCAL_APIC
        select PREEMPT_NOTIFIERS
        select MMU_NOTIFIER
        select ANON_INODES
index a36254cbf7765aabf3e77c45e7cc82c3e3ff0fbb..d90cdc77e077354f1407235e6b73f2fda21c430c 100644 (file)
@@ -425,8 +425,10 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
        #op " %al \n\t" \
        FOP_RET
 
-asm(".global kvm_fastop_exception \n"
-    "kvm_fastop_exception: xor %esi, %esi; ret");
+asm(".pushsection .fixup, \"ax\"\n"
+    ".global kvm_fastop_exception \n"
+    "kvm_fastop_exception: xor %esi, %esi; ret\n"
+    ".popsection");
 
 FOP_START(setcc)
 FOP_SETCC(seto)
index eca30c1eb1d97cd367c6b2b76396991b79328b1a..106d4a029a8a933ad3865a11a5e2c1a11810e714 100644 (file)
@@ -3837,7 +3837,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                vcpu->arch.apf.host_apf_reason = 0;
                local_irq_disable();
-               kvm_async_pf_task_wait(fault_address);
+               kvm_async_pf_task_wait(fault_address, 0);
                local_irq_enable();
                break;
        case KVM_PV_REASON_PAGE_READY:
index b38e962fa83e774c5c9121bbbe277124d13dd793..101c2a9b548150cd3f7bb5b28a460ef82c9e4a75 100644 (file)
@@ -1239,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
         */
        bmd->is_our_pages = map_data ? 0 : 1;
        memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
-       iov_iter_init(&bmd->iter, iter->type, bmd->iov,
-                       iter->nr_segs, iter->count);
+       bmd->iter = *iter;
+       bmd->iter.iov = bmd->iov;
 
        ret = -ENOMEM;
        bio = bio_kmalloc(gfp_mask, nr_pages);
@@ -1331,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
        int ret, offset;
        struct iov_iter i;
        struct iovec iov;
+       struct bio_vec *bvec;
 
        iov_for_each(iov, i, *iter) {
                unsigned long uaddr = (unsigned long) iov.iov_base;
@@ -1375,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
                ret = get_user_pages_fast(uaddr, local_nr_pages,
                                (iter->type & WRITE) != WRITE,
                                &pages[cur_page]);
-               if (ret < local_nr_pages) {
+               if (unlikely(ret < local_nr_pages)) {
+                       for (j = cur_page; j < page_limit; j++) {
+                               if (!pages[j])
+                                       break;
+                               put_page(pages[j]);
+                       }
                        ret = -EFAULT;
                        goto out_unmap;
                }
@@ -1383,6 +1389,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
                offset = offset_in_page(uaddr);
                for (j = cur_page; j < page_limit; j++) {
                        unsigned int bytes = PAGE_SIZE - offset;
+                       unsigned short prev_bi_vcnt = bio->bi_vcnt;
 
                        if (len <= 0)
                                break;
@@ -1397,6 +1404,13 @@ struct bio *bio_map_user_iov(struct request_queue *q,
                                            bytes)
                                break;
 
+                       /*
+                        * check if the vector was merged with the previous one;
+                        * if so, drop the extra page reference
+                        */
+                       if (bio->bi_vcnt == prev_bi_vcnt)
+                               put_page(pages[j]);
+
                        len -= bytes;
                        offset = 0;
                }
@@ -1423,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
        return bio;
 
  out_unmap:
-       for (j = 0; j < nr_pages; j++) {
-               if (!pages[j])
-                       break;
-               put_page(pages[j]);
+       bio_for_each_segment_all(bvec, bio, j) {
+               put_page(bvec->bv_page);
        }
  out:
        kfree(pages);
index 980e7309564332ee1c23e0d96b13996f27c0aeca..de294d775acfa413854c109eeaa08b9b6bdfd354 100644 (file)
@@ -815,10 +815,14 @@ int blk_mq_debugfs_register(struct request_queue *q)
                goto err;
 
        /*
-        * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir
+        * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
         * didn't exist yet (because we don't know what to name the directory
         * until the queue is registered to a gendisk).
         */
+       if (q->elevator && !q->sched_debugfs_dir)
+               blk_mq_debugfs_register_sched(q);
+
+       /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
                        goto err;
index 0fea76aa0f3ff0093c4867f10139d331589df53a..17816a028dcbb6fb7f7fe4e068fb90f313448ebf 100644 (file)
@@ -1911,11 +1911,11 @@ static void throtl_upgrade_state(struct throtl_data *td)
 
                tg->disptime = jiffies - 1;
                throtl_select_dispatch(sq);
-               throtl_schedule_next_dispatch(sq, false);
+               throtl_schedule_next_dispatch(sq, true);
        }
        rcu_read_unlock();
        throtl_select_dispatch(&td->service_queue);
-       throtl_schedule_next_dispatch(&td->service_queue, false);
+       throtl_schedule_next_dispatch(&td->service_queue, true);
        queue_work(kthrotld_workqueue, &td->dispatch_work);
 }
 
index dbddff8174e57a92d09f5bdc9cc83ee6f7217eb9..15d25ccd51a5cad7c9505427888f88c15a9e5d99 100644 (file)
@@ -207,20 +207,34 @@ static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
        struct bsg_job *job = blk_mq_rq_to_pdu(req);
        struct scsi_request *sreq = &job->sreq;
 
+       /* called right after the request is allocated for the request_queue */
+
+       sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
+       if (!sreq->sense)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void bsg_initialize_rq(struct request *req)
+{
+       struct bsg_job *job = blk_mq_rq_to_pdu(req);
+       struct scsi_request *sreq = &job->sreq;
+       void *sense = sreq->sense;
+
+       /* called right before the request is given to the request_queue user */
+
        memset(job, 0, sizeof(*job));
 
        scsi_req_init(sreq);
+
+       sreq->sense = sense;
        sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
-       sreq->sense = kzalloc(sreq->sense_len, gfp);
-       if (!sreq->sense)
-               return -ENOMEM;
 
        job->req = req;
-       job->reply = sreq->sense;
+       job->reply = sense;
        job->reply_len = sreq->sense_len;
        job->dd_data = job + 1;
-
-       return 0;
 }
 
 static void bsg_exit_rq(struct request_queue *q, struct request *req)
@@ -251,6 +265,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
        q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
        q->init_rq_fn = bsg_init_rq;
        q->exit_rq_fn = bsg_exit_rq;
+       q->initialize_rq_fn = bsg_initialize_rq;
        q->request_fn = bsg_request_fn;
 
        ret = blk_init_allocated_queue(q);
index 9565d572f8dd2f3d65c0e16b8257f61100bbad50..de56394dd161f7813b7f5bce64321d3196bc4904 100644 (file)
@@ -1178,12 +1178,44 @@ dev_put:
        return ret;
 }
 
+static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
+{
+       if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
+               struct acpi_iort_node *parent;
+               struct acpi_iort_id_mapping *map;
+               int i;
+
+               map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
+                                  iort_node->mapping_offset);
+
+               for (i = 0; i < iort_node->mapping_count; i++, map++) {
+                       if (!map->output_reference)
+                               continue;
+
+                       parent = ACPI_ADD_PTR(struct acpi_iort_node,
+                                       iort_table,  map->output_reference);
+                       /*
+                        * If we detect a RC->SMMU mapping, make sure
+                        * we enable ACS on the system.
+                        */
+                       if ((parent->type == ACPI_IORT_NODE_SMMU) ||
+                               (parent->type == ACPI_IORT_NODE_SMMU_V3)) {
+                               pci_request_acs();
+                               return true;
+                       }
+               }
+       }
+
+       return false;
+}
+
 static void __init iort_init_platform_devices(void)
 {
        struct acpi_iort_node *iort_node, *iort_end;
        struct acpi_table_iort *iort;
        struct fwnode_handle *fwnode;
        int i, ret;
+       bool acs_enabled = false;
 
        /*
         * iort_table and iort both point to the start of IORT table, but
@@ -1203,6 +1235,9 @@ static void __init iort_init_platform_devices(void)
                        return;
                }
 
+               if (!acs_enabled)
+                       acs_enabled = iort_enable_acs(iort_node);
+
                if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
                        (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
 
index 4a438b8abe27ad9c1774c89f608bd47fecc295a3..2dfe99b328f88e35d812b34bd4a48e3392fe5819 100644 (file)
@@ -17,7 +17,7 @@ if BLK_DEV
 
 config BLK_DEV_NULL_BLK
        tristate "Null test block driver"
-       depends on CONFIGFS_FS
+       select CONFIGFS_FS
 
 config BLK_DEV_FD
        tristate "Normal floppy disk support"
index 3684e21d543f23e95bda09fb895e8933b9bce186..883dfebd3014b506a0861aed0640dc313fd8b0cc 100644 (file)
@@ -820,9 +820,13 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
         * appropriate.
         */
        ret = nbd_handle_cmd(cmd, hctx->queue_num);
+       if (ret < 0)
+               ret = BLK_STS_IOERR;
+       else if (!ret)
+               ret = BLK_STS_OK;
        complete(&cmd->send_complete);
 
-       return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
+       return ret;
 }
 
 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
index c834f5abfc497e8ce674de2a736b9c3103bf55fa..4c10456f8a32923c0dade56265e4eeef714df641 100644 (file)
@@ -105,6 +105,7 @@ err:
 
        return  ret;
 }
+EXPORT_SYMBOL_GPL(clk_bulk_prepare);
 
 #endif /* CONFIG_HAVE_CLK_PREPARE */
 
index 62d7854e4b873fe04c38dbeaaaf4e02f5031dc45..5970a50671b9a6638d637f8a017e1cc0cf063e97 100644 (file)
@@ -315,13 +315,13 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
                        RK2928_CLKGATE_CON(10), 8, GFLAGS),
 
        GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0,
-                       RK2928_CLKGATE_CON(10), 8, GFLAGS),
+                       RK2928_CLKGATE_CON(10), 0, GFLAGS),
        GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0,
-                       RK2928_CLKGATE_CON(10), 8, GFLAGS),
+                       RK2928_CLKGATE_CON(10), 1, GFLAGS),
        GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0,
-                       RK2928_CLKGATE_CON(10), 8, GFLAGS),
+                       RK2928_CLKGATE_CON(10), 2, GFLAGS),
        GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
-                       RK2928_CLKGATE_CON(10), 8, GFLAGS),
+                       RK2928_CLKGATE_CON(2), 15, GFLAGS),
 
        COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
                        RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
@@ -541,7 +541,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
        GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS),
        GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS),
 
-       GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 2, GFLAGS),
+       GATE(0, "pclk_pmu", "pclk_pmu_pre", 0, RK2928_CLKGATE_CON(9), 2, GFLAGS),
        GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS),
 
        /* PD_MMC */
@@ -577,6 +577,8 @@ static const char *const rk3128_critical_clocks[] __initconst = {
        "aclk_peri",
        "hclk_peri",
        "pclk_peri",
+       "pclk_pmu",
+       "sclk_timer5",
 };
 
 static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np)
index e40b77583c476f4cb413b34a7c6f2a734895efec..d8d3cb67b4029ac58c905fcae097a2c6957a9f22 100644 (file)
@@ -294,6 +294,18 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = {
 #define PLL_ENABLED    (1 << 31)
 #define PLL_LOCKED     (1 << 29)
 
+static void exynos4_clk_enable_pll(u32 reg)
+{
+       u32 pll_con = readl(reg_base + reg);
+       pll_con |= PLL_ENABLED;
+       writel(pll_con, reg_base + reg);
+
+       while (!(pll_con & PLL_LOCKED)) {
+               cpu_relax();
+               pll_con = readl(reg_base + reg);
+       }
+}
+
 static void exynos4_clk_wait_for_pll(u32 reg)
 {
        u32 pll_con;
@@ -315,6 +327,9 @@ static int exynos4_clk_suspend(void)
        samsung_clk_save(reg_base, exynos4_save_pll,
                                ARRAY_SIZE(exynos4_clk_pll_regs));
 
+       exynos4_clk_enable_pll(EPLL_CON0);
+       exynos4_clk_enable_pll(VPLL_CON0);
+
        if (exynos4_soc == EXYNOS4210) {
                samsung_clk_save(reg_base, exynos4_save_soc,
                                        ARRAY_SIZE(exynos4210_clk_save));
index 3388d54ba11468a0ab013f1160b1ea2a258ca354..3f80f167ed56d917405aaad1dd5e09598c7fc9e6 100644 (file)
@@ -453,7 +453,8 @@ config GPIO_TS4800
 config GPIO_THUNDERX
        tristate "Cavium ThunderX/OCTEON-TX GPIO"
        depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
-       depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY
+       depends on PCI_MSI
+       select IRQ_DOMAIN_HIERARCHY
        select IRQ_FASTEOI_HIERARCHY_HANDLERS
        help
          Say yes here to support the on-chip GPIO lines on the ThunderX
index dbf869fb63ced2cb2884be234c5275f2893237b2..3233b72b682809e197ed2528f2c9bc634400d878 100644 (file)
@@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
        if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
                irq_set_handler_locked(d, handle_level_irq);
        else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-               irq_set_handler_locked(d, handle_edge_irq);
+               /*
+                * Edge IRQs are already cleared/acked in irq_handler and
+                * do not need to be masked; as a result, the handle_edge_irq()
+                * logic is excessive here and may cause loss of interrupts.
+                * So just use handle_simple_irq.
+                */
+               irq_set_handler_locked(d, handle_simple_irq);
 
        return 0;
 
@@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
 static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
 {
        void __iomem *isr_reg = NULL;
-       u32 isr;
+       u32 enabled, isr, level_mask;
        unsigned int bit;
        struct gpio_bank *bank = gpiobank;
        unsigned long wa_lock_flags;
@@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
        pm_runtime_get_sync(bank->chip.parent);
 
        while (1) {
-               u32 isr_saved, level_mask = 0;
-               u32 enabled;
-
                raw_spin_lock_irqsave(&bank->lock, lock_flags);
 
                enabled = omap_get_gpio_irqbank_mask(bank);
-               isr_saved = isr = readl_relaxed(isr_reg) & enabled;
+               isr = readl_relaxed(isr_reg) & enabled;
 
                if (bank->level_mask)
                        level_mask = bank->level_mask & enabled;
+               else
+                       level_mask = 0;
 
                /* clear edge sensitive interrupts before handler(s) are
                called so that we don't miss any interrupt that occurred
                while executing them */
-               omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
-               omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
-               omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
+               if (isr & ~level_mask)
+                       omap_clear_gpio_irqbank(bank, isr & ~level_mask);
 
                raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
 
@@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 
 /*---------------------------------------------------------------------*/
 
-static void __init omap_gpio_show_rev(struct gpio_bank *bank)
+static void omap_gpio_show_rev(struct gpio_bank *bank)
 {
        static bool called;
        u32 rev;
index 4d2113530735185e5dcdc87553b2764df8121242..eb4528c87c0b3977420a2108c7feaaf9b2a95869 100644 (file)
@@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 
        if (pin <= 255) {
                char ev_name[5];
-               sprintf(ev_name, "_%c%02X",
+               sprintf(ev_name, "_%c%02hhX",
                        agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
                        pin);
                if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
index d805b6e6fe71467ed4f9a85a74fdba1b5ed81998..27743be5b768e13c4be9749537b1af76dfb3f478 100644 (file)
@@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
                         connector->encoder->base.id,
                         connector->encoder->name);
 
-       /* ELD Conn_Type */
-       connector->eld[5] &= ~(3 << 2);
-       if (intel_crtc_has_dp_encoder(crtc_state))
-               connector->eld[5] |= (1 << 2);
-
        connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
 
        if (dev_priv->display.audio_codec_enable)
index 183e87e8ea319ee7b4c8cb67dadbe6aaf159f106..00c6aee0a9a1902978516cd08514fa9330b424c8 100644 (file)
@@ -1163,6 +1163,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
        is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
        is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
 
+       if (port == PORT_A && is_dvi) {
+               DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
+                             is_hdmi ? "/HDMI" : "");
+               is_dvi = false;
+               is_hdmi = false;
+       }
+
        info->supports_dvi = is_dvi;
        info->supports_hdmi = is_hdmi;
        info->supports_dp = is_dp;
index 965988f79a558aaa933a546fb69b72f0ef81cc29..92c1f8e166dc55381ab77bb92b680909131ffc4f 100644 (file)
@@ -216,7 +216,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
 
        mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
 
-       if (IS_BROXTON(dev_priv))
+       if (IS_GEN9_LP(dev_priv))
                mask |= DC_STATE_DEBUG_MASK_CORES;
 
        /* The below bit doesn't need to be cleared ever afterwards */
index 4b4fd1f8110b2f886d5c1299367dbea74eef77cb..476681d5940c7d381c2d48f2c4a1202054e79845 100644 (file)
@@ -1655,7 +1655,8 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 out:
        if (ret && IS_GEN9_LP(dev_priv)) {
                tmp = I915_READ(BXT_PHY_CTL(port));
-               if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
+               if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
+                           BXT_PHY_LANE_POWERDOWN_ACK |
                            BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
                        DRM_ERROR("Port %c enabled but PHY powered down? "
                                  "(PHY_CTL %08x)\n", port_name(port), tmp);
index 00cd17c76fdcc3bc5427f53bf45e416d66400655..64f7b51ed97c18ee036c6315bff13324e975f62e 100644 (file)
@@ -12359,7 +12359,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct drm_crtc *crtc;
        struct intel_crtc_state *intel_cstate;
-       bool hw_check = intel_state->modeset;
        u64 put_domains[I915_MAX_PIPES] = {};
        unsigned crtc_vblank_mask = 0;
        int i;
@@ -12376,7 +12375,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
                if (needs_modeset(new_crtc_state) ||
                    to_intel_crtc_state(new_crtc_state)->update_pipe) {
-                       hw_check = true;
 
                        put_domains[to_intel_crtc(crtc)->pipe] =
                                modeset_get_crtc_power_domains(crtc,
index 09b6709297867d6751957dc12541812fe3f30344..de38d014ed39b937abc348988830a2f188815901 100644 (file)
@@ -208,12 +208,6 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
        },
 };
 
-static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
-{
-       return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
-               BIT(phy_info->channel[DPIO_CH0].port);
-}
-
 static const struct bxt_ddi_phy_info *
 bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
 {
@@ -313,7 +307,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
                            enum dpio_phy phy)
 {
        const struct bxt_ddi_phy_info *phy_info;
-       enum port port;
 
        phy_info = bxt_get_phy_info(dev_priv, phy);
 
@@ -335,19 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
                return false;
        }
 
-       for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
-               u32 tmp = I915_READ(BXT_PHY_CTL(port));
-
-               if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
-                       DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
-                                        "for port %c powered down "
-                                        "(PHY_CTL %08x)\n",
-                                        phy, port_name(port), tmp);
-
-                       return false;
-               }
-       }
-
        return true;
 }
 
index 951e834dd2744f7f2fe84a54723136f743d185cf..28a778b785ac9e02ea7de55878e5fec98f019b98 100644 (file)
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+static void intel_connector_update_eld_conn_type(struct drm_connector *connector)
+{
+       u8 conn_type;
+
+       if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+           connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+               conn_type = DRM_ELD_CONN_TYPE_DP;
+       } else {
+               conn_type = DRM_ELD_CONN_TYPE_HDMI;
+       }
+
+       connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK;
+       connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type;
+}
+
 /**
  * intel_connector_update_modes - update connector from edid
  * @connector: DRM connector device to use
@@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector,
        ret = drm_add_edid_modes(connector, edid);
        drm_edid_to_eld(connector, edid);
 
+       intel_connector_update_eld_conn_type(connector);
+
        return ret;
 }
 
index b66d8e136aa37c3b72d072312d0f06d141223237..b3a087cb0860d99f429719077ae795cf734a0b9e 100644 (file)
@@ -2782,6 +2782,9 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
 
        /* 6. Enable DBUF */
        gen9_dbuf_enable(dev_priv);
+
+       if (resume && dev_priv->csr.dmc_payload)
+               intel_csr_load_program(dev_priv);
 }
 
 #undef CNL_PROCMON_IDX
index 9ea6cd5a1370d92e6eb78c864641986bffbc0086..3cf1a6932facf0b33d25cebbd288477ff6152af7 100644 (file)
@@ -302,26 +302,29 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
        hdmi->mod_clk = devm_clk_get(dev, "mod");
        if (IS_ERR(hdmi->mod_clk)) {
                dev_err(dev, "Couldn't get the HDMI mod clock\n");
-               return PTR_ERR(hdmi->mod_clk);
+               ret = PTR_ERR(hdmi->mod_clk);
+               goto err_disable_bus_clk;
        }
        clk_prepare_enable(hdmi->mod_clk);
 
        hdmi->pll0_clk = devm_clk_get(dev, "pll-0");
        if (IS_ERR(hdmi->pll0_clk)) {
                dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n");
-               return PTR_ERR(hdmi->pll0_clk);
+               ret = PTR_ERR(hdmi->pll0_clk);
+               goto err_disable_mod_clk;
        }
 
        hdmi->pll1_clk = devm_clk_get(dev, "pll-1");
        if (IS_ERR(hdmi->pll1_clk)) {
                dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n");
-               return PTR_ERR(hdmi->pll1_clk);
+               ret = PTR_ERR(hdmi->pll1_clk);
+               goto err_disable_mod_clk;
        }
 
        ret = sun4i_tmds_create(hdmi);
        if (ret) {
                dev_err(dev, "Couldn't create the TMDS clock\n");
-               return ret;
+               goto err_disable_mod_clk;
        }
 
        writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG);
@@ -362,7 +365,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
        ret = sun4i_hdmi_i2c_create(dev, hdmi);
        if (ret) {
                dev_err(dev, "Couldn't create the HDMI I2C adapter\n");
-               return ret;
+               goto err_disable_mod_clk;
        }
 
        drm_encoder_helper_add(&hdmi->encoder,
@@ -422,6 +425,10 @@ err_cleanup_connector:
        drm_encoder_cleanup(&hdmi->encoder);
 err_del_i2c_adapter:
        i2c_del_adapter(hdmi->i2c);
+err_disable_mod_clk:
+       clk_disable_unprepare(hdmi->mod_clk);
+err_disable_bus_clk:
+       clk_disable_unprepare(hdmi->bus_clk);
        return ret;
 }
 
@@ -434,6 +441,8 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
        drm_connector_cleanup(&hdmi->connector);
        drm_encoder_cleanup(&hdmi->encoder);
        i2c_del_adapter(hdmi->i2c);
+       clk_disable_unprepare(hdmi->mod_clk);
+       clk_disable_unprepare(hdmi->bus_clk);
 }
 
 static const struct component_ops sun4i_hdmi_ops = {
index 0a3117cc29e70c54b3f9b269889d9b4ccf2fb56c..779c5ae47f36b5857dfed6a67342472ce4ddc571 100644 (file)
@@ -230,7 +230,7 @@ config HID_CMEDIA
 
 config HID_CP2112
        tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support"
-       depends on USB_HID && I2C && GPIOLIB
+       depends on USB_HID && HIDRAW && I2C && GPIOLIB
        select GPIOLIB_IRQCHIP
        ---help---
        Support for Silicon Labs CP2112 HID USB to SMBus Master Bridge.
@@ -281,6 +281,7 @@ config HID_ELECOM
        Support for ELECOM devices:
          - BM084 Bluetooth Mouse
          - DEFT Trackball (Wired and wireless)
+         - HUGE Trackball (Wired and wireless)
 
 config HID_ELO
        tristate "ELO USB 4000/4500 touchscreen"
@@ -749,11 +750,10 @@ config HID_PRIMAX
        HID standard.
 
 config HID_RETRODE
-       tristate "Retrode"
+       tristate "Retrode 2 USB adapter for vintage video games"
        depends on USB_HID
        ---help---
        Support for
-
          * Retrode 2 cartridge and controller adapter
 
 config HID_ROCCAT
index ed9c0ea5b026efa698462bc389307bd16427f98c..b1eeb4839bfc8f4747a1610077fda3ae6c7e77a7 100644 (file)
 #define ADDRESS_U1_PAD_BTN             0x00800052
 #define ADDRESS_U1_SP_BTN              0x0080009F
 
+#define T4_INPUT_REPORT_LEN                    sizeof(struct t4_input_report)
+#define T4_FEATURE_REPORT_LEN          T4_INPUT_REPORT_LEN
+#define T4_FEATURE_REPORT_ID           7
+#define T4_CMD_REGISTER_READ                   0x08
+#define T4_CMD_REGISTER_WRITE                  0x07
+
+#define T4_ADDRESS_BASE                                0xC2C0
+#define PRM_SYS_CONFIG_1                       (T4_ADDRESS_BASE + 0x0002)
+#define T4_PRM_FEED_CONFIG_1           (T4_ADDRESS_BASE + 0x0004)
+#define T4_PRM_FEED_CONFIG_4           (T4_ADDRESS_BASE + 0x001A)
+#define T4_PRM_ID_CONFIG_3                     (T4_ADDRESS_BASE + 0x00B0)
+
+
+#define T4_FEEDCFG4_ADVANCED_ABS_ENABLE                        0x01
+#define T4_I2C_ABS     0x78
+
+#define T4_COUNT_PER_ELECTRODE         256
 #define MAX_TOUCHES    5
 
+enum dev_num {
+       U1,
+       T4,
+       UNKNOWN,
+};
+
 /**
  * struct u1_data
  *
  * @input2: pointer to the kernel input2 device
  * @hdev: pointer to the struct hid_device
  *
- * @dev_ctrl: device control parameter
  * @dev_type: device type
- * @sen_line_num_x: number of sensor line of X
- * @sen_line_num_y: number of sensor line of Y
- * @pitch_x: sensor pitch of X
- * @pitch_y: sensor pitch of Y
- * @resolution: resolution
- * @btn_info: button information
+ * @max_fingers: total number of fingers
+ * @has_sp: boolean of sp existence
+ * @sp_btn_info: button information
  * @x_active_len_mm: active area length of X (mm)
  * @y_active_len_mm: active area length of Y (mm)
  * @x_max: maximum x coordinate value
  * @y_max: maximum y coordinate value
+ * @x_min: minimum x coordinate value
+ * @y_min: minimum y coordinate value
  * @btn_cnt: number of buttons
  * @sp_btn_cnt: number of stick buttons
  */
-struct u1_dev {
+struct alps_dev {
        struct input_dev *input;
        struct input_dev *input2;
        struct hid_device *hdev;
 
-       u8      dev_ctrl;
-       u8      dev_type;
-       u8      sen_line_num_x;
-       u8      sen_line_num_y;
-       u8      pitch_x;
-       u8      pitch_y;
-       u8      resolution;
-       u8      btn_info;
+       enum dev_num dev_type;
+       u8  max_fingers;
+       u8  has_sp;
        u8      sp_btn_info;
        u32     x_active_len_mm;
        u32     y_active_len_mm;
        u32     x_max;
        u32     y_max;
+       u32     x_min;
+       u32     y_min;
        u32     btn_cnt;
        u32     sp_btn_cnt;
 };
 
+struct t4_contact_data {
+       u8  palm;
+       u8      x_lo;
+       u8      x_hi;
+       u8      y_lo;
+       u8      y_hi;
+};
+
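+/* Raw layout of a T4 input report: up to five contacts plus button state. */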
+struct t4_input_report {
+       u8  reportID;
+       u8  numContacts;
+       struct t4_contact_data contact[5];
+       u8  button;
+       u8  track[5];
+       u8  zx[5], zy[5];
+       u8  palmTime[5];
+       u8  kilroy;
+       u16 timeStamp;
+};
+
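+/*
+ * Fletcher-16 style checksum over 'length' bytes of 'buffer' starting at
+ * 'offset'; used to build and verify T4 register access feature reports.
+ */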
+static u16 t4_calc_check_sum(u8 *buffer,
+               unsigned long offset, unsigned long length)
+{
+       u16 sum1 = 0xFF, sum2 = 0xFF;
+       unsigned long i = 0;
+
+       if (offset + length >= 50)
+               return 0;
+
+       while (length > 0) {
+               u32 tlen = length > 20 ? 20 : length;
+
+               length -= tlen;
+
+               do {
+                       sum1 += buffer[offset + i];
+                       sum2 += sum1;
+                       i++;
+               } while (--tlen > 0);
+
+               sum1 = (sum1 & 0xFF) + (sum1 >> 8);
+               sum2 = (sum2 & 0xFF) + (sum2 >> 8);
+       }
+
+       sum1 = (sum1 & 0xFF) + (sum1 >> 8);
+       sum2 = (sum2 & 0xFF) + (sum2 >> 8);
+
+       return(sum2 << 8 | sum1);
+}
+
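+/*
+ * Read or write a single byte of a T4 device register via HID feature
+ * reports. Reads are validated against the echoed address, size and
+ * checksum before the value is returned.
+ */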
+static int t4_read_write_register(struct hid_device *hdev, u32 address,
+       u8 *read_val, u8 write_val, bool read_flag)
+{
+       int ret;
+       u16 check_sum;
+       u8 *input;
+       u8 *readbuf;
+
+       input = kzalloc(T4_FEATURE_REPORT_LEN, GFP_KERNEL);
+       if (!input)
+               return -ENOMEM;
+
+       input[0] = T4_FEATURE_REPORT_ID;
+       if (read_flag) {
+               input[1] = T4_CMD_REGISTER_READ;
+               input[8] = 0x00;
+       } else {
+               input[1] = T4_CMD_REGISTER_WRITE;
+               input[8] = write_val;
+       }
+       put_unaligned_le32(address, input + 2);
+       input[6] = 1;
+       input[7] = 0;
+
+       /* Calculate the checksum */
+       check_sum = t4_calc_check_sum(input, 1, 8);
+       input[9] = (u8)check_sum;
+       input[10] = (u8)(check_sum >> 8);
+       input[11] = 0;
+
+       ret = hid_hw_raw_request(hdev, T4_FEATURE_REPORT_ID, input,
+                       T4_FEATURE_REPORT_LEN,
+                       HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+       if (ret < 0) {
+               dev_err(&hdev->dev, "failed to read command (%d)\n", ret);
+               goto exit;
+       }
+
+       readbuf = kzalloc(T4_FEATURE_REPORT_LEN, GFP_KERNEL);
+       if (read_flag) {
+               if (!readbuf) {
+                       ret = -ENOMEM;
+                       goto exit;
+               }
+
+               ret = hid_hw_raw_request(hdev, T4_FEATURE_REPORT_ID, readbuf,
+                               T4_FEATURE_REPORT_LEN,
+                               HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+               if (ret < 0) {
+                       dev_err(&hdev->dev, "failed read register (%d)\n", ret);
+                       goto exit_readbuf;
+               }
+
+               if (*(u32 *)&readbuf[6] != address) {
+                       dev_err(&hdev->dev, "read register address error (%x,%x)\n",
+                       *(u32 *)&readbuf[6], address);
+                       goto exit_readbuf;
+               }
+
+               if (*(u16 *)&readbuf[10] != 1) {
+                       dev_err(&hdev->dev, "read register size error (%x)\n",
+                       *(u16 *)&readbuf[10]);
+                       goto exit_readbuf;
+               }
+
+               check_sum = t4_calc_check_sum(readbuf, 6, 7);
+               if (*(u16 *)&readbuf[13] != check_sum) {
+                       dev_err(&hdev->dev, "read register checksum error (%x,%x)\n",
+                       *(u16 *)&readbuf[13], check_sum);
+                       goto exit_readbuf;
+               }
+
+               *read_val = readbuf[12];
+       }
+
+       ret = 0;
+
+exit_readbuf:
+       kfree(readbuf);
+exit:
+       kfree(input);
+       return ret;
+}
+
 static int u1_read_write_register(struct hid_device *hdev, u32 address,
        u8 *read_val, u8 write_val, bool read_flag)
 {
@@ -165,21 +317,60 @@ exit:
        return ret;
 }
 
-static int alps_raw_event(struct hid_device *hdev,
-               struct hid_report *report, u8 *data, int size)
+static int t4_raw_event(struct alps_dev *hdata, u8 *data, int size)
+{
+       unsigned int x, y, z;
+       int i;
+       struct t4_input_report *p_report = (struct t4_input_report *)data;
+
+       if (!data)
+               return 0;
+       for (i = 0; i < hdata->max_fingers; i++) {
+               x = p_report->contact[i].x_hi << 8 | p_report->contact[i].x_lo;
+               y = p_report->contact[i].y_hi << 8 | p_report->contact[i].y_lo;
+               y = hdata->y_max - y + hdata->y_min;
+               z = (p_report->contact[i].palm < 0x80 &&
+                       p_report->contact[i].palm > 0) * 62;
+               if (x == 0xffff) {
+                       x = 0;
+                       y = 0;
+                       z = 0;
+               }
+               input_mt_slot(hdata->input, i);
+
+               input_mt_report_slot_state(hdata->input,
+                       MT_TOOL_FINGER, z != 0);
+
+               if (!z)
+                       continue;
+
+               input_report_abs(hdata->input, ABS_MT_POSITION_X, x);
+               input_report_abs(hdata->input, ABS_MT_POSITION_Y, y);
+               input_report_abs(hdata->input, ABS_MT_PRESSURE, z);
+       }
+       input_mt_sync_frame(hdata->input);
+
+       input_report_key(hdata->input, BTN_LEFT, p_report->button);
+
+       input_sync(hdata->input);
+       return 1;
+}
+
+static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size)
 {
        unsigned int x, y, z;
        int i;
        short sp_x, sp_y;
-       struct u1_dev *hdata = hid_get_drvdata(hdev);
 
+       if (!data)
+               return 0;
        switch (data[0]) {
        case U1_MOUSE_REPORT_ID:
                break;
        case U1_FEATURE_REPORT_ID:
                break;
        case U1_ABSOLUTE_REPORT_ID:
-               for (i = 0; i < MAX_TOUCHES; i++) {
+               for (i = 0; i < hdata->max_fingers; i++) {
                        u8 *contact = &data[i * 5];
 
                        x = get_unaligned_le16(contact + 3);
@@ -241,122 +432,253 @@ static int alps_raw_event(struct hid_device *hdev,
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int alps_post_reset(struct hid_device *hdev)
+static int alps_raw_event(struct hid_device *hdev,
+               struct hid_report *report, u8 *data, int size)
 {
-       return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
-                               NULL, U1_TP_ABS_MODE | U1_SP_ABS_MODE, false);
+       int ret = 0;
+       struct alps_dev *hdata = hid_get_drvdata(hdev);
+
+       switch (hdev->product) {
+       case HID_PRODUCT_ID_T4_BTNLESS:
+               ret = t4_raw_event(hdata, data, size);
+               break;
+       default:
+               ret = u1_raw_event(hdata, data, size);
+               break;
+       }
+       return ret;
 }
 
-static int alps_post_resume(struct hid_device *hdev)
+static int __maybe_unused alps_post_reset(struct hid_device *hdev)
 {
-       return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
-                               NULL, U1_TP_ABS_MODE | U1_SP_ABS_MODE, false);
+       int ret = -1;
+       struct alps_dev *data = hid_get_drvdata(hdev);
+
+       switch (data->dev_type) {
+       case T4:
+               ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_1,
+                       NULL, T4_I2C_ABS, false);
+               ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_4,
+                       NULL, T4_FEEDCFG4_ADVANCED_ABS_ENABLE, false);
+               break;
+       case U1:
+               ret = u1_read_write_register(hdev,
+                       ADDRESS_U1_DEV_CTRL_1, NULL,
+                       U1_TP_ABS_MODE | U1_SP_ABS_MODE, false);
+               break;
+       default:
+               break;
+       }
+       return ret;
 }
-#endif /* CONFIG_PM */
 
-static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
+static int __maybe_unused alps_post_resume(struct hid_device *hdev)
 {
-       struct u1_dev *data = hid_get_drvdata(hdev);
-       struct input_dev *input = hi->input, *input2;
-       struct u1_dev devInfo;
-       int ret;
-       int res_x, res_y, i;
-
-       data->input = input;
-
-       hid_dbg(hdev, "Opening low level driver\n");
-       ret = hid_hw_open(hdev);
-       if (ret)
-               return ret;
+       return alps_post_reset(hdev);
+}
 
-       /* Allow incoming hid reports */
-       hid_device_io_start(hdev);
+static int u1_init(struct hid_device *hdev, struct alps_dev *pri_data)
+{
+       int ret;
+       u8 tmp, dev_ctrl, sen_line_num_x, sen_line_num_y;
+       u8 pitch_x, pitch_y, resolution;
 
        /* Device initialization */
        ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
-                       &devInfo.dev_ctrl, 0, true);
+                       &dev_ctrl, 0, true);
        if (ret < 0) {
                dev_err(&hdev->dev, "failed U1_DEV_CTRL_1 (%d)\n", ret);
                goto exit;
        }
 
-       devInfo.dev_ctrl &= ~U1_DISABLE_DEV;
-       devInfo.dev_ctrl |= U1_TP_ABS_MODE;
+       dev_ctrl &= ~U1_DISABLE_DEV;
+       dev_ctrl |= U1_TP_ABS_MODE;
        ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
-                       NULL, devInfo.dev_ctrl, false);
+                       NULL, dev_ctrl, false);
        if (ret < 0) {
                dev_err(&hdev->dev, "failed to change TP mode (%d)\n", ret);
                goto exit;
        }
 
        ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_X,
-                       &devInfo.sen_line_num_x, 0, true);
+                       &sen_line_num_x, 0, true);
        if (ret < 0) {
                dev_err(&hdev->dev, "failed U1_NUM_SENS_X (%d)\n", ret);
                goto exit;
        }
 
        ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_Y,
-                       &devInfo.sen_line_num_y, 0, true);
+                       &sen_line_num_y, 0, true);
                if (ret < 0) {
                dev_err(&hdev->dev, "failed U1_NUM_SENS_Y (%d)\n", ret);
                goto exit;
        }
 
        ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_X,
-                       &devInfo.pitch_x, 0, true);
+                       &pitch_x, 0, true);
        if (ret < 0) {
                dev_err(&hdev->dev, "failed U1_PITCH_SENS_X (%d)\n", ret);
                goto exit;
        }
 
        ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_Y,
-                       &devInfo.pitch_y, 0, true);
+                       &pitch_y, 0, true);
        if (ret < 0) {
                dev_err(&hdev->dev, "failed U1_PITCH_SENS_Y (%d)\n", ret);
                goto exit;
        }
 
        ret = u1_read_write_register(hdev, ADDRESS_U1_RESO_DWN_ABS,
-               &devInfo.resolution, 0, true);
+               &resolution, 0, true);
        if (ret < 0) {
                dev_err(&hdev->dev, "failed U1_RESO_DWN_ABS (%d)\n", ret);
                goto exit;
        }
+       pri_data->x_active_len_mm =
+               (pitch_x * (sen_line_num_x - 1)) / 10;
+       pri_data->y_active_len_mm =
+               (pitch_y * (sen_line_num_y - 1)) / 10;
+
+       pri_data->x_max =
+               (resolution << 2) * (sen_line_num_x - 1);
+       pri_data->x_min = 1;
+       pri_data->y_max =
+               (resolution << 2) * (sen_line_num_y - 1);
+       pri_data->y_min = 1;
 
        ret = u1_read_write_register(hdev, ADDRESS_U1_PAD_BTN,
-                       &devInfo.btn_info, 0, true);
+                       &tmp, 0, true);
        if (ret < 0) {
                dev_err(&hdev->dev, "failed U1_PAD_BTN (%d)\n", ret);
                goto exit;
        }
+       if ((tmp & 0x0F) == (tmp & 0xF0) >> 4) {
+               pri_data->btn_cnt = (tmp & 0x0F);
+       } else {
+               /* Button pad */
+               pri_data->btn_cnt = 1;
+       }
 
+       pri_data->has_sp = 0;
        /* Check StickPointer device */
        ret = u1_read_write_register(hdev, ADDRESS_U1_DEVICE_TYP,
-                       &devInfo.dev_type, 0, true);
+                       &tmp, 0, true);
        if (ret < 0) {
                dev_err(&hdev->dev, "failed U1_DEVICE_TYP (%d)\n", ret);
                goto exit;
        }
+       if (tmp & U1_DEVTYPE_SP_SUPPORT) {
+               dev_ctrl |= U1_SP_ABS_MODE;
+               ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
+                       NULL, dev_ctrl, false);
+               if (ret < 0) {
+                       dev_err(&hdev->dev, "failed SP mode (%d)\n", ret);
+                       goto exit;
+               }
 
-       devInfo.x_active_len_mm =
-               (devInfo.pitch_x * (devInfo.sen_line_num_x - 1)) / 10;
-       devInfo.y_active_len_mm =
-               (devInfo.pitch_y * (devInfo.sen_line_num_y - 1)) / 10;
+               ret = u1_read_write_register(hdev, ADDRESS_U1_SP_BTN,
+                       &pri_data->sp_btn_info, 0, true);
+               if (ret < 0) {
+                       dev_err(&hdev->dev, "failed U1_SP_BTN (%d)\n", ret);
+                       goto exit;
+               }
+               pri_data->has_sp = 1;
+       }
+       pri_data->max_fingers = 5;
+exit:
+       return ret;
+}
 
-       devInfo.x_max =
-               (devInfo.resolution << 2) * (devInfo.sen_line_num_x - 1);
-       devInfo.y_max =
-               (devInfo.resolution << 2) * (devInfo.sen_line_num_y - 1);
+static int T4_init(struct hid_device *hdev, struct alps_dev *pri_data)
+{
+       int ret;
+       u8 tmp, sen_line_num_x, sen_line_num_y;
+
+       ret = t4_read_write_register(hdev, T4_PRM_ID_CONFIG_3, &tmp, 0, true);
+       if (ret < 0) {
+               dev_err(&hdev->dev, "failed T4_PRM_ID_CONFIG_3 (%d)\n", ret);
+               goto exit;
+       }
+       sen_line_num_x = 16 + ((tmp & 0x0F)  | (tmp & 0x08 ? 0xF0 : 0));
+       sen_line_num_y = 12 + (((tmp & 0xF0) >> 4)  | (tmp & 0x80 ? 0xF0 : 0));
+
+       pri_data->x_max = sen_line_num_x * T4_COUNT_PER_ELECTRODE;
+       pri_data->x_min = T4_COUNT_PER_ELECTRODE;
+       pri_data->y_max = sen_line_num_y * T4_COUNT_PER_ELECTRODE;
+       pri_data->y_min = T4_COUNT_PER_ELECTRODE;
+       pri_data->x_active_len_mm = pri_data->y_active_len_mm = 0;
+       pri_data->btn_cnt = 1;
+
+       ret = t4_read_write_register(hdev, PRM_SYS_CONFIG_1, &tmp, 0, true);
+       if (ret < 0) {
+               dev_err(&hdev->dev, "failed PRM_SYS_CONFIG_1 (%d)\n", ret);
+               goto exit;
+       }
+       tmp |= 0x02;
+       ret = t4_read_write_register(hdev, PRM_SYS_CONFIG_1, NULL, tmp, false);
+       if (ret < 0) {
+               dev_err(&hdev->dev, "failed PRM_SYS_CONFIG_1 (%d)\n", ret);
+               goto exit;
+       }
+
+       ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_1,
+                                       NULL, T4_I2C_ABS, false);
+       if (ret < 0) {
+               dev_err(&hdev->dev, "failed T4_PRM_FEED_CONFIG_1 (%d)\n", ret);
+               goto exit;
+       }
+
+       ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_4, NULL,
+                               T4_FEEDCFG4_ADVANCED_ABS_ENABLE, false);
+       if (ret < 0) {
+               dev_err(&hdev->dev, "failed T4_PRM_FEED_CONFIG_4 (%d)\n", ret);
+               goto exit;
+       }
+       pri_data->max_fingers = 5;
+       pri_data->has_sp = 0;
+exit:
+       return ret;
+}
+
+static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
+{
+       struct alps_dev *data = hid_get_drvdata(hdev);
+       struct input_dev *input = hi->input, *input2;
+       int ret;
+       int res_x, res_y, i;
+
+       data->input = input;
+
+       hid_dbg(hdev, "Opening low level driver\n");
+       ret = hid_hw_open(hdev);
+       if (ret)
+               return ret;
+
+       /* Allow incoming hid reports */
+       hid_device_io_start(hdev);
+       switch (data->dev_type) {
+       case T4:
+               ret = T4_init(hdev, data);
+               break;
+       case U1:
+               ret = u1_init(hdev, data);
+               break;
+       default:
+               break;
+       }
+
+       if (ret)
+               goto exit;
 
        __set_bit(EV_ABS, input->evbit);
-       input_set_abs_params(input, ABS_MT_POSITION_X, 1, devInfo.x_max, 0, 0);
-       input_set_abs_params(input, ABS_MT_POSITION_Y, 1, devInfo.y_max, 0, 0);
+       input_set_abs_params(input, ABS_MT_POSITION_X,
+                                               data->x_min, data->x_max, 0, 0);
+       input_set_abs_params(input, ABS_MT_POSITION_Y,
+                                               data->y_min, data->y_max, 0, 0);
 
-       if (devInfo.x_active_len_mm && devInfo.y_active_len_mm) {
-               res_x = (devInfo.x_max - 1) / devInfo.x_active_len_mm;
-               res_y = (devInfo.y_max - 1) / devInfo.y_active_len_mm;
+       if (data->x_active_len_mm && data->y_active_len_mm) {
+               res_x = (data->x_max - 1) / data->x_active_len_mm;
+               res_y = (data->y_max - 1) / data->y_active_len_mm;
 
                input_abs_set_res(input, ABS_MT_POSITION_X, res_x);
                input_abs_set_res(input, ABS_MT_POSITION_Y, res_y);
@@ -364,49 +686,25 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
 
        input_set_abs_params(input, ABS_MT_PRESSURE, 0, 64, 0, 0);
 
-       input_mt_init_slots(input, MAX_TOUCHES, INPUT_MT_POINTER);
+       input_mt_init_slots(input, data->max_fingers, INPUT_MT_POINTER);
 
        __set_bit(EV_KEY, input->evbit);
-       if ((devInfo.btn_info & 0x0F) == (devInfo.btn_info & 0xF0) >> 4) {
-               devInfo.btn_cnt = (devInfo.btn_info & 0x0F);
-       } else {
-               /* Button pad */
-               devInfo.btn_cnt = 1;
+
+       if (data->btn_cnt == 1)
                __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
-       }
 
-       for (i = 0; i < devInfo.btn_cnt; i++)
+       for (i = 0; i < data->btn_cnt; i++)
                __set_bit(BTN_LEFT + i, input->keybit);
 
-
        /* Stick device initialization */
-       if (devInfo.dev_type & U1_DEVTYPE_SP_SUPPORT) {
-
+       if (data->has_sp) {
                input2 = input_allocate_device();
                if (!input2) {
-                       ret = -ENOMEM;
-                       goto exit;
-               }
-
-               data->input2 = input2;
-
-               devInfo.dev_ctrl |= U1_SP_ABS_MODE;
-               ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
-                       NULL, devInfo.dev_ctrl, false);
-               if (ret < 0) {
-                       dev_err(&hdev->dev, "failed SP mode (%d)\n", ret);
-                       input_free_device(input2);
-                       goto exit;
-               }
-
-               ret = u1_read_write_register(hdev, ADDRESS_U1_SP_BTN,
-                       &devInfo.sp_btn_info, 0, true);
-               if (ret < 0) {
-                       dev_err(&hdev->dev, "failed U1_SP_BTN (%d)\n", ret);
                        input_free_device(input2);
                        goto exit;
                }
 
+               data->input2 = input2;
                input2->phys = input->phys;
                input2->name = "DualPoint Stick";
                input2->id.bustype = BUS_I2C;
@@ -416,8 +714,8 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
                input2->dev.parent = input->dev.parent;
 
                __set_bit(EV_KEY, input2->evbit);
-               devInfo.sp_btn_cnt = (devInfo.sp_btn_info & 0x0F);
-               for (i = 0; i < devInfo.sp_btn_cnt; i++)
+               data->sp_btn_cnt = (data->sp_btn_info & 0x0F);
+               for (i = 0; i < data->sp_btn_cnt; i++)
                        __set_bit(BTN_LEFT + i, input2->keybit);
 
                __set_bit(EV_REL, input2->evbit);
@@ -426,8 +724,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
                __set_bit(INPUT_PROP_POINTER, input2->propbit);
                __set_bit(INPUT_PROP_POINTING_STICK, input2->propbit);
 
-               ret = input_register_device(data->input2);
-               if (ret) {
+               if (input_register_device(data->input2)) {
                        input_free_device(input2);
                        goto exit;
                }
@@ -448,10 +745,9 @@ static int alps_input_mapping(struct hid_device *hdev,
 
 static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
-       struct u1_dev *data = NULL;
+       struct alps_dev *data = NULL;
        int ret;
-
-       data = devm_kzalloc(&hdev->dev, sizeof(struct u1_dev), GFP_KERNEL);
+       data = devm_kzalloc(&hdev->dev, sizeof(struct alps_dev), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -466,6 +762,18 @@ static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return ret;
        }
 
+       switch (hdev->product) {
+       case HID_DEVICE_ID_ALPS_T4_BTNLESS:
+               data->dev_type = T4;
+               break;
+       case HID_DEVICE_ID_ALPS_U1_DUAL:
+       case HID_DEVICE_ID_ALPS_U1:
+               data->dev_type = U1;
+               break;
+       default:
+               data->dev_type = UNKNOWN;
+       }
+
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret) {
                hid_err(hdev, "hw start failed\n");
@@ -483,6 +791,10 @@ static void alps_remove(struct hid_device *hdev)
 static const struct hid_device_id alps_id[] = {
        { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
                USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+       { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
+               USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) },
+       { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
+               USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, alps_id);
index 50c294be8324ab803450c00165717962e1603d13..1bb7b63b31508cf95c23e661370e32965c978c3a 100644 (file)
@@ -67,6 +67,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_USE_KBD_BACKLIGHT                BIT(5)
 #define QUIRK_T100_KEYBOARD            BIT(6)
 #define QUIRK_T100CHI                  BIT(7)
+#define QUIRK_G752_KEYBOARD            BIT(8)
 
 #define I2C_KEYBOARD_QUIRKS                    (QUIRK_FIX_NOTEBOOK_REPORT | \
                                                 QUIRK_NO_INIT_REPORTS | \
@@ -670,6 +671,11 @@ static void asus_remove(struct hid_device *hdev)
        hid_hw_stop(hdev);
 }
 
+static const __u8 asus_g752_fixed_rdesc[] = {
+        0x19, 0x00,                    /*   Usage Minimum (0x00)       */
+        0x2A, 0xFF, 0x00,              /*   Usage Maximum (0xFF)       */
+};
+
 static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
@@ -708,6 +714,27 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                rdesc[391] = 0xff;
                rdesc[402] = 0x00;
        }
+       if (drvdata->quirks & QUIRK_G752_KEYBOARD &&
+                *rsize == 75 && rdesc[61] == 0x15 && rdesc[62] == 0x00) {
+               /* report is missing usage minimum and maximum */
+               __u8 *new_rdesc;
+               size_t new_size = *rsize + sizeof(asus_g752_fixed_rdesc);
+
+               new_rdesc = devm_kzalloc(&hdev->dev, new_size, GFP_KERNEL);
+               if (new_rdesc == NULL)
+                       return rdesc;
+
+               hid_info(hdev, "Fixing up Asus G752 keyb report descriptor\n");
+               /* copy the valid part */
+               memcpy(new_rdesc, rdesc, 61);
+               /* insert missing part */
+               memcpy(new_rdesc + 61, asus_g752_fixed_rdesc, sizeof(asus_g752_fixed_rdesc));
+               /* copy remaining data */
+               memcpy(new_rdesc + 61 + sizeof(asus_g752_fixed_rdesc), rdesc + 61, *rsize - 61);
+
+               *rsize = new_size;
+               rdesc = new_rdesc;
+       }
 
        return rdesc;
 }
@@ -718,9 +745,11 @@ static const struct hid_device_id asus_devices[] = {
        { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD), I2C_TOUCHPAD_QUIRKS },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
-               USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
+               USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1), QUIRK_USE_KBD_BACKLIGHT },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+               USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3), QUIRK_G752_KEYBOARD },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD),
          QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
index 9bc91160819b6eaeac1b0368bdbdfe15a1328422..f3fcb836a1f9edd0af5b1f31362987d22eff11b2 100644 (file)
@@ -1662,7 +1662,7 @@ static struct bin_attribute dev_bin_attr_report_desc = {
        .size = HID_MAX_DESCRIPTOR_SIZE,
 };
 
-static struct device_attribute dev_attr_country = {
+static const struct device_attribute dev_attr_country = {
        .attr = { .name = "country", .mode = 0444 },
        .show = show_country,
 };
@@ -1889,6 +1889,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
 #endif
 #if IS_ENABLED(CONFIG_HID_ALPS)
        { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+       { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+       { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) },
+       { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) },
 #endif
 #if IS_ENABLED(CONFIG_HID_APPLE)
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
@@ -1979,6 +1982,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
@@ -2032,6 +2036,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
 #endif
 #if IS_ENABLED(CONFIG_HID_ELO)
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
@@ -2327,6 +2333,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605) },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
@@ -3119,4 +3126,3 @@ MODULE_AUTHOR("Andreas Gal");
 MODULE_AUTHOR("Vojtech Pavlik");
 MODULE_AUTHOR("Jiri Kosina");
 MODULE_LICENSE("GPL");
-
index 078026f63b6f40999a2aa1feb8023d3905a6044c..68cdc962265b1051d7e399e0c6a181a4426673ed 100644 (file)
@@ -21,7 +21,7 @@
  * Data Sheet:
  *   http://www.silabs.com/Support%20Documents/TechnicalDocs/CP2112.pdf
  * Programming Interface Specification:
- *   http://www.silabs.com/Support%20Documents/TechnicalDocs/AN495.pdf
+ *   https://www.silabs.com/documents/public/application-notes/an495-cp2112-interface-specification.pdf
  */
 
 #include <linux/gpio.h>
@@ -196,6 +196,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
                                 HID_REQ_GET_REPORT);
        if (ret != CP2112_GPIO_CONFIG_LENGTH) {
                hid_err(hdev, "error requesting GPIO config: %d\n", ret);
+               if (ret >= 0)
+                       ret = -EIO;
                goto exit;
        }
 
@@ -205,8 +207,10 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
        ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
                                 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
                                 HID_REQ_SET_REPORT);
-       if (ret < 0) {
+       if (ret != CP2112_GPIO_CONFIG_LENGTH) {
                hid_err(hdev, "error setting GPIO config: %d\n", ret);
+               if (ret >= 0)
+                       ret = -EIO;
                goto exit;
        }
 
@@ -214,7 +218,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 
 exit:
        mutex_unlock(&dev->lock);
-       return ret < 0 ? ret : -EIO;
+       return ret;
 }
 
 static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
index e2c7465df69f3ae74c2cb1979c531b02e2934089..54aeea57d2099bd8c0a1acf41b9d2f0c21c58ee6 100644 (file)
@@ -3,6 +3,7 @@
  *  Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
  *  Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
  *  Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
+ *  Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
  */
 
 /*
@@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                break;
        case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
        case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
-               /* The DEFT trackball has eight buttons, but its descriptor only
-                * reports five, disabling the three Fn buttons on the top of
-                * the mouse.
+       case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
+       case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
+               /* The DEFT/HUGE trackball has eight buttons, but its descriptor
+                * only reports five, disabling the three Fn buttons on the top
+                * of the mouse.
                 *
                 * Apply the following diff to the descriptor:
                 *
@@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                 * End Collection,                     End Collection,
                 */
                if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
-                       hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
+                       hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
                        rdesc[13] = 8; /* Button/Variable Report Count */
                        rdesc[21] = 8; /* Button/Variable Usage Maximum */
                        rdesc[29] = 0; /* Button/Constant Report Count */
@@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, elecom_devices);
index 6039f071fab1370da4a684e801883ef23e9706d4..3aa2bb9f0f811dbe118de6f31ce25900c33f7fd1 100644 (file)
@@ -313,7 +313,7 @@ static void mousevsc_on_receive(struct hv_device *device,
 
                break;
        default:
-               pr_err("unsupported hid msg type - type %d len %d",
+               pr_err("unsupported hid msg type - type %d len %d\n",
                       hid_msg->header.type, hid_msg->header.size);
                break;
        }
index a98919199858717ecd173ba2ef04f014edcb109f..5da3d6256d25353054060dbc8dfbb91ec62ca563 100644 (file)
@@ -77,6 +77,9 @@
 #define HID_DEVICE_ID_ALPS_U1_DUAL     0x120B
 #define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F
 #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP    0x1220
+#define HID_DEVICE_ID_ALPS_U1          0x1215
+#define HID_DEVICE_ID_ALPS_T4_BTNLESS  0x120C
+
 
 #define USB_VENDOR_ID_AMI              0x046b
 #define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE      0xff10
 #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD     0x0101
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837
+#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
 
 #define USB_VENDOR_ID_ATEN             0x0557
 #define USB_DEVICE_ID_ATEN_UC100KM     0x2004
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
 #define USB_DEVICE_ID_ELECOM_DEFT_WIRED        0x00fe
 #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS     0x00ff
+#define USB_DEVICE_ID_ELECOM_HUGE_WIRED        0x010c
+#define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS     0x010d
 
 #define USB_VENDOR_ID_DREAM_CHEEKY     0x1d34
 #define USB_DEVICE_ID_DREAM_CHEEKY_WN  0x0004
 #define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
 #define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
 
+#define I2C_VENDOR_ID_HANTICK          0x0911
+#define I2C_PRODUCT_ID_HANTICK_5288    0x5288
+
 #define USB_VENDOR_ID_HANWANG          0x0b57
 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST     0x5000
 #define USB_DEVICE_ID_HANWANG_TABLET_LAST      0x8fff
 #define USB_DEVICE_ID_MCC_PMD1024LS    0x0076
 #define USB_DEVICE_ID_MCC_PMD1208LS    0x007a
 
+#define USB_VENDOR_ID_MCS              0x16d0
+#define USB_DEVICE_ID_MCS_GAMEPADBLOCK 0x0bcc
+
 #define USB_VENDOR_ID_MGE              0x0463
 #define USB_DEVICE_ID_MGE_UPS          0xffff
 #define USB_DEVICE_ID_MGE_UPS1         0x0001
index 199f6a01fc629ead34108da7ddc895269ed0820a..04d01b57d94c8280d2d6a63ef863136165555c11 100644 (file)
@@ -797,6 +797,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                        map_key_clear(BTN_STYLUS);
                        break;
 
+               case 0x45: /* ERASER */
+                       /*
+                        * This event is reported when eraser tip touches the surface.
+                        * This event is reported when the eraser tip touches the surface.
+                        * The actual eraser (BTN_TOOL_RUBBER) is set by the Invert usage
+                        * when the tool comes into proximity.
+                       map_key_clear(BTN_TOUCH);
+                       break;
+
                case 0x46: /* TabletPick */
                case 0x5a: /* SecondaryBarrelSwitch */
                        map_key_clear(BTN_STYLUS2);
index 52026dc94d5c4b0306ce585be293cbe2cb1910d9..596227ddb6e078af028b2d83102d40e5006b147a 100644 (file)
@@ -756,7 +756,9 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
        /* Setup wireless link with Logitech Wii wheel */
        if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
-               const unsigned char cbuf[] = { 0x00, 0xAF,  0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+               static const unsigned char cbuf[] = {
+                       0x00, 0xAF,  0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+               };
                u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL);
 
                if (!buf) {
index 1fc12e3570359248f2cd2847aad219ad4f134a81..512d67e1aae386e37b90288f800734b5a826aebe 100644 (file)
@@ -474,9 +474,7 @@ static int lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effec
 static void lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude)
 {
        struct hid_device *hid = input_get_drvdata(dev);
-       struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
-       s32 *value = report->field[0]->value;
+       s32 *value;
        u32 expand_a, expand_b;
        struct lg4ff_device_entry *entry;
        struct lg_drv_data *drv_data;
index 8ba95c3af0567a75c18fbc43f09d87ee338ba56c..65ea23be9677b6dbd0b539ee2cc7ca1abd58958a 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/input/mt.h>
+#include <linux/jiffies.h>
 #include <linux/string.h>
 #include <linux/timer.h>
 
@@ -137,6 +138,9 @@ struct mt_device {
        bool serial_maybe;      /* need to check for serial protocol */
        bool curvalid;          /* is the current contact valid? */
        unsigned mt_flags;      /* flags to pass to input-mt */
+       __s32 dev_time;         /* the scan time provided by the device */
+       unsigned long jiffies;  /* the frame's jiffies */
+       int timestamp;          /* the timestamp to be sent */
 };
 
 static void mt_post_parse_default_settings(struct mt_device *td);
@@ -178,6 +182,12 @@ static void mt_post_parse(struct mt_device *td);
 #define MT_DEFAULT_MAXCONTACT  10
 #define MT_MAX_MAXCONTACT      250
 
+/*
+ * Resync device and local timestamps after that many microseconds without
+ * receiving data.
+ */
+#define MAX_TIMESTAMP_INTERVAL 1000000
+
 #define MT_USB_DEVICE(v, p)    HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH, v, p)
 #define MT_BT_DEVICE(v, p)     HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH, v, p)
 
@@ -584,6 +594,12 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                                cls->sn_pressure);
                        mt_store_field(usage, td, hi);
                        return 1;
+               case HID_DG_SCANTIME:
+                       hid_map_usage(hi, usage, bit, max,
+                               EV_MSC, MSC_TIMESTAMP);
+                       input_set_capability(hi->input, EV_MSC, MSC_TIMESTAMP);
+                       mt_store_field(usage, td, hi);
+                       return 1;
                case HID_DG_CONTACTCOUNT:
                        /* Ignore if indexes are out of bounds. */
                        if (field->index >= field->report->maxfield ||
@@ -719,6 +735,7 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
 static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
 {
        input_mt_sync_frame(input);
+       input_event(input, EV_MSC, MSC_TIMESTAMP, td->timestamp);
        input_sync(input);
        td->num_received = 0;
        if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags))
@@ -728,6 +745,28 @@ static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
        clear_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
 }
 
+static int mt_compute_timestamp(struct mt_device *td, struct hid_field *field,
+               __s32 value)
+{
+       long delta = value - td->dev_time;
+       unsigned long jdelta = jiffies_to_usecs(jiffies - td->jiffies);
+
+       td->jiffies = jiffies;
+       td->dev_time = value;
+
+       if (delta < 0)
+               delta += field->logical_maximum;
+
+       /* HID_DG_SCANTIME is expressed in 100us, we want it in us. */
+       delta *= 100;
+
+       if (jdelta > MAX_TIMESTAMP_INTERVAL)
+               /* No data received for a while, resync the timestamp. */
+               return 0;
+       else
+               return td->timestamp + delta;
+}
+
 static int mt_touch_event(struct hid_device *hid, struct hid_field *field,
                                struct hid_usage *usage, __s32 value)
 {
@@ -788,6 +827,9 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
                case HID_DG_HEIGHT:
                        td->curdata.h = value;
                        break;
+               case HID_DG_SCANTIME:
+                       td->timestamp = mt_compute_timestamp(td, field, value);
+                       break;
                case HID_DG_CONTACTCOUNT:
                        break;
                case HID_DG_TOUCH:
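
The hid-multitouch hunks above add MSC_TIMESTAMP reporting: the device's scan time (in 100 us units) is accumulated into a local timestamp, counter wrap-around is handled via the field's logical maximum, and the timestamp resyncs to zero after MAX_TIMESTAMP_INTERVAL microseconds without data. Below is a minimal userspace sketch of that arithmetic only; kernel state (struct mt_device, jiffies) is reduced to plain variables purely for illustration.

/* Illustrative userspace model of the wrap/resync arithmetic above. */
#include <stdio.h>

#define MAX_TIMESTAMP_INTERVAL 1000000	/* us without data before resync */

struct ts_state {
	int dev_time;	/* last scan time reported by the device */
	int timestamp;	/* accumulated timestamp, in us */
};

static int compute_timestamp(struct ts_state *s, int value, int logical_max,
			     long host_elapsed_us)
{
	long delta = value - s->dev_time;

	s->dev_time = value;

	if (delta < 0)				/* the device counter wrapped */
		delta += logical_max;

	delta *= 100;				/* scan time comes in 100us units */

	if (host_elapsed_us > MAX_TIMESTAMP_INTERVAL)
		return 0;			/* long gap: resync to zero */

	return s->timestamp + delta;
}

int main(void)
{
	struct ts_state s = { .dev_time = 0, .timestamp = 0 };

	s.timestamp = compute_timestamp(&s, 80, 65535, 8000);	/* 8 ms later */
	printf("%d\n", s.timestamp);				/* 8000 */
	s.timestamp = compute_timestamp(&s, 10, 65535, 7000);	/* counter wrapped */
	printf("%d\n", s.timestamp);
	return 0;
}
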
index ef241d66562e00950e8979ef4fd0633fd4efdca3..0f43c4292685fcd4bcde4eeeb6268d17a31fe89e 100644 (file)
@@ -368,6 +368,11 @@ static int rmi_check_sanity(struct hid_device *hdev, u8 *data, int size)
 static int rmi_raw_event(struct hid_device *hdev,
                struct hid_report *report, u8 *data, int size)
 {
+       struct rmi_data *hdata = hid_get_drvdata(hdev);
+
+       if (!(hdata->device_flags & RMI_DEVICE))
+               return 0;
+
        size = rmi_check_sanity(hdev, data, size);
        if (size < 2)
                return 0;
@@ -713,9 +718,11 @@ static void rmi_remove(struct hid_device *hdev)
 {
        struct rmi_data *hdata = hid_get_drvdata(hdev);
 
-       clear_bit(RMI_STARTED, &hdata->flags);
-       cancel_work_sync(&hdata->reset_work);
-       rmi_unregister_transport_device(&hdata->xport);
+       if (hdata->device_flags & RMI_DEVICE) {
+               clear_bit(RMI_STARTED, &hdata->flags);
+               cancel_work_sync(&hdata->reset_work);
+               rmi_unregister_transport_device(&hdata->xport);
+       }
 
        hid_hw_stop(hdev);
 }
index d03203a82e8f78962697791590ef45b35c003833..b9dc3ac4d4aad8301bf55c2ef5ba803b0b742261 100644 (file)
@@ -1439,10 +1439,16 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
                goto out;
        }
 
-       ret = hid_hw_output_report(hdev, buf, 1);
-       if (ret < 0) {
-               hid_info(hdev, "can't set operational mode: step 3, ignoring\n");
-               ret = 0;
+       /*
+        * But the USB interrupt would cause SHANWAN controllers to
+        * start rumbling non-stop.
+        */
+       if (strcmp(hdev->name, "SHANWAN PS3 GamePad")) {
+               ret = hid_hw_output_report(hdev, buf, 1);
+               if (ret < 0) {
+                       hid_info(hdev, "can't set operational mode: step 3, ignoring\n");
+                       ret = 0;
+               }
        }
 
 out:
index b83376077d72282ac3373fe700e8d273d303fc1f..bea8def64f437ed13ed5576a479634cee0a27ca4 100644 (file)
@@ -242,6 +242,8 @@ static const struct hid_device_id tm_devices[] = {
                .driver_data = (unsigned long)ff_rumble },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324),   /* Dual Trigger 3-in-1 (PS3 Mode) */
                .driver_data = (unsigned long)ff_rumble },
+       { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605),   /* NASCAR PRO FF2 Wheel */
+               .driver_data = (unsigned long)ff_joystick },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651),   /* FGT Rumble Force Wheel */
                .driver_data = (unsigned long)ff_rumble },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653),   /* RGT Force Feedback CLUTCH Raging Wheel */
index 9145c2129a967464e1e7ed8978dbe53c6db9e86e..e054ee43c1e275da0adff1d6ee5e8ec49bb1ecbc 100644 (file)
@@ -46,6 +46,7 @@
 
 /* quirks to control the device */
 #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV       BIT(0)
+#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET       BIT(1)
 
 /* flags */
 #define I2C_HID_STARTED                0
@@ -168,6 +169,8 @@ static const struct i2c_hid_quirks {
                I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
        { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
                I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+       { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
+               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
        { 0, 0 }
 };
 
@@ -252,7 +255,9 @@ static int __i2c_hid_command(struct i2c_client *client,
 
        ret = 0;
 
-       if (wait) {
+       if (wait && (ihid->quirks & I2C_HID_QUIRK_NO_IRQ_AFTER_RESET)) {
+               msleep(100);
+       } else if (wait) {
                i2c_hid_dbg(ihid, "%s: waiting...\n", __func__);
                if (!wait_event_timeout(ihid->wait,
                                !test_bit(I2C_HID_RESET_PENDING, &ihid->flags),
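
The i2c-hid hunks above add a quirk for devices that never raise an interrupt after a reset command, replacing the wait on the reset-pending flag with a fixed 100 ms sleep. A rough sketch of the flag-based quirk lookup such a table implies follows; the vendor/product IDs and helper names here are hypothetical, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define QUIRK_SET_PWR_WAKEUP_DEV  (1u << 0)
#define QUIRK_NO_IRQ_AFTER_RESET  (1u << 1)

struct quirk_entry {
	uint16_t vendor;
	uint16_t product;
	uint32_t quirks;
};

/* Hypothetical IDs; the real table keys on the HANTICK touchpad's IDs. */
static const struct quirk_entry quirk_table[] = {
	{ 0x1234, 0x5678, QUIRK_NO_IRQ_AFTER_RESET },
	{ 0, 0, 0 }				/* terminator */
};

static uint32_t lookup_quirks(uint16_t vendor, uint16_t product)
{
	const struct quirk_entry *q;

	for (q = quirk_table; q->vendor || q->product; q++)
		if (q->vendor == vendor && q->product == product)
			return q->quirks;
	return 0;
}

int main(void)
{
	uint32_t quirks = lookup_quirks(0x1234, 0x5678);

	if (quirks & QUIRK_NO_IRQ_AFTER_RESET)
		puts("reset: sleep ~100ms instead of waiting for the IRQ");
	return 0;
}
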
index 9f9fe0e58f5b678270a41ad845d48e0d8d1bc28f..640dfb937c6910f9ff9091a5a455b18c640f9edd 100644 (file)
@@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid)
        unsigned int rsize = 0;
        char *rdesc;
        int ret, n;
+       int num_descriptors;
+       size_t offset = offsetof(struct hid_descriptor, desc);
 
        quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
                        le16_to_cpu(dev->descriptor.idProduct));
@@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid)
                return -ENODEV;
        }
 
+       if (hdesc->bLength < sizeof(struct hid_descriptor)) {
+               dbg_hid("hid descriptor is too short\n");
+               return -EINVAL;
+       }
+
        hid->version = le16_to_cpu(hdesc->bcdHID);
        hid->country = hdesc->bCountryCode;
 
-       for (n = 0; n < hdesc->bNumDescriptors; n++)
+       num_descriptors = min_t(int, hdesc->bNumDescriptors,
+              (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
+
+       for (n = 0; n < num_descriptors; n++)
                if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
                        rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
 
index f489a5cfcb48cf5216a532091482ac669786e61d..331f7f34ec14e68c85f3cd4a0b051b4e4ffdb365 100644 (file)
@@ -170,6 +170,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK, HID_QUIRK_MULTI_INPUT },
 
        { 0, 0 }
 };
index 8a03654048bf611a008e61b4101f35262fb9a5d2..feb62fd4dfc3ecff1ba43ee46fc7f70ec9b3c6d2 100644 (file)
                                 ((f)->physical == HID_DG_PEN) || \
                                 ((f)->application == HID_DG_PEN) || \
                                 ((f)->application == HID_DG_DIGITIZER) || \
+                                ((f)->application == WACOM_HID_WD_PEN) || \
                                 ((f)->application == WACOM_HID_WD_DIGITIZER) || \
                                 ((f)->application == WACOM_HID_G9_PEN) || \
                                 ((f)->application == WACOM_HID_G11_PEN))
index 9c0dbb8191ad4da9e1c5111da2ab88790ae30966..e1be61095532f03dda79effe36fbde2147a31d66 100644 (file)
@@ -630,7 +630,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
                         sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE,
                         GFP_KERNEL);
        if (rc)
-               goto out_mbox_free;
+               return -ENOMEM;
 
        INIT_WORK(&ctx->workq, xgene_hwmon_evt_work);
 
@@ -646,7 +646,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
                if (IS_ERR(ctx->mbox_chan)) {
                        dev_err(&pdev->dev,
                                "SLIMpro mailbox channel request failed\n");
-                       return -ENODEV;
+                       rc = -ENODEV;
+                       goto out_mbox_free;
                }
        } else {
                struct acpi_pcct_hw_reduced *cppc_ss;
@@ -654,7 +655,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
                if (device_property_read_u32(&pdev->dev, "pcc-channel",
                                             &ctx->mbox_idx)) {
                        dev_err(&pdev->dev, "no pcc-channel property\n");
-                       return -ENODEV;
+                       rc = -ENODEV;
+                       goto out_mbox_free;
                }
 
                cl->rx_callback = xgene_hwmon_pcc_rx_cb;
@@ -662,7 +664,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
                if (IS_ERR(ctx->mbox_chan)) {
                        dev_err(&pdev->dev,
                                "PPC channel request failed\n");
-                       return -ENODEV;
+                       rc = -ENODEV;
+                       goto out_mbox_free;
                }
 
                /*
@@ -675,13 +678,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
                if (!cppc_ss) {
                        dev_err(&pdev->dev, "PPC subspace not found\n");
                        rc = -ENODEV;
-                       goto out_mbox_free;
+                       goto out;
                }
 
                if (!ctx->mbox_chan->mbox->txdone_irq) {
                        dev_err(&pdev->dev, "PCC IRQ not supported\n");
                        rc = -ENODEV;
-                       goto out_mbox_free;
+                       goto out;
                }
 
                /*
@@ -696,14 +699,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
                } else {
                        dev_err(&pdev->dev, "Failed to get PCC comm region\n");
                        rc = -ENODEV;
-                       goto out_mbox_free;
+                       goto out;
                }
 
                if (!ctx->pcc_comm_addr) {
                        dev_err(&pdev->dev,
                                "Failed to ioremap PCC comm region\n");
                        rc = -ENOMEM;
-                       goto out_mbox_free;
+                       goto out;
                }
 
                /*
index c06dce2c1da745fab9727715f5ffaa01a40d4d04..45a3f3ca29b383ce7edfbb34e6d3fd506c5173b6 100644 (file)
@@ -131,6 +131,7 @@ config I2C_I801
            Gemini Lake (SOC)
            Cannon Lake-H (PCH)
            Cannon Lake-LP (PCH)
+           Cedar Fork (PCH)
 
          This driver can also be built as a module.  If so, the module
          will be called i2c-i801.
index e114e4e00d2972e2bb55765bfbf7c431d38ccb44..9e12a53ef7b8cf2cdccf9de473af8e2cec9c5f36 100644 (file)
@@ -68,6 +68,7 @@
  * Gemini Lake (SOC)           0x31d4  32      hard    yes     yes     yes
  * Cannon Lake-H (PCH)         0xa323  32      hard    yes     yes     yes
  * Cannon Lake-LP (PCH)                0x9da3  32      hard    yes     yes     yes
+ * Cedar Fork (PCH)            0x18df  32      hard    yes     yes     yes
  *
  * Features supported by this driver:
  * Software PEC                                no
 
 /* Older devices have their ID defined in <linux/pci_ids.h> */
 #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS             0x0f12
+#define PCI_DEVICE_ID_INTEL_CDF_SMBUS                  0x18df
 #define PCI_DEVICE_ID_INTEL_DNV_SMBUS                  0x19df
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS          0x1c22
 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS             0x1d22
@@ -1025,6 +1027,7 @@ static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
@@ -1513,6 +1516,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS:
        case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
        case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
+       case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
        case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
        case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
                priv->features |= FEATURE_I2C_BLOCK_READ;
index 22e08ae1704f4a6730928ee1e1392f9e08a08778..25fcc3c1e32bf3d9a41fa345982039fb234dbcbd 100644 (file)
@@ -627,6 +627,7 @@ static const struct dev_pm_ops sprd_i2c_pm_ops = {
 
 static const struct of_device_id sprd_i2c_of_match[] = {
        { .compatible = "sprd,sc9860-i2c", },
+       {},
 };
 
 static struct platform_driver sprd_i2c_driver = {
index 47c67b0ca8960a2436ef899f9e1cecd5d35ab19c..d4a6e9c2e9aaeaa679bb159ade420bcdb37e6988 100644 (file)
@@ -215,7 +215,7 @@ struct stm32f7_i2c_dev {
        unsigned int msg_num;
        unsigned int msg_id;
        struct stm32f7_i2c_msg f7_msg;
-       struct stm32f7_i2c_setup *setup;
+       struct stm32f7_i2c_setup setup;
        struct stm32f7_i2c_timings timing;
 };
 
@@ -265,7 +265,7 @@ static struct stm32f7_i2c_spec i2c_specs[] = {
        },
 };
 
-struct stm32f7_i2c_setup stm32f7_setup = {
+static const struct stm32f7_i2c_setup stm32f7_setup = {
        .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
        .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
        .dnf = STM32F7_I2C_DNF_DEFAULT,
@@ -537,7 +537,7 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
        writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR);
 
        /* Enable I2C */
-       if (i2c_dev->setup->analog_filter)
+       if (i2c_dev->setup.analog_filter)
                stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
                                     STM32F7_I2C_CR1_ANFOFF);
        else
@@ -887,22 +887,19 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
        }
 
        setup = of_device_get_match_data(&pdev->dev);
-       i2c_dev->setup->rise_time = setup->rise_time;
-       i2c_dev->setup->fall_time = setup->fall_time;
-       i2c_dev->setup->dnf = setup->dnf;
-       i2c_dev->setup->analog_filter = setup->analog_filter;
+       i2c_dev->setup = *setup;
 
        ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns",
                                       &rise_time);
        if (!ret)
-               i2c_dev->setup->rise_time = rise_time;
+               i2c_dev->setup.rise_time = rise_time;
 
        ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns",
                                       &fall_time);
        if (!ret)
-               i2c_dev->setup->fall_time = fall_time;
+               i2c_dev->setup.fall_time = fall_time;
 
-       ret = stm32f7_i2c_setup_timing(i2c_dev, i2c_dev->setup);
+       ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
        if (ret)
                goto clk_free;
 
index 01b2adfd8226bf1bcd048100f4e4bcb0f6afb180..eaf39e5db08ba98ad2bfcc32e9892e2f2c8be972 100644 (file)
@@ -1451,6 +1451,7 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
                if (hwif_init(hwif) == 0) {
                        printk(KERN_INFO "%s: failed to initialize IDE "
                                         "interface\n", hwif->name);
+                       device_unregister(hwif->portdev);
                        device_unregister(&hwif->gendev);
                        ide_disable_port(hwif);
                        continue;
index 86aa88aeb3a6b529315277f636fc07b196030d0a..acf874800ca4086803d6740d3df32ca38b732017 100644 (file)
@@ -56,6 +56,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
 {
        struct list_head *l;
        struct pci_driver *d;
+       int ret;
 
        list_for_each(l, &ide_pci_drivers) {
                d = list_entry(l, struct pci_driver, node);
@@ -63,10 +64,14 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
                        const struct pci_device_id *id =
                                pci_match_id(d->id_table, dev);
 
-                       if (id != NULL && d->probe(dev, id) >= 0) {
-                               dev->driver = d;
-                               pci_dev_get(dev);
-                               return 1;
+                       if (id != NULL) {
+                               pci_assign_irq(dev);
+                               ret = d->probe(dev, id);
+                               if (ret >= 0) {
+                                       dev->driver = d;
+                                       pci_dev_get(dev);
+                                       return 1;
+                               }
                        }
                }
        }
index 112d2fe1bcdbc8b3acb45d613901afa06798d556..fdc8e813170c32b4df99417c37d4940e0abbbc78 100644 (file)
@@ -179,6 +179,7 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
 /**
  *     ide_pci_enable  -       do PCI enables
  *     @dev: PCI device
+ *     @bars: PCI BARs mask
  *     @d: IDE port info
  *
  *     Enable the IDE PCI device. We attempt to enable the device in full
@@ -189,9 +190,10 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
  *     Returns zero on success or an error code
  */
 
-static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
+static int ide_pci_enable(struct pci_dev *dev, int bars,
+                         const struct ide_port_info *d)
 {
-       int ret, bars;
+       int ret;
 
        if (pci_enable_device(dev)) {
                ret = pci_enable_device_io(dev);
@@ -216,18 +218,6 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
                goto out;
        }
 
-       if (d->host_flags & IDE_HFLAG_SINGLE)
-               bars = (1 << 2) - 1;
-       else
-               bars = (1 << 4) - 1;
-
-       if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
-               if (d->host_flags & IDE_HFLAG_CS5520)
-                       bars |= (1 << 2);
-               else
-                       bars |= (1 << 4);
-       }
-
        ret = pci_request_selected_regions(dev, bars, d->name);
        if (ret < 0)
                printk(KERN_ERR "%s %s: can't reserve resources\n",
@@ -403,6 +393,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
 /**
  *     ide_setup_pci_controller        -       set up IDE PCI
  *     @dev: PCI device
+ *     @bars: PCI BARs mask
  *     @d: IDE port info
  *     @noisy: verbose flag
  *
@@ -411,7 +402,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
  *     and enables it if need be
  */
 
-static int ide_setup_pci_controller(struct pci_dev *dev,
+static int ide_setup_pci_controller(struct pci_dev *dev, int bars,
                                    const struct ide_port_info *d, int noisy)
 {
        int ret;
@@ -420,7 +411,7 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
        if (noisy)
                ide_setup_pci_noise(dev, d);
 
-       ret = ide_pci_enable(dev, d);
+       ret = ide_pci_enable(dev, bars, d);
        if (ret < 0)
                goto out;
 
@@ -428,16 +419,20 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
        if (ret < 0) {
                printk(KERN_ERR "%s %s: error accessing PCI regs\n",
                        d->name, pci_name(dev));
-               goto out;
+               goto out_free_bars;
        }
        if (!(pcicmd & PCI_COMMAND_IO)) {       /* is device disabled? */
                ret = ide_pci_configure(dev, d);
                if (ret < 0)
-                       goto out;
+                       goto out_free_bars;
                printk(KERN_INFO "%s %s: device enabled (Linux)\n",
                        d->name, pci_name(dev));
        }
 
+       goto out;
+
+out_free_bars:
+       pci_release_selected_regions(dev, bars);
 out:
        return ret;
 }
@@ -540,13 +535,28 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
 {
        struct pci_dev *pdev[] = { dev1, dev2 };
        struct ide_host *host;
-       int ret, i, n_ports = dev2 ? 4 : 2;
+       int ret, i, n_ports = dev2 ? 4 : 2, bars;
        struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
 
+       if (d->host_flags & IDE_HFLAG_SINGLE)
+               bars = (1 << 2) - 1;
+       else
+               bars = (1 << 4) - 1;
+
+       if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+               if (d->host_flags & IDE_HFLAG_CS5520)
+                       bars |= (1 << 2);
+               else
+                       bars |= (1 << 4);
+       }
+
        for (i = 0; i < n_ports / 2; i++) {
-               ret = ide_setup_pci_controller(pdev[i], d, !i);
-               if (ret < 0)
+               ret = ide_setup_pci_controller(pdev[i], bars, d, !i);
+               if (ret < 0) {
+                       if (i == 1)
+                               pci_release_selected_regions(pdev[0], bars);
                        goto out;
+               }
 
                ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
        }
@@ -554,7 +564,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
        host = ide_host_alloc(d, hws, n_ports);
        if (host == NULL) {
                ret = -ENOMEM;
-               goto out;
+               goto out_free_bars;
        }
 
        host->dev[0] = &dev1->dev;
@@ -576,7 +586,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
                 * do_ide_setup_pci_device() on the first device!
                 */
                if (ret < 0)
-                       goto out;
+                       goto out_free_bars;
 
                /* fixup IRQ */
                if (ide_pci_is_in_compatibility_mode(pdev[i])) {
@@ -589,6 +599,13 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
        ret = ide_host_register(host, d, hws);
        if (ret)
                ide_host_free(host);
+       else
+               goto out;
+
+out_free_bars:
+       i = n_ports / 2;
+       while (i--)
+               pci_release_selected_regions(pdev[i], bars);
 out:
        return ret;
 }
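
The IDE changes above compute the BAR bitmask once in ide_pci_init_two() and pass it down, so the same mask can be handed to pci_release_selected_regions() on every error path. A standalone sketch of how such a mask is assembled from host flags; the flag values here are illustrative, not the kernel's:

#include <stdio.h>

#define HFLAG_SINGLE  (1u << 0)	/* one port: only BARs 0-1 used */
#define HFLAG_NO_DMA  (1u << 1)	/* no bus-master DMA BAR needed */
#define HFLAG_CS5520  (1u << 2)	/* DMA BAR sits at index 2 */

static int compute_bars(unsigned host_flags)
{
	int bars;

	if (host_flags & HFLAG_SINGLE)
		bars = (1 << 2) - 1;		/* BARs 0,1 */
	else
		bars = (1 << 4) - 1;		/* BARs 0..3 */

	if (!(host_flags & HFLAG_NO_DMA)) {
		if (host_flags & HFLAG_CS5520)
			bars |= (1 << 2);	/* BAR 2 for DMA */
		else
			bars |= (1 << 4);	/* BAR 4 for DMA */
	}
	return bars;
}

int main(void)
{
	printf("dual-port with DMA: 0x%x\n", compute_bars(0));
	printf("single-port, CS5520: 0x%x\n",
	       compute_bars(HFLAG_SINGLE | HFLAG_CS5520));
	return 0;
}
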
index 30825bb9b8e9247800b3aaa6084c603383d45bbf..8861c052155ab72e0062be5e1c03d1723705f0c2 100644 (file)
@@ -100,6 +100,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
        if (ret)
                goto pid_query_error;
 
+       nlmsg_end(skb, nlh);
+
        pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
                __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
 
@@ -170,6 +172,8 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
                                &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR);
        if (ret)
                goto add_mapping_error;
+
+       nlmsg_end(skb, nlh);
        nlmsg_request->req_buffer = pm_msg;
 
        ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -246,6 +250,8 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
                                &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR);
        if (ret)
                goto query_mapping_error;
+
+       nlmsg_end(skb, nlh);
        nlmsg_request->req_buffer = pm_msg;
 
        ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -308,6 +314,8 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
        if (ret)
                goto remove_mapping_error;
 
+       nlmsg_end(skb, nlh);
+
        ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
        if (ret) {
                skb = NULL; /* skb is freed in the netlink send-op handling */
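
Each iwpm hunk above adds an nlmsg_end() call once the attributes have been put, finalizing the length recorded in the netlink header before the skb is unicast or multicast. A generic build-then-finalize sketch, using a toy message structure rather than the netlink API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg_hdr {
	uint32_t len;	/* total message length, only valid once finalized */
	uint16_t type;
};

struct msg {
	struct msg_hdr hdr;
	char payload[56];
};

int main(void)
{
	struct msg m;
	const char attr[] = "attribute payload";
	size_t off = 0;

	m.hdr.type = 1;
	m.hdr.len = 0;			/* not yet valid */

	memcpy(m.payload + off, attr, sizeof(attr));
	off += sizeof(attr);

	/* The "nlmsg_end" step: record the final size before sending. */
	m.hdr.len = (uint32_t)(sizeof(m.hdr) + off);

	printf("sending %u bytes\n", m.hdr.len);
	return 0;
}
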
index c81c5594262621770b7f55f433cdd0340f153396..3c4faadb8cddd7fdf6611df3674811664b1eb890 100644 (file)
@@ -597,6 +597,9 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
                                &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
        if (ret)
                goto mapinfo_num_error;
+
+       nlmsg_end(skb, nlh);
+
        ret = rdma_nl_unicast(skb, iwpm_pid);
        if (ret) {
                skb = NULL;
@@ -678,6 +681,8 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
                        if (ret)
                                goto send_mapping_info_unlock;
 
+                       nlmsg_end(skb, nlh);
+
                        iwpm_print_sockaddr(&map_info->local_sockaddr,
                                "send_mapping_info: Local sockaddr:");
                        iwpm_print_sockaddr(&map_info->mapped_sockaddr,
index d1f5345f04f08eb916539162b024f7b59e9b9a94..42ca5346777ddfc4a5d433daf61146ed428fc424 100644 (file)
@@ -48,7 +48,7 @@
  * @wqe: cqp wqe for header
  * @header: header for the cqp wqe
  */
-static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
+void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
 {
        wmb();            /* make sure WQE is populated before polarity is set */
        set_64bit_val(wqe, 24, header);
index e217a1259f5703ac1ef1ac4d3ed70acd986e2336..5498ad01c280fde7f476778afba3669c72fcff8c 100644 (file)
@@ -59,6 +59,8 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
                                                 struct i40iw_fast_reg_stag_info *info,
                                                 bool post_sq);
 
+void i40iw_insert_wqe_hdr(u64 *wqe, u64 header);
+
 /* HMC/FPM functions */
 enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
                                            u8 hmc_fn_id);
index c2cab20c4bc5d24e258a95bcf7d4f94f8985ced3..59f70676f0e0305ad03567192a340ecb0ef0c7a5 100644 (file)
@@ -123,12 +123,11 @@ static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
                get_64bit_val(wqe, 24, &offset24);
 
        offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
-       set_64bit_val(wqe, 24, offset24);
 
        set_64bit_val(wqe, 0, buf->mem.pa);
        set_64bit_val(wqe, 8,
                      LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
-       set_64bit_val(wqe, 24, offset24);
+       i40iw_insert_wqe_hdr(wqe, offset24);
 }
 
 /**
@@ -409,9 +408,7 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
        set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
        set_64bit_val(wqe, 16, header[0]);
 
-       /* Ensure all data is written before writing valid bit */
-       wmb();
-       set_64bit_val(wqe, 24, header[1]);
+       i40iw_insert_wqe_hdr(wqe, header[1]);
 
        i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
        i40iw_qp_post_wr(&qp->qp_uk);
@@ -539,7 +536,7 @@ static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct
                 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
 
-       set_64bit_val(wqe, 24, header);
+       i40iw_insert_wqe_hdr(wqe, header);
 
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
        i40iw_sc_cqp_post_sq(cqp);
@@ -655,7 +652,7 @@ static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct
            LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
            LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
            LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-       set_64bit_val(wqe, 24, header);
+       i40iw_insert_wqe_hdr(wqe, header);
 
        i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);
index 28b3d02d511b35f60c44437c47ed88bb4a04d424..62be0a41ad0b2ffeccb0b6e73d9a29621e4944f1 100644 (file)
@@ -826,12 +826,14 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
        attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
        attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
        attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+       attr->port_num = 1;
        init_attr->event_handler = iwqp->ibqp.event_handler;
        init_attr->qp_context = iwqp->ibqp.qp_context;
        init_attr->send_cq = iwqp->ibqp.send_cq;
        init_attr->recv_cq = iwqp->ibqp.recv_cq;
        init_attr->srq = iwqp->ibqp.srq;
        init_attr->cap = attr->cap;
+       init_attr->port_num = 1;
        return 0;
 }
 
index d6fbad8f34aa71848c7291122ad847b6944a27a6..552f7bd4ecc38bf17aff093fa037b423c4d0026b 100644 (file)
@@ -4174,9 +4174,9 @@ err_bfreg:
 err_uar_page:
        mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
 
-err_cnt:
-       mlx5_ib_cleanup_cong_debugfs(dev);
 err_cong:
+       mlx5_ib_cleanup_cong_debugfs(dev);
+err_cnt:
        if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
                mlx5_ib_dealloc_counters(dev);
 
index b2bb42e2805ddfb220b7033620d988e0877f3d6a..254083b524bd90e1557f69b91949b9dca6806223 100644 (file)
@@ -387,7 +387,7 @@ struct qedr_qp {
                u8 wqe_size;
 
                u8 smac[ETH_ALEN];
-               u16 vlan_id;
+               u16 vlan;
                int rc;
        } *rqe_wr_id;
 
index 4689e802b33264bb8b66bacf89cd77ac294d4372..ad8965397cf76a3ae8a1e5d1d36df2f2a33a6676 100644 (file)
@@ -105,7 +105,7 @@ void qedr_ll2_complete_rx_packet(void *cxt,
 
        qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
                -EINVAL : 0;
-       qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
+       qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
        /* note: length stands for data length i.e. GRH is excluded */
        qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
                data->length.data_length;
@@ -694,6 +694,7 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        struct qedr_cq *cq = get_qedr_cq(ibcq);
        struct qedr_qp *qp = dev->gsi_qp;
        unsigned long flags;
+       u16 vlan_id;
        int i = 0;
 
        spin_lock_irqsave(&cq->cq_lock, flags);
@@ -712,9 +713,14 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
                wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
                ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
                wc[i].wc_flags |= IB_WC_WITH_SMAC;
-               if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
+
+               vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
+                         VLAN_VID_MASK;
+               if (vlan_id) {
                        wc[i].wc_flags |= IB_WC_WITH_VLAN;
-                       wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
+                       wc[i].vlan_id = vlan_id;
+                       wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
+                                   VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                }
 
                qedr_inc_sw_cons(&qp->rq);
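
The qedr completion path above now stores the whole 16-bit VLAN tag and splits it while polling the CQ: the low 12 bits become wc->vlan_id and the PCP bits become wc->sl. A standalone sketch of that decode; the mask and shift values follow 802.1Q, and the sample tag value is made up:

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK   0x0fff	/* VLAN identifier, bits 0-11 */
#define VLAN_PRIO_MASK  0xe000	/* priority code point, bits 13-15 */
#define VLAN_PRIO_SHIFT 13

int main(void)
{
	uint16_t tag  = 0xa064;			/* example TCI from a CQE */
	uint16_t vid  = tag & VLAN_VID_MASK;
	uint16_t prio = (tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

	if (vid)
		printf("vlan %d, priority %d\n", vid, prio);
	else
		printf("untagged (priority-only tag or none)\n");
	return 0;
}
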
index 7d5286b05036976059c3af6c7277a987b457c59d..1841d0359bace765cc52acd6340bb96efb5aa2ee 100644 (file)
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(closure_put);
 void __closure_wake_up(struct closure_waitlist *wait_list)
 {
        struct llist_node *list;
-       struct closure *cl;
+       struct closure *cl, *t;
        struct llist_node *reverse = NULL;
 
        list = llist_del_all(&wait_list->list);
@@ -73,7 +73,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
        reverse = llist_reverse_order(list);
 
        /* Then do the wakeups */
-       llist_for_each_entry(cl, reverse, list) {
+       llist_for_each_entry_safe(cl, t, reverse, list) {
                closure_set_waiting(cl, 0);
                closure_sub(cl, CLOSURE_WAITING + 1);
        }
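
The bcache wake-up loop above switches to the _safe list iterator because closure_sub() can drop the last reference and free the closure mid-walk, so the next pointer must be fetched before the current entry is touched. The same hazard and fix, shown on an ordinary singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

int main(void)
{
	/* Build a three-element list. */
	struct node *head = NULL;
	for (int i = 3; i >= 1; i--) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->id = i;
		n->next = head;
		head = n;
	}

	/*
	 * "Safe" traversal: grab ->next before the callback, because the
	 * callback (like closure_sub() dropping the last ref) frees the node.
	 */
	struct node *n = head, *t;
	while (n) {
		t = n->next;		/* saved before n may disappear */
		printf("waking %d\n", n->id);
		free(n);		/* node is gone after this point */
		n = t;
	}
	return 0;
}
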
index 24eddbdf2ab4cb9437674a3783ffbbfaca8a21fd..203144762f3688ab118ccd2e865df3c427b4dca5 100644 (file)
@@ -149,5 +149,6 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
 
 extern atomic_t dm_global_event_nr;
 extern wait_queue_head_t dm_global_eventq;
+void dm_issue_global_event(void);
 
 #endif
index a55ffd4f5933fc1247b6729dcc0344964c9f01b8..96ab46512e1fdcf27e4669f22d97b553b491c196 100644 (file)
@@ -2466,6 +2466,7 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
                kfree(cipher_api);
                return ret;
        }
+       kfree(cipher_api);
 
        return 0;
 bad_mem:
@@ -2584,6 +2585,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
                                ti->error = "Invalid feature value for sector_size";
                                return -EINVAL;
                        }
+                       if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
+                               ti->error = "Device size is not multiple of sector_size feature";
+                               return -EINVAL;
+                       }
                        cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
                } else if (!strcasecmp(opt_string, "iv_large_sectors"))
                        set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
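
The new dm-crypt constructor check above requires the target length to be a whole number of crypto sectors: with sector_size a power of two, len & ((sector_size >> SECTOR_SHIFT) - 1) is non-zero exactly when the length in 512-byte units is not a multiple of the larger sector. A quick arithmetic sketch:

#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte units, as in the kernel */

static int misaligned(unsigned long long len_512, unsigned sector_size)
{
	/* sector_size is a power of two >= 512, e.g. 4096 */
	return (len_512 & ((sector_size >> SECTOR_SHIFT) - 1)) != 0;
}

int main(void)
{
	/* 4096-byte crypto sectors = 8 x 512-byte units */
	printf("%d\n", misaligned(1024, 4096));	/* 0: multiple of 8 */
	printf("%d\n", misaligned(1027, 4096));	/* 1: trailing partial sector */
	return 0;
}
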
index 8756a6850431d007f756079a595f5ae30d51a8df..e52676fa9832c53dcd57dc37eb654e3e7eee0d88 100644 (file)
@@ -477,9 +477,13 @@ static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_si
  * Round up the ptr to an 8-byte boundary.
  */
 #define ALIGN_MASK 7
+static inline size_t align_val(size_t val)
+{
+       return (val + ALIGN_MASK) & ~ALIGN_MASK;
+}
 static inline void *align_ptr(void *ptr)
 {
-       return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
+       return (void *)align_val((size_t)ptr);
 }
 
 /*
@@ -505,7 +509,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
        struct hash_cell *hc;
        size_t len, needed = 0;
        struct gendisk *disk;
-       struct dm_name_list *nl, *old_nl = NULL;
+       struct dm_name_list *orig_nl, *nl, *old_nl = NULL;
        uint32_t *event_nr;
 
        down_write(&_hash_lock);
@@ -516,17 +520,15 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
         */
        for (i = 0; i < NUM_BUCKETS; i++) {
                list_for_each_entry (hc, _name_buckets + i, name_list) {
-                       needed += sizeof(struct dm_name_list);
-                       needed += strlen(hc->name) + 1;
-                       needed += ALIGN_MASK;
-                       needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK;
+                       needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
+                       needed += align_val(sizeof(uint32_t));
                }
        }
 
        /*
         * Grab our output buffer.
         */
-       nl = get_result_buffer(param, param_size, &len);
+       nl = orig_nl = get_result_buffer(param, param_size, &len);
        if (len < needed) {
                param->flags |= DM_BUFFER_FULL_FLAG;
                goto out;
@@ -549,11 +551,16 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
                        strcpy(nl->name, hc->name);
 
                        old_nl = nl;
-                       event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1);
+                       event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
                        *event_nr = dm_get_event_nr(hc->md);
                        nl = align_ptr(event_nr + 1);
                }
        }
+       /*
+        * If a mismatch happens, security may be compromised due to a buffer
+        * overflow, so it's better to crash.
+        */
+       BUG_ON((char *)nl - (char *)orig_nl != needed);
 
  out:
        up_write(&_hash_lock);
@@ -1621,7 +1628,8 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
  * which has a variable size, is not used by the function processing
  * the ioctl.
  */
-#define IOCTL_FLAGS_NO_PARAMS  1
+#define IOCTL_FLAGS_NO_PARAMS          1
+#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2
 
 /*-----------------------------------------------------------------
  * Implementation of open/close/ioctl on the special char
@@ -1635,12 +1643,12 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
                ioctl_fn fn;
        } _ioctls[] = {
                {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
-               {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
+               {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all},
                {DM_LIST_DEVICES_CMD, 0, list_devices},
 
-               {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
-               {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
-               {DM_DEV_RENAME_CMD, 0, dev_rename},
+               {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create},
+               {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove},
+               {DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename},
                {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
                {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
                {DM_DEV_WAIT_CMD, 0, dev_wait},
@@ -1869,6 +1877,9 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
            unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
                DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
 
+       if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT)
+               dm_issue_global_event();
+
        /*
         * Copy the results back to userland.
         */
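
list_devices() above now sizes its output with the same align_val() rounding it later applies while writing entries, and the final BUG_ON asserts that the two walks agree. The helper rounds any size up to the next 8-byte boundary; a short sketch, using a made-up header size in place of offsetof(struct dm_name_list, name):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define ALIGN_MASK 7

/* Round up to the next 8-byte boundary. */
static size_t align_val(size_t val)
{
	return (val + ALIGN_MASK) & ~(size_t)ALIGN_MASK;
}

int main(void)
{
	const char *name = "vg0-root";
	size_t header = 16;	/* stand-in for the real per-entry header size */

	/* per-entry budget: header + name + NUL rounded, plus an aligned u32 */
	size_t needed = align_val(header + strlen(name) + 1)
		      + align_val(sizeof(unsigned int));

	printf("align_val(13) = %zu\n", align_val(13));	/* 16 */
	printf("entry needs %zu bytes\n", needed);	/* 40 */
	return 0;
}
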
index 1ac58c5651b7f8086ccba297547ff13823804706..2245d06d204597b537c22caf48c4f88626051f57 100644 (file)
@@ -3297,11 +3297,10 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev,
 static sector_t rs_get_progress(struct raid_set *rs,
                                sector_t resync_max_sectors, bool *array_in_sync)
 {
-       sector_t r, recovery_cp, curr_resync_completed;
+       sector_t r, curr_resync_completed;
        struct mddev *mddev = &rs->md;
 
        curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
-       recovery_cp = mddev->recovery_cp;
        *array_in_sync = false;
 
        if (rs_is_raid0(rs)) {
@@ -3330,9 +3329,11 @@ static sector_t rs_get_progress(struct raid_set *rs,
                } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                        r = curr_resync_completed;
                else
-                       r = recovery_cp;
+                       r = mddev->recovery_cp;
 
-               if (r == MaxSector) {
+               if ((r == MaxSector) ||
+                   (test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
+                    (mddev->curr_resync_completed == resync_max_sectors))) {
                        /*
                         * Sync complete.
                         */
@@ -3892,7 +3893,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
        .name = "raid",
-       .version = {1, 12, 1},
+       .version = {1, 13, 0},
        .module = THIS_MODULE,
        .ctr = raid_ctr,
        .dtr = raid_dtr,
index 6e54145969c5ce30184cf162283db0f01796f1ca..4be85324f44dc26177c17bf9ab6acf451dca3fcf 100644 (file)
@@ -52,6 +52,12 @@ static struct workqueue_struct *deferred_remove_workqueue;
 atomic_t dm_global_event_nr = ATOMIC_INIT(0);
 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
 
+void dm_issue_global_event(void)
+{
+       atomic_inc(&dm_global_event_nr);
+       wake_up(&dm_global_eventq);
+}
+
 /*
  * One of these is allocated per bio.
  */
@@ -1865,9 +1871,8 @@ static void event_callback(void *context)
        dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 
        atomic_inc(&md->event_nr);
-       atomic_inc(&dm_global_event_nr);
        wake_up(&md->eventq);
-       wake_up(&dm_global_eventq);
+       dm_issue_global_event();
 }
 
 /*
@@ -2283,6 +2288,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
        }
 
        map = __bind(md, table, &limits);
+       dm_issue_global_event();
 
 out:
        mutex_unlock(&md->suspend_lock);
index 5dba23ca2e5fe955cd8101909b2b2e20bbdf2e68..dc9bc1807fdfa52aede0748308d16f5c36c6c95f 100644 (file)
@@ -219,8 +219,17 @@ int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
 
        down_read(&mm->mmap_sem);
 
-       for (dar = addr; dar < addr + size; dar += page_size) {
-               if (!vma || dar < vma->vm_start || dar > vma->vm_end) {
+       vma = find_vma(mm, addr);
+       if (!vma) {
+               pr_err("Can't find vma for addr %016llx\n", addr);
+               rc = -EFAULT;
+               goto out;
+       }
+       /* get the size of the pages allocated */
+       page_size = vma_kernel_pagesize(vma);
+
+       for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) {
+               if (dar < vma->vm_start || dar >= vma->vm_end) {
                        vma = find_vma(mm, addr);
                        if (!vma) {
                                pr_err("Can't find vma for addr %016llx\n", addr);
index 29fc1e662891d7bc2157ff55740bd2f110f4e14d..2ad7b5c691569e37cd366425c62c6ae0514f3857 100644 (file)
@@ -1634,8 +1634,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
        }
 
        mqrq->areq.mrq = &brq->mrq;
-
-       mmc_queue_bounce_pre(mqrq);
 }
 
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1829,7 +1827,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
                brq = &mq_rq->brq;
                old_req = mmc_queue_req_to_req(mq_rq);
                type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
-               mmc_queue_bounce_post(mq_rq);
 
                switch (status) {
                case MMC_BLK_SUCCESS:
index a7eb623f8daa3e610891ffb30157dd5e4dd4e103..36217ad5e9b1fbddb4c5dedf6dd711374fc27497 100644 (file)
@@ -1286,6 +1286,23 @@ out_err:
        return err;
 }
 
+static void mmc_select_driver_type(struct mmc_card *card)
+{
+       int card_drv_type, drive_strength, drv_type;
+
+       card_drv_type = card->ext_csd.raw_driver_strength |
+                       mmc_driver_type_mask(0);
+
+       drive_strength = mmc_select_drive_strength(card,
+                                                  card->ext_csd.hs200_max_dtr,
+                                                  card_drv_type, &drv_type);
+
+       card->drive_strength = drive_strength;
+
+       if (drv_type)
+               mmc_set_driver_type(card->host, drv_type);
+}
+
 static int mmc_select_hs400es(struct mmc_card *card)
 {
        struct mmc_host *host = card->host;
@@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
                goto out_err;
        }
 
+       mmc_select_driver_type(card);
+
        /* Switch card to HS400 */
        val = EXT_CSD_TIMING_HS400 |
              card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
@@ -1374,23 +1393,6 @@ out_err:
        return err;
 }
 
-static void mmc_select_driver_type(struct mmc_card *card)
-{
-       int card_drv_type, drive_strength, drv_type;
-
-       card_drv_type = card->ext_csd.raw_driver_strength |
-                       mmc_driver_type_mask(0);
-
-       drive_strength = mmc_select_drive_strength(card,
-                                                  card->ext_csd.hs200_max_dtr,
-                                                  card_drv_type, &drv_type);
-
-       card->drive_strength = drive_strength;
-
-       if (drv_type)
-               mmc_set_driver_type(card->host, drv_type);
-}
-
 /*
  * For device supporting HS200 mode, the following sequence
  * should be done before executing the tuning process.
index 74c663b1c0a7418c53ef0d7fa4ae27e2499d1ff7..0a4e77a5ba33fe7e0009ab1f7f97078a38e1559c 100644 (file)
@@ -23,8 +23,6 @@
 #include "core.h"
 #include "card.h"
 
-#define MMC_QUEUE_BOUNCESZ     65536
-
 /*
  * Prepare a MMC request. This just filters out odd stuff.
  */
@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
                queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
-{
-       unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
-
-       if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
-               return 0;
-
-       if (bouncesz > host->max_req_size)
-               bouncesz = host->max_req_size;
-       if (bouncesz > host->max_seg_size)
-               bouncesz = host->max_seg_size;
-       if (bouncesz > host->max_blk_count * 512)
-               bouncesz = host->max_blk_count * 512;
-
-       if (bouncesz <= 512)
-               return 0;
-
-       return bouncesz;
-}
-
 /**
  * mmc_init_request() - initialize the MMC-specific per-request data
  * @q: the request queue
@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req,
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;
 
-       if (card->bouncesz) {
-               mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
-               if (!mq_rq->bounce_buf)
-                       return -ENOMEM;
-               if (card->bouncesz > 512) {
-                       mq_rq->sg = mmc_alloc_sg(1, gfp);
-                       if (!mq_rq->sg)
-                               return -ENOMEM;
-                       mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
-                                                       gfp);
-                       if (!mq_rq->bounce_sg)
-                               return -ENOMEM;
-               }
-       } else {
-               mq_rq->bounce_buf = NULL;
-               mq_rq->bounce_sg = NULL;
-               mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
-               if (!mq_rq->sg)
-                       return -ENOMEM;
-       }
+       mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+       if (!mq_rq->sg)
+               return -ENOMEM;
 
        return 0;
 }
@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
 {
        struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
 
-       /* It is OK to kfree(NULL) so this will be smooth */
-       kfree(mq_rq->bounce_sg);
-       mq_rq->bounce_sg = NULL;
-
-       kfree(mq_rq->bounce_buf);
-       mq_rq->bounce_buf = NULL;
-
        kfree(mq_rq->sg);
        mq_rq->sg = NULL;
 }
@@ -242,12 +196,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
-       /*
-        * mmc_init_request() depends on card->bouncesz so it must be calculated
-        * before blk_init_allocated_queue() starts allocating requests.
-        */
-       card->bouncesz = mmc_queue_calc_bouncesz(host);
-
        mq->card = card;
        mq->queue = blk_alloc_queue(GFP_KERNEL);
        if (!mq->queue)
@@ -271,17 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);
 
-       if (card->bouncesz) {
-               blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
-               blk_queue_max_segments(mq->queue, card->bouncesz / 512);
-               blk_queue_max_segment_size(mq->queue, card->bouncesz);
-       } else {
-               blk_queue_bounce_limit(mq->queue, limit);
-               blk_queue_max_hw_sectors(mq->queue,
-                       min(host->max_blk_count, host->max_req_size / 512));
-               blk_queue_max_segments(mq->queue, host->max_segs);
-               blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-       }
+       blk_queue_bounce_limit(mq->queue, limit);
+       blk_queue_max_hw_sectors(mq->queue,
+               min(host->max_blk_count, host->max_req_size / 512));
+       blk_queue_max_segments(mq->queue, host->max_segs);
+       blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
        sema_init(&mq->thread_sem, 1);
 
@@ -370,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq)
  */
 unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
-       unsigned int sg_len;
-       size_t buflen;
-       struct scatterlist *sg;
        struct request *req = mmc_queue_req_to_req(mqrq);
-       int i;
-
-       if (!mqrq->bounce_buf)
-               return blk_rq_map_sg(mq->queue, req, mqrq->sg);
-
-       sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
-
-       mqrq->bounce_sg_len = sg_len;
-
-       buflen = 0;
-       for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
-               buflen += sg->length;
-
-       sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
-
-       return 1;
-}
-
-/*
- * If writing, bounce the data to the buffer before the request
- * is sent to the host driver
- */
-void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
-{
-       if (!mqrq->bounce_buf)
-               return;
-
-       if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
-               return;
-
-       sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-               mqrq->bounce_buf, mqrq->sg[0].length);
-}
-
-/*
- * If reading, bounce the data from the buffer after the request
- * has been handled by the host driver
- */
-void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
-{
-       if (!mqrq->bounce_buf)
-               return;
-
-       if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
-               return;
 
-       sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-               mqrq->bounce_buf, mqrq->sg[0].length);
+       return blk_rq_map_sg(mq->queue, req, mqrq->sg);
 }
index 04fc89360a7a1a5669d7d3a027edc2fd6fdbc1f6..f18d3f656baa510b6022e9f51efd3990ec5f194d 100644 (file)
@@ -49,9 +49,6 @@ enum mmc_drv_op {
 struct mmc_queue_req {
        struct mmc_blk_request  brq;
        struct scatterlist      *sg;
-       char                    *bounce_buf;
-       struct scatterlist      *bounce_sg;
-       unsigned int            bounce_sg_len;
        struct mmc_async_req    areq;
        enum mmc_drv_op         drv_op;
        int                     drv_op_result;
@@ -81,11 +78,8 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
 extern void mmc_cleanup_queue(struct mmc_queue *);
 extern void mmc_queue_suspend(struct mmc_queue *);
 extern void mmc_queue_resume(struct mmc_queue *);
-
 extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
                                     struct mmc_queue_req *);
-extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
-extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
 extern int mmc_access_rpmb(struct mmc_queue *);
 
index 27fb625cbcf3ff459a62668a93c80a30883fbe21..fbd29f00fca05e5dc46e09d1aa10c9f619283097 100644 (file)
@@ -1038,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
         */
        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
                     MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
-                    MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF;
+                    MMC_CAP_3_3V_DDR;
 
        if (host->use_sg)
                mmc->max_segs = 16;
index c885c2d4b90418a93519ab05a989a7d5c77efd76..85745ef179e22b2e49e4b23e15df311891f192dc 100644 (file)
@@ -531,8 +531,7 @@ static int meson_mmc_clk_init(struct meson_host *host)
        div->shift = __ffs(CLK_DIV_MASK);
        div->width = __builtin_popcountl(CLK_DIV_MASK);
        div->hw.init = &init;
-       div->flags = (CLK_DIVIDER_ONE_BASED |
-                     CLK_DIVIDER_ROUND_CLOSEST);
+       div->flags = CLK_DIVIDER_ONE_BASED;
 
        clk = devm_clk_register(host->dev, &div->hw);
        if (WARN_ON(IS_ERR(clk)))
@@ -717,6 +716,22 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
 static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
        struct meson_host *host = mmc_priv(mmc);
+       int ret;
+
+       /*
+        * If this is the initial tuning, try to get a sane Rx starting
+        * phase before doing the actual tuning.
+        */
+       if (!mmc->doing_retune) {
+               ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
+
+               if (ret)
+                       return ret;
+       }
+
+       ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
+       if (ret)
+               return ret;
 
        return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
 }
@@ -746,6 +761,11 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        case MMC_POWER_UP:
                if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
+               /* Reset phases */
+               clk_set_phase(host->rx_clk, 0);
+               clk_set_phase(host->tx_clk, 270);
+
                break;
 
        case MMC_POWER_ON:
@@ -759,8 +779,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                                host->vqmmc_enabled = true;
                }
 
-               /* Reset rx phase */
-               clk_set_phase(host->rx_clk, 0);
                break;
        }
 
index 59ab194cb0099b19dcaa247977bc9ccc7c8175e1..c763b404510f3a29d3864290297b22f596701cc3 100644 (file)
@@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev)
 
        pxamci_init_ocr(host);
 
-       /*
-        * This architecture used to disable bounce buffers through its
-        * defconfig, now it is done at runtime as a host property.
-        */
-       mmc->caps = MMC_CAP_NO_BOUNCE_BUFF;
+       mmc->caps = 0;
        host->cmdat = 0;
        if (!cpu_is_pxa25x()) {
                mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
index 2eec2e652c53da9c1f0b9c6156dcdc9778d82d68..0842bbc2d7ad340011a70f43409d2503cf662ff7 100644 (file)
@@ -466,6 +466,7 @@ static int xenon_probe(struct platform_device *pdev)
 {
        struct sdhci_pltfm_host *pltfm_host;
        struct sdhci_host *host;
+       struct xenon_priv *priv;
        int err;
 
        host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata,
@@ -474,6 +475,7 @@ static int xenon_probe(struct platform_device *pdev)
                return PTR_ERR(host);
 
        pltfm_host = sdhci_priv(host);
+       priv = sdhci_pltfm_priv(pltfm_host);
 
        /*
         * Link Xenon specific mmc_host_ops function,
@@ -491,9 +493,20 @@ static int xenon_probe(struct platform_device *pdev)
        if (err)
                goto free_pltfm;
 
+       priv->axi_clk = devm_clk_get(&pdev->dev, "axi");
+       if (IS_ERR(priv->axi_clk)) {
+               err = PTR_ERR(priv->axi_clk);
+               if (err == -EPROBE_DEFER)
+                       goto err_clk;
+       } else {
+               err = clk_prepare_enable(priv->axi_clk);
+               if (err)
+                       goto err_clk;
+       }
+
        err = mmc_of_parse(host->mmc);
        if (err)
-               goto err_clk;
+               goto err_clk_axi;
 
        sdhci_get_of_property(pdev);
 
@@ -502,11 +515,11 @@ static int xenon_probe(struct platform_device *pdev)
        /* Xenon specific dt parse */
        err = xenon_probe_dt(pdev);
        if (err)
-               goto err_clk;
+               goto err_clk_axi;
 
        err = xenon_sdhc_prepare(host);
        if (err)
-               goto err_clk;
+               goto err_clk_axi;
 
        pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
@@ -527,6 +540,8 @@ remove_sdhc:
        pm_runtime_disable(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
        xenon_sdhc_unprepare(host);
+err_clk_axi:
+       clk_disable_unprepare(priv->axi_clk);
 err_clk:
        clk_disable_unprepare(pltfm_host->clk);
 free_pltfm:
@@ -538,6 +553,7 @@ static int xenon_remove(struct platform_device *pdev)
 {
        struct sdhci_host *host = platform_get_drvdata(pdev);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
 
        pm_runtime_get_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
@@ -546,7 +562,7 @@ static int xenon_remove(struct platform_device *pdev)
        sdhci_remove_host(host, 0);
 
        xenon_sdhc_unprepare(host);
-
+       clk_disable_unprepare(priv->axi_clk);
        clk_disable_unprepare(pltfm_host->clk);
 
        sdhci_pltfm_free(pdev);
index 2bc0510c07696a11015c8e3bcee8c3497bd67384..9994995c7c56867bd24835d86bd1343e8fa3b770 100644 (file)
@@ -83,6 +83,7 @@ struct xenon_priv {
        unsigned char   bus_width;
        unsigned char   timing;
        unsigned int    clock;
+       struct clk      *axi_clk;
 
        int             phy_type;
        /*
index 49b80da51ba7307eb0d7fff8116203723b30cc9b..805ab45e9b5ac85b9b09037d89364bb9ac789809 100644 (file)
@@ -565,8 +565,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
                return true;
        default:
                bpf_warn_invalid_xdp_action(action);
+               /* fall through */
        case XDP_ABORTED:
                trace_xdp_exception(nic->netdev, prog, action);
+               /* fall through */
        case XDP_DROP:
                /* Check if it's a recycled page, if not
                 * unmap the DMA mapping.
index 523f9d05a810f175582e5b1474f4d398bfa0aea2..8a32eb7d47b9ba88f97ad25be9374e68020bfb29 100644 (file)
@@ -175,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
  **/
 static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
 {
-#ifndef CONFIG_SPARC
-       u32 regval;
-       u32 i;
-#endif
        s32 ret_val;
 
        ret_val = ixgbe_start_hw_generic(hw);
-
-#ifndef CONFIG_SPARC
-       /* Disable relaxed ordering */
-       for (i = 0; ((i < hw->mac.max_tx_queues) &&
-            (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
-               regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
-               regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
-       }
-
-       for (i = 0; ((i < hw->mac.max_rx_queues) &&
-            (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
-               regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-               regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-                           IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
-               IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
-       }
-#endif
        if (ret_val)
                return ret_val;
 
index 2c19070d2a0b08dee8b47a3d98bf54971741cb78..6e6ab6f6875ebc188fc11a6d9f8b1b21dea3fb59 100644 (file)
@@ -366,25 +366,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
        }
        IXGBE_WRITE_FLUSH(hw);
 
-#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
-       /* Disable relaxed ordering */
-       for (i = 0; i < hw->mac.max_tx_queues; i++) {
-               u32 regval;
-
-               regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
-               regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
-       }
-
-       for (i = 0; i < hw->mac.max_rx_queues; i++) {
-               u32 regval;
-
-               regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-               regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-                           IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
-               IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
-       }
-#endif
        return 0;
 }
 
index 72c565712a5f83106fe92ee7aa6e2f674e89efa2..c3e7a8191128dea542db9208a8076b4cf190e60e 100644 (file)
@@ -1048,7 +1048,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_ring *temp_ring;
-       int i, err = 0;
+       int i, j, err = 0;
        u32 new_rx_count, new_tx_count;
 
        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -1085,8 +1085,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
        }
 
        /* allocate temporary buffer to store rings in */
-       i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
-       i = max_t(int, i, adapter->num_xdp_queues);
+       i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
+                 adapter->num_rx_queues);
        temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
 
        if (!temp_ring) {
@@ -1118,8 +1118,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                        }
                }
 
-               for (i = 0; i < adapter->num_xdp_queues; i++) {
-                       memcpy(&temp_ring[i], adapter->xdp_ring[i],
+               for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+                       memcpy(&temp_ring[i], adapter->xdp_ring[j],
                               sizeof(struct ixgbe_ring));
 
                        temp_ring[i].count = new_tx_count;
@@ -1139,10 +1139,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                        memcpy(adapter->tx_ring[i], &temp_ring[i],
                               sizeof(struct ixgbe_ring));
                }
-               for (i = 0; i < adapter->num_xdp_queues; i++) {
-                       ixgbe_free_tx_resources(adapter->xdp_ring[i]);
+               for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+                       ixgbe_free_tx_resources(adapter->xdp_ring[j]);
 
-                       memcpy(adapter->xdp_ring[i], &temp_ring[i],
+                       memcpy(adapter->xdp_ring[j], &temp_ring[i],
                               sizeof(struct ixgbe_ring));
                }
 
index d962368d08d0ff4c25032e3d0b3b2f51963a39d7..4d76afd13868574854759d844af34b2b3879a780 100644 (file)
@@ -4881,7 +4881,7 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
                                IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
                return;
 
-       vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask;
+       vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
        IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
 
        if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
@@ -8529,6 +8529,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
                return ixgbe_ptp_set_ts_config(adapter, req);
        case SIOCGHWTSTAMP:
                return ixgbe_ptp_get_ts_config(adapter, req);
+       case SIOCGMIIPHY:
+               if (!adapter->hw.phy.ops.read_reg)
+                       return -EOPNOTSUPP;
+               /* fall through */
        default:
                return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
        }
index 032089efc1a0fae72859a45284d99cc7f9f49f49..c16718d296d389b0330b8aa7a75f130a5eb4bdbf 100644 (file)
@@ -3505,20 +3505,6 @@ static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_fib *fib)
 {
-       struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
-       struct mlxsw_sp_lpm_tree *lpm_tree;
-
-       /* Aggregate prefix lengths across all virtual routers to make
-        * sure we only have used prefix lengths in the LPM tree.
-        */
-       mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
-       lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
-                                        fib->proto);
-       if (IS_ERR(lpm_tree))
-               goto err_tree_get;
-       mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
-
-err_tree_get:
        if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
                return;
        mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
index c3f77e3b7819ccd590cff067b3c7260382f91ce9..e365866600ba048f31664dddafccf314d640385f 100644 (file)
@@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
 
 static int ppp_dev_init(struct net_device *dev)
 {
+       struct ppp *ppp;
+
        netdev_lockdep_set_classes(dev);
+
+       ppp = netdev_priv(dev);
+       /* Let the netdevice take a reference on the ppp file. This ensures
+        * that ppp_destroy_interface() won't run before the device gets
+        * unregistered.
+        */
+       atomic_inc(&ppp->file.refcnt);
+
        return 0;
 }
 
@@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev)
        wake_up_interruptible(&ppp->file.rwait);
 }
 
+static void ppp_dev_priv_destructor(struct net_device *dev)
+{
+       struct ppp *ppp;
+
+       ppp = netdev_priv(dev);
+       if (atomic_dec_and_test(&ppp->file.refcnt))
+               ppp_destroy_interface(ppp);
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
        .ndo_init        = ppp_dev_init,
        .ndo_uninit      = ppp_dev_uninit,
@@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev)
        dev->tx_queue_len = 3;
        dev->type = ARPHRD_PPP;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+       dev->priv_destructor = ppp_dev_priv_destructor;
        netif_keep_dst(dev);
 }
 
index 29c7e2ec0dcbe0a8ef158d80f0e3c6b21be8abf7..52ea80bcd6392eaeb5917f6f9600196ae08af9fe 100644 (file)
@@ -560,6 +560,7 @@ static const struct driver_info wwan_info = {
 #define NVIDIA_VENDOR_ID       0x0955
 #define HP_VENDOR_ID           0x03f0
 #define MICROSOFT_VENDOR_ID    0x045e
+#define UBLOX_VENDOR_ID                0x1546
 
 static const struct usb_device_id      products[] = {
 /* BLACKLIST !!
@@ -868,6 +869,18 @@ static const struct usb_device_id  products[] = {
                                      USB_CDC_SUBCLASS_ETHERNET,
                                      USB_CDC_PROTO_NONE),
        .driver_info = (unsigned long)&zte_cdc_info,
+}, {
+       /* U-blox TOBY-L2 */
+       USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1143, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET,
+                                     USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long)&wwan_info,
+}, {
+       /* U-blox SARA-U2 */
+       USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1104, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET,
+                                     USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long)&wwan_info,
 }, {
        USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
                        USB_CDC_PROTO_NONE),
index bb2aad07863736c44adbcad9f43f0efb14d17bb7..5a14cc7f28ee75e930f8aca11e35443812720536 100644 (file)
@@ -2136,7 +2136,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 
        if (a == &dev_attr_uuid.attr) {
-               if (uuid_is_null(&ns->uuid) ||
+               if (uuid_is_null(&ns->uuid) &&
                    !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
                        return 0;
        }
index cb73bc8cad3bd2c332bdf3c7c4903e1b0c0f035c..3f5a04c586cefdc8096469ba38d325004963b42d 100644 (file)
@@ -94,7 +94,7 @@ struct nvme_dev {
        struct mutex shutdown_lock;
        bool subsystem;
        void __iomem *cmb;
-       dma_addr_t cmb_dma_addr;
+       pci_bus_addr_t cmb_bus_addr;
        u64 cmb_size;
        u32 cmbsz;
        u32 cmbloc;
@@ -1226,7 +1226,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
        if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
                unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
                                                      dev->ctrl.page_size);
-               nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
+               nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
                nvmeq->sq_cmds_io = dev->cmb + offset;
        } else {
                nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
@@ -1527,7 +1527,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
        resource_size_t bar_size;
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        void __iomem *cmb;
-       dma_addr_t dma_addr;
+       int bar;
 
        dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
        if (!(NVME_CMB_SZ(dev->cmbsz)))
@@ -1540,7 +1540,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
        szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
        size = szu * NVME_CMB_SZ(dev->cmbsz);
        offset = szu * NVME_CMB_OFST(dev->cmbloc);
-       bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
+       bar = NVME_CMB_BIR(dev->cmbloc);
+       bar_size = pci_resource_len(pdev, bar);
 
        if (offset > bar_size)
                return NULL;
@@ -1553,12 +1554,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
        if (size > bar_size - offset)
                size = bar_size - offset;
 
-       dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
-       cmb = ioremap_wc(dma_addr, size);
+       cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
        if (!cmb)
                return NULL;
 
-       dev->cmb_dma_addr = dma_addr;
+       dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
        dev->cmb_size = size;
        return cmb;
 }
index 1778cf4f81c7853638b46139204d3ee71e3b396d..82cd8b08d71f5247c0595d1ba2be199e4fe6ecec 100644 (file)
@@ -100,6 +100,7 @@ config PINCTRL_AMD
        tristate "AMD GPIO pin control"
        depends on GPIOLIB
        select GPIOLIB_IRQCHIP
+       select PINMUX
        select PINCONF
        select GENERIC_PINCONF
        help
index 0944310225db92a8ee2c5011c7fb394044ccfd22..ff782445dfb75d1a8af9eed775b8e8f7b275fee3 100644 (file)
@@ -373,16 +373,12 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
        unsigned long events;
        unsigned offset;
        unsigned gpio;
-       unsigned int type;
 
        events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4);
        events &= mask;
        events &= pc->enabled_irq_map[bank];
        for_each_set_bit(offset, &events, 32) {
                gpio = (32 * bank) + offset;
-               /* FIXME: no clue why the code looks up the type here */
-               type = pc->irq_type[gpio];
-
                generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain,
                                                     gpio));
        }
index 04e929fd0ffee494cc744cf495e5acd9e437ea6b..fadbca907c7c7ea552ffb2b401a6fc154a4d1a2d 100644 (file)
@@ -1577,6 +1577,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
        struct gpio_chip *chip = &pctrl->chip;
        bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
        int ret, i, offset;
+       int irq_base;
 
        *chip = chv_gpio_chip;
 
@@ -1622,7 +1623,18 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
        /* Clear all interrupts */
        chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
 
-       ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
+       if (!need_valid_mask) {
+               irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
+                                               chip->ngpio, NUMA_NO_NODE);
+               if (irq_base < 0) {
+                       dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
+                       return irq_base;
+               }
+       } else {
+               irq_base = 0;
+       }
+
+       ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base,
                                   handle_bad_irq, IRQ_TYPE_NONE);
        if (ret) {
                dev_err(pctrl->dev, "failed to add IRQ chip\n");
index df63e44526ac42a814ee85c3286c9d1a9b45be48..bf04479456a050abb56290a71729a76f49a638b6 100644 (file)
@@ -109,6 +109,7 @@ config QCOM_Q6V5_PIL
        depends on OF && ARCH_QCOM
        depends on QCOM_SMEM
        depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+       depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
        select MFD_SYSCON
        select QCOM_RPROC_COMMON
        select QCOM_SCM
@@ -120,6 +121,7 @@ config QCOM_WCNSS_PIL
        tristate "Qualcomm WCNSS Peripheral Image Loader"
        depends on OF && ARCH_QCOM
        depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+       depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
        depends on QCOM_SMEM
        select QCOM_MDT_LOADER
        select QCOM_RPROC_COMMON
index 612d914033414e3b17dea2b8bac97584cb280f1c..633268e9d550de7001999052f2692239b4754f6b 100644 (file)
@@ -264,15 +264,14 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
                if (!(att->flags & ATT_OWN))
                        continue;
 
-               if (b > IMX7D_RPROC_MEM_MAX)
+               if (b >= IMX7D_RPROC_MEM_MAX)
                        break;
 
                priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
                                                     att->sa, att->size);
-               if (IS_ERR(priv->mem[b].cpu_addr)) {
+               if (!priv->mem[b].cpu_addr) {
                        dev_err(dev, "devm_ioremap_resource failed\n");
-                       err = PTR_ERR(priv->mem[b].cpu_addr);
-                       return err;
+                       return -ENOMEM;
                }
                priv->mem[b].sys_addr = att->sa;
                priv->mem[b].size = att->size;
@@ -296,7 +295,7 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
                        return err;
                }
 
-               if (b > IMX7D_RPROC_MEM_MAX)
+               if (b >= IMX7D_RPROC_MEM_MAX)
                        break;
 
                priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res);
index 5a5e927ea50f45ac307dc6ed0abfa4f8d4b3a815..5dcc9bf1c5bc5de65af2bfc5d778c4d494b20cee 100644 (file)
@@ -635,19 +635,18 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
        unsigned long flags;
 
        intent = kzalloc(sizeof(*intent), GFP_KERNEL);
-
        if (!intent)
                return NULL;
 
        intent->data = kzalloc(size, GFP_KERNEL);
        if (!intent->data)
-               return NULL;
+               goto free_intent;
 
        spin_lock_irqsave(&channel->intent_lock, flags);
        ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
        if (ret < 0) {
                spin_unlock_irqrestore(&channel->intent_lock, flags);
-               return NULL;
+               goto free_data;
        }
        spin_unlock_irqrestore(&channel->intent_lock, flags);
 
@@ -656,6 +655,12 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
        intent->reuse = reuseable;
 
        return intent;
+
+free_data:
+       kfree(intent->data);
+free_intent:
+       kfree(intent);
+       return NULL;
 }
 
 static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
@@ -1197,7 +1202,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
 
        ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
        if (ret)
-               return ret;
+               goto unlock;
 
        ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ);
        if (!ret) {
@@ -1207,6 +1212,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
                ret = channel->intent_req_result ? 0 : -ECANCELED;
        }
 
+unlock:
        mutex_unlock(&channel->intent_req_lock);
        return ret;
 }
index 785fb42f66502a40e005c912d1cf9022a86ddb62..2799a6b08f736052a52ae7901a06fe64fc486211 100644 (file)
@@ -3767,7 +3767,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
         */
        if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
                pr_err("write_pending failed since: %d\n", vscsi->flags);
-               return 0;
+               return -EIO;
        }
 
        rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
index bd4605a34f54d655a09471243c8d4386b9e78b3d..c62e8d111fd9721e72374d1ed17db283f92564b6 100644 (file)
@@ -2851,9 +2851,6 @@ EXPORT_SYMBOL_GPL(iscsi_session_setup);
 /**
  * iscsi_session_teardown - destroy session, host, and cls_session
  * @cls_session: iscsi session
- *
- * The driver must have called iscsi_remove_session before
- * calling this.
  */
 void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 {
@@ -2863,6 +2860,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 
        iscsi_pool_free(&session->cmdpool);
 
+       iscsi_remove_session(cls_session);
+
        kfree(session->password);
        kfree(session->password_in);
        kfree(session->username);
@@ -2877,7 +2876,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
        kfree(session->portal_type);
        kfree(session->discovery_parent_type);
 
-       iscsi_destroy_session(cls_session);
+       iscsi_free_session(cls_session);
+
        iscsi_host_dec_session_cnt(shost);
        module_put(owner);
 }
index e7818afeda2bea560101fe023983050d0b3086b2..15590a063ad94d00fe55076a51b8e49eca54dc9c 100644 (file)
@@ -956,6 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
        if (*bflags & BLIST_NO_DIF)
                sdev->no_dif = 1;
 
+       if (*bflags & BLIST_UNMAP_LIMIT_WS)
+               sdev->unmap_limit_for_ws = 1;
+
        sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
 
        if (*bflags & BLIST_TRY_VPD_PAGES)
index 0190aeff5f7fde124da9e5d621a8e20d1f0b3eae..7404d26895f5b7de916f65e86c549790cf444d96 100644 (file)
@@ -2210,22 +2210,6 @@ void iscsi_free_session(struct iscsi_cls_session *session)
 }
 EXPORT_SYMBOL_GPL(iscsi_free_session);
 
-/**
- * iscsi_destroy_session - destroy iscsi session
- * @session: iscsi_session
- *
- * Can be called by a LLD or iscsi_transport. There must not be
- * any running connections.
- */
-int iscsi_destroy_session(struct iscsi_cls_session *session)
-{
-       iscsi_remove_session(session);
-       ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n");
-       iscsi_free_session(session);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(iscsi_destroy_session);
-
 /**
  * iscsi_create_conn - create iscsi class connection
  * @session: iscsi cls session
index fb9f8b5f4673c0883eefbd91242441e37d3d7189..d175c5c5ccf87eba361aa1bad5d84b56ce64d5b0 100644 (file)
@@ -715,13 +715,21 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
                break;
 
        case SD_LBP_WS16:
-               max_blocks = min_not_zero(sdkp->max_ws_blocks,
-                                         (u32)SD_MAX_WS16_BLOCKS);
+               if (sdkp->device->unmap_limit_for_ws)
+                       max_blocks = sdkp->max_unmap_blocks;
+               else
+                       max_blocks = sdkp->max_ws_blocks;
+
+               max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
                break;
 
        case SD_LBP_WS10:
-               max_blocks = min_not_zero(sdkp->max_ws_blocks,
-                                         (u32)SD_MAX_WS10_BLOCKS);
+               if (sdkp->device->unmap_limit_for_ws)
+                       max_blocks = sdkp->max_unmap_blocks;
+               else
+                       max_blocks = sdkp->max_ws_blocks;
+
+               max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
                break;
 
        case SD_LBP_ZERO:
@@ -3099,8 +3107,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
                sd_read_security(sdkp, buffer);
        }
 
-       sdkp->first_scan = 0;
-
        /*
         * We now have all cache related info, determine how we deal
         * with flush requests.
@@ -3115,7 +3121,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
        q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
 
        /*
-        * Use the device's preferred I/O size for reads and writes
+        * Determine the device's preferred I/O size for reads and writes
         * unless the reported value is unreasonably small, large, or
         * garbage.
         */
@@ -3129,8 +3135,19 @@ static int sd_revalidate_disk(struct gendisk *disk)
                rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
                                      (sector_t)BLK_DEF_MAX_SECTORS);
 
-       /* Combine with controller limits */
-       q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
+       /* Do not exceed controller limit */
+       rw_max = min(rw_max, queue_max_hw_sectors(q));
+
+       /*
+        * Only update max_sectors if previously unset or if the current value
+        * exceeds the capabilities of the hardware.
+        */
+       if (sdkp->first_scan ||
+           q->limits.max_sectors > q->limits.max_dev_sectors ||
+           q->limits.max_sectors > q->limits.max_hw_sectors)
+               q->limits.max_sectors = rw_max;
+
+       sdkp->first_scan = 0;
 
        set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
        sd_config_write_same(sdkp);
index adaf6f6dd858cb9b0a6b0077782d068b3d024e0b..e1cbdfdb7c684fd24fdb6f25ee03f4e253e9ef58 100644 (file)
@@ -310,9 +310,13 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
 
        p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
 
-       if (unlikely(copied < len && !PageUptodate(page))) {
-               copied = 0;
-               goto out;
+       if (!PageUptodate(page)) {
+               if (unlikely(copied < len)) {
+                       copied = 0;
+                       goto out;
+               } else if (len == PAGE_SIZE) {
+                       SetPageUptodate(page);
+               }
        }
        /*
         * No need to use i_size_read() here, the i_size
index 899ddaeeacec8dcf1bf280e6c3b30c30eaa8c8c2..8fc690384c585779a9a6838b08228b41468a49b6 100644 (file)
@@ -722,7 +722,7 @@ struct btrfs_delayed_root;
  * Indicate that a whole-filesystem exclusive operation is running
  * (device replace, resize, device add/delete, balance)
  */
-#define BTRFS_FS_EXCL_OP                       14
+#define BTRFS_FS_EXCL_OP                       16
 
 struct btrfs_fs_info {
        u8 fsid[BTRFS_FSID_SIZE];
index 12ab19a4b93e16167d3c58f3a4991ed979fe46e5..970190cd347edd5878479618222dc814b4736573 100644 (file)
@@ -2801,7 +2801,7 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
                }
        }
 
-       bio = btrfs_bio_alloc(bdev, sector << 9);
+       bio = btrfs_bio_alloc(bdev, (u64)sector << 9);
        bio_add_page(bio, page, page_size, offset);
        bio->bi_end_io = end_io_func;
        bio->bi_private = tree;
index 84edfc60d87a898c810205b912a5eeb8dae9f446..f23c820daaedc6e460982d80d55a653a2379debf 100644 (file)
@@ -734,12 +734,13 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                        inode = req->r_inode;
                        ihold(inode);
                } else {
-                       /* req->r_dentry is non-null for LSSNAP request.
-                        * fall-thru */
-                       WARN_ON_ONCE(!req->r_dentry);
+                       /* req->r_dentry is non-null for LSSNAP request */
+                       rcu_read_lock();
+                       inode = get_nonsnap_parent(req->r_dentry);
+                       rcu_read_unlock();
+                       dout("__choose_mds using snapdir's parent %p\n", inode);
                }
-       }
-       if (!inode && req->r_dentry) {
+       } else if (req->r_dentry) {
                /* ignore race with rename; old or new d_parent is okay */
                struct dentry *parent;
                struct inode *dir;
index 1ffc8b426c1c4c9f444c5eda57cc803ca5016783..7fc0b850c35279646a6eb48a595870a2ce9d6cfd 100644 (file)
@@ -374,12 +374,10 @@ static int build_snap_context(struct ceph_snap_realm *realm,
             realm->ino, realm, snapc, snapc->seq,
             (unsigned int) snapc->num_snaps);
 
-       if (realm->cached_context) {
-               ceph_put_snap_context(realm->cached_context);
-               /* queue realm for cap_snap creation */
-               list_add_tail(&realm->dirty_item, dirty_realms);
-       }
+       ceph_put_snap_context(realm->cached_context);
        realm->cached_context = snapc;
+       /* queue realm for cap_snap creation */
+       list_add_tail(&realm->dirty_item, dirty_realms);
        return 0;
 
 fail:
index 62cf812ed0e5803ac9148a9ea8a27f00efca7f2a..96415c65bbdc102f6a5dff696231e944d4fdccdb 100644 (file)
@@ -866,7 +866,8 @@ out:
         */
        if (sdio->boundary) {
                ret = dio_send_cur_page(dio, sdio, map_bh);
-               dio_bio_submit(dio, sdio);
+               if (sdio->bio)
+                       dio_bio_submit(dio, sdio);
                put_page(sdio->cur_page);
                sdio->cur_page = NULL;
        }
index 9a7c903869477835afe192e15b064c6832297520..4b4a72f392be4be76575d9d485cce0ced2adfdfe 100644 (file)
@@ -2525,7 +2525,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
 bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
 void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new);
 void stop_discard_thread(struct f2fs_sb_info *sbi);
-void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount);
 void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
 void release_discard_addrs(struct f2fs_sb_info *sbi);
 int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
index 621b9b3d320bba293f68b5eaf74b91aa364dd567..c695ff462ee6990f1abd4d27458b9ad4bf89c025 100644 (file)
@@ -1210,11 +1210,11 @@ void stop_discard_thread(struct f2fs_sb_info *sbi)
 }
 
 /* This comes from f2fs_put_super and f2fs_trim_fs */
-void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount)
 {
        __issue_discard_cmd(sbi, false);
        __drop_discard_cmd(sbi);
-       __wait_discard_cmd(sbi, false);
+       __wait_discard_cmd(sbi, !umount);
 }
 
 static void mark_discard_range_all(struct f2fs_sb_info *sbi)
@@ -2244,7 +2244,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
        }
        /* It's time to issue all the filed discards */
        mark_discard_range_all(sbi);
-       f2fs_wait_discard_bios(sbi);
+       f2fs_wait_discard_bios(sbi, false);
 out:
        range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
        return err;
index 89f61eb3d1671c3c5686a002adca8c0c427abd1c..933c3d529e6531c827c2237c97f816103c6d4ef0 100644 (file)
@@ -801,7 +801,7 @@ static void f2fs_put_super(struct super_block *sb)
        }
 
        /* be sure to wait for any on-going discard commands */
-       f2fs_wait_discard_bios(sbi);
+       f2fs_wait_discard_bios(sbi, true);
 
        if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
                struct cp_control cpc = {
index 54059b142d6ba1153b8adab7114cd7587e0c3aad..3b601f115b6cb72a9f02a82f5e8a41c458729b4b 100644 (file)
@@ -468,7 +468,9 @@ static inline int may_write_real(struct file *file)
 
        /* File refers to upper, writable layer? */
        upperdentry = d_real(dentry, NULL, 0, D_REAL_UPPER);
-       if (upperdentry && file_inode(file) == d_inode(upperdentry))
+       if (upperdentry &&
+           (file_inode(file) == d_inode(upperdentry) ||
+            file_inode(file) == d_inode(dentry)))
                return 0;
 
        /* Lower layer: can't write to real file, sorry... */
index efebe6cf4378e32a13d109eebd0b694dd629c2a5..22880ef6d8dd74f6b44d7d755bdb9c8e8de59917 100644 (file)
@@ -218,7 +218,6 @@ static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
 static void pnfs_init_server(struct nfs_server *server)
 {
        rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
-       rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
 }
 
 #else
@@ -888,6 +887,7 @@ struct nfs_server *nfs_alloc_server(void)
        ida_init(&server->openowner_id);
        ida_init(&server->lockowner_id);
        pnfs_init_server(server);
+       rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
 
        return server;
 }
index 44c638b7876cfd5824d2d6287731e4411ea02052..508126eb49f9577eeec6f9e1b97abf6286cc82eb 100644 (file)
@@ -745,7 +745,8 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
        struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
 
        dprintk("--> %s\n", __func__);
-       nfs4_fl_put_deviceid(fl->dsaddr);
+       if (fl->dsaddr != NULL)
+               nfs4_fl_put_deviceid(fl->dsaddr);
        /* This assumes a single RW lseg */
        if (lseg->pls_range.iomode == IOMODE_RW) {
                struct nfs4_filelayout *flo;
index dd5d27da8c0cc5f19d96c33360400778a6a60b5c..30426c1a1bbda245b831bfbac5ff8f1aa570f71f 100644 (file)
@@ -274,7 +274,7 @@ static struct key *nfs_idmap_request_key(const char *name, size_t namelen,
        ssize_t ret;
 
        ret = nfs_idmap_get_desc(name, namelen, type, strlen(type), &desc);
-       if (ret <= 0)
+       if (ret < 0)
                return ERR_PTR(ret);
 
        rkey = request_key(&key_type_id_resolver, desc, "");
index 6c61e2b996351cde05b1c922674cd3a2670913ea..f90090e8c959b14346baf76ac7eaefed7ae30f82 100644 (file)
@@ -8399,8 +8399,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
                lo = NFS_I(inode)->layout;
                /* If the open stateid was bad, then recover it. */
                if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
-                   nfs4_stateid_match_other(&lgp->args.stateid,
-                                       &lgp->args.ctx->state->stateid)) {
+                   !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
                        spin_unlock(&inode->i_lock);
                        exception->state = lgp->args.ctx->state;
                        exception->stateid = &lgp->args.stateid;
index 37c8af00327588d772610cac487fcf0651cf8fbf..14ed9791ec9cf156905a3e686042c325173582c0 100644 (file)
@@ -1842,8 +1842,8 @@ static void encode_create_session(struct xdr_stream *xdr,
         * Assumes OPEN is the biggest non-idempotent compound.
         * 2 is the verifier.
         */
-       max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE +
-                             RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT;
+       max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + 2)
+                               * XDR_UNIT + RPC_MAX_AUTH_SIZE;
 
        encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr);
        p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12);
index 3c69db7d4905e379487ebdec7f31c2b4670412d6..8487486ec4963efb72477e7cf2f19616108f12f2 100644 (file)
@@ -927,6 +927,13 @@ nfsd4_secinfo_release(union nfsd4_op_u *u)
                exp_put(u->secinfo.si_exp);
 }
 
+static void
+nfsd4_secinfo_no_name_release(union nfsd4_op_u *u)
+{
+       if (u->secinfo_no_name.sin_exp)
+               exp_put(u->secinfo_no_name.sin_exp);
+}
+
 static __be32
 nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
              union nfsd4_op_u *u)
@@ -2375,7 +2382,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
        },
        [OP_SECINFO_NO_NAME] = {
                .op_func = nfsd4_secinfo_no_name,
-               .op_release = nfsd4_secinfo_release,
+               .op_release = nfsd4_secinfo_no_name_release,
                .op_flags = OP_HANDLES_WRONGSEC,
                .op_name = "OP_SECINFO_NO_NAME",
                .op_rsize_bop = nfsd4_secinfo_rsize,
index aad97b30d5e657199f741f02700208c41e5424ae..c441f9387a1ba0f2ddf1f1426a114a5533fb6e30 100644 (file)
@@ -561,10 +561,8 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
                c->tmpfile = true;
                err = ovl_copy_up_locked(c);
        } else {
-               err = -EIO;
-               if (lock_rename(c->workdir, c->destdir) != NULL) {
-                       pr_err("overlayfs: failed to lock workdir+upperdir\n");
-               } else {
+               err = ovl_lock_rename_workdir(c->workdir, c->destdir);
+               if (!err) {
                        err = ovl_copy_up_locked(c);
                        unlock_rename(c->workdir, c->destdir);
                }
index 3309b1912241769bba93e2caba483a4e2f57035a..cc961a3bd3bdec34fcace34553f9e8cfb319db25 100644 (file)
@@ -216,26 +216,6 @@ out_unlock:
        return err;
 }
 
-static int ovl_lock_rename_workdir(struct dentry *workdir,
-                                  struct dentry *upperdir)
-{
-       /* Workdir should not be the same as upperdir */
-       if (workdir == upperdir)
-               goto err;
-
-       /* Workdir should not be subdir of upperdir and vice versa */
-       if (lock_rename(workdir, upperdir) != NULL)
-               goto err_unlock;
-
-       return 0;
-
-err_unlock:
-       unlock_rename(workdir, upperdir);
-err:
-       pr_err("overlayfs: failed to lock workdir+upperdir\n");
-       return -EIO;
-}
-
 static struct dentry *ovl_clear_empty(struct dentry *dentry,
                                      struct list_head *list)
 {
index c3addd1114f1f562464370e6d640abd22cd3a3e4..654bea1a5ac9f38c587919d1fac6d53fd6867ee0 100644 (file)
@@ -506,6 +506,7 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
 
        index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len);
        if (IS_ERR(index)) {
+               err = PTR_ERR(index);
                pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
                                    "overlayfs: mount with '-o index=off' to disable inodes index.\n",
                                    d_inode(origin)->i_ino, name.len, name.name,
index d4e8c1a08fb0f5d7b4c79de7aa88a5211a4996a4..c706a6f999288136b567d75777f5f6742682a9a2 100644 (file)
@@ -235,6 +235,7 @@ bool ovl_inuse_trylock(struct dentry *dentry);
 void ovl_inuse_unlock(struct dentry *dentry);
 int ovl_nlink_start(struct dentry *dentry, bool *locked);
 void ovl_nlink_end(struct dentry *dentry, bool locked);
+int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
 
 static inline bool ovl_is_impuredir(struct dentry *dentry)
 {
index 878a750986dd799ad17d9acfc3818e93dcf288fa..25d9b5adcd429071537c5edf3185b58a6ab9cd60 100644 (file)
@@ -37,6 +37,9 @@ struct ovl_fs {
        bool noxattr;
        /* sb common to all layers */
        struct super_block *same_sb;
+       /* Did we take the inuse lock? */
+       bool upperdir_locked;
+       bool workdir_locked;
 };
 
 /* private information held for every overlayfs dentry */
index 62e9b22a2077ac2ac9acb382dd5617a34e9e7636..0f85ee9c3268adb320dcc6c4d37e88ae3ea92013 100644 (file)
@@ -988,6 +988,7 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
                         struct path *lowerstack, unsigned int numlower)
 {
        int err;
+       struct dentry *index = NULL;
        struct inode *dir = dentry->d_inode;
        struct path path = { .mnt = mnt, .dentry = dentry };
        LIST_HEAD(list);
@@ -1007,8 +1008,6 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
 
        inode_lock_nested(dir, I_MUTEX_PARENT);
        list_for_each_entry(p, &list, l_node) {
-               struct dentry *index;
-
                if (p->name[0] == '.') {
                        if (p->len == 1)
                                continue;
@@ -1018,6 +1017,7 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
                index = lookup_one_len(p->name, dentry, p->len);
                if (IS_ERR(index)) {
                        err = PTR_ERR(index);
+                       index = NULL;
                        break;
                }
                err = ovl_verify_index(index, lowerstack, numlower);
@@ -1029,7 +1029,9 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
                                break;
                }
                dput(index);
+               index = NULL;
        }
+       dput(index);
        inode_unlock(dir);
 out:
        ovl_cache_free(&list);
index fd5ea4facc622f80861f1495d93b148ecde2a003..092d150643c12061407be0ecab554a38fe88d9fc 100644 (file)
@@ -211,9 +211,10 @@ static void ovl_put_super(struct super_block *sb)
 
        dput(ufs->indexdir);
        dput(ufs->workdir);
-       ovl_inuse_unlock(ufs->workbasedir);
+       if (ufs->workdir_locked)
+               ovl_inuse_unlock(ufs->workbasedir);
        dput(ufs->workbasedir);
-       if (ufs->upper_mnt)
+       if (ufs->upper_mnt && ufs->upperdir_locked)
                ovl_inuse_unlock(ufs->upper_mnt->mnt_root);
        mntput(ufs->upper_mnt);
        for (i = 0; i < ufs->numlower; i++)
@@ -881,9 +882,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                        goto out_put_upperpath;
 
                err = -EBUSY;
-               if (!ovl_inuse_trylock(upperpath.dentry)) {
-                       pr_err("overlayfs: upperdir is in-use by another mount\n");
+               if (ovl_inuse_trylock(upperpath.dentry)) {
+                       ufs->upperdir_locked = true;
+               } else if (ufs->config.index) {
+                       pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
                        goto out_put_upperpath;
+               } else {
+                       pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
                }
 
                err = ovl_mount_dir(ufs->config.workdir, &workpath);
@@ -901,9 +906,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                }
 
                err = -EBUSY;
-               if (!ovl_inuse_trylock(workpath.dentry)) {
-                       pr_err("overlayfs: workdir is in-use by another mount\n");
+               if (ovl_inuse_trylock(workpath.dentry)) {
+                       ufs->workdir_locked = true;
+               } else if (ufs->config.index) {
+                       pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
                        goto out_put_workpath;
+               } else {
+                       pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
                }
 
                ufs->workbasedir = workpath.dentry;
@@ -1156,11 +1165,13 @@ out_put_lowerpath:
 out_free_lowertmp:
        kfree(lowertmp);
 out_unlock_workdentry:
-       ovl_inuse_unlock(workpath.dentry);
+       if (ufs->workdir_locked)
+               ovl_inuse_unlock(workpath.dentry);
 out_put_workpath:
        path_put(&workpath);
 out_unlock_upperdentry:
-       ovl_inuse_unlock(upperpath.dentry);
+       if (ufs->upperdir_locked)
+               ovl_inuse_unlock(upperpath.dentry);
 out_put_upperpath:
        path_put(&upperpath);
 out_free_config:
index 117794582f9fa597d34d8843aa8bdfb7a99cbe76..b9b239fa5cfd28d18b120696d05e817330e44241 100644 (file)
@@ -430,7 +430,7 @@ void ovl_inuse_unlock(struct dentry *dentry)
        }
 }
 
-/* Called must hold OVL_I(inode)->oi_lock */
+/* Caller must hold OVL_I(inode)->lock */
 static void ovl_cleanup_index(struct dentry *dentry)
 {
        struct inode *dir = ovl_indexdir(dentry->d_sb)->d_inode;
@@ -469,6 +469,9 @@ static void ovl_cleanup_index(struct dentry *dentry)
        err = PTR_ERR(index);
        if (!IS_ERR(index))
                err = ovl_cleanup(dir, index);
+       else
+               index = NULL;
+
        inode_unlock(dir);
        if (err)
                goto fail;
@@ -557,3 +560,22 @@ void ovl_nlink_end(struct dentry *dentry, bool locked)
                mutex_unlock(&OVL_I(d_inode(dentry))->lock);
        }
 }
+
+int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir)
+{
+       /* Workdir should not be the same as upperdir */
+       if (workdir == upperdir)
+               goto err;
+
+       /* Workdir should not be subdir of upperdir and vice versa */
+       if (lock_rename(workdir, upperdir) != NULL)
+               goto err_unlock;
+
+       return 0;
+
+err_unlock:
+       unlock_rename(workdir, upperdir);
+err:
+       pr_err("overlayfs: failed to lock workdir+upperdir\n");
+       return -EIO;
+}
index bc6c6e10a9699f789b998035fde33973796f46fa..e9db7fc95b70bb4a2ba531af65d8718f73b61721 100644 (file)
@@ -2122,11 +2122,31 @@ xfs_swap_extents(
                ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
                tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
                tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
+       }
+
+       /* Swap the cow forks. */
+       if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+               xfs_extnum_t    extnum;
+
+               ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
+               ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
+
+               extnum = ip->i_cnextents;
+               ip->i_cnextents = tip->i_cnextents;
+               tip->i_cnextents = extnum;
+
                cowfp = ip->i_cowfp;
                ip->i_cowfp = tip->i_cowfp;
                tip->i_cowfp = cowfp;
-               xfs_inode_set_cowblocks_tag(ip);
-               xfs_inode_set_cowblocks_tag(tip);
+
+               if (ip->i_cowfp && ip->i_cnextents)
+                       xfs_inode_set_cowblocks_tag(ip);
+               else
+                       xfs_inode_clear_cowblocks_tag(ip);
+               if (tip->i_cowfp && tip->i_cnextents)
+                       xfs_inode_set_cowblocks_tag(tip);
+               else
+                       xfs_inode_clear_cowblocks_tag(tip);
        }
 
        xfs_trans_log_inode(tp, ip,  src_log_flags);
index 3246815c24d659d83da380c5cce04dd8ee40a101..37e603bf159137dfd38a1210b5d0bb31e5f4829f 100644 (file)
@@ -736,7 +736,13 @@ xfs_reflink_end_cow(
        /* If there is a hole at end_fsb - 1 go to the previous extent */
        if (!xfs_iext_lookup_extent(ip, ifp, end_fsb - 1, &idx, &got) ||
            got.br_startoff > end_fsb) {
-               ASSERT(idx > 0);
+               /*
+                * In case of racing, overlapping AIO writes no COW extents
+                * might be left by the time I/O completes for the loser of
+                * the race.  In that case we are done.
+                */
+               if (idx <= 0)
+                       goto out_cancel;
                xfs_iext_get_extent(ifp, --idx, &got);
        }
 
@@ -809,6 +815,7 @@ next_extent:
 
 out_defer:
        xfs_defer_cancel(&dfops);
+out_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out:
index 8390859e79e70b9a07f0948e1efaa5adbfb0c99c..f1af7d63d6786673ac087dbfca66310a1236126f 100644 (file)
@@ -368,6 +368,11 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
 {
 }
 
+static inline int bpf_obj_get_user(const char __user *pathname)
+{
+       return -EOPNOTSUPP;
+}
+
 static inline struct net_device  *__dev_map_lookup_elem(struct bpf_map *map,
                                                       u32 key)
 {
index ab05a86269dc1770181340b8b21c460d83412cb9..d491027a7c22059a46e283ffd045fcf080a1b6c0 100644 (file)
@@ -289,6 +289,7 @@ struct hid_item {
 #define HID_DG_DEVICEINDEX     0x000d0053
 #define HID_DG_CONTACTCOUNT    0x000d0054
 #define HID_DG_CONTACTMAX      0x000d0055
+#define HID_DG_SCANTIME                0x000d0056
 #define HID_DG_BUTTONTYPE      0x000d0059
 #define HID_DG_BARRELSWITCH2   0x000d005a
 #define HID_DG_TOOLSERIALNUMBER        0x000d005b
@@ -753,6 +754,7 @@ struct hid_driver {
  * @stop: called on remove
  * @open: called by input layer on open
  * @close: called by input layer on close
+ * @power: request underlying hardware to enter requested power mode
  * @parse: this method is called only once to parse the device data,
  *        shouldn't allocate anything to not leak memory
  * @request: send report request to device (e.g. feature report)
index f3f2d07feb2a627d9b65409b7e5a0c0e53fe54c2..9a43763a68adb3e998ec942300688f99e065489b 100644 (file)
@@ -316,7 +316,7 @@ struct mmc_host {
 #define MMC_CAP_UHS_SDR50      (1 << 18)       /* Host supports UHS SDR50 mode */
 #define MMC_CAP_UHS_SDR104     (1 << 19)       /* Host supports UHS SDR104 mode */
 #define MMC_CAP_UHS_DDR50      (1 << 20)       /* Host supports UHS DDR50 mode */
-#define MMC_CAP_NO_BOUNCE_BUFF (1 << 21)       /* Disable bounce buffers on host */
+/* (1 << 21) is free for reuse */
 #define MMC_CAP_DRIVER_TYPE_A  (1 << 23)       /* Host supports Driver Type A */
 #define MMC_CAP_DRIVER_TYPE_C  (1 << 24)       /* Host supports Driver Type C */
 #define MMC_CAP_DRIVER_TYPE_D  (1 << 25)       /* Host supports Driver Type D */
index 2c2a5514b0df98a0fd92294aad15b00855c9256a..528b24c78308e7fe128027f1717863d68e081cbc 100644 (file)
@@ -108,9 +108,10 @@ struct ebt_table {
 
 #define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
                     ~(__alignof__(struct _xt_align)-1))
-extern struct ebt_table *ebt_register_table(struct net *net,
-                                           const struct ebt_table *table,
-                                           const struct nf_hook_ops *);
+extern int ebt_register_table(struct net *net,
+                             const struct ebt_table *table,
+                             const struct nf_hook_ops *ops,
+                             struct ebt_table **res);
 extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
                                 const struct nf_hook_ops *);
 extern unsigned int ebt_do_table(struct sk_buff *skb,
index a36abe2da13e1a23edc8aa152205af143f658990..27e249ed7c5c8dfcbd969e347ff844a56899edc7 100644 (file)
 
 #ifdef CONFIG_LOCKUP_DETECTOR
 void lockup_detector_init(void);
+void lockup_detector_soft_poweroff(void);
+void lockup_detector_cleanup(void);
+bool is_hardlockup(void);
+
+extern int watchdog_user_enabled;
+extern int nmi_watchdog_user_enabled;
+extern int soft_watchdog_user_enabled;
+extern int watchdog_thresh;
+extern unsigned long watchdog_enabled;
+
+extern struct cpumask watchdog_cpumask;
+extern unsigned long *watchdog_cpumask_bits;
+#ifdef CONFIG_SMP
+extern int sysctl_softlockup_all_cpu_backtrace;
+extern int sysctl_hardlockup_all_cpu_backtrace;
 #else
-static inline void lockup_detector_init(void)
-{
-}
-#endif
+#define sysctl_softlockup_all_cpu_backtrace 0
+#define sysctl_hardlockup_all_cpu_backtrace 0
+#endif /* !CONFIG_SMP */
+
+#else /* CONFIG_LOCKUP_DETECTOR */
+static inline void lockup_detector_init(void) { }
+static inline void lockup_detector_soft_poweroff(void) { }
+static inline void lockup_detector_cleanup(void) { }
+#endif /* !CONFIG_LOCKUP_DETECTOR */
 
 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
 extern void touch_softlockup_watchdog_sched(void);
@@ -24,29 +44,17 @@ extern void touch_softlockup_watchdog(void);
 extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern unsigned int  softlockup_panic;
-extern int soft_watchdog_enabled;
-extern atomic_t watchdog_park_in_progress;
 #else
-static inline void touch_softlockup_watchdog_sched(void)
-{
-}
-static inline void touch_softlockup_watchdog(void)
-{
-}
-static inline void touch_softlockup_watchdog_sync(void)
-{
-}
-static inline void touch_all_softlockup_watchdogs(void)
-{
-}
+static inline void touch_softlockup_watchdog_sched(void) { }
+static inline void touch_softlockup_watchdog(void) { }
+static inline void touch_softlockup_watchdog_sync(void) { }
+static inline void touch_all_softlockup_watchdogs(void) { }
 #endif
 
 #ifdef CONFIG_DETECT_HUNG_TASK
 void reset_hung_task_detector(void);
 #else
-static inline void reset_hung_task_detector(void)
-{
-}
+static inline void reset_hung_task_detector(void) { }
 #endif
 
 /*
@@ -54,12 +62,12 @@ static inline void reset_hung_task_detector(void)
  * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
  * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
  *
- * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
- * are variables that are only used as an 'interface' between the parameters
- * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
- * 'watchdog_thresh' variable is handled differently because its value is not
- * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
- * is equal zero.
+ * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
+ * 'soft_watchdog_user_enabled' are variables that are only used as an
+ * 'interface' between the parameters in /proc/sys/kernel and the internal
+ * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
+ * handled differently because its value is not boolean, and the lockup
+ * detectors are 'suspended' while 'watchdog_thresh' is equal to zero.
  */
 #define NMI_WATCHDOG_ENABLED_BIT   0
 #define SOFT_WATCHDOG_ENABLED_BIT  1
@@ -73,17 +81,41 @@ extern unsigned int hardlockup_panic;
 static inline void hardlockup_detector_disable(void) {}
 #endif
 
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+# define NMI_WATCHDOG_SYSCTL_PERM      0644
+#else
+# define NMI_WATCHDOG_SYSCTL_PERM      0444
+#endif
+
 #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
 extern void arch_touch_nmi_watchdog(void);
+extern void hardlockup_detector_perf_stop(void);
+extern void hardlockup_detector_perf_restart(void);
+extern void hardlockup_detector_perf_disable(void);
+extern void hardlockup_detector_perf_enable(void);
+extern void hardlockup_detector_perf_cleanup(void);
+extern int hardlockup_detector_perf_init(void);
 #else
-#if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline void hardlockup_detector_perf_stop(void) { }
+static inline void hardlockup_detector_perf_restart(void) { }
+static inline void hardlockup_detector_perf_disable(void) { }
+static inline void hardlockup_detector_perf_enable(void) { }
+static inline void hardlockup_detector_perf_cleanup(void) { }
+# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
 static inline void arch_touch_nmi_watchdog(void) {}
+# else
+static inline int hardlockup_detector_perf_init(void) { return 0; }
+# endif
 #endif
-#endif
+
+void watchdog_nmi_stop(void);
+void watchdog_nmi_start(void);
+int watchdog_nmi_probe(void);
 
 /**
  * touch_nmi_watchdog - restart NMI watchdog timeout.
- * 
+ *
  * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
  * may be used to reset the timeout - for code which intentionally
  * disables interrupts for a long time. This call is stateless.
@@ -153,22 +185,6 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
 u64 hw_nmi_get_sample_period(int watchdog_thresh);
 #endif
 
-#ifdef CONFIG_LOCKUP_DETECTOR
-extern int nmi_watchdog_enabled;
-extern int watchdog_user_enabled;
-extern int watchdog_thresh;
-extern unsigned long watchdog_enabled;
-extern struct cpumask watchdog_cpumask;
-extern unsigned long *watchdog_cpumask_bits;
-extern int __read_mostly watchdog_suspended;
-#ifdef CONFIG_SMP
-extern int sysctl_softlockup_all_cpu_backtrace;
-extern int sysctl_hardlockup_all_cpu_backtrace;
-#else
-#define sysctl_softlockup_all_cpu_backtrace 0
-#define sysctl_hardlockup_all_cpu_backtrace 0
-#endif
-
 #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
     defined(CONFIG_HARDLOCKUP_DETECTOR)
 void watchdog_update_hrtimer_threshold(u64 period);
@@ -176,7 +192,6 @@ void watchdog_update_hrtimer_threshold(u64 period);
 static inline void watchdog_update_hrtimer_threshold(u64 period) { }
 #endif
 
-extern bool is_hardlockup(void);
 struct ctl_table;
 extern int proc_watchdog(struct ctl_table *, int ,
                         void __user *, size_t *, loff_t *);
@@ -188,18 +203,6 @@ extern int proc_watchdog_thresh(struct ctl_table *, int ,
                                void __user *, size_t *, loff_t *);
 extern int proc_watchdog_cpumask(struct ctl_table *, int,
                                 void __user *, size_t *, loff_t *);
-extern int lockup_detector_suspend(void);
-extern void lockup_detector_resume(void);
-#else
-static inline int lockup_detector_suspend(void)
-{
-       return 0;
-}
-
-static inline void lockup_detector_resume(void)
-{
-}
-#endif
 
 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
 #include <asm/nmi.h>
index 12910cf19869c7db32d1ca34ddda0931e80570d1..c149aa7bedf306c8a04d9fd61cb8930af12d416f 100644 (file)
@@ -55,7 +55,7 @@ smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
 }
 
 void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-                                        const struct cpumask *);
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+                                         const struct cpumask *);
 
 #endif
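
Since the helper now returns void and (per the kernel/smpboot.c hunk further down) asserts that CPU hotplug is held, a hypothetical caller would look roughly like the sketch below; example_update_thread_mask() and its arguments are illustrative, not part of this series:

    #include <linux/cpu.h>
    #include <linux/smpboot.h>

    static void example_update_thread_mask(struct smp_hotplug_thread *thread,
                                           const struct cpumask *new_mask)
    {
            cpus_read_lock();       /* required: the helper asserts cpus are held */
            smpboot_update_cpumask_percpu_thread(thread, new_mask);
            cpus_read_unlock();
    }
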
index 82e93ee94708c9f7de073b3e6c47826664525075..67c5a9f223f77aa19d6f5c8b7badba1ecf138299 100644 (file)
@@ -192,6 +192,7 @@ struct scsi_device {
        unsigned no_dif:1;      /* T10 PI (DIF) should be disabled */
        unsigned broken_fua:1;          /* Don't set FUA bit */
        unsigned lun_in_cdb:1;          /* Store LUN bits in CDB[1] */
+       unsigned unmap_limit_for_ws:1;  /* Use the UNMAP limit for WRITE SAME */
 
        atomic_t disk_events_disable_depth; /* disable depth for disk events */
 
index 9592570e092a3bba93065d6e66a21665f8bd52ae..36b03013d6291cf9fda3c99eec690d3841f0ca5e 100644 (file)
@@ -29,5 +29,6 @@
 #define BLIST_TRY_VPD_PAGES    0x10000000 /* Attempt to read VPD pages */
 #define BLIST_NO_RSOC          0x20000000 /* don't try to issue RSOC */
 #define BLIST_MAX_1024         0x40000000 /* maximum 1024 sector cdb length */
+#define BLIST_UNMAP_LIMIT_WS   0x80000000 /* Use UNMAP limit for WRITE SAME */
 
 #endif
index 6183d20a01fbe8f94bff514c8c8caa7585185b62..b266d2a3bcb1d099ed4a16237a8f06ab820714d5 100644 (file)
@@ -434,7 +434,6 @@ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
                                                unsigned int target_id);
 extern void iscsi_remove_session(struct iscsi_cls_session *session);
 extern void iscsi_free_session(struct iscsi_cls_session *session);
-extern int iscsi_destroy_session(struct iscsi_cls_session *session);
 extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
                                                int dd_size, uint32_t cid);
 extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
index d0509db6d0ec56b8034388dcf1d3492c2dd7955e..f89cd5ee1c7aae51f6975d31b5e43007fc4499e0 100644 (file)
@@ -95,6 +95,7 @@ enum {
 #define AC_VERB_SET_EAPD_BTLENABLE             0x70c
 #define AC_VERB_SET_DIGI_CONVERT_1             0x70d
 #define AC_VERB_SET_DIGI_CONVERT_2             0x70e
+#define AC_VERB_SET_DIGI_CONVERT_3             0x73e
 #define AC_VERB_SET_VOLUME_KNOB_CONTROL                0x70f
 #define AC_VERB_SET_GPIO_DATA                  0x715
 #define AC_VERB_SET_GPIO_MASK                  0x716
index a03acd0d398a433d0d93af683416e5eb1e0688e6..695257ae64acccc1df2fc68e8b93b2744d5c98e8 100644 (file)
@@ -60,6 +60,7 @@ struct snd_virmidi_dev {
        int port;                       /* created/attached port */
        unsigned int flags;             /* SNDRV_VIRMIDI_* */
        rwlock_t filelist_lock;
+       struct rw_semaphore filelist_sem;
        struct list_head filelist;
 };
 
index 412c06a624c83674e02f1d6669c4201d2a6c6e1b..ccaea525340b9d857a9f4fe8a5834def9c9367be 100644 (file)
@@ -269,9 +269,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY    _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR       4
-#define DM_VERSION_MINOR       36
+#define DM_VERSION_MINOR       37
 #define DM_VERSION_PATCHLEVEL  0
-#define DM_VERSION_EXTRA       "-ioctl (2017-06-09)"
+#define DM_VERSION_EXTRA       "-ioctl (2017-09-20)"
 
 /* Status bits */
 #define DM_READONLY_FLAG       (1 << 0) /* In/Out */
index b97725af2ac0ef14893ec459701a2d1ae4ac76a4..da161b56c79e443afacce238d4a120b373673db9 100644 (file)
@@ -23,6 +23,7 @@ enum xt_bpf_modes {
        XT_BPF_MODE_FD_PINNED,
        XT_BPF_MODE_FD_ELF,
 };
+#define XT_BPF_MODE_PATH_PINNED XT_BPF_MODE_FD_PINNED
 
 struct xt_bpf_info_v1 {
        __u16 mode;
index e833ed91435832dc4d822ad4ed1c4a6a8a5893cf..be1dde967208eb19d1f92872275df5fdce437024 100644 (file)
@@ -363,6 +363,7 @@ out:
        putname(pname);
        return ret;
 }
+EXPORT_SYMBOL_GPL(bpf_obj_get_user);
 
 static void bpf_evict_inode(struct inode *inode)
 {
index b914fbe1383e83fa9609944bc54358456794f470..8b8d6ba39e238cb5eea09a71260ddc55630faa6f 100644 (file)
@@ -653,6 +653,10 @@ static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
 {
        struct bpf_verifier_state *parent = state->parent;
 
+       if (regno == BPF_REG_FP)
+               /* We don't need to worry about FP liveness because it's read-only */
+               return;
+
        while (parent) {
                /* if read wasn't screened by an earlier write ... */
                if (state->regs[regno].live & REG_LIVE_WRITTEN)
@@ -2345,6 +2349,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                 * copy register state to dest reg
                                 */
                                regs[insn->dst_reg] = regs[insn->src_reg];
+                               regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
                        } else {
                                /* R1 = (u32) R2 */
                                if (is_pointer_value(env, insn->src_reg)) {
index 8de11a29e4955d846c0d1d90a2c0b3a207e21a3d..d851df22f5c5eef24b9f4e3ead6e986e4b87352f 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/lockdep.h>
 #include <linux/tick.h>
 #include <linux/irq.h>
+#include <linux/nmi.h>
 #include <linux/smpboot.h>
 #include <linux/relay.h>
 #include <linux/slab.h>
@@ -897,6 +898,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 
 out:
        cpus_write_unlock();
+       /*
+        * Do post unplug cleanup. This is still protected against
+        * concurrent CPU hotplug via cpu_add_remove_lock.
+        */
+       lockup_detector_cleanup();
        return ret;
 }
 
index f2cd53e92147c35c43d773b1eb688098fe4b7db3..cf28528842bcf54f4125517fd6c7521b3c79685c 100644 (file)
@@ -1610,6 +1610,9 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
        if (!infop)
                return err;
 
+       if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
+               goto Efault;
+
        user_access_begin();
        unsafe_put_user(signo, &infop->si_signo, Efault);
        unsafe_put_user(0, &infop->si_errno, Efault);
@@ -1735,6 +1738,9 @@ COMPAT_SYSCALL_DEFINE5(waitid,
        if (!infop)
                return err;
 
+       if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
+               goto Efault;
+
        user_access_begin();
        unsafe_put_user(signo, &infop->si_signo, Efault);
        unsafe_put_user(0, &infop->si_errno, Efault);
index 3e2b4f519009709c61b8de20e7f159efd159df04..ccd2d20e6b067f6a7c3bb83a5848e9c0b333fade 100644 (file)
@@ -120,22 +120,26 @@ static void s2idle_loop(void)
                 * frozen processes + suspended devices + idle processors.
                 * Thus s2idle_enter() should be called right after
                 * all devices have been suspended.
+                *
+                * Wakeups during the noirq suspend of devices may be spurious,
+                * so prevent them from terminating the loop right away.
                 */
                error = dpm_noirq_suspend_devices(PMSG_SUSPEND);
                if (!error)
                        s2idle_enter();
+               else if (error == -EBUSY && pm_wakeup_pending())
+                       error = 0;
 
-               dpm_noirq_resume_devices(PMSG_RESUME);
-               if (error && (error != -EBUSY || !pm_wakeup_pending())) {
-                       dpm_noirq_end();
-                       break;
-               }
-
-               if (s2idle_ops && s2idle_ops->wake)
+               if (!error && s2idle_ops && s2idle_ops->wake)
                        s2idle_ops->wake();
 
+               dpm_noirq_resume_devices(PMSG_RESUME);
+
                dpm_noirq_end();
 
+               if (error)
+                       break;
+
                if (s2idle_ops && s2idle_ops->sync)
                        s2idle_ops->sync();
 
index bb3a38005b9cc3f3b9f80d49c40bb9428e12a20d..0ae832e13b974041002c508f03a7c9cc8d6d7a6d 100644 (file)
@@ -473,7 +473,7 @@ static long seccomp_attach_filter(unsigned int flags,
        return 0;
 }
 
-void __get_seccomp_filter(struct seccomp_filter *filter)
+static void __get_seccomp_filter(struct seccomp_filter *filter)
 {
        /* Reference count is bounded by the number of total processes. */
        refcount_inc(&filter->usage);
index 1d71c051a9515c6acecd4be823465a4760e7cefa..5043e7433f4b15879a6498ed3d1ca6cfa2876f83 100644 (file)
@@ -344,39 +344,30 @@ EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
  * by the client, but only by calling this function.
  * This function can only be called on a registered smp_hotplug_thread.
  */
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-                                        const struct cpumask *new)
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+                                         const struct cpumask *new)
 {
        struct cpumask *old = plug_thread->cpumask;
-       cpumask_var_t tmp;
+       static struct cpumask tmp;
        unsigned int cpu;
 
-       if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
-               return -ENOMEM;
-
-       get_online_cpus();
+       lockdep_assert_cpus_held();
        mutex_lock(&smpboot_threads_lock);
 
        /* Park threads that were exclusively enabled on the old mask. */
-       cpumask_andnot(tmp, old, new);
-       for_each_cpu_and(cpu, tmp, cpu_online_mask)
+       cpumask_andnot(&tmp, old, new);
+       for_each_cpu_and(cpu, &tmp, cpu_online_mask)
                smpboot_park_thread(plug_thread, cpu);
 
        /* Unpark threads that are exclusively enabled on the new mask. */
-       cpumask_andnot(tmp, new, old);
-       for_each_cpu_and(cpu, tmp, cpu_online_mask)
+       cpumask_andnot(&tmp, new, old);
+       for_each_cpu_and(cpu, &tmp, cpu_online_mask)
                smpboot_unpark_thread(plug_thread, cpu);
 
        cpumask_copy(old, new);
 
        mutex_unlock(&smpboot_threads_lock);
-       put_online_cpus();
-
-       free_cpumask_var(tmp);
-
-       return 0;
 }
-EXPORT_SYMBOL_GPL(smpboot_update_cpumask_percpu_thread);
 
 static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
 
index 4da9e622471f670c01356fab3b5311e95eb446e9..d9c31bc2eaea2c95a7a7be5a5321700b84d8f640 100644 (file)
@@ -872,9 +872,9 @@ static struct ctl_table kern_table[] = {
 #if defined(CONFIG_LOCKUP_DETECTOR)
        {
                .procname       = "watchdog",
-               .data           = &watchdog_user_enabled,
-               .maxlen         = sizeof (int),
-               .mode           = 0644,
+               .data           = &watchdog_user_enabled,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
                .proc_handler   = proc_watchdog,
                .extra1         = &zero,
                .extra2         = &one,
@@ -890,16 +890,12 @@ static struct ctl_table kern_table[] = {
        },
        {
                .procname       = "nmi_watchdog",
-               .data           = &nmi_watchdog_enabled,
-               .maxlen         = sizeof (int),
-               .mode           = 0644,
+               .data           = &nmi_watchdog_user_enabled,
+               .maxlen         = sizeof(int),
+               .mode           = NMI_WATCHDOG_SYSCTL_PERM,
                .proc_handler   = proc_nmi_watchdog,
                .extra1         = &zero,
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
                .extra2         = &one,
-#else
-               .extra2         = &zero,
-#endif
        },
        {
                .procname       = "watchdog_cpumask",
@@ -911,9 +907,9 @@ static struct ctl_table kern_table[] = {
 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
        {
                .procname       = "soft_watchdog",
-               .data           = &soft_watchdog_enabled,
-               .maxlen         = sizeof (int),
-               .mode           = 0644,
+               .data           = &soft_watchdog_user_enabled,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
                .proc_handler   = proc_soft_watchdog,
                .extra1         = &zero,
                .extra2         = &one,
index f5d52024f6b72a9d1354b1a44c12c3e3b6af06a9..6bcb854909c0b6f1563469ae3517fa1f89c43268 100644 (file)
 #include <linux/kvm_para.h>
 #include <linux/kthread.h>
 
-/* Watchdog configuration */
-static DEFINE_MUTEX(watchdog_proc_mutex);
-
-int __read_mostly nmi_watchdog_enabled;
+static DEFINE_MUTEX(watchdog_mutex);
 
 #if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
-unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED |
-                                               NMI_WATCHDOG_ENABLED;
+# define WATCHDOG_DEFAULT      (SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
+# define NMI_WATCHDOG_DEFAULT  1
 #else
-unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
+# define WATCHDOG_DEFAULT      (SOFT_WATCHDOG_ENABLED)
+# define NMI_WATCHDOG_DEFAULT  0
 #endif
 
+unsigned long __read_mostly watchdog_enabled;
+int __read_mostly watchdog_user_enabled = 1;
+int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
+int __read_mostly soft_watchdog_user_enabled = 1;
+int __read_mostly watchdog_thresh = 10;
+int __read_mostly nmi_watchdog_available;
+
+struct cpumask watchdog_allowed_mask __read_mostly;
+
+struct cpumask watchdog_cpumask __read_mostly;
+unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
+
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
-/* boot commands */
 /*
  * Should we panic when a soft-lockup or hard-lockup occurs:
  */
@@ -56,9 +65,9 @@ unsigned int __read_mostly hardlockup_panic =
  * kernel command line parameters are parsed, because otherwise it is not
  * possible to override this in hardlockup_panic_setup().
  */
-void hardlockup_detector_disable(void)
+void __init hardlockup_detector_disable(void)
 {
-       watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+       nmi_watchdog_user_enabled = 0;
 }
 
 static int __init hardlockup_panic_setup(char *str)
@@ -68,48 +77,24 @@ static int __init hardlockup_panic_setup(char *str)
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
-               watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+               nmi_watchdog_user_enabled = 0;
        else if (!strncmp(str, "1", 1))
-               watchdog_enabled |= NMI_WATCHDOG_ENABLED;
+               nmi_watchdog_user_enabled = 1;
        return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
 
-#endif
-
-#ifdef CONFIG_SOFTLOCKUP_DETECTOR
-int __read_mostly soft_watchdog_enabled;
-#endif
-
-int __read_mostly watchdog_user_enabled;
-int __read_mostly watchdog_thresh = 10;
-
-#ifdef CONFIG_SMP
-int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+# ifdef CONFIG_SMP
 int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
-#endif
-struct cpumask watchdog_cpumask __read_mostly;
-unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
 
-/*
- * The 'watchdog_running' variable is set to 1 when the watchdog threads
- * are registered/started and is set to 0 when the watchdog threads are
- * unregistered/stopped, so it is an indicator whether the threads exist.
- */
-static int __read_mostly watchdog_running;
-/*
- * If a subsystem has a need to deactivate the watchdog temporarily, it
- * can use the suspend/resume interface to achieve this. The content of
- * the 'watchdog_suspended' variable reflects this state. Existing threads
- * are parked/unparked by the lockup_detector_{suspend|resume} functions
- * (see comment blocks pertaining to those functions for further details).
- *
- * 'watchdog_suspended' also prevents threads from being registered/started
- * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
- * of 'watchdog_running' cannot change while the watchdog is deactivated
- * temporarily (see related code in 'proc' handlers).
- */
-int __read_mostly watchdog_suspended;
+static int __init hardlockup_all_cpu_backtrace_setup(char *str)
+{
+       sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
+       return 1;
+}
+__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
+# endif /* CONFIG_SMP */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
 /*
  * These functions can be overridden if an architecture implements its
@@ -121,36 +106,68 @@ int __read_mostly watchdog_suspended;
  */
 int __weak watchdog_nmi_enable(unsigned int cpu)
 {
+       hardlockup_detector_perf_enable();
        return 0;
 }
+
 void __weak watchdog_nmi_disable(unsigned int cpu)
 {
+       hardlockup_detector_perf_disable();
 }
 
-/*
- * watchdog_nmi_reconfigure can be implemented to be notified after any
- * watchdog configuration change. The arch hardlockup watchdog should
- * respond to the following variables:
- * - nmi_watchdog_enabled
+/* Return 0 if an NMI watchdog is available, an error code otherwise. */
+int __weak __init watchdog_nmi_probe(void)
+{
+       return hardlockup_detector_perf_init();
+}
+
+/**
+ * watchdog_nmi_stop - Stop the watchdog for reconfiguration
+ *
+ * The reconfiguration steps are:
+ * watchdog_nmi_stop();
+ * update_variables();
+ * watchdog_nmi_start();
+ */
+void __weak watchdog_nmi_stop(void) { }
+
+/**
+ * watchdog_nmi_start - Start the watchdog after reconfiguration
+ *
+ * Counterpart to watchdog_nmi_stop().
+ *
+ * The following variables have been updated in update_variables() and
+ * contain the currently valid configuration:
+ * - watchdog_enabled
  * - watchdog_thresh
  * - watchdog_cpumask
- * - sysctl_hardlockup_all_cpu_backtrace
- * - hardlockup_panic
- * - watchdog_suspended
  */
-void __weak watchdog_nmi_reconfigure(void)
+void __weak watchdog_nmi_start(void) { }
+
+/**
+ * lockup_detector_update_enable - Update the sysctl enable bit
+ *
+ * Caller needs to make sure that the NMI/perf watchdogs are off, so this
+ * can't race with watchdog_nmi_disable().
+ */
+static void lockup_detector_update_enable(void)
 {
+       watchdog_enabled = 0;
+       if (!watchdog_user_enabled)
+               return;
+       if (nmi_watchdog_available && nmi_watchdog_user_enabled)
+               watchdog_enabled |= NMI_WATCHDOG_ENABLED;
+       if (soft_watchdog_user_enabled)
+               watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
 }
 
-
 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
 
-/* Helper for online, unparked cpus. */
-#define for_each_watchdog_cpu(cpu) \
-       for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
-
-atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+/* Global variables, exported for sysctl */
+unsigned int __read_mostly softlockup_panic =
+                       CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
 
+static bool softlockup_threads_initialized __read_mostly;
 static u64 __read_mostly sample_period;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -164,50 +181,40 @@ static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static unsigned long soft_lockup_nmi_warn;
 
-unsigned int __read_mostly softlockup_panic =
-                       CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
-
 static int __init softlockup_panic_setup(char *str)
 {
        softlockup_panic = simple_strtoul(str, NULL, 0);
-
        return 1;
 }
 __setup("softlockup_panic=", softlockup_panic_setup);
 
 static int __init nowatchdog_setup(char *str)
 {
-       watchdog_enabled = 0;
+       watchdog_user_enabled = 0;
        return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
 
 static int __init nosoftlockup_setup(char *str)
 {
-       watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
+       soft_watchdog_user_enabled = 0;
        return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
 
 #ifdef CONFIG_SMP
+int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+
 static int __init softlockup_all_cpu_backtrace_setup(char *str)
 {
-       sysctl_softlockup_all_cpu_backtrace =
-               !!simple_strtol(str, NULL, 0);
+       sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
        return 1;
 }
 __setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static int __init hardlockup_all_cpu_backtrace_setup(char *str)
-{
-       sysctl_hardlockup_all_cpu_backtrace =
-               !!simple_strtol(str, NULL, 0);
-       return 1;
-}
-__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
-#endif
 #endif
 
+static void __lockup_detector_cleanup(void);
+
 /*
  * Hard-lockup warnings should be triggered after just a few seconds. Soft-
  * lockups can have false positives under extreme conditions. So we generally
@@ -278,11 +285,15 @@ void touch_all_softlockup_watchdogs(void)
        int cpu;
 
        /*
-        * this is done lockless
-        * do we care if a 0 races with a timestamp?
-        * all it means is the softlock check starts one cycle later
+        * watchdog_mutex cannot be taken here, as this might be called
+        * from (soft)interrupt context, so the access to
+        * watchdog_allowed_mask might race with a concurrent update.
+        *
+        * The watchdog time stamp can race against a concurrent real
+        * update as well, the only side effect might be a cycle delay for
+        * the softlockup check.
         */
-       for_each_watchdog_cpu(cpu)
+       for_each_cpu(cpu, &watchdog_allowed_mask)
                per_cpu(watchdog_touch_ts, cpu) = 0;
        wq_watchdog_touch(-1);
 }
@@ -322,9 +333,6 @@ static void watchdog_interrupt_count(void)
        __this_cpu_inc(hrtimer_interrupts);
 }
 
-static int watchdog_enable_all_cpus(void);
-static void watchdog_disable_all_cpus(void);
-
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
@@ -333,7 +341,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
-       if (atomic_read(&watchdog_park_in_progress) != 0)
+       if (!watchdog_enabled)
                return HRTIMER_NORESTART;
 
        /* kick the hardlockup detector */
@@ -447,32 +455,38 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio)
 
 static void watchdog_enable(unsigned int cpu)
 {
-       struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+       struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
 
-       /* kick off the timer for the hardlockup detector */
+       /*
+        * Start the timer first to prevent the NMI watchdog triggering
+        * before the timer has a chance to fire.
+        */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;
-
-       /* Enable the perf event */
-       watchdog_nmi_enable(cpu);
-
-       /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);
 
-       /* initialize timestamp */
-       watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
+       /* Initialize timestamp */
        __touch_watchdog();
+       /* Enable the perf event */
+       if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
+               watchdog_nmi_enable(cpu);
+
+       watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
 }
 
 static void watchdog_disable(unsigned int cpu)
 {
-       struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+       struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
 
        watchdog_set_prio(SCHED_NORMAL, 0);
-       hrtimer_cancel(hrtimer);
-       /* disable the perf event */
+       /*
+        * Disable the perf event first, to prevent a false positive from the
+        * perf NMI if there is a large delay between disabling the timer and
+        * disabling the perf event.
+        */
        watchdog_nmi_disable(cpu);
+       hrtimer_cancel(hrtimer);
 }
 
 static void watchdog_cleanup(unsigned int cpu, bool online)
@@ -499,21 +513,6 @@ static void watchdog(unsigned int cpu)
        __this_cpu_write(soft_lockup_hrtimer_cnt,
                         __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();
-
-       /*
-        * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
-        * failure path. Check for failures that can occur asynchronously -
-        * for example, when CPUs are on-lined - and shut down the hardware
-        * perf event on each CPU accordingly.
-        *
-        * The only non-obvious place this bit can be cleared is through
-        * watchdog_nmi_enable(), so a pr_info() is placed there.  Placing a
-        * pr_info here would be too noisy as it would result in a message
-        * every few seconds if the hardlockup was disabled but the softlockup
-        * enabled.
-        */
-       if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
-               watchdog_nmi_disable(cpu);
 }
 
 static struct smp_hotplug_thread watchdog_threads = {
@@ -527,295 +526,174 @@ static struct smp_hotplug_thread watchdog_threads = {
        .unpark                 = watchdog_enable,
 };
 
-/*
- * park all watchdog threads that are specified in 'watchdog_cpumask'
- *
- * This function returns an error if kthread_park() of a watchdog thread
- * fails. In this situation, the watchdog threads of some CPUs can already
- * be parked and the watchdog threads of other CPUs can still be runnable.
- * Callers are expected to handle this special condition as appropriate in
- * their context.
- *
- * This function may only be called in a context that is protected against
- * races with CPU hotplug - for example, via get_online_cpus().
- */
-static int watchdog_park_threads(void)
+static void softlockup_update_smpboot_threads(void)
 {
-       int cpu, ret = 0;
+       lockdep_assert_held(&watchdog_mutex);
 
-       atomic_set(&watchdog_park_in_progress, 1);
+       if (!softlockup_threads_initialized)
+               return;
 
-       for_each_watchdog_cpu(cpu) {
-               ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
-               if (ret)
-                       break;
-       }
-
-       atomic_set(&watchdog_park_in_progress, 0);
-
-       return ret;
+       smpboot_update_cpumask_percpu_thread(&watchdog_threads,
+                                            &watchdog_allowed_mask);
 }
 
-/*
- * unpark all watchdog threads that are specified in 'watchdog_cpumask'
- *
- * This function may only be called in a context that is protected against
- * races with CPU hotplug - for example, via get_online_cpus().
- */
-static void watchdog_unpark_threads(void)
+/* Temporarily park all watchdog threads */
+static void softlockup_park_all_threads(void)
 {
-       int cpu;
-
-       for_each_watchdog_cpu(cpu)
-               kthread_unpark(per_cpu(softlockup_watchdog, cpu));
+       cpumask_clear(&watchdog_allowed_mask);
+       softlockup_update_smpboot_threads();
 }
 
-static int update_watchdog_all_cpus(void)
+/* Unpark enabled threads */
+static void softlockup_unpark_threads(void)
 {
-       int ret;
-
-       ret = watchdog_park_threads();
-       if (ret)
-               return ret;
-
-       watchdog_unpark_threads();
-
-       return 0;
+       cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
+       softlockup_update_smpboot_threads();
 }
 
-static int watchdog_enable_all_cpus(void)
+static void lockup_detector_reconfigure(void)
 {
-       int err = 0;
-
-       if (!watchdog_running) {
-               err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
-                                                            &watchdog_cpumask);
-               if (err)
-                       pr_err("Failed to create watchdog threads, disabled\n");
-               else
-                       watchdog_running = 1;
-       } else {
-               /*
-                * Enable/disable the lockup detectors or
-                * change the sample period 'on the fly'.
-                */
-               err = update_watchdog_all_cpus();
-
-               if (err) {
-                       watchdog_disable_all_cpus();
-                       pr_err("Failed to update lockup detectors, disabled\n");
-               }
-       }
-
-       if (err)
-               watchdog_enabled = 0;
-
-       return err;
+       cpus_read_lock();
+       watchdog_nmi_stop();
+       softlockup_park_all_threads();
+       set_sample_period();
+       lockup_detector_update_enable();
+       if (watchdog_enabled && watchdog_thresh)
+               softlockup_unpark_threads();
+       watchdog_nmi_start();
+       cpus_read_unlock();
+       /*
+        * Must be called outside the cpus locked section to prevent
+        * recursive locking in the perf code.
+        */
+       __lockup_detector_cleanup();
 }
 
-static void watchdog_disable_all_cpus(void)
+/*
+ * Create the watchdog thread infrastructure and configure the detector(s).
+ *
+ * The threads are not unparked as watchdog_allowed_mask is empty. When
+ * the threads are successfully initialized, take the proper locks and
+ * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
+ */
+static __init void lockup_detector_setup(void)
 {
-       if (watchdog_running) {
-               watchdog_running = 0;
-               smpboot_unregister_percpu_thread(&watchdog_threads);
-       }
-}
+       int ret;
 
-#ifdef CONFIG_SYSCTL
-static int watchdog_update_cpus(void)
-{
-       return smpboot_update_cpumask_percpu_thread(
-                   &watchdog_threads, &watchdog_cpumask);
-}
-#endif
+       /*
+        * If sysctl is off and watchdog got disabled on the command line,
+        * nothing to do here.
+        */
+       lockup_detector_update_enable();
 
-#else /* SOFTLOCKUP */
-static int watchdog_park_threads(void)
-{
-       return 0;
-}
+       if (!IS_ENABLED(CONFIG_SYSCTL) &&
+           !(watchdog_enabled && watchdog_thresh))
+               return;
 
-static void watchdog_unpark_threads(void)
-{
-}
+       ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
+                                                    &watchdog_allowed_mask);
+       if (ret) {
+               pr_err("Failed to initialize soft lockup detector threads\n");
+               return;
+       }
 
-static int watchdog_enable_all_cpus(void)
-{
-       return 0;
+       mutex_lock(&watchdog_mutex);
+       softlockup_threads_initialized = true;
+       lockup_detector_reconfigure();
+       mutex_unlock(&watchdog_mutex);
 }
 
-static void watchdog_disable_all_cpus(void)
+#else /* CONFIG_SOFTLOCKUP_DETECTOR */
+static inline int watchdog_park_threads(void) { return 0; }
+static inline void watchdog_unpark_threads(void) { }
+static inline int watchdog_enable_all_cpus(void) { return 0; }
+static inline void watchdog_disable_all_cpus(void) { }
+static void lockup_detector_reconfigure(void)
 {
+       cpus_read_lock();
+       watchdog_nmi_stop();
+       lockup_detector_update_enable();
+       watchdog_nmi_start();
+       cpus_read_unlock();
 }
-
-#ifdef CONFIG_SYSCTL
-static int watchdog_update_cpus(void)
+static inline void lockup_detector_setup(void)
 {
-       return 0;
+       lockup_detector_reconfigure();
 }
-#endif
+#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
 
-static void set_sample_period(void)
+static void __lockup_detector_cleanup(void)
 {
+       lockdep_assert_held(&watchdog_mutex);
+       hardlockup_detector_perf_cleanup();
 }
-#endif /* SOFTLOCKUP */
 
-/*
- * Suspend the hard and soft lockup detector by parking the watchdog threads.
+/**
+ * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
+ *
+ * Caller must not hold the cpu hotplug rwsem.
  */
-int lockup_detector_suspend(void)
+void lockup_detector_cleanup(void)
 {
-       int ret = 0;
-
-       get_online_cpus();
-       mutex_lock(&watchdog_proc_mutex);
-       /*
-        * Multiple suspend requests can be active in parallel (counted by
-        * the 'watchdog_suspended' variable). If the watchdog threads are
-        * running, the first caller takes care that they will be parked.
-        * The state of 'watchdog_running' cannot change while a suspend
-        * request is active (see related code in 'proc' handlers).
-        */
-       if (watchdog_running && !watchdog_suspended)
-               ret = watchdog_park_threads();
-
-       if (ret == 0)
-               watchdog_suspended++;
-       else {
-               watchdog_disable_all_cpus();
-               pr_err("Failed to suspend lockup detectors, disabled\n");
-               watchdog_enabled = 0;
-       }
-
-       watchdog_nmi_reconfigure();
-
-       mutex_unlock(&watchdog_proc_mutex);
-
-       return ret;
+       mutex_lock(&watchdog_mutex);
+       __lockup_detector_cleanup();
+       mutex_unlock(&watchdog_mutex);
 }
 
-/*
- * Resume the hard and soft lockup detector by unparking the watchdog threads.
+/**
+ * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
+ *
+ * Special interface for parisc. It prevents lockup detector warnings from
+ * the default pm_poweroff() function, which busy-loops forever.
  */
-void lockup_detector_resume(void)
+void lockup_detector_soft_poweroff(void)
 {
-       mutex_lock(&watchdog_proc_mutex);
-
-       watchdog_suspended--;
-       /*
-        * The watchdog threads are unparked if they were previously running
-        * and if there is no more active suspend request.
-        */
-       if (watchdog_running && !watchdog_suspended)
-               watchdog_unpark_threads();
-
-       watchdog_nmi_reconfigure();
-
-       mutex_unlock(&watchdog_proc_mutex);
-       put_online_cpus();
+       watchdog_enabled = 0;
 }
 
 #ifdef CONFIG_SYSCTL
 
-/*
- * Update the run state of the lockup detectors.
- */
-static int proc_watchdog_update(void)
+/* Propagate any changes to the watchdog threads */
+static void proc_watchdog_update(void)
 {
-       int err = 0;
-
-       /*
-        * Watchdog threads won't be started if they are already active.
-        * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
-        * care of this. If those threads are already active, the sample
-        * period will be updated and the lockup detectors will be enabled
-        * or disabled 'on the fly'.
-        */
-       if (watchdog_enabled && watchdog_thresh)
-               err = watchdog_enable_all_cpus();
-       else
-               watchdog_disable_all_cpus();
-
-       watchdog_nmi_reconfigure();
-
-       return err;
-
+       /* Remove impossible cpus to keep sysctl output clean. */
+       cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
+       lockup_detector_reconfigure();
 }
 
 /*
  * common function for watchdog, nmi_watchdog and soft_watchdog parameter
  *
- * caller             | table->data points to | 'which' contains the flag(s)
- * -------------------|-----------------------|-----------------------------
- * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
- *                    |                       | with SOFT_WATCHDOG_ENABLED
- * -------------------|-----------------------|-----------------------------
- * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
- * -------------------|-----------------------|-----------------------------
- * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
+ * caller             | table->data points to      | 'which'
+ * -------------------|----------------------------|--------------------------
+ * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
+ *                    |                            | SOFT_WATCHDOG_ENABLED
+ * -------------------|----------------------------|--------------------------
+ * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
+ * -------------------|----------------------------|--------------------------
+ * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
  */
 static int proc_watchdog_common(int which, struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       int err, old, new;
-       int *watchdog_param = (int *)table->data;
+       int err, old, *param = table->data;
 
-       get_online_cpus();
-       mutex_lock(&watchdog_proc_mutex);
+       mutex_lock(&watchdog_mutex);
 
-       if (watchdog_suspended) {
-               /* no parameter changes allowed while watchdog is suspended */
-               err = -EAGAIN;
-               goto out;
-       }
-
-       /*
-        * If the parameter is being read return the state of the corresponding
-        * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
-        * run state of the lockup detectors.
-        */
        if (!write) {
-               *watchdog_param = (watchdog_enabled & which) != 0;
+               /*
+                * On read, synchronize the userspace interface. This is a
+                * racy snapshot.
+                */
+               *param = (watchdog_enabled & which) != 0;
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        } else {
+               old = READ_ONCE(*param);
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-               if (err)
-                       goto out;
-
-               /*
-                * There is a race window between fetching the current value
-                * from 'watchdog_enabled' and storing the new value. During
-                * this race window, watchdog_nmi_enable() can sneak in and
-                * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
-                * The 'cmpxchg' detects this race and the loop retries.
-                */
-               do {
-                       old = watchdog_enabled;
-                       /*
-                        * If the parameter value is not zero set the
-                        * corresponding bit(s), else clear it(them).
-                        */
-                       if (*watchdog_param)
-                               new = old | which;
-                       else
-                               new = old & ~which;
-               } while (cmpxchg(&watchdog_enabled, old, new) != old);
-
-               /*
-                * Update the run state of the lockup detectors. There is _no_
-                * need to check the value returned by proc_watchdog_update()
-                * and to restore the previous value of 'watchdog_enabled' as
-                * both lockup detectors are disabled if proc_watchdog_update()
-                * returns an error.
-                */
-               if (old == new)
-                       goto out;
-
-               err = proc_watchdog_update();
+               if (!err && old != READ_ONCE(*param))
+                       proc_watchdog_update();
        }
-out:
-       mutex_unlock(&watchdog_proc_mutex);
-       put_online_cpus();
+       mutex_unlock(&watchdog_mutex);
        return err;
 }
 
@@ -835,6 +713,8 @@ int proc_watchdog(struct ctl_table *table, int write,
 int proc_nmi_watchdog(struct ctl_table *table, int write,
                      void __user *buffer, size_t *lenp, loff_t *ppos)
 {
+       if (!nmi_watchdog_available && write)
+               return -ENOTSUPP;
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
 }
@@ -855,39 +735,17 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
 int proc_watchdog_thresh(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       int err, old, new;
-
-       get_online_cpus();
-       mutex_lock(&watchdog_proc_mutex);
+       int err, old;
 
-       if (watchdog_suspended) {
-               /* no parameter changes allowed while watchdog is suspended */
-               err = -EAGAIN;
-               goto out;
-       }
+       mutex_lock(&watchdog_mutex);
 
-       old = ACCESS_ONCE(watchdog_thresh);
+       old = READ_ONCE(watchdog_thresh);
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 
-       if (err || !write)
-               goto out;
-
-       /*
-        * Update the sample period. Restore on failure.
-        */
-       new = ACCESS_ONCE(watchdog_thresh);
-       if (old == new)
-               goto out;
+       if (!err && write && old != READ_ONCE(watchdog_thresh))
+               proc_watchdog_update();
 
-       set_sample_period();
-       err = proc_watchdog_update();
-       if (err) {
-               watchdog_thresh = old;
-               set_sample_period();
-       }
-out:
-       mutex_unlock(&watchdog_proc_mutex);
-       put_online_cpus();
+       mutex_unlock(&watchdog_mutex);
        return err;
 }
 
@@ -902,45 +760,19 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write,
 {
        int err;
 
-       get_online_cpus();
-       mutex_lock(&watchdog_proc_mutex);
-
-       if (watchdog_suspended) {
-               /* no parameter changes allowed while watchdog is suspended */
-               err = -EAGAIN;
-               goto out;
-       }
+       mutex_lock(&watchdog_mutex);
 
        err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
-       if (!err && write) {
-               /* Remove impossible cpus to keep sysctl output cleaner. */
-               cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
-                           cpu_possible_mask);
-
-               if (watchdog_running) {
-                       /*
-                        * Failure would be due to being unable to allocate
-                        * a temporary cpumask, so we are likely not in a
-                        * position to do much else to make things better.
-                        */
-                       if (watchdog_update_cpus() != 0)
-                               pr_err("cpumask update failed\n");
-               }
+       if (!err && write)
+               proc_watchdog_update();
 
-               watchdog_nmi_reconfigure();
-       }
-out:
-       mutex_unlock(&watchdog_proc_mutex);
-       put_online_cpus();
+       mutex_unlock(&watchdog_mutex);
        return err;
 }
-
 #endif /* CONFIG_SYSCTL */
 
 void __init lockup_detector_init(void)
 {
-       set_sample_period();
-
 #ifdef CONFIG_NO_HZ_FULL
        if (tick_nohz_full_enabled()) {
                pr_info("Disabling watchdog on nohz_full cores by default\n");
@@ -951,6 +783,7 @@ void __init lockup_detector_init(void)
        cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
 #endif
 
-       if (watchdog_enabled)
-               watchdog_enable_all_cpus();
+       if (!watchdog_nmi_probe())
+               nmi_watchdog_available = true;
+       lockup_detector_setup();
 }
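
The weak watchdog_nmi_stop()/watchdog_nmi_start() pair above is intended to be overridden by architectures that drive their own NMI watchdog. A rough sketch of such an override — assuming hypothetical arch helpers arch_wd_stop()/arch_wd_start() and visibility of the shared watchdog_* variables; this is not code from this series:

    /* Hypothetical arch override; lockup_detector_reconfigure() calls the
     * stop/start pair around the update of watchdog_enabled, watchdog_thresh
     * and watchdog_cpumask.
     */
    void watchdog_nmi_stop(void)
    {
            arch_wd_stop();         /* illustrative: quiesce the arch watchdog */
    }

    void watchdog_nmi_start(void)
    {
            if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
                    return;         /* NMI watchdog disabled via sysctl/cmdline */
            arch_wd_start(watchdog_thresh, &watchdog_cpumask);
    }
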
index 3a09ea1b1d3d5e6e284d058052403ac1396804ca..71a62ceacdc883aa77a711e4b1bfba8c3185dceb 100644 (file)
 static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+static struct cpumask dead_events_mask;
 
 static unsigned long hardlockup_allcpu_dumped;
+static unsigned int watchdog_cpus;
 
 void arch_touch_nmi_watchdog(void)
 {
@@ -103,15 +105,12 @@ static struct perf_event_attr wd_hw_attr = {
 
 /* Callback function for perf event subsystem */
 static void watchdog_overflow_callback(struct perf_event *event,
-                struct perf_sample_data *data,
-                struct pt_regs *regs)
+                                      struct perf_sample_data *data,
+                                      struct pt_regs *regs)
 {
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;
 
-       if (atomic_read(&watchdog_park_in_progress) != 0)
-               return;
-
        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
@@ -160,104 +159,131 @@ static void watchdog_overflow_callback(struct perf_event *event,
        return;
 }
 
-/*
- * People like the simple clean cpu node info on boot.
- * Reduce the watchdog noise by only printing messages
- * that are different from what cpu0 displayed.
- */
-static unsigned long firstcpu_err;
-static atomic_t watchdog_cpus;
-
-int watchdog_nmi_enable(unsigned int cpu)
+static int hardlockup_detector_event_create(void)
 {
+       unsigned int cpu = smp_processor_id();
        struct perf_event_attr *wd_attr;
-       struct perf_event *event = per_cpu(watchdog_ev, cpu);
-       int firstcpu = 0;
-
-       /* nothing to do if the hard lockup detector is disabled */
-       if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
-               goto out;
-
-       /* is it already setup and enabled? */
-       if (event && event->state > PERF_EVENT_STATE_OFF)
-               goto out;
-
-       /* it is setup but not enabled */
-       if (event != NULL)
-               goto out_enable;
-
-       if (atomic_inc_return(&watchdog_cpus) == 1)
-               firstcpu = 1;
+       struct perf_event *evt;
 
        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
 
        /* Try to register using hardware perf events */
-       event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
+       evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
+                                              watchdog_overflow_callback, NULL);
+       if (IS_ERR(evt)) {
+               pr_info("Perf event create on CPU %d failed with %ld\n", cpu,
+                       PTR_ERR(evt));
+               return PTR_ERR(evt);
+       }
+       this_cpu_write(watchdog_ev, evt);
+       return 0;
+}
 
-       /* save the first cpu's error for future comparision */
-       if (firstcpu && IS_ERR(event))
-               firstcpu_err = PTR_ERR(event);
+/**
+ * hardlockup_detector_perf_enable - Enable the local event
+ */
+void hardlockup_detector_perf_enable(void)
+{
+       if (hardlockup_detector_event_create())
+               return;
 
-       if (!IS_ERR(event)) {
-               /* only print for the first cpu initialized */
-               if (firstcpu || firstcpu_err)
-                       pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
-               goto out_save;
-       }
+       if (!watchdog_cpus++)
+               pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
 
-       /*
-        * Disable the hard lockup detector if _any_ CPU fails to set up
-        * set up the hardware perf event. The watchdog() function checks
-        * the NMI_WATCHDOG_ENABLED bit periodically.
-        *
-        * The barriers are for syncing up watchdog_enabled across all the
-        * cpus, as clear_bit() does not use barriers.
-        */
-       smp_mb__before_atomic();
-       clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
-       smp_mb__after_atomic();
-
-       /* skip displaying the same error again */
-       if (!firstcpu && (PTR_ERR(event) == firstcpu_err))
-               return PTR_ERR(event);
-
-       /* vary the KERN level based on the returned errno */
-       if (PTR_ERR(event) == -EOPNOTSUPP)
-               pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
-       else if (PTR_ERR(event) == -ENOENT)
-               pr_warn("disabled (cpu%i): hardware events not enabled\n",
-                        cpu);
-       else
-               pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
-                       cpu, PTR_ERR(event));
-
-       pr_info("Shutting down hard lockup detector on all cpus\n");
-
-       return PTR_ERR(event);
-
-       /* success path */
-out_save:
-       per_cpu(watchdog_ev, cpu) = event;
-out_enable:
-       perf_event_enable(per_cpu(watchdog_ev, cpu));
-out:
-       return 0;
+       perf_event_enable(this_cpu_read(watchdog_ev));
 }
 
-void watchdog_nmi_disable(unsigned int cpu)
+/**
+ * hardlockup_detector_perf_disable - Disable the local event
+ */
+void hardlockup_detector_perf_disable(void)
 {
-       struct perf_event *event = per_cpu(watchdog_ev, cpu);
+       struct perf_event *event = this_cpu_read(watchdog_ev);
 
        if (event) {
                perf_event_disable(event);
+               cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
+               watchdog_cpus--;
+       }
+}
+
+/**
+ * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
+ *
+ * Called from lockup_detector_cleanup(). Serialized by the caller.
+ */
+void hardlockup_detector_perf_cleanup(void)
+{
+       int cpu;
+
+       for_each_cpu(cpu, &dead_events_mask) {
+               struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+               /*
+                * Required because for_each_cpu() unconditionally reports
+                * CPU0 as set on UP kernels. Sigh.
+                */
+               if (event)
+                       perf_event_release_kernel(event);
                per_cpu(watchdog_ev, cpu) = NULL;
+       }
+       cpumask_clear(&dead_events_mask);
+}
+
+/**
+ * hardlockup_detector_perf_stop - Globally stop watchdog events
+ *
+ * Special interface for x86 to handle the perf HT bug.
+ */
+void __init hardlockup_detector_perf_stop(void)
+{
+       int cpu;
+
+       lockdep_assert_cpus_held();
+
+       for_each_online_cpu(cpu) {
+               struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+               if (event)
+                       perf_event_disable(event);
+       }
+}
 
-               /* should be in cleanup, but blocks oprofile */
-               perf_event_release_kernel(event);
+/**
+ * hardlockup_detector_perf_restart - Globally restart watchdog events
+ *
+ * Special interface for x86 to handle the perf HT bug.
+ */
+void __init hardlockup_detector_perf_restart(void)
+{
+       int cpu;
+
+       lockdep_assert_cpus_held();
+
+       if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+               return;
+
+       for_each_online_cpu(cpu) {
+               struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+               if (event)
+                       perf_event_enable(event);
+       }
+}
+
+/**
+ * hardlockup_detector_perf_init - Probe whether NMI event is available at all
+ */
+int __init hardlockup_detector_perf_init(void)
+{
+       int ret = hardlockup_detector_event_create();
 
-               /* watchdog_nmi_enable() expects this to be zero initially. */
-               if (atomic_dec_and_test(&watchdog_cpus))
-                       firstcpu_err = 0;
+       if (ret) {
+               pr_info("Perf NMI watchdog permanently disabled\n");
+       } else {
+               perf_event_release_kernel(this_cpu_read(watchdog_ev));
+               this_cpu_write(watchdog_ev, NULL);
        }
+       return ret;
 }
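
A condensed view of the resulting per-CPU perf event lifecycle, reconstructed from the hunks above (summary only, not new code):

    /* CPU goes down (watchdog thread parked, hotplug lock held):
     *   watchdog_disable(cpu)
     *     -> watchdog_nmi_disable(cpu)                  [weak version]
     *          -> hardlockup_detector_perf_disable()
     *               perf_event_disable(event);
     *               cpumask_set_cpu(cpu, &dead_events_mask);
     *
     * Later, after cpus_write_unlock() in _cpu_down():
     *   lockup_detector_cleanup()
     *     -> __lockup_detector_cleanup()                [watchdog_mutex held]
     *          -> hardlockup_detector_perf_cleanup()
     *               perf_event_release_kernel(event);   outside the hotplug lock
     */
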
index 2585b100ebbbc332a33209c2b0bf3643eb0dd961..276b60262981c95a9fccd508e8d8123212d535de 100644 (file)
@@ -65,8 +65,8 @@ static int ebt_broute(struct sk_buff *skb)
 
 static int __net_init broute_net_init(struct net *net)
 {
-       net->xt.broute_table = ebt_register_table(net, &broute_table, NULL);
-       return PTR_ERR_OR_ZERO(net->xt.broute_table);
+       return ebt_register_table(net, &broute_table, NULL,
+                                 &net->xt.broute_table);
 }
 
 static void __net_exit broute_net_exit(struct net *net)
index 45a00dbdbcad647f2342caa9fc9181f27b7f3a77..c41da5fac84f49a9cf5e58eaab88b3beb2d81fba 100644 (file)
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_filter[] = {
 
 static int __net_init frame_filter_net_init(struct net *net)
 {
-       net->xt.frame_filter = ebt_register_table(net, &frame_filter, ebt_ops_filter);
-       return PTR_ERR_OR_ZERO(net->xt.frame_filter);
+       return ebt_register_table(net, &frame_filter, ebt_ops_filter,
+                                 &net->xt.frame_filter);
 }
 
 static void __net_exit frame_filter_net_exit(struct net *net)
index 57cd5bb154e7071096f3205cb5200e277e04c69d..08df7406ecb3835a664a695a239d73f62eeaf457 100644 (file)
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_nat[] = {
 
 static int __net_init frame_nat_net_init(struct net *net)
 {
-       net->xt.frame_nat = ebt_register_table(net, &frame_nat, ebt_ops_nat);
-       return PTR_ERR_OR_ZERO(net->xt.frame_nat);
+       return ebt_register_table(net, &frame_nat, ebt_ops_nat,
+                                 &net->xt.frame_nat);
 }
 
 static void __net_exit frame_nat_net_exit(struct net *net)
index 83951f978445e5b9daede1eac715cec0d9f42987..3b3dcf719e0783e74ecf9018b3fd1728a31f6393 100644 (file)
@@ -1169,9 +1169,8 @@ static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
        kfree(table);
 }
 
-struct ebt_table *
-ebt_register_table(struct net *net, const struct ebt_table *input_table,
-                  const struct nf_hook_ops *ops)
+int ebt_register_table(struct net *net, const struct ebt_table *input_table,
+                      const struct nf_hook_ops *ops, struct ebt_table **res)
 {
        struct ebt_table_info *newinfo;
        struct ebt_table *t, *table;
@@ -1183,7 +1182,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
            repl->entries == NULL || repl->entries_size == 0 ||
            repl->counters != NULL || input_table->private != NULL) {
                BUGPRINT("Bad table data for ebt_register_table!!!\n");
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
        }
 
        /* Don't add one table to multiple lists. */
@@ -1252,16 +1251,18 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
        list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
        mutex_unlock(&ebt_mutex);
 
+       WRITE_ONCE(*res, table);
+
        if (!ops)
-               return table;
+               return 0;
 
        ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
        if (ret) {
                __ebt_unregister_table(net, table);
-               return ERR_PTR(ret);
+               *res = NULL;
        }
 
-       return table;
+       return ret;
 free_unlock:
        mutex_unlock(&ebt_mutex);
 free_chainstack:
@@ -1276,7 +1277,7 @@ free_newinfo:
 free_table:
        kfree(table);
 out:
-       return ERR_PTR(ret);
+       return ret;
 }
 
 void ebt_unregister_table(struct net *net, struct ebt_table *table,
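
A minimal sketch of the new registration convention, modeled on the converted broute/filter/nat callers above; example_table, example_ops and example_table_ptr are illustrative names only (real callers store the pointer in per-net state such as net->xt):

    #include <net/net_namespace.h>
    #include <linux/netfilter_bridge/ebtables.h>

    static const struct ebt_table example_table;      /* illustrative table definition */
    static const struct nf_hook_ops example_ops[1];   /* illustrative hooks */
    static struct ebt_table *example_table_ptr;       /* stands in for a per-net slot */

    static int __net_init example_net_init(struct net *net)
    {
            /* The registered table is now handed back through the last
             * argument; the return value is the error code directly, and the
             * pointer is reset to NULL if hook registration fails.
             */
            return ebt_register_table(net, &example_table, example_ops,
                                      &example_table_ptr);
    }
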
index 416bb304a281a41970944e5f979eaf4c8aa1ad03..1859c473b21a862b383edebbcf2c1656f9c58b3b 100644 (file)
@@ -86,7 +86,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
                greh = (struct gre_base_hdr *)skb_transport_header(skb);
                pcsum = (__sum16 *)(greh + 1);
 
-               if (gso_partial) {
+               if (gso_partial && skb_is_gso(skb)) {
                        unsigned int partial_adj;
 
                        /* Adjust checksum to account for the fact that
index 811689e523c310dc41b71f6ec9fcff3d2749e590..f75fc6b531152a4d1f4fb96052ad47f3e76c9a21 100644 (file)
@@ -330,7 +330,8 @@ static unsigned int ipv4_synproxy_hook(void *priv,
        if (synproxy == NULL)
                return NF_ACCEPT;
 
-       if (nf_is_loopback_packet(skb))
+       if (nf_is_loopback_packet(skb) ||
+           ip_hdr(skb)->protocol != IPPROTO_TCP)
                return NF_ACCEPT;
 
        thoff = ip_hdrlen(skb);
index ac6fde5d45f14dd258ffd6a8a5ba7a3f1ed0bf63..3d9f1c2f81c58afb45a1445f6ed06a97203606a1 100644 (file)
@@ -2513,7 +2513,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
        struct rtable *ort = (struct rtable *) dst_orig;
        struct rtable *rt;
 
-       rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
+       rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
        if (rt) {
                struct dst_entry *new = &rt->dst;
 
index 5676237d2b0f88e3bd400af3e1c375e51c333847..e45177ceb0ee514ed173f2a899d35e0bc1807f77 100644 (file)
@@ -2240,20 +2240,16 @@ int udp_v4_early_demux(struct sk_buff *skb)
        iph = ip_hdr(skb);
        uh = udp_hdr(skb);
 
-       if (skb->pkt_type == PACKET_BROADCAST ||
-           skb->pkt_type == PACKET_MULTICAST) {
+       if (skb->pkt_type == PACKET_MULTICAST) {
                in_dev = __in_dev_get_rcu(skb->dev);
 
                if (!in_dev)
                        return 0;
 
-               /* we are supposed to accept bcast packets */
-               if (skb->pkt_type == PACKET_MULTICAST) {
-                       ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
-                                              iph->protocol);
-                       if (!ours)
-                               return 0;
-               }
+               ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
+                                      iph->protocol);
+               if (!ours)
+                       return 0;
 
                sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
                                                   uh->source, iph->saddr,
index 97658bfc1b58ab8a19026811e3aa917e598e554b..e360d55be5554d1bee56d3f493752ba9ae2c8015 100644 (file)
@@ -120,7 +120,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
                 * will be using a length value equal to only one MSS sized
                 * segment instead of the entire frame.
                 */
-               if (gso_partial) {
+               if (gso_partial && skb_is_gso(skb)) {
                        uh->len = htons(skb_shinfo(skb)->gso_size +
                                        SKB_GSO_CB(skb)->data_offset +
                                        skb->head - (unsigned char *)uh);
index 96861c702c069d67112cace3196e657fbfaabf87..4a96ebbf8eda5f59a6ff88e836d666a404d2bf0d 100644 (file)
@@ -3820,8 +3820,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
                goto out;
 
        if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
-           dev_net(dev)->ipv6.devconf_all->accept_dad < 1 ||
-           idev->cnf.accept_dad < 1 ||
+           (dev_net(dev)->ipv6.devconf_all->accept_dad < 1 &&
+            idev->cnf.accept_dad < 1) ||
            !(ifp->flags&IFA_F_TENTATIVE) ||
            ifp->flags & IFA_F_NODAD) {
                bump_id = ifp->flags & IFA_F_TENTATIVE;
index cdb3728faca7746d91e2430f6024f060a82b24fd..4a87f9428ca519b475f8feaceaaa3a225bcfe6d2 100644 (file)
@@ -105,7 +105,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
        for (skb = segs; skb; skb = skb->next) {
                ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
-               if (gso_partial)
+               if (gso_partial && skb_is_gso(skb))
                        payload_len = skb_shinfo(skb)->gso_size +
                                      SKB_GSO_CB(skb)->data_offset +
                                      skb->head - (unsigned char *)(ipv6h + 1);
index a5cd43d75393db2152fa5a4edb0b505d20fc2f45..437af8c95277f7a3364f2d0492455a172cc22ab8 100644 (file)
@@ -353,7 +353,7 @@ static unsigned int ipv6_synproxy_hook(void *priv,
        nexthdr = ipv6_hdr(skb)->nexthdr;
        thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
                                 &frag_off);
-       if (thoff < 0)
+       if (thoff < 0 || nexthdr != IPPROTO_TCP)
                return NF_ACCEPT;
 
        th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
index 26cc9f483b6d282f0a665bfc4c2c206da7981921..a96d5b385d8fa25bab416d3f6bc836e743b3ca99 100644 (file)
@@ -1325,7 +1325,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
        struct dst_entry *new = NULL;
 
        rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
-                      DST_OBSOLETE_NONE, 0);
+                      DST_OBSOLETE_DEAD, 0);
        if (rt) {
                rt6_info_init(rt);
 
index e495b5e484b11c03c26674d999e3dc31422efab9..cf84f7b37cd9dfb828892e23b0aa2603827b6427 100644 (file)
@@ -1191,14 +1191,17 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
              from->family == to->family))
                return -IPSET_ERR_TYPE_MISMATCH;
 
-       if (from->ref_netlink || to->ref_netlink)
+       write_lock_bh(&ip_set_ref_lock);
+
+       if (from->ref_netlink || to->ref_netlink) {
+               write_unlock_bh(&ip_set_ref_lock);
                return -EBUSY;
+       }
 
        strncpy(from_name, from->name, IPSET_MAXNAMELEN);
        strncpy(from->name, to->name, IPSET_MAXNAMELEN);
        strncpy(to->name, from_name, IPSET_MAXNAMELEN);
 
-       write_lock_bh(&ip_set_ref_lock);
        swap(from->ref, to->ref);
        ip_set(inst, from_id) = to;
        ip_set(inst, to_id) = from;
@@ -2072,25 +2075,28 @@ static struct pernet_operations ip_set_net_ops = {
 static int __init
 ip_set_init(void)
 {
-       int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+       int ret = register_pernet_subsys(&ip_set_net_ops);
+
+       if (ret) {
+               pr_err("ip_set: cannot register pernet_subsys.\n");
+               return ret;
+       }
 
+       ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
        if (ret != 0) {
                pr_err("ip_set: cannot register with nfnetlink.\n");
+               unregister_pernet_subsys(&ip_set_net_ops);
                return ret;
        }
+
        ret = nf_register_sockopt(&so_set);
        if (ret != 0) {
                pr_err("SO_SET registry failed: %d\n", ret);
                nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+               unregister_pernet_subsys(&ip_set_net_ops);
                return ret;
        }
-       ret = register_pernet_subsys(&ip_set_net_ops);
-       if (ret) {
-               pr_err("ip_set: cannot register pernet_subsys.\n");
-               nf_unregister_sockopt(&so_set);
-               nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
-               return ret;
-       }
+
        pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL);
        return 0;
 }
@@ -2098,9 +2104,10 @@ ip_set_init(void)
 static void __exit
 ip_set_fini(void)
 {
-       unregister_pernet_subsys(&ip_set_net_ops);
        nf_unregister_sockopt(&so_set);
        nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+
+       unregister_pernet_subsys(&ip_set_net_ops);
        pr_debug("these are the famous last words\n");
 }
 
index 20bfbd315f61822e53e90273356686eb9f1d3648..613eb212cb48896f1c5a45d4f4dfdd2d6a245c29 100644 (file)
@@ -123,13 +123,12 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
                return ret;
 
        ip &= ip_set_hostmask(h->netmask);
+       e.ip = htonl(ip);
+       if (e.ip == 0)
+               return -IPSET_ERR_HASH_ELEM;
 
-       if (adt == IPSET_TEST) {
-               e.ip = htonl(ip);
-               if (e.ip == 0)
-                       return -IPSET_ERR_HASH_ELEM;
+       if (adt == IPSET_TEST)
                return adtfn(set, &e, &ext, &ext, flags);
-       }
 
        ip_to = ip;
        if (tb[IPSET_ATTR_IP_TO]) {
@@ -148,17 +147,20 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
-       if (retried)
+       if (retried) {
                ip = ntohl(h->next.ip);
-       for (; !before(ip_to, ip); ip += hosts) {
                e.ip = htonl(ip);
-               if (e.ip == 0)
-                       return -IPSET_ERR_HASH_ELEM;
+       }
+       for (; ip <= ip_to;) {
                ret = adtfn(set, &e, &ext, &ext, flags);
-
                if (ret && !ip_set_eexist(ret, flags))
                        return ret;
 
+               ip += hosts;
+               e.ip = htonl(ip);
+               if (e.ip == 0)
+                       return 0;
+
                ret = 0;
        }
        return ret;
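
The rewritten loop stops on unsigned wrap-around instead of leaning on the sequence-number helpers, so a range that ends at 255.255.255.255 can no longer spin forever. A minimal stand-alone sketch of that termination idea (plain userspace C, not ipset code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t ip = 0xfffffff0, ip_to = 0xffffffff, hosts = 8;

		for (; ip <= ip_to;) {
			printf("add %08x\n", (unsigned)ip);	/* adtfn() stand-in */
			ip += hosts;
			if (ip == 0)	/* wrapped past the top of the address space */
				break;
		}
		return 0;
	}
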
index b64cf14e8352f488588af54fc5c650b27f31a09d..f3ba8348cf9df331ea0f36ba1b1bac99123b0895 100644 (file)
@@ -149,7 +149,7 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (retried)
                ip = ntohl(h->next.ip);
-       for (; !before(ip_to, ip); ip++) {
+       for (; ip <= ip_to; ip++) {
                e.ip = htonl(ip);
                ret = adtfn(set, &e, &ext, &ext, flags);
 
index f438740e6c6a4e4ee94d971de8cba7556ea65bec..ddb8039ec1d2736ae21f3160da52e95ff1e022a4 100644 (file)
@@ -178,7 +178,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (retried)
                ip = ntohl(h->next.ip);
-       for (; !before(ip_to, ip); ip++) {
+       for (; ip <= ip_to; ip++) {
                p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
                                                       : port;
                for (; p <= port_to; p++) {
index 6215fb898c509ebcd35d555ee2fc7a2371733f0d..a7f4d7a85420991e196f0a12de449ef1c20fa57b 100644 (file)
@@ -185,7 +185,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (retried)
                ip = ntohl(h->next.ip);
-       for (; !before(ip_to, ip); ip++) {
+       for (; ip <= ip_to; ip++) {
                p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
                                                       : port;
                for (; p <= port_to; p++) {
index 5ab1b99a53c2b4338837a1fc17e067b9fee6a9d7..a2f19b9906e90ebe4b990f9583ab9f95c7a6b68e 100644 (file)
@@ -271,7 +271,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (retried)
                ip = ntohl(h->next.ip);
-       for (; !before(ip_to, ip); ip++) {
+       for (; ip <= ip_to; ip++) {
                e.ip = htonl(ip);
                p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
                                                       : port;
@@ -281,7 +281,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                              ip == ntohl(h->next.ip) &&
                              p == ntohs(h->next.port)
                                ? ntohl(h->next.ip2) : ip2_from;
-                       while (!after(ip2, ip2_to)) {
+                       while (ip2 <= ip2_to) {
                                e.ip2 = htonl(ip2);
                                ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
                                                                &cidr);
index 5d9e895452e744a38c7324bcf37924c977b62727..1c67a1761e458e584b2277b8ae1462cb5fc666bb 100644 (file)
@@ -193,7 +193,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
        }
        if (retried)
                ip = ntohl(h->next.ip);
-       while (!after(ip, ip_to)) {
+       while (ip <= ip_to) {
                e.ip = htonl(ip);
                last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
                ret = adtfn(set, &e, &ext, &ext, flags);
index 44cf11939c916473b024d0b3fddaa6dbb1777ed8..d417074f1c1a298c33a324748e3aa274ed66616b 100644 (file)
@@ -255,7 +255,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (retried)
                ip = ntohl(h->next.ip);
-       while (!after(ip, ip_to)) {
+       while (ip <= ip_to) {
                e.ip = htonl(ip);
                last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
                ret = adtfn(set, &e, &ext, &ext, flags);
index db614e13b193ddb1733bcb098d53d2f12520066f..7f9ae2e9645be84faa6d24f2b7ee8b973ad11b6b 100644 (file)
@@ -250,13 +250,13 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (retried)
                ip = ntohl(h->next.ip[0]);
 
-       while (!after(ip, ip_to)) {
+       while (ip <= ip_to) {
                e.ip[0] = htonl(ip);
                last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
                ip2 = (retried &&
                       ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
                                                   : ip2_from;
-               while (!after(ip2, ip2_to)) {
+               while (ip2 <= ip2_to) {
                        e.ip[1] = htonl(ip2);
                        last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
                        ret = adtfn(set, &e, &ext, &ext, flags);
index 54b64b6cd0cdb2196e1f507909784096af45e11f..e6ef382febe46e8a4d8af045abcb47ae546c8710 100644 (file)
@@ -241,7 +241,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (retried)
                ip = ntohl(h->next.ip);
-       while (!after(ip, ip_to)) {
+       while (ip <= ip_to) {
                e.ip = htonl(ip);
                last = ip_set_range_to_cidr(ip, ip_to, &cidr);
                e.cidr = cidr - 1;
index aff846960ac4423da8ec5a99f4faccf294a812e0..8602f2595a1a1606f6380d6f00af40fa33759630 100644 (file)
@@ -291,7 +291,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (retried)
                ip = ntohl(h->next.ip[0]);
 
-       while (!after(ip, ip_to)) {
+       while (ip <= ip_to) {
                e.ip[0] = htonl(ip);
                ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
                p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
@@ -301,7 +301,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                        ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
                               p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
                                                         : ip2_from;
-                       while (!after(ip2, ip2_to)) {
+                       while (ip2 <= ip2_to) {
                                e.ip[1] = htonl(ip2);
                                ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
                                                                &e.cidr[1]);
index 90d396814798e15d327aafe89c4bbb5a611da480..4527921b1c3ac97b95a48c62f699b71d8b853be0 100644 (file)
@@ -921,6 +921,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
 {
        struct sk_buff *new_skb = NULL;
        struct iphdr *old_iph = NULL;
+       __u8 old_dsfield;
 #ifdef CONFIG_IP_VS_IPV6
        struct ipv6hdr *old_ipv6h = NULL;
 #endif
@@ -945,7 +946,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
                        *payload_len =
                                ntohs(old_ipv6h->payload_len) +
                                sizeof(*old_ipv6h);
-               *dsfield = ipv6_get_dsfield(old_ipv6h);
+               old_dsfield = ipv6_get_dsfield(old_ipv6h);
                *ttl = old_ipv6h->hop_limit;
                if (df)
                        *df = 0;
@@ -960,12 +961,15 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
 
                /* fix old IP header checksum */
                ip_send_check(old_iph);
-               *dsfield = ipv4_get_dsfield(old_iph);
+               old_dsfield = ipv4_get_dsfield(old_iph);
                *ttl = old_iph->ttl;
                if (payload_len)
                        *payload_len = ntohs(old_iph->tot_len);
        }
 
+       /* Implement full-functionality option for ECN encapsulation */
+       *dsfield = INET_ECN_encapsulate(old_dsfield, old_dsfield);
+
        return skb;
 error:
        kfree_skb(skb);
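
The new *dsfield comes from INET_ECN_encapsulate(), which, per the "full-functionality" option of RFC 3168 §9.1.1 that the added comment refers to, keeps the outer DSCP and derives the outer ECN bits from the inner header (CE becomes ECT(0) so the outer packet can still be congestion-marked). An illustrative re-implementation of that mapping, not the kernel helper itself:

	#include <stdint.h>

	#define ECN_MASK	0x03	/* low two bits of the DS field */
	#define ECN_ECT_0	0x02
	#define ECN_CE		0x03

	static uint8_t ecn_encap_dsfield(uint8_t outer, uint8_t inner)
	{
		uint8_t ecn = (inner & ECN_MASK) == ECN_CE ? ECN_ECT_0
							   : (inner & ECN_MASK);

		return (outer & ~ECN_MASK) | ecn;	/* outer DSCP + mapped ECN */
	}

Since the hunk passes old_dsfield as both arguments, the tunnel header ends up with the inner packet's DSCP and a safely mapped ECN codepoint.
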
index 929927171426a6e286bd5cc4412aaa99b77fec77..64e1ee09122582bce81a4ee996064763083bcfa0 100644 (file)
@@ -1048,7 +1048,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
                if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
                        goto nla_put_failure;
 
-               if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
+               if (basechain->stats && nft_dump_stats(skb, basechain->stats))
                        goto nla_put_failure;
        }
 
@@ -1487,8 +1487,8 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
 
                chain2 = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME],
                                                genmask);
-               if (IS_ERR(chain2))
-                       return PTR_ERR(chain2);
+               if (!IS_ERR(chain2))
+                       return -EEXIST;
        }
 
        if (nla[NFTA_CHAIN_COUNTERS]) {
@@ -2741,8 +2741,10 @@ cont:
        list_for_each_entry(i, &ctx->table->sets, list) {
                if (!nft_is_active_next(ctx->net, i))
                        continue;
-               if (!strcmp(set->name, i->name))
+               if (!strcmp(set->name, i->name)) {
+                       kfree(set->name);
                        return -ENFILE;
+               }
        }
        return 0;
 }
index c83a3b5e1c6c2a91b713b6681a794bd79ab3fa08..d8571f4142080a3c121fc90f0b52d81ee9df6712 100644 (file)
@@ -892,7 +892,7 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
                        return ERR_PTR(-EFAULT);
 
-               strlcpy(info->name, compat_tmp.name, sizeof(info->name));
+               memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
                info->num_counters = compat_tmp.num_counters;
                user += sizeof(compat_tmp);
        } else
@@ -905,9 +905,9 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                if (copy_from_user(info, user, sizeof(*info)) != 0)
                        return ERR_PTR(-EFAULT);
 
-               info->name[sizeof(info->name) - 1] = '\0';
                user += sizeof(*info);
        }
+       info->name[sizeof(info->name) - 1] = '\0';
 
        size = sizeof(struct xt_counters);
        size *= info->num_counters;
index 38986a95216cd2c3f7a0f83deedcb42153f5a937..29123934887bbfe5081178f9ce2425c5bb618a9c 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/syscalls.h>
 #include <linux/skbuff.h>
 #include <linux/filter.h>
 #include <linux/bpf.h>
@@ -49,6 +50,22 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
        return 0;
 }
 
+static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
+{
+       mm_segment_t oldfs = get_fs();
+       int retval, fd;
+
+       set_fs(KERNEL_DS);
+       fd = bpf_obj_get_user(path);
+       set_fs(oldfs);
+       if (fd < 0)
+               return fd;
+
+       retval = __bpf_mt_check_fd(fd, ret);
+       sys_close(fd);
+       return retval;
+}
+
 static int bpf_mt_check(const struct xt_mtchk_param *par)
 {
        struct xt_bpf_info *info = par->matchinfo;
@@ -66,9 +83,10 @@ static int bpf_mt_check_v1(const struct xt_mtchk_param *par)
                return __bpf_mt_check_bytecode(info->bpf_program,
                                               info->bpf_program_num_elem,
                                               &info->filter);
-       else if (info->mode == XT_BPF_MODE_FD_PINNED ||
-                info->mode == XT_BPF_MODE_FD_ELF)
+       else if (info->mode == XT_BPF_MODE_FD_ELF)
                return __bpf_mt_check_fd(info->fd, &info->filter);
+       else if (info->mode == XT_BPF_MODE_PATH_PINNED)
+               return __bpf_mt_check_path(info->path, &info->filter);
        else
                return -EINVAL;
 }
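
The get_fs()/set_fs(KERNEL_DS) pair is needed because bpf_obj_get_user() checks its argument as a user-space pointer while info->path lives in kernel memory. The general shape of that pattern, with some_user_api() as a placeholder rather than a real function:

	mm_segment_t oldfs = get_fs();
	int ret;

	set_fs(KERNEL_DS);	/* let a kernel buffer pass the __user checks */
	ret = some_user_api((const char __user *)kernel_buf);
	set_fs(oldfs);		/* always restore the previous address limit */
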
index e75ef39669c5a9a5b72c9a1cec8b72020600eae1..575d2153e3b819f32e9a262abddca95a108eee02 100644 (file)
@@ -76,7 +76,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
                        transparent = nf_sk_is_transparent(sk);
 
                if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
-                   transparent)
+                   transparent && sk_fullsock(sk))
                        pskb->mark = sk->sk_mark;
 
                if (sk != skb->sk)
@@ -133,7 +133,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
                        transparent = nf_sk_is_transparent(sk);
 
                if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
-                   transparent)
+                   transparent && sk_fullsock(sk))
                        pskb->mark = sk->sk_mark;
 
                if (sk != skb->sk)
index 94c11cf0459d33bb12d26e246e7e9c66314e9e85..f34750691c5c0e24aef976f59041d44e8342d2a0 100644 (file)
@@ -2266,16 +2266,17 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        cb->min_dump_alloc = control->min_dump_alloc;
        cb->skb = skb;
 
+       if (cb->start) {
+               ret = cb->start(cb);
+               if (ret)
+                       goto error_unlock;
+       }
+
        nlk->cb_running = true;
 
        mutex_unlock(nlk->cb_mutex);
 
-       ret = 0;
-       if (cb->start)
-               ret = cb->start(cb);
-
-       if (!ret)
-               ret = netlink_dump(sk);
+       ret = netlink_dump(sk);
 
        sock_put(sk);
 
index 9b5de31aa42939cbc3f6aa4a0a94d7a28f711e84..c1841f234a717fdcf46413e25cb1e265912a2dfb 100644 (file)
@@ -2203,7 +2203,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
        struct sock_xprt *transport =
                container_of(work, struct sock_xprt, connect_worker.work);
        struct rpc_xprt *xprt = &transport->xprt;
-       struct socket *sock = transport->sock;
+       struct socket *sock;
        int status = -EIO;
 
        sock = xs_create_sock(xprt, transport,
index 7d99029df342f15b28c1f8e49d43bc90c709d1b0..a140dd4a84afc44a2574b2fac25969ceba28aaf9 100644 (file)
@@ -233,7 +233,7 @@ static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
        struct sk_buff_head xmitq;
        int rc = 0;
 
-       __skb_queue_head_init(&xmitq);
+       skb_queue_head_init(&xmitq);
        tipc_bcast_lock(net);
        if (tipc_link_bc_peers(l))
                rc = tipc_link_xmit(l, pkts, &xmitq);
@@ -263,7 +263,7 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
        u32 dst, selector;
 
        selector = msg_link_selector(buf_msg(skb_peek(pkts)));
-       __skb_queue_head_init(&_pkts);
+       skb_queue_head_init(&_pkts);
 
        list_for_each_entry_safe(n, tmp, &dests->list, list) {
                dst = n->value;
index 121e59a1d0e729088170586e6714a0245680e4c7..17146c16ee2df5bdefcb949ecb40dd81b00f9940 100644 (file)
@@ -568,6 +568,14 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
        msg_set_destnode(msg, dnode);
        msg_set_destport(msg, dport);
        *err = TIPC_OK;
+
+       if (!skb_cloned(skb))
+               return true;
+
+       /* Unclone buffer in case it was bundled */
+       if (pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
+               return false;
+
        return true;
 }
 
index 690874293cfc6103a08eecf096a94184f36868f9..d396cb61a280d24b6c4bd4733885ff9693f1c673 100644 (file)
@@ -549,6 +549,14 @@ nl80211_nan_srf_policy[NL80211_NAN_SRF_ATTR_MAX + 1] = {
        [NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED },
 };
 
+/* policy for packet pattern attributes */
+static const struct nla_policy
+nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
+       [NL80211_PKTPAT_MASK] = { .type = NLA_BINARY, },
+       [NL80211_PKTPAT_PATTERN] = { .type = NLA_BINARY, },
+       [NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 },
+};
+
 static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
                                     struct netlink_callback *cb,
                                     struct cfg80211_registered_device **rdev,
@@ -10532,7 +10540,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                        u8 *mask_pat;
 
                        nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
-                                        NULL, info->extack);
+                                        nl80211_packet_pattern_policy,
+                                        info->extack);
                        err = -EINVAL;
                        if (!pat_tb[NL80211_PKTPAT_MASK] ||
                            !pat_tb[NL80211_PKTPAT_PATTERN])
@@ -10781,7 +10790,8 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
                            rem) {
                u8 *mask_pat;
 
-               nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, NULL, NULL);
+               nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+                                nl80211_packet_pattern_policy, NULL);
                if (!pat_tb[NL80211_PKTPAT_MASK] ||
                    !pat_tb[NL80211_PKTPAT_PATTERN])
                        return -EINVAL;
index acf00104ef312b563be9f3aa9698fa2a9dbdfea7..30e5746085b8fcfc5aa8abc7a8a23753c510a630 100644 (file)
@@ -91,6 +91,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
        }
 
        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
+               xso->dev = NULL;
                dev_put(dev);
                return 0;
        }
index 2515cd2bc5db1a56915856aa5252f2e01592c9e7..8ac9d32fb79d8bbb9e6d6765c9d7eb8b13ebedb5 100644 (file)
@@ -429,7 +429,8 @@ resume:
        nf_reset(skb);
 
        if (decaps) {
-               skb->sp->olen = 0;
+               if (skb->sp)
+                       skb->sp->olen = 0;
                skb_dst_drop(skb);
                gro_cells_receive(&gro_cells, skb);
                return 0;
@@ -440,7 +441,8 @@ resume:
 
                err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async);
                if (xfrm_gro) {
-                       skb->sp->olen = 0;
+                       if (skb->sp)
+                               skb->sp->olen = 0;
                        skb_dst_drop(skb);
                        gro_cells_receive(&gro_cells, skb);
                        return err;
index 0dab1cd79ce4d1afe84ba9422a740689a9ebdf71..12213477cd3ad90af9dc2e1bed236e461621115b 100644 (file)
@@ -732,12 +732,12 @@ restart:
                        }
                }
        }
+out:
+       spin_unlock_bh(&net->xfrm.xfrm_state_lock);
        if (cnt) {
                err = 0;
                xfrm_policy_cache_flush();
        }
-out:
-       spin_unlock_bh(&net->xfrm.xfrm_state_lock);
        return err;
 }
 EXPORT_SYMBOL(xfrm_state_flush);
index 2bfbd9121e3b21b0eb793d2d3a685bd4cebde22b..b997f1395357e8696657d3c07b04318b908fff89 100644 (file)
@@ -657,6 +657,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        if (err < 0) {
                x->km.state = XFRM_STATE_DEAD;
+               xfrm_dev_state_delete(x);
                __xfrm_state_put(x);
                goto out;
        }
index fec1dfdb14adfa7c7edc9f7aa2aba35c9e0cb2a5..4490a699030b10725015e323a18a09286ea02db0 100644 (file)
@@ -948,14 +948,13 @@ static const struct file_operations snd_compr_file_ops = {
 static int snd_compress_dev_register(struct snd_device *device)
 {
        int ret = -EINVAL;
-       char str[16];
        struct snd_compr *compr;
 
        if (snd_BUG_ON(!device || !device->device_data))
                return -EBADFD;
        compr = device->device_data;
 
-       pr_debug("reg %s for device %s, direction %d\n", str, compr->name,
+       pr_debug("reg device %s, direction %d\n", compr->name,
                        compr->direction);
        /* register compressed device */
        ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
index 3a1cc7b97e468bc19fad2fadfd9fb68053e6bc6d..b719d0bd833ecb6d7560380db5eb3b78a9b040c4 100644 (file)
@@ -547,6 +547,7 @@ struct snd_pcm_mmap_status_x32 {
        u32 pad2; /* alignment */
        struct timespec tstamp;
        s32 suspended_state;
+       s32 pad3;
        struct timespec audio_tstamp;
 } __packed;
 
index ea2d0ae85bd367d5ea70068ee74d925a349789c3..6c9cba2166d95b3b9175cc9eb39ca14e3a14f0f5 100644 (file)
@@ -1259,6 +1259,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
        struct snd_seq_port_info *info = arg;
        struct snd_seq_client_port *port;
        struct snd_seq_port_callback *callback;
+       int port_idx;
 
        /* it is not allowed to create the port for an another client */
        if (info->addr.client != client->number)
@@ -1269,7 +1270,9 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
                return -ENOMEM;
 
        if (client->type == USER_CLIENT && info->kernel) {
-               snd_seq_delete_port(client, port->addr.port);
+               port_idx = port->addr.port;
+               snd_seq_port_unlock(port);
+               snd_seq_delete_port(client, port_idx);
                return -EINVAL;
        }
        if (client->type == KERNEL_CLIENT) {
@@ -1290,6 +1293,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
 
        snd_seq_set_port_info(port, info);
        snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
+       snd_seq_port_unlock(port);
 
        return 0;
 }
index 0a7020c82bfc76ac295d084fa2d98abe5dc5f647..d21ece9f8d7365e5e621156f1e44b85aa11eb1a1 100644 (file)
@@ -122,7 +122,9 @@ static void port_subs_info_init(struct snd_seq_port_subs_info *grp)
 }
 
 
-/* create a port, port number is returned (-1 on failure) */
+/* create a port, port number is returned (-1 on failure);
+ * the caller needs to unref the port via snd_seq_port_unlock() appropriately
+ */
 struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
                                                int port)
 {
@@ -151,6 +153,7 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
        snd_use_lock_init(&new_port->use_lock);
        port_subs_info_init(&new_port->c_src);
        port_subs_info_init(&new_port->c_dest);
+       snd_use_lock_use(&new_port->use_lock);
 
        num = port >= 0 ? port : 0;
        mutex_lock(&client->ports_mutex);
@@ -165,9 +168,9 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
        list_add_tail(&new_port->list, &p->list);
        client->num_ports++;
        new_port->addr.port = num;      /* store the port number in the port */
+       sprintf(new_port->name, "port-%d", num);
        write_unlock_irqrestore(&client->ports_lock, flags);
        mutex_unlock(&client->ports_mutex);
-       sprintf(new_port->name, "port-%d", num);
 
        return new_port;
 }
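
Because snd_seq_create_port() now returns the port with its use-lock reference held, every caller is expected to drop that reference once set-up is done, as the ioctl path above does. Sketched with error handling trimmed:

	port = snd_seq_create_port(client, info->addr.port);
	if (!port)
		return -ENOMEM;
	/* ... configure while the use-lock ref keeps the port alive ... */
	snd_seq_set_port_info(port, info);
	snd_seq_port_unlock(port);	/* drop the ref taken by snd_seq_create_port() */
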
index 8d93a4021c78ab58cab236c3b9d170bde8a81cb1..f48a4cd24ffce2d50639e177822bb96b31a93d02 100644 (file)
@@ -77,13 +77,17 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
  * decode input event and put to read buffer of each opened file
  */
 static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
-                                        struct snd_seq_event *ev)
+                                        struct snd_seq_event *ev,
+                                        bool atomic)
 {
        struct snd_virmidi *vmidi;
        unsigned char msg[4];
        int len;
 
-       read_lock(&rdev->filelist_lock);
+       if (atomic)
+               read_lock(&rdev->filelist_lock);
+       else
+               down_read(&rdev->filelist_sem);
        list_for_each_entry(vmidi, &rdev->filelist, list) {
                if (!vmidi->trigger)
                        continue;
@@ -97,7 +101,10 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
                                snd_rawmidi_receive(vmidi->substream, msg, len);
                }
        }
-       read_unlock(&rdev->filelist_lock);
+       if (atomic)
+               read_unlock(&rdev->filelist_lock);
+       else
+               up_read(&rdev->filelist_sem);
 
        return 0;
 }
@@ -115,7 +122,7 @@ int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev)
        struct snd_virmidi_dev *rdev;
 
        rdev = rmidi->private_data;
-       return snd_virmidi_dev_receive_event(rdev, ev);
+       return snd_virmidi_dev_receive_event(rdev, ev, true);
 }
 #endif  /*  0  */
 
@@ -130,7 +137,7 @@ static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct,
        rdev = private_data;
        if (!(rdev->flags & SNDRV_VIRMIDI_USE))
                return 0; /* ignored */
-       return snd_virmidi_dev_receive_event(rdev, ev);
+       return snd_virmidi_dev_receive_event(rdev, ev, atomic);
 }
 
 /*
@@ -209,7 +216,6 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
        struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
        struct snd_virmidi *vmidi;
-       unsigned long flags;
 
        vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
        if (vmidi == NULL)
@@ -223,9 +229,11 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
        vmidi->client = rdev->client;
        vmidi->port = rdev->port;       
        runtime->private_data = vmidi;
-       write_lock_irqsave(&rdev->filelist_lock, flags);
+       down_write(&rdev->filelist_sem);
+       write_lock_irq(&rdev->filelist_lock);
        list_add_tail(&vmidi->list, &rdev->filelist);
-       write_unlock_irqrestore(&rdev->filelist_lock, flags);
+       write_unlock_irq(&rdev->filelist_lock);
+       up_write(&rdev->filelist_sem);
        vmidi->rdev = rdev;
        return 0;
 }
@@ -264,9 +272,11 @@ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
        struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
        struct snd_virmidi *vmidi = substream->runtime->private_data;
 
+       down_write(&rdev->filelist_sem);
        write_lock_irq(&rdev->filelist_lock);
        list_del(&vmidi->list);
        write_unlock_irq(&rdev->filelist_lock);
+       up_write(&rdev->filelist_sem);
        snd_midi_event_free(vmidi->parser);
        substream->runtime->private_data = NULL;
        kfree(vmidi);
@@ -520,6 +530,7 @@ int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmi
        rdev->rmidi = rmidi;
        rdev->device = device;
        rdev->client = -1;
+       init_rwsem(&rdev->filelist_sem);
        rwlock_init(&rdev->filelist_lock);
        INIT_LIST_HEAD(&rdev->filelist);
        rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH;
index 7e3aa50b21f9d2d2f5ca49f3f9a779ab1276ee4a..5badd08e1d69cc12657410359255ab691eff62f0 100644 (file)
@@ -103,6 +103,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        void __user *puhr;
        union hpi_message_buffer_v1 *hm;
        union hpi_response_buffer_v1 *hr;
+       u16 msg_size;
        u16 res_max_size;
        u32 uncopied_bytes;
        int err = 0;
@@ -127,22 +128,25 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        }
 
        /* Now read the message size and data from user space.  */
-       if (get_user(hm->h.size, (u16 __user *)puhm)) {
+       if (get_user(msg_size, (u16 __user *)puhm)) {
                err = -EFAULT;
                goto out;
        }
-       if (hm->h.size > sizeof(*hm))
-               hm->h.size = sizeof(*hm);
+       if (msg_size > sizeof(*hm))
+               msg_size = sizeof(*hm);
 
        /* printk(KERN_INFO "message size %d\n", hm->h.wSize); */
 
-       uncopied_bytes = copy_from_user(hm, puhm, hm->h.size);
+       uncopied_bytes = copy_from_user(hm, puhm, msg_size);
        if (uncopied_bytes) {
                HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes);
                err = -EFAULT;
                goto out;
        }
 
+       /* Override h.size in case it is changed between two userspace fetches */
+       hm->h.size = msg_size;
+
        if (get_user(res_max_size, (u16 __user *)puhr)) {
                err = -EFAULT;
                goto out;
index 7326695bca3371d192904d6d351dd4a87144e720..d68f99e076a879def2241365e0dd78b3aeaa5611 100644 (file)
@@ -1272,11 +1272,11 @@ static int snd_echo_mixer_info(struct snd_kcontrol *kcontrol,
 
        chip = snd_kcontrol_chip(kcontrol);
        uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+       uinfo->count = 1;
        uinfo->value.integer.min = ECHOGAIN_MINOUT;
        uinfo->value.integer.max = ECHOGAIN_MAXOUT;
        uinfo->dimen.d[0] = num_busses_out(chip);
        uinfo->dimen.d[1] = num_busses_in(chip);
-       uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
        return 0;
 }
 
@@ -1344,11 +1344,11 @@ static int snd_echo_vmixer_info(struct snd_kcontrol *kcontrol,
 
        chip = snd_kcontrol_chip(kcontrol);
        uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+       uinfo->count = 1;
        uinfo->value.integer.min = ECHOGAIN_MINOUT;
        uinfo->value.integer.max = ECHOGAIN_MAXOUT;
        uinfo->dimen.d[0] = num_busses_out(chip);
        uinfo->dimen.d[1] = num_pipes_out(chip);
-       uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
        return 0;
 }
 
@@ -1728,6 +1728,7 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
                                  struct snd_ctl_elem_info *uinfo)
 {
        uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+       uinfo->count = 96;
        uinfo->value.integer.min = ECHOGAIN_MINOUT;
        uinfo->value.integer.max = 0;
 #ifdef ECHOCARD_HAS_VMIXER
@@ -1737,7 +1738,6 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
 #endif
        uinfo->dimen.d[1] = 16; /* 16 channels */
        uinfo->dimen.d[2] = 2;  /* 0=level, 1=peak */
-       uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1] * uinfo->dimen.d[2];
        return 0;
 }
 
index 2b64fabd5faa5f1e85681592910fd61ad2aa89f9..c19c81d230bd7423b4153d2266a45e09333f8714 100644 (file)
@@ -906,6 +906,7 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
                              hda_nid_t pin_nid, u32 stream_tag, int format)
 {
        struct hdmi_spec *spec = codec->spec;
+       unsigned int param;
        int err;
 
        err = spec->ops.pin_hbr_setup(codec, pin_nid, is_hbr_format(format));
@@ -915,6 +916,26 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
                return err;
        }
 
+       if (is_haswell_plus(codec)) {
+
+               /*
+                * on recent platforms IEC Coding Type is required for HBR
+                * support, read current Digital Converter settings and set
+                * ICT bitfield if needed.
+                */
+               param = snd_hda_codec_read(codec, cvt_nid, 0,
+                                          AC_VERB_GET_DIGI_CONVERT_1, 0);
+
+               param = (param >> 16) & ~(AC_DIG3_ICT);
+
+               /* on recent platforms ICT mode is required for HBR support */
+               if (is_hbr_format(format))
+                       param |= 0x1;
+
+               snd_hda_codec_write(codec, cvt_nid, 0,
+                                   AC_VERB_SET_DIGI_CONVERT_3, param);
+       }
+
        snd_hda_codec_setup_stream(codec, cvt_nid, stream_tag, 0, format);
        return 0;
 }
index 0fb6b1b7926170030661c6724e1be6260da6cfa6..d8409d9ae55b63fc86e7971f8b94b593bd681de6 100644 (file)
@@ -469,10 +469,12 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
 
        err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0);
        if (err)
-               return err;
+               goto err_kill_urb;
 
-       if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ))
-               return -ENODEV;
+       if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) {
+               err = -ENODEV;
+               goto err_kill_urb;
+       }
 
        usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
                   cdev->vendor_name, CAIAQ_USB_STR_LEN);
@@ -507,6 +509,10 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
 
        setup_card(cdev);
        return 0;
+
+ err_kill_urb:
+       usb_kill_urb(&cdev->ep1_in_urb);
+       return err;
 }
 
 static int snd_probe(struct usb_interface *intf,
index 3dc36d91355020e14ba0db0a1372a689ed66d84c..23d1d23aefec375c2c857f090f4920894e4bc7f8 100644 (file)
@@ -221,6 +221,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
        struct usb_interface_descriptor *altsd;
        void *control_header;
        int i, protocol;
+       int rest_bytes;
 
        /* find audiocontrol interface */
        host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0];
@@ -235,6 +236,15 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
                return -EINVAL;
        }
 
+       rest_bytes = (void *)(host_iface->extra + host_iface->extralen) -
+               control_header;
+
+       /* just to be sure -- this shouldn't hit at all */
+       if (rest_bytes <= 0) {
+               dev_err(&dev->dev, "invalid control header\n");
+               return -EINVAL;
+       }
+
        switch (protocol) {
        default:
                dev_warn(&dev->dev,
@@ -245,11 +255,21 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
        case UAC_VERSION_1: {
                struct uac1_ac_header_descriptor *h1 = control_header;
 
+               if (rest_bytes < sizeof(*h1)) {
+                       dev_err(&dev->dev, "too short v1 buffer descriptor\n");
+                       return -EINVAL;
+               }
+
                if (!h1->bInCollection) {
                        dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
                        return -EINVAL;
                }
 
+               if (rest_bytes < h1->bLength) {
+                       dev_err(&dev->dev, "invalid buffer length (v1)\n");
+                       return -EINVAL;
+               }
+
                if (h1->bLength < sizeof(*h1) + h1->bInCollection) {
                        dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n");
                        return -EINVAL;
index 0ff5a7d2e19fe1cc584a6420461841be26781b9b..c8f723c3a0336905d02afc8d33a51031985430e6 100644 (file)
@@ -779,9 +779,10 @@ int line6_probe(struct usb_interface *interface,
        return 0;
 
  error:
-       if (line6->disconnect)
-               line6->disconnect(line6);
-       snd_card_free(card);
+       /* we can call disconnect callback here because no close-sync is
+        * needed yet at this point
+        */
+       line6_disconnect(interface);
        return ret;
 }
 EXPORT_SYMBOL_GPL(line6_probe);
index 956f847a96e45fdfbd171d81c525fd869acad1f8..451007c2774344be753e38fd6863b03eaa284955 100644 (file)
@@ -301,7 +301,8 @@ static void podhd_disconnect(struct usb_line6 *line6)
 
                intf = usb_ifnum_to_if(line6->usbdev,
                                        pod->line6.properties->ctrl_if);
-               usb_driver_release_interface(&podhd_driver, intf);
+               if (intf)
+                       usb_driver_release_interface(&podhd_driver, intf);
        }
 }
 
@@ -317,6 +318,9 @@ static int podhd_init(struct usb_line6 *line6,
 
        line6->disconnect = podhd_disconnect;
 
+       init_timer(&pod->startup_timer);
+       INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
+
        if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
                /* claim the data interface */
                intf = usb_ifnum_to_if(line6->usbdev,
@@ -358,8 +362,6 @@ static int podhd_init(struct usb_line6 *line6,
        }
 
        /* init device and delay registering */
-       init_timer(&pod->startup_timer);
-       INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
        podhd_startup(pod);
        return 0;
 }
index 9732edf77f860dbb668ea14206097914e2fcbdf5..91bc8f18791e46bc80adfb8503e310e2fdc7a298 100644 (file)
@@ -2234,6 +2234,9 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
 
 static void snd_usb_mixer_free(struct usb_mixer_interface *mixer)
 {
+       /* kill pending URBs */
+       snd_usb_mixer_disconnect(mixer);
+
        kfree(mixer->id_elems);
        if (mixer->urb) {
                kfree(mixer->urb->transfer_buffer);
@@ -2584,8 +2587,13 @@ _error:
 
 void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer)
 {
-       usb_kill_urb(mixer->urb);
-       usb_kill_urb(mixer->rc_urb);
+       if (mixer->disconnected)
+               return;
+       if (mixer->urb)
+               usb_kill_urb(mixer->urb);
+       if (mixer->rc_urb)
+               usb_kill_urb(mixer->rc_urb);
+       mixer->disconnected = true;
 }
 
 #ifdef CONFIG_PM
index 2b4b067646ab099653fe7ea79d9af1570e2971f6..545d99b09706b37cee252e08b75e43ced50e93d3 100644 (file)
@@ -22,6 +22,8 @@ struct usb_mixer_interface {
        struct urb *rc_urb;
        struct usb_ctrlrequest *rc_setup_packet;
        u8 rc_buffer[6];
+
+       bool disconnected;
 };
 
 #define MAX_CHANNELS   16      /* max logical channels */
index 9135520782854233bb246f976d1044f7b8762aa4..9ddaae3784f5312a135733f5e7f1019609080073 100644 (file)
@@ -1137,6 +1137,9 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */
        case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
        case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
+       case USB_ID(0x047F, 0xC022): /* Plantronics C310 */
+       case USB_ID(0x047F, 0xC02F): /* Plantronics P610 */
+       case USB_ID(0x047F, 0xC036): /* Plantronics C520-M */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
        case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
        case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
index 4dab490807009a96b4d43ba86dc53bcaf52b94e1..e229abd216526cae577aafd3dd2f2f3ed3abc5ba 100644 (file)
@@ -191,7 +191,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
        }
 
        pg = get_order(read_size);
-       sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
+       sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
+                                         __GFP_NOWARN, pg);
        if (!sk->s) {
                snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
                goto out;
@@ -211,7 +212,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
        pg = get_order(write_size);
 
        sk->write_page =
-               (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
+               (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
+                                        __GFP_NOWARN, pg);
        if (!sk->write_page) {
                snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
                usb_stream_free(sk);
index 0f5e347b068d3bb22d07ddd8308cc55fc263aebb..152823b6cb2152d1661e0fc9dd287ed23831ef75 100644 (file)
@@ -5,8 +5,8 @@ TEST_GEN_PROGS := mq_open_tests mq_perf_tests
 include ../lib.mk
 
 override define RUN_TESTS
-       $(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]"
-       $(OUTPUT)//mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]"
+       @$(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]"
+       @$(OUTPUT)/mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]"
 endef
 
 override define EMIT_TESTS
index 00f286661dcd2c0459941e03dcccf0af4d17fd37..dd4162fc0419bca7a7dccd17db8b99b61fb7f4a3 100644 (file)
@@ -341,7 +341,7 @@ int main(int argc, char **argv)
                        return 0;
                case 'n':
                        t = atoi(optarg);
-                       if (t > ARRAY_SIZE(test_cases))
+                       if (t >= ARRAY_SIZE(test_cases))
                                error(1, 0, "Invalid test case: %d", t);
                        all_tests = false;
                        test_cases[t].enabled = true;
index 97f187e2663f3adaf37a50674c36aafec11d291f..0a74a20ca32bae76629e956a862eb051ebcd0a32 100644 (file)
@@ -20,7 +20,7 @@ BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
 BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
 BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
 
-CFLAGS := -O2 -g -std=gnu99 -pthread -Wall
+CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie
 
 UNAME_M := $(shell uname -m)
 CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)