Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 22 Oct 2017 10:58:23 +0000 (06:58 -0400)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 22 Oct 2017 10:58:23 +0000 (06:58 -0400)
Pull x86 fixes from Thomas Gleixner:
 "A couple of fixes addressing the following issues:

   - The last polishing for the TLB code, removing the last BUG_ON() and
     the debug file along with tidying up the lazy TLB code.

   - Prevent triple fault on 1st Gen. 486 caused by stupidly calling the
     early IDT setup after the first function which causes a fault which
     should be caught by the exception table.

   - Limit the mmap of /dev/mem to valid addresses

   - Prevent late microcode loading on Broadwell X

   - Remove a redundant assignment in the cache info code"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Limit mmap() of /dev/mem to valid physical addresses
  x86/mm: Remove debug/x86/tlb_defer_switch_to_init_mm
  x86/mm: Tidy up "x86/mm: Flush more aggressively in lazy TLB mode"
  x86/mm/64: Remove the last VM_BUG_ON() from the TLB code
  x86/microcode/intel: Disable late loading on model 79
  x86/idt: Initialize early IDT before cr4_init_shadow()
  x86/cpu/intel_cacheinfo: Remove redundant assignment to 'this_leaf'
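
Of the fixes pulled above, the /dev/mem change is the easiest to illustrate in isolation. The sketch below is a hypothetical, self-contained version of the underlying range check, not the patch itself: phys_range_is_addressable() and the phys_bits parameter (standing in for the CPU's reported physical-address width, e.g. boot_cpu_data.x86_phys_bits) are illustrative names only.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Hedged sketch of the idea behind "x86/mm: Limit mmap() of /dev/mem to
 * valid physical addresses": accept a mapping only if the whole physical
 * range [addr, addr + count) lies below the first address the CPU cannot
 * generate.  phys_bits stands in for the hardware address width
 * (typically 36..52 on x86, assumed < 64 here); this is not the kernel code.
 */
static bool phys_range_is_addressable(uint64_t addr, size_t count,
                                      unsigned int phys_bits)
{
        uint64_t limit = 1ULL << phys_bits;   /* first non-addressable byte */

        /* check the count first so addr + count cannot overflow */
        if ((uint64_t)count > limit)
                return false;
        return addr <= limit - count;
}

The generic /dev/mem driver already exposes a valid_mmap_phys_addr_range() hook that its mmap path consults, failing the mmap() when the hook returns false; the x86 fix wires an addressability check of roughly this shape into that hook.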

276 files changed:
Documentation/core-api/kernel-api.rst
Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
Documentation/process/index.rst
Documentation/process/kernel-enforcement-statement.rst [new file with mode: 0644]
MAINTAINERS
arch/arm/Makefile
arch/arm/boot/compressed/debug.S
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/at91-sama5d27_som1.dtsi
arch/arm/boot/dts/at91-sama5d2_xplained.dts
arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
arch/arm/boot/dts/bcm2837-rpi-3-b.dts
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/gemini.dtsi
arch/arm/boot/dts/imx7d.dtsi
arch/arm/boot/dts/moxart.dtsi
arch/arm/boot/dts/sama5d2.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/kernel/debug.S
arch/arm/mach-ux500/cpu-db8500.c
arch/arm/mach-ux500/pm.c
arch/arm/mm/nommu.c
arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
arch/arm64/boot/dts/renesas/salvator-common.dtsi
arch/arm64/boot/dts/rockchip/rk3328.dtsi
arch/arm64/boot/dts/rockchip/rk3368.dtsi
arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
arch/parisc/kernel/parisc_ksyms.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/time.c
arch/s390/configs/zfcpdump_defconfig
arch/s390/kernel/smp.c
crypto/asymmetric_keys/asymmetric_type.c
crypto/asymmetric_keys/pkcs7_parser.c
drivers/block/nbd.c
drivers/block/skd_main.c
drivers/bus/mvebu-mbus.c
drivers/clocksource/cs5535-clockevt.c
drivers/dma/altera-msgdma.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/i915/gvt/sched_policy.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-piix4.c
drivers/input/input.c
drivers/input/joydev.c
drivers/input/keyboard/tca8418_keypad.c
drivers/input/misc/axp20x-pek.c
drivers/input/misc/ims-pcu.c
drivers/input/mouse/synaptics.c
drivers/input/touchscreen/goodix.c
drivers/input/touchscreen/stmfts.c
drivers/input/touchscreen/ti_am335x_tsc.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-tango.c
drivers/media/cec/cec-adap.c
drivers/media/dvb-core/dvb_frontend.c
drivers/media/dvb-frontends/dib3000mc.c
drivers/media/dvb-frontends/dvb-pll.c
drivers/media/platform/Kconfig
drivers/media/platform/qcom/camss-8x16/camss-vfe.c
drivers/media/platform/qcom/venus/helpers.c
drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
drivers/media/platform/s5p-cec/s5p_cec.c
drivers/media/platform/s5p-cec/s5p_cec.h
drivers/media/tuners/mt2060.c
drivers/mmc/host/sdhci-pci-core.c
drivers/net/can/flexcan.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/gs_usb.c
drivers/net/dsa/mv88e6060.c
drivers/net/ethernet/amazon/ena/ena_ethtool.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/geneve.c
drivers/net/macsec.c
drivers/net/tun.c
drivers/net/wimax/i2400m/fw.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
drivers/net/wireless/intel/iwlwifi/cfg/7000.c
drivers/net/wireless/intel/iwlwifi/cfg/8000.c
drivers/net/wireless/intel/iwlwifi/cfg/9000.c
drivers/net/wireless/intel/iwlwifi/cfg/a000.c
drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.h
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/intel/iwlwifi/mvm/rx.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netfront.c
drivers/of/of_mdio.c
drivers/reset/reset-socfpga.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/libiscsi.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_fc.c
drivers/staging/media/imx/imx-media-dev.c
fs/btrfs/super.c
fs/crypto/keyinfo.c
fs/direct-io.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/keystore.c
fs/exec.c
fs/ext4/super.c
fs/fscache/object-list.c
fs/fuse/inode.c
fs/iomap.c
fs/namespace.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/xfs_aops.c
fs/xfs/xfs_fsmap.c
fs/xfs/xfs_super.c
include/linux/filter.h
include/linux/input.h
include/linux/irq.h
include/linux/irqchip/arm-gic-v3.h
include/linux/key.h
include/linux/mbus.h
include/linux/mm_types.h
include/linux/mod_devicetable.h
include/linux/netdevice.h
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/sched/mm.h
include/linux/srcu.h
include/net/inet_sock.h
include/net/tcp.h
include/sound/control.h
include/uapi/linux/membarrier.h
kernel/bpf/arraymap.c
kernel/bpf/devmap.c
kernel/bpf/hashtab.c
kernel/bpf/sockmap.c
kernel/bpf/verifier.c
kernel/cpu.c
kernel/exit.c
kernel/irq/generic-chip.c
kernel/rcu/srcutree.c
kernel/rcu/sync.c
kernel/rcu/tree.c
kernel/sched/membarrier.c
lib/digsig.c
lib/ts_fsm.c
lib/ts_kmp.c
mm/memcontrol.c
mm/percpu.c
net/bridge/br_netlink.c
net/can/af_can.c
net/can/bcm.c
net/core/dev.c
net/core/dev_ioctl.c
net/core/ethtool.c
net/core/filter.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_reuseport.c
net/dccp/ipv4.c
net/dns_resolver/dns_key.c
net/ipv4/Kconfig
net/ipv4/cipso_ipv4.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_hashtables.c
net/ipv4/syncookies.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_output.c
net/l2tp/l2tp_ppp.c
net/mac80211/key.c
net/ncsi/internal.h
net/ncsi/ncsi-aen.c
net/ncsi/ncsi-manage.c
net/ncsi/ncsi-rsp.c
net/netlink/af_netlink.c
net/packet/af_packet.c
net/rxrpc/af_rxrpc.c
net/sched/cls_flower.c
net/sctp/input.c
net/sctp/socket.c
net/vmw_vsock/hyperv_transport.c
samples/sockmap/sockmap_kern.c
samples/trace_events/trace-events-sample.c
scripts/mod/devicetable-offsets.c
scripts/mod/file2alias.c
security/commoncap.c
security/keys/Kconfig
security/keys/big_key.c
security/keys/encrypted-keys/encrypted.c
security/keys/gc.c
security/keys/key.c
security/keys/keyctl.c
security/keys/keyring.c
security/keys/permission.c
security/keys/proc.c
security/keys/process_keys.c
security/keys/request_key.c
security/keys/request_key_auth.c
security/keys/trusted.c
security/keys/user_defined.c
sound/core/seq/seq_lock.c
sound/core/seq/seq_lock.h
sound/core/vmaster.c
sound/hda/hdac_controller.c
sound/pci/hda/hda_codec.c
sound/usb/quirks.c
tools/include/uapi/linux/bpf.h
tools/objtool/check.c
tools/perf/Documentation/perf-record.txt
tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
tools/perf/ui/hist.c
tools/perf/util/parse-events.l
tools/perf/util/session.c
tools/perf/util/xyarray.h
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/bpf/bpf_helpers.h
tools/testing/selftests/bpf/sockmap_verdict_prog.c
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/bpf/test_verifier.c

index 8282099e0cbf446bbebaaf63868ca04610999408..5da10184d9084a77c15e42b56f76a4571193344a 100644 (file)
@@ -352,44 +352,30 @@ Read-Copy Update (RCU)
 ----------------------
 
 .. kernel-doc:: include/linux/rcupdate.h
-   :external:
 
 .. kernel-doc:: include/linux/rcupdate_wait.h
-   :external:
 
 .. kernel-doc:: include/linux/rcutree.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/tree.c
-   :external:
 
 .. kernel-doc:: kernel/rcu/tree_plugin.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/tree_exp.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/update.c
-   :external:
 
 .. kernel-doc:: include/linux/srcu.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/srcutree.c
-   :external:
 
 .. kernel-doc:: include/linux/rculist_bl.h
-   :external:
 
 .. kernel-doc:: include/linux/rculist.h
-   :external:
 
 .. kernel-doc:: include/linux/rculist_nulls.h
-   :external:
 
 .. kernel-doc:: include/linux/rcu_sync.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/sync.c
-   :external:
 
index 4c29cdab0ea5b72985168100891e2422e8dbb252..5eb108e180fa282711a4d448c8353196ae6cadf1 100644 (file)
@@ -99,7 +99,7 @@ Examples:
                        compatible = "arm,gic-v3-its";
                        msi-controller;
                        #msi-cells = <1>;
-                       reg = <0x0 0x2c200000 0 0x200000>;
+                       reg = <0x0 0x2c200000 0 0x20000>;
                };
        };
 
@@ -124,14 +124,14 @@ Examples:
                        compatible = "arm,gic-v3-its";
                        msi-controller;
                        #msi-cells = <1>;
-                       reg = <0x0 0x2c200000 0 0x200000>;
+                       reg = <0x0 0x2c200000 0 0x20000>;
                };
 
                gic-its@2c400000 {
                        compatible = "arm,gic-v3-its";
                        msi-controller;
                        #msi-cells = <1>;
-                       reg = <0x0 0x2c400000 0 0x200000>;
+                       reg = <0x0 0x2c400000 0 0x20000>;
                };
 
                ppi-partitions {
index 82fc399fcd33d1628289ec5ccfeb3193a368c2fc..61e43cc3ed171e2371b6372609533dc16fc9b057 100644 (file)
@@ -25,6 +25,7 @@ Below are the essential guides that every developer should read.
    submitting-patches
    coding-style
    email-clients
+   kernel-enforcement-statement
 
 Other guides to the community that are of interest to most developers are: 
 
diff --git a/Documentation/process/kernel-enforcement-statement.rst b/Documentation/process/kernel-enforcement-statement.rst
new file mode 100644 (file)
index 0000000..1e23d42
--- /dev/null
@@ -0,0 +1,147 @@
+Linux Kernel Enforcement Statement
+----------------------------------
+
+As developers of the Linux kernel, we have a keen interest in how our software
+is used and how the license for our software is enforced.  Compliance with the
+reciprocal sharing obligations of GPL-2.0 is critical to the long-term
+sustainability of our software and community.
+
+Although there is a right to enforce the separate copyright interests in the
+contributions made to our community, we share an interest in ensuring that
+individual enforcement actions are conducted in a manner that benefits our
+community and do not have an unintended negative impact on the health and
+growth of our software ecosystem.  In order to deter unhelpful enforcement
+actions, we agree that it is in the best interests of our development
+community to undertake the following commitment to users of the Linux kernel
+on behalf of ourselves and any successors to our copyright interests:
+
+    Notwithstanding the termination provisions of the GPL-2.0, we agree that
+    it is in the best interests of our development community to adopt the
+    following provisions of GPL-3.0 as additional permissions under our
+    license with respect to any non-defensive assertion of rights under the
+    license.
+
+       However, if you cease all violation of this License, then your license
+       from a particular copyright holder is reinstated (a) provisionally,
+       unless and until the copyright holder explicitly and finally
+       terminates your license, and (b) permanently, if the copyright holder
+       fails to notify you of the violation by some reasonable means prior to
+       60 days after the cessation.
+
+       Moreover, your license from a particular copyright holder is
+       reinstated permanently if the copyright holder notifies you of the
+       violation by some reasonable means, this is the first time you have
+       received notice of violation of this License (for any work) from that
+       copyright holder, and you cure the violation prior to 30 days after
+       your receipt of the notice.
+
+Our intent in providing these assurances is to encourage more use of the
+software.  We want companies and individuals to use, modify and distribute
+this software.  We want to work with users in an open and transparent way to
+eliminate any uncertainty about our expectations regarding compliance or
+enforcement that might limit adoption of our software.  We view legal action
+as a last resort, to be initiated only when other community efforts have
+failed to resolve the problem.
+
+Finally, once a non-compliance issue is resolved, we hope the user will feel
+welcome to join us in our efforts on this project.  Working together, we will
+be stronger.
+
+Except where noted below, we speak only for ourselves, and not for any company
+we might work for today, have in the past, or will in the future.
+
+  - Bjorn Andersson (Linaro)
+  - Andrea Arcangeli (Red Hat)
+  - Neil Armstrong
+  - Jens Axboe
+  - Pablo Neira Ayuso
+  - Khalid Aziz
+  - Ralf Baechle
+  - Felipe Balbi
+  - Arnd Bergmann
+  - Ard Biesheuvel
+  - Paolo Bonzini (Red Hat)
+  - Christian Borntraeger
+  - Mark Brown (Linaro)
+  - Paul Burton
+  - Javier Martinez Canillas
+  - Rob Clark
+  - Jonathan Corbet
+  - Vivien Didelot (Savoir-faire Linux)
+  - Hans de Goede (Red Hat)
+  - Mel Gorman (SUSE)
+  - Sven Eckelmann
+  - Alex Elder (Linaro)
+  - Fabio Estevam
+  - Larry Finger
+  - Bhumika Goyal
+  - Andy Gross
+  - Juergen Gross
+  - Shawn Guo
+  - Ulf Hansson
+  - Tejun Heo
+  - Rob Herring
+  - Masami Hiramatsu
+  - Michal Hocko
+  - Simon Horman
+  - Johan Hovold (Hovold Consulting AB)
+  - Christophe JAILLET
+  - Olof Johansson
+  - Lee Jones (Linaro)
+  - Heiner Kallweit
+  - Srinivas Kandagatla
+  - Jan Kara
+  - Shuah Khan (Samsung)
+  - David Kershner
+  - Jaegeuk Kim
+  - Namhyung Kim
+  - Colin Ian King
+  - Jeff Kirsher
+  - Greg Kroah-Hartman (Linux Foundation)
+  - Christian König
+  - Vinod Koul
+  - Krzysztof Kozlowski
+  - Viresh Kumar
+  - Aneesh Kumar K.V
+  - Julia Lawall
+  - Doug Ledford (Red Hat)
+  - Chuck Lever (Oracle)
+  - Daniel Lezcano
+  - Shaohua Li
+  - Xin Long (Red Hat)
+  - Tony Luck
+  - Mike Marshall
+  - Chris Mason
+  - Paul E. McKenney
+  - David S. Miller
+  - Ingo Molnar
+  - Kuninori Morimoto
+  - Borislav Petkov
+  - Jiri Pirko
+  - Josh Poimboeuf
+  - Sebastian Reichel (Collabora)
+  - Guenter Roeck
+  - Joerg Roedel
+  - Leon Romanovsky
+  - Steven Rostedt (VMware)
+  - Ivan Safonov
+  - Anna Schumaker
+  - Jes Sorensen
+  - K.Y. Srinivasan
+  - Heiko Stuebner
+  - Jiri Kosina (SUSE)
+  - Dmitry Torokhov
+  - Linus Torvalds
+  - Thierry Reding
+  - Rik van Riel
+  - Geert Uytterhoeven (Glider bvba)
+  - Daniel Vetter
+  - Linus Walleij
+  - Richard Weinberger
+  - Dan Williams
+  - Rafael J. Wysocki
+  - Arvind Yadav
+  - Masahiro Yamada
+  - Wei Yongjun
+  - Lv Zheng
index a74227ad082ee84db1f0a0a693d5fe62a007d36a..e652a3e2929df9079337a840f7a7113eb5a911fe 100644 (file)
@@ -10560,6 +10560,8 @@ M:      Peter Zijlstra <peterz@infradead.org>
 M:     Ingo Molnar <mingo@redhat.com>
 M:     Arnaldo Carvalho de Melo <acme@kernel.org>
 R:     Alexander Shishkin <alexander.shishkin@linux.intel.com>
+R:     Jiri Olsa <jolsa@redhat.com>
+R:     Namhyung Kim <namhyung@kernel.org>
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:     Supported
index 47d3a1ab08d2491aff178040b309180226d131e8..817e5cfef83a933e8d66bfd62a7c908a1af92f2c 100644 (file)
@@ -131,7 +131,7 @@ endif
 KBUILD_CFLAGS  +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
 KBUILD_AFLAGS  +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
 
-CHECKFLAGS     += -D__arm__
+CHECKFLAGS     += -D__arm__ -m32
 
 #Default value
 head-y         := arch/arm/kernel/head$(MMUEXT).o
index 5392ee63338fac3453f30b125366e03241158133..8f6e37177de132252c560765440698ac08b69408 100644 (file)
@@ -23,7 +23,11 @@ ENTRY(putc)
        strb    r0, [r1]
        mov     r0, #0x03               @ SYS_WRITEC
    ARM(        svc     #0x123456       )
+#ifdef CONFIG_CPU_V7M
+ THUMB(        bkpt    #0xab           )
+#else
  THUMB(        svc     #0xab           )
+#endif
        mov     pc, lr
        .align  2
 1:     .word   _GLOBAL_OFFSET_TABLE_ - .
index 7ff0811e61db3ad73be8f0c9ea8f79ebde1681c7..4960722aab32a1cf644ddb1e69d792845c240d44 100644 (file)
                        };
 
                        i2c0: i2c@11000 {
-                               compatible = "marvell,mv64xxx-i2c";
+                               compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
                                reg = <0x11000 0x20>;
                                #address-cells = <1>;
                                #size-cells = <0>;
                        };
 
                        i2c1: i2c@11100 {
-                               compatible = "marvell,mv64xxx-i2c";
+                               compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
                                reg = <0x11100 0x20>;
                                #address-cells = <1>;
                                #size-cells = <0>;
index 63a5af8981659fb7f3162323be34e55dbcc345d6..cf0087b4c9e184259eb63667f8322067c80b584b 100644 (file)
@@ -67,8 +67,8 @@
                                pinctrl-0 = <&pinctrl_macb0_default>;
                                phy-mode = "rmii";
 
-                               ethernet-phy@1 {
-                                       reg = <0x1>;
+                               ethernet-phy@0 {
+                                       reg = <0x0>;
                                        interrupt-parent = <&pioA>;
                                        interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
                                        pinctrl-names = "default";
index c7e9ccf2bc8724304f44c574bc901b3e9b3999af..cbc26001247bea2fabcf186ad784f187810dbc63 100644 (file)
                                vddana-supply = <&vdd_3v3_lp_reg>;
                                vref-supply = <&vdd_3v3_lp_reg>;
                                pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_adc_default>;
+                               pinctrl-0 = <&pinctrl_adc_default &pinctrl_adtrg_default>;
                                status = "okay";
                        };
 
                                        bias-disable;
                                };
 
+                               /*
+                                * The ADTRG pin can work on any edge type.
+                                * In here it's being pulled up, so need to
+                                * connect it to ground to get an edge e.g.
+                                * Trigger can be configured on falling, rise
+                                * or any edge, and the pull-up can be changed
+                                * to pull-down or left floating according to
+                                * needs.
+                                */
+                               pinctrl_adtrg_default: adtrg_default {
+                                       pinmux = <PIN_PD31__ADTRG>;
+                                       bias-pull-up;
+                               };
+
                                pinctrl_charger_chglev: charger_chglev {
                                        pinmux = <PIN_PA12__GPIO>;
                                        bias-disable;
index 82651c3eb682a749652a31eadbb4f910961151f7..b8565fc33eea6bc18b88ab72cf8774b15f3c678d 100644 (file)
        compatible = "raspberrypi,model-zero-w", "brcm,bcm2835";
        model = "Raspberry Pi Zero W";
 
-       /* Needed by firmware to properly init UARTs */
-       aliases {
-               uart0 = "/soc/serial@7e201000";
-               uart1 = "/soc/serial@7e215040";
-               serial0 = "/soc/serial@7e201000";
-               serial1 = "/soc/serial@7e215040";
+       chosen {
+               /* 8250 auxiliary UART instead of pl011 */
+               stdout-path = "serial1:115200n8";
        };
 
        leds {
index 20725ca487f30afd5e84059fbcb1513d3399cfa8..c71a0d73d2a2e9fd64c1906b0fe42a6cfefbd230 100644 (file)
@@ -8,6 +8,11 @@
        compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
        model = "Raspberry Pi 3 Model B";
 
+       chosen {
+               /* 8250 auxiliary UART instead of pl011 */
+               stdout-path = "serial1:115200n8";
+       };
+
        memory {
                reg = <0 0x40000000>;
        };
index 431dcfc900c024d85a88231d1df007916dafdfeb..013431e3d7c3140d3a0645bdf4f130e9a860f984 100644 (file)
        #address-cells = <1>;
        #size-cells = <1>;
 
+       aliases {
+               serial0 = &uart0;
+               serial1 = &uart1;
+       };
+
        chosen {
-               bootargs = "earlyprintk console=ttyAMA0";
+               stdout-path = "serial0:115200n8";
        };
 
        thermal-zones {
index c68e8d430234c3824198d46b336f0d689cf38611..f0d178c77153fc6a3e4c84ebb798f3720995e066 100644 (file)
                };
 
                watchdog@41000000 {
-                       compatible = "cortina,gemini-watchdog";
+                       compatible = "cortina,gemini-watchdog", "faraday,ftwdt010";
                        reg = <0x41000000 0x1000>;
                        interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
                        resets = <&syscon GEMINI_RESET_WDOG>;
                        clocks = <&syscon GEMINI_CLK_APB>;
+                       clock-names = "PCLK";
                };
 
                uart0: serial@42000000 {
index f46814a7ea44100ff0dbd44f054bb17ed6c9d740..4d308d17f040c71157db72a8d81abf8de9dd4d8e 100644 (file)
                interrupt-names = "msi";
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0x7>;
-               interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
                         <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
                         <&clks IMX7D_PCIE_PHY_ROOT_CLK>;
index 1f4c795d3f7210223d7cdab4b5e430affa133a35..da7b3237bfe9811ff9e8908310b8f146f938f21e 100644 (file)
                };
 
                watchdog: watchdog@98500000 {
-                       compatible = "moxa,moxart-watchdog";
+                       compatible = "moxa,moxart-watchdog", "faraday,ftwdt010";
                        reg = <0x98500000 0x10>;
                        clocks = <&clk_apb>;
+                       clock-names = "PCLK";
                };
 
                sdhci: sdhci@98e00000 {
index 38d2216c7ead9ff422cce1e740a820e47977a700..b1a26b42d1904a82817e1986da79fd963b934d2f 100644 (file)
                                atmel,min-sample-rate-hz = <200000>;
                                atmel,max-sample-rate-hz = <20000000>;
                                atmel,startup-time-ms = <4>;
+                               atmel,trigger-edge-type = <IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
index b147cb0dc14b26ce92db7ea70bba2a8f77bd0d38..eef072a21accaed0c29d6407d076331ad579a987 100644 (file)
                                        #size-cells = <0>;
                                        reg = <0>;
 
-                                       tcon1_in_drc1: endpoint@0 {
-                                               reg = <0>;
+                                       tcon1_in_drc1: endpoint@1 {
+                                               reg = <1>;
                                                remote-endpoint = <&drc1_out_tcon1>;
                                        };
                                };
                                        #size-cells = <0>;
                                        reg = <1>;
 
-                                       be1_out_drc1: endpoint@0 {
-                                               reg = <0>;
+                                       be1_out_drc1: endpoint@1 {
+                                               reg = <1>;
                                                remote-endpoint = <&drc1_in_be1>;
                                        };
                                };
                                        #size-cells = <0>;
                                        reg = <0>;
 
-                                       drc1_in_be1: endpoint@0 {
-                                               reg = <0>;
+                                       drc1_in_be1: endpoint@1 {
+                                               reg = <1>;
                                                remote-endpoint = <&be1_out_drc1>;
                                        };
                                };
                                        #size-cells = <0>;
                                        reg = <1>;
 
-                                       drc1_out_tcon1: endpoint@0 {
-                                               reg = <0>;
+                                       drc1_out_tcon1: endpoint@1 {
+                                               reg = <1>;
                                                remote-endpoint = <&tcon1_in_drc1>;
                                        };
                                };
index ea9646cc2a0ed7eba2fa4f7f7638c0802642edcc..0a498cb3fad88d046c23073a5d1b7785fdfaf1a7 100644 (file)
@@ -115,7 +115,11 @@ ENTRY(printascii)
                mov     r1, r0
                mov     r0, #0x04               @ SYS_WRITE0
        ARM(    svc     #0x123456       )
+#ifdef CONFIG_CPU_V7M
+       THUMB(  bkpt    #0xab           )
+#else
        THUMB(  svc     #0xab           )
+#endif
                ret     lr
 ENDPROC(printascii)
 
@@ -124,7 +128,11 @@ ENTRY(printch)
                strb    r0, [r1]
                mov     r0, #0x03               @ SYS_WRITEC
        ARM(    svc     #0x123456       )
+#ifdef CONFIG_CPU_V7M
+       THUMB(  bkpt    #0xab           )
+#else
        THUMB(  svc     #0xab           )
+#endif
                ret     lr
 ENDPROC(printch)
 
index 71a34e8c345a5b19b98f42cc368f796118d3214f..57058ac46f49733887e439012afa3247f03e1737 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/mach/arch.h>
 
 #include "db8500-regs.h"
+#include "pm_domains.h"
 
 static int __init ux500_l2x0_unlock(void)
 {
@@ -157,6 +158,9 @@ static const struct of_device_id u8500_local_bus_nodes[] = {
 
 static void __init u8500_init_machine(void)
 {
+       /* Initialize ux500 power domains */
+       ux500_pm_domains_init();
+
        /* automatically probe child nodes of dbx5x0 devices */
        if (of_machine_is_compatible("st-ericsson,u8540"))
                of_platform_populate(NULL, u8500_local_bus_nodes,
index a970e7fcba9e02fe6e2651cd5cfca76321d26314..f6c33a0c1c610cf17881fc276725817acdb4b29d 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/of_address.h>
 
 #include "db8500-regs.h"
-#include "pm_domains.h"
 
 /* ARM WFI Standby signal register */
 #define PRCM_ARM_WFI_STANDBY    (prcmu_base + 0x130)
@@ -203,7 +202,4 @@ void __init ux500_pm_init(u32 phy_base, u32 size)
 
        /* Set up ux500 suspend callbacks. */
        suspend_set_ops(UX500_SUSPEND_OPS);
-
-       /* Initialize ux500 power domains */
-       ux500_pm_domains_init();
 }
index 3b8e728cc9443975c6cd66c63350a2074df310b7..91537d90f5f526e4e9135b2b0e5403141172a4cb 100644 (file)
@@ -344,6 +344,11 @@ void __init arm_mm_memblock_reserve(void)
         * reserved here.
         */
 #endif
+       /*
+        * In any case, always ensure address 0 is never used as many things
+        * get very confused if 0 is returned as a legitimate address.
+        */
+       memblock_reserve(0, 1);
 }
 
 void __init adjust_lowmem_bounds(void)
index caf8b6fbe5e350de2095d16489f19ce734ca4bc3..d06e34b5d192323ffef69303330e1f37a9a27310 100644 (file)
        chosen {
                stdout-path = "serial0:115200n8";
        };
-
-       reg_vcc3v3: vcc3v3 {
-               compatible = "regulator-fixed";
-               regulator-name = "vcc3v3";
-               regulator-min-microvolt = <3300000>;
-               regulator-max-microvolt = <3300000>;
-       };
 };
 
 &ehci0 {
@@ -91,7 +84,7 @@
 &mmc0 {
        pinctrl-names = "default";
        pinctrl-0 = <&mmc0_pins>;
-       vmmc-supply = <&reg_vcc3v3>;
+       vmmc-supply = <&reg_dcdc1>;
        cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
        cd-inverted;
        disable-wp;
index 8263a8a504a8fd11896da6aebc99eabdd4be928a..f2aa2a81de4dd2e982ec1e5fd5ae67f01bb08a63 100644 (file)
                                /* non-prefetchable memory */
                                0x82000000 0 0xf6000000 0  0xf6000000 0 0xf00000>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
                        interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
                        num-lanes = <1>;
                        clocks = <&cpm_clk 1 13>;
                                /* non-prefetchable memory */
                                0x82000000 0 0xf7000000 0  0xf7000000 0 0xf00000>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
                        interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
 
                        num-lanes = <1>;
                                /* non-prefetchable memory */
                                0x82000000 0 0xf8000000 0  0xf8000000 0 0xf00000>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
                        interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
 
                        num-lanes = <1>;
index b71ee6c83668e8900a4a889614ed83e97e718f85..4fe70323abb3a58c374a762607bb63f08b2cb0e0 100644 (file)
                                /* non-prefetchable memory */
                                0x82000000 0 0xfa000000 0  0xfa000000 0 0xf00000>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
                        interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
                        num-lanes = <1>;
                        clocks = <&cps_clk 1 13>;
                                /* non-prefetchable memory */
                                0x82000000 0 0xfb000000 0  0xfb000000 0 0xf00000>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
                        interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
 
                        num-lanes = <1>;
                                /* non-prefetchable memory */
                                0x82000000 0 0xfc000000 0  0xfc000000 0 0xf00000>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
                        interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
 
                        num-lanes = <1>;
index 4786c67b5e6527fd99d27aa969ef0b2d4274b4ad..d9d885006a8e8c9a6630f635132b6d0c24370ccc 100644 (file)
@@ -62,6 +62,7 @@
                brightness-levels = <256 128 64 16 8 4 0>;
                default-brightness-level = <6>;
 
+               power-supply = <&reg_12v>;
                enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
        };
 
                regulator-always-on;
        };
 
+       reg_12v: regulator2 {
+               compatible = "regulator-fixed";
+               regulator-name = "fixed-12V";
+               regulator-min-microvolt = <12000000>;
+               regulator-max-microvolt = <12000000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
        rsnd_ak4613: sound {
                compatible = "simple-audio-card";
 
index 6d615cb6e64d07cebcfa0a7ecebf04b8afb152b2..41d61840fb99ce52ec553c94e119ab63bb79cdbe 100644 (file)
        vop_mmu: iommu@ff373f00 {
                compatible = "rockchip,iommu";
                reg = <0x0 0xff373f00 0x0 0x100>;
-               interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH 0>;
+               interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "vop_mmu";
                #iommu-cells = <0>;
                status = "disabled";
index 19fbaa5e7bdd573e6ba6959be9c72ccbb0a388a6..1070c8264c13376a578338e95421f71321825243 100644 (file)
        iep_mmu: iommu@ff900800 {
                compatible = "rockchip,iommu";
                reg = <0x0 0xff900800 0x0 0x100>;
-               interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>;
+               interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "iep_mmu";
                #iommu-cells = <0>;
                status = "disabled";
index 7fd4bfcaa38e33c8b58ef60dc7adb99772b04cc1..fef82274a39dac27fc6e293affc81b4ff5d5d64b 100644 (file)
                                regulator-always-on;
                                regulator-boot-on;
                                regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <3300000>;
+                               regulator-max-microvolt = <3000000>;
                                regulator-state-mem {
                                        regulator-on-in-suspend;
-                                       regulator-suspend-microvolt = <3300000>;
+                                       regulator-suspend-microvolt = <3000000>;
                                };
                        };
 
index 53ff3d191a1d176cc0d7c6d380894b48e42ec7da..910628d18add07d9a39974bc6ce2ac4a403adb81 100644 (file)
                        vcc_sd: LDO_REG4 {
                                regulator-name = "vcc_sd";
                                regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <3300000>;
+                               regulator-max-microvolt = <3000000>;
                                regulator-always-on;
                                regulator-boot-on;
                                regulator-state-mem {
                                        regulator-on-in-suspend;
-                                       regulator-suspend-microvolt = <3300000>;
+                                       regulator-suspend-microvolt = <3000000>;
                                };
                        };
 
index 6c30bb02210d80a2d78843cccaca7a3d83f971cd..0f873c897d0de5a75f9d4e4d90d7c658b7a173d3 100644 (file)
                                regulator-always-on;
                                regulator-boot-on;
                                regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <3300000>;
+                               regulator-max-microvolt = <3000000>;
                                regulator-state-mem {
                                        regulator-on-in-suspend;
-                                       regulator-suspend-microvolt = <3300000>;
+                                       regulator-suspend-microvolt = <3000000>;
                                };
                        };
 
index c6d6272a934f03823b655cf07b38e7bbc01ca12e..7baa2265d43927fd7e5a24269e627486c60c6b35 100644 (file)
@@ -35,12 +35,12 @@ EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(__xchg8);
 EXPORT_SYMBOL(__xchg32);
 EXPORT_SYMBOL(__cmpxchg_u32);
+EXPORT_SYMBOL(__cmpxchg_u64);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(__atomic_hash);
 #endif
 #ifdef CONFIG_64BIT
 EXPORT_SYMBOL(__xchg64);
-EXPORT_SYMBOL(__cmpxchg_u64);
 #endif
 
 #include <linux/uaccess.h>
index 23de307c3052aa9ecac21fd6c294657fb53de447..41e60a9c7db23b8384b18bf8ddd45f188ef4a0c3 100644 (file)
@@ -742,7 +742,7 @@ lws_compare_and_swap_2:
 10:    ldd     0(%r25), %r25
 11:    ldd     0(%r24), %r24
 #else
-       /* Load new value into r22/r23 - high/low */
+       /* Load old value into r22/r23 - high/low */
 10:    ldw     0(%r25), %r22
 11:    ldw     4(%r25), %r23
        /* Load new value into fr4 for atomic store later */
@@ -834,11 +834,11 @@ cas2_action:
        copy    %r0, %r28
 #else
        /* Compare first word */
-19:    ldw,ma  0(%r26), %r29
+19:    ldw     0(%r26), %r29
        sub,=   %r29, %r22, %r0
        b,n     cas2_end
        /* Compare second word */
-20:    ldw,ma  4(%r26), %r29
+20:    ldw     4(%r26), %r29
        sub,=   %r29, %r23, %r0
        b,n     cas2_end
        /* Perform the store */
index 2d956aa0a38abbc3829757bab4749dd6a0037490..8c0105a49839cf018a80108f76dadccb5e793ee1 100644 (file)
@@ -253,7 +253,10 @@ static int __init init_cr16_clocksource(void)
                cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
 
                for_each_online_cpu(cpu) {
-                       if (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)
+                       if (cpu == 0)
+                               continue;
+                       if ((cpu0_loc != 0) &&
+                           (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
                                continue;
 
                        clocksource_cr16.name = "cr16_unstable";
index afa46a7406eaeddbbf70bbaf384f49ffa0535874..04e042edbab760f13a2da86a0cab071346bf2ab5 100644 (file)
@@ -27,6 +27,7 @@ CONFIG_NET=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_RAM=y
 # CONFIG_BLK_DEV_XPRAM is not set
 # CONFIG_DCSSBLK is not set
 # CONFIG_DASD is not set
@@ -59,6 +60,7 @@ CONFIG_CONFIGFS_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
index 1cee6753d47a5cba115730cb73cef4324e8d3ed1..495ff6959dec76a904ee2f6ccc30f8ef7809960d 100644 (file)
@@ -293,7 +293,10 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
        lc->lpp = LPP_MAGIC;
        lc->current_pid = tsk->pid;
        lc->user_timer = tsk->thread.user_timer;
+       lc->guest_timer = tsk->thread.guest_timer;
        lc->system_timer = tsk->thread.system_timer;
+       lc->hardirq_timer = tsk->thread.hardirq_timer;
+       lc->softirq_timer = tsk->thread.softirq_timer;
        lc->steal_timer = 0;
 }
 
index e4b0ed386bc82f339829fea2dcedd42fe2f0aff5..39aecad286fe482ff3f44fe08b286c2edbf3553b 100644 (file)
@@ -57,6 +57,8 @@ struct key *find_asymmetric_key(struct key *keyring,
        char *req, *p;
        int len;
 
+       BUG_ON(!id_0 && !id_1);
+
        if (id_0) {
                lookup = id_0->data;
                len = id_0->len;
@@ -105,7 +107,7 @@ struct key *find_asymmetric_key(struct key *keyring,
        if (id_0 && id_1) {
                const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
 
-               if (!kids->id[0]) {
+               if (!kids->id[1]) {
                        pr_debug("First ID matches, but second is missing\n");
                        goto reject;
                }
index af4cd864911752478ba5f3c2732273f9624d434f..d140d8bb2c96140c408b1e3450f288e562372743 100644 (file)
@@ -88,6 +88,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg)
        bool want = false;
 
        sinfo = msg->signed_infos;
+       if (!sinfo)
+               goto inconsistent;
+
        if (sinfo->authattrs) {
                want = true;
                msg->have_authattrs = true;
index 883dfebd3014b506a0861aed0640dc313fd8b0cc..baebbdfd74d54f4969fac84e06c8b1741b831c0c 100644 (file)
@@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
        struct nbd_config *config = nbd->config;
        config->blksize = blocksize;
        config->bytesize = blocksize * nr_blocks;
-       nbd_size_update(nbd);
 }
 
 static void nbd_complete_rq(struct request *req)
@@ -1094,6 +1093,7 @@ static int nbd_start_device(struct nbd_device *nbd)
                args->index = i;
                queue_work(recv_workqueue, &args->work);
        }
+       nbd_size_update(nbd);
        return error;
 }
 
index 7cedb4295e9d325343e296b8b299cb407b7b2a55..64d0fc17c1742ab74aa232da503d08e344b594b2 100644 (file)
@@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
                return NULL;
        *dma_handle = dma_map_single(dev, buf, s->size, dir);
        if (dma_mapping_error(dev, *dma_handle)) {
-               kfree(buf);
+               kmem_cache_free(s, buf);
                buf = NULL;
        }
        return buf;
index c7f39690318473d4b75bca11c0c3fd4830f4d084..70db4d5638a6338632a916e4ee07f0f4b3ca5e04 100644 (file)
@@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
                        if (mbus->hw_io_coherency)
                                w->mbus_attr |= ATTR_HW_COHERENCY;
                        w->base = base & DDR_BASE_CS_LOW_MASK;
-                       w->size = (size | ~DDR_SIZE_MASK) + 1;
+                       w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
                }
        }
        mvebu_mbus_dram_info.num_cs = cs;
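
The cast added in the hunk above matters when a chip-select window spans the full 4 GB that a 32-bit size field can describe: computed in 32-bit arithmetic, the trailing "+ 1" wraps to zero. A stand-alone illustration with hypothetical register values (not the driver code) follows.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical mask/size values for illustration only: the low bits of the
 * size register are "don't care" bits that get filled with ones before the
 * increment, as in the expression (size | ~DDR_SIZE_MASK) + 1 above. */
#define DDR_SIZE_MASK 0xff000000u

int main(void)
{
        uint32_t size = 0xff000000u;  /* a full 4 GB window as read back */

        uint32_t wrapped = (size | ~DDR_SIZE_MASK) + 1;            /* wraps to 0 */
        uint64_t widened = (uint64_t)(size | ~DDR_SIZE_MASK) + 1;  /* 0x100000000 */

        printf("32-bit result: 0x%" PRIx32 "\n", wrapped);
        printf("64-bit result: 0x%" PRIx64 "\n", widened);
        return 0;
}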
index a1df588343f2dac1fb1c6acb8f579f3688ad6938..1de8cac99a0e93b21d38f5c23f29739ec06caab7 100644 (file)
@@ -117,7 +117,8 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
        /* Turn off the clock (and clear the event) */
        disable_timer(cs5535_event_clock);
 
-       if (clockevent_state_shutdown(&cs5535_clockevent))
+       if (clockevent_state_detached(&cs5535_clockevent) ||
+           clockevent_state_shutdown(&cs5535_clockevent))
                return IRQ_HANDLED;
 
        /* Clear the counter */
index 339186f25a2ae529c447bd8359a9c55e0049d2ea..55f9c62ee54b063521bfbf0d4b19627b43e8c155 100644 (file)
@@ -344,7 +344,7 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 
        spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
-               spin_unlock_bh(&mdev->lock);
+               spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
@@ -407,7 +407,7 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 
        spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
-               spin_unlock_bh(&mdev->lock);
+               spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
index 97c94f9683fa047392ba62f128586e0b7e4492bc..38cea6fb25a8b9221d64b43da04c4268a2c986b8 100644 (file)
@@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
 {
        struct amd_sched_rq *rq = entity->rq;
-       int r;
 
        if (!amd_sched_entity_is_initialized(sched, entity))
                return;
+
        /**
         * The client will not queue more IBs during this fini, consume existing
-        * queued IBs or discard them on SIGKILL
+        * queued IBs
        */
-       if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
-               r = -ERESTARTSYS;
-       else
-               r = wait_event_killable(sched->job_scheduled,
-                                       amd_sched_entity_is_idle(entity));
-       amd_sched_rq_remove_entity(rq, entity);
-       if (r) {
-               struct amd_sched_job *job;
+       wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
 
-               /* Park the kernel for a moment to make sure it isn't processing
-                * our enity.
-                */
-               kthread_park(sched->thread);
-               kthread_unpark(sched->thread);
-               while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
-                       sched->ops->free_job(job);
-
-       }
+       amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
 }
 
index e651a58c18cf2d5b743439776dd62d50b64f8a7a..82b72425a42f7977c993134a2142434d8689227f 100644 (file)
@@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = {
 static int exynos_drm_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
-       struct exynos_drm_private *private = drm_dev->dev_private;
+       struct exynos_drm_private *private;
 
        if (pm_runtime_suspended(dev) || !drm_dev)
                return 0;
 
+       private = drm_dev->dev_private;
+
        drm_kms_helper_poll_disable(drm_dev);
        exynos_drm_fbdev_suspend(drm_dev);
        private->suspend_state = drm_atomic_helper_suspend(drm_dev);
@@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev)
 static int exynos_drm_resume(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
-       struct exynos_drm_private *private = drm_dev->dev_private;
+       struct exynos_drm_private *private;
 
        if (pm_runtime_suspended(dev) || !drm_dev)
                return 0;
 
+       private = drm_dev->dev_private;
        drm_atomic_helper_resume(drm_dev, private->suspend_state);
        exynos_drm_fbdev_resume(drm_dev);
        drm_kms_helper_poll_enable(drm_dev);
@@ -427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev)
 
        kfree(drm->dev_private);
        drm->dev_private = NULL;
+       dev_set_drvdata(dev, NULL);
 
        drm_dev_unref(drm);
 }
index 436377da41baced8e81352587cc3e035d0dc1ba3..03532dfc0cd51b8342e50da61524024dafc8ac34 100644 (file)
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
-       struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
-       int ring_id;
-
        kfree(vgpu->sched_data);
        vgpu->sched_data = NULL;
-
-       spin_lock_bh(&scheduler->mmio_context_lock);
-       for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
-               if (scheduler->engine_owner[ring_id] == vgpu) {
-                       intel_gvt_switch_mmio(vgpu, NULL, ring_id);
-                       scheduler->engine_owner[ring_id] = NULL;
-               }
-       }
-       spin_unlock_bh(&scheduler->mmio_context_lock);
 }
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 {
        struct intel_gvt_workload_scheduler *scheduler =
                &vgpu->gvt->scheduler;
+       int ring_id;
 
        gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                scheduler->need_reschedule = true;
                scheduler->current_vgpu = NULL;
        }
+
+       spin_lock_bh(&scheduler->mmio_context_lock);
+       for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+               if (scheduler->engine_owner[ring_id] == vgpu) {
+                       intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+                       scheduler->engine_owner[ring_id] = NULL;
+               }
+       }
+       spin_unlock_bh(&scheduler->mmio_context_lock);
 }
index af289d35b77a249c8fa9a7aab4a9672592399d51..32e857dc507cf9b1a9247f3bab497616d82e60a9 100644 (file)
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
        if (READ_ONCE(obj->mm.pages))
                return -ENODEV;
 
+       if (obj->mm.madv != I915_MADV_WILLNEED)
+               return -EFAULT;
+
        /* Before the pages are instantiated the object is treated as being
         * in the CPU domain. The pages will be clflushed as required before
         * use, and we can freely write into the pages directly. If userspace
index 4df039ef2ce316509ecc6faa04e707d135acf507..e161d383b526757a79097eadb9e65260392befe1 100644 (file)
 #include "intel_drv.h"
 #include "i915_trace.h"
 
-static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
 
-       for_each_engine(engine, dev_priv, id) {
-               struct intel_timeline *tl;
+       if (i915->gt.active_requests)
+              return false;
 
-               tl = &ggtt->base.timeline.engine[engine->id];
-               if (i915_gem_active_isset(&tl->last_request))
-                       return false;
-       }
+       for_each_engine(engine, i915, id) {
+              if (engine->last_retired_context != i915->kernel_context)
+                      return false;
+       }
 
-       return true;
+       return true;
 }
 
 static int ggtt_flush(struct drm_i915_private *i915)
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
                                    min_size, alignment, cache_level,
                                    start, end, mode);
 
-       /* Retire before we search the active list. Although we have
+       /*
+        * Retire before we search the active list. Although we have
         * reasonable accuracy in our retirement lists, we may have
         * a stray pin (preventing eviction) that can only be resolved by
         * retiring.
@@ -182,7 +182,8 @@ search_again:
                BUG_ON(ret);
        }
 
-       /* Can we unpin some objects such as idle hw contents,
+       /*
+        * Can we unpin some objects such as idle hw contents,
         * or pending flips? But since only the GGTT has global entries
         * such as scanouts, rinbuffers and contexts, we can skip the
         * purge when inspecting per-process local address spaces.
@@ -190,19 +191,33 @@ search_again:
        if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
                return -ENOSPC;
 
-       if (ggtt_is_idle(dev_priv)) {
-               /* If we still have pending pageflip completions, drop
-                * back to userspace to give our workqueues time to
-                * acquire our locks and unpin the old scanouts.
-                */
-               return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
-       }
+       /*
+        * Not everything in the GGTT is tracked via VMA using
+        * i915_vma_move_to_active(), otherwise we could evict as required
+        * with minimal stalling. Instead we are forced to idle the GPU and
+        * explicitly retire outstanding requests which will then remove
+        * the pinning for active objects such as contexts and ring,
+        * enabling us to evict them on the next iteration.
+        *
+        * To ensure that all user contexts are evictable, we perform
+        * a switch to the perma-pinned kernel context. This all also gives
+        * us a termination condition, when the last retired context is
+        * the kernel's there is no more we can evict.
+        */
+       if (!ggtt_is_idle(dev_priv)) {
+               ret = ggtt_flush(dev_priv);
+               if (ret)
+                       return ret;
 
-       ret = ggtt_flush(dev_priv);
-       if (ret)
-               return ret;
+               goto search_again;
+       }
 
-       goto search_again;
+       /*
+        * If we still have pending pageflip completions, drop
+        * back to userspace to give our workqueues time to
+        * acquire our locks and unpin the old scanouts.
+        */
+       return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
 
 found:
        /* drm_mm doesn't allow any other other operations while
index ed7cd9ee2c2af89737b2cc4618317c4d2fc9bedd..c9bcc6c450126e7cf638ba1c872a55938660add8 100644 (file)
@@ -6998,6 +6998,7 @@ enum {
  */
 #define  L3_GENERAL_PRIO_CREDITS(x)            (((x) >> 1) << 19)
 #define  L3_HIGH_PRIO_CREDITS(x)               (((x) >> 1) << 14)
+#define  L3_PRIO_CREDITS_MASK                  ((0x1f << 19) | (0x1f << 14))
 
 #define GEN7_L3CNTLREG1                                _MMIO(0xB01C)
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL                   0x3C47FF8C
index 476681d5940c7d381c2d48f2c4a1202054e79845..5e5fe03b638cbf2ee17206ccd4c6ee985134645e 100644 (file)
@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
                            int *n_entries)
 {
        if (IS_BROADWELL(dev_priv)) {
-               *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
-               return hsw_ddi_translations_fdi;
+               *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
+               return bdw_ddi_translations_fdi;
        } else if (IS_HASWELL(dev_priv)) {
                *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
                return hsw_ddi_translations_fdi;
@@ -2102,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
                 * register writes.
                 */
                val = I915_READ(DPCLKA_CFGCR0);
-               val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) |
-                        DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port));
+               val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
                I915_WRITE(DPCLKA_CFGCR0, val);
        } else if (IS_GEN9_BC(dev_priv)) {
                /* DDI -> PLL mapping  */
index a2a3d93d67bd252a3c9d137bedb66be26816bd23..df808a94c51194a886d8664ff8b8118ad05870ef 100644 (file)
@@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 
        /* 3. Configure DPLL_CFGCR0 */
        /* Avoid touching CFGCR1 if HDMI mode is not enabled */
-       if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) {
+       if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
                val = pll->state.hw_state.cfgcr1;
                I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
                /* 4. Read back to ensure writes completed */
index 9ab5969413722a5999a4266629ea2ba0fc2305f0..3c2d9cf22ed5a537253a14c2fe85ee200ce7b24c 100644 (file)
@@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
        }
 
        /* WaProgramL3SqcReg1DefaultForPerf:bxt */
-       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
-               I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
-                                          L3_HIGH_PRIO_CREDITS(2));
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+               u32 val = I915_READ(GEN8_L3SQCREG1);
+               val &= ~L3_PRIO_CREDITS_MASK;
+               val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+               I915_WRITE(GEN8_L3SQCREG1, val);
+       }
 
        /* WaToEnableHwFixForPushConstHWBug:bxt */
        if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
index ed662937ec3c85685b7fed049f381c6d1cb5654c..0a09f8ff6aff6710ea3580329d83646a67593b8c 100644 (file)
@@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
                                   int high_prio_credits)
 {
        u32 misccpctl;
+       u32 val;
 
        /* WaTempDisableDOPClkGating:bdw */
        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 
-       I915_WRITE(GEN8_L3SQCREG1,
-                  L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
-                  L3_HIGH_PRIO_CREDITS(high_prio_credits));
+       val = I915_READ(GEN8_L3SQCREG1);
+       val &= ~L3_PRIO_CREDITS_MASK;
+       val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
+       val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
+       I915_WRITE(GEN8_L3SQCREG1, val);
 
        /*
         * Wait at least 100 clocks before re-enabling clock gating.
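
Both L3SQC hunks in this series make the same change: instead of writing GEN8_L3SQCREG1 outright, the driver now reads it, clears only the two credit fields via the new L3_PRIO_CREDITS_MASK, and ors in the requested credits, so any other bits programmed into the register are preserved. A small userspace sketch of that read-modify-write arithmetic, reusing the macro definitions from the i915_reg.h hunk (the starting value is invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Macro definitions as they appear in the i915_reg.h hunk. */
    #define L3_GENERAL_PRIO_CREDITS(x)  (((x) >> 1) << 19)
    #define L3_HIGH_PRIO_CREDITS(x)     (((x) >> 1) << 14)
    #define L3_PRIO_CREDITS_MASK        ((0x1f << 19) | (0x1f << 14))

    int main(void)
    {
        /* Pretend this was read back from GEN8_L3SQCREG1. */
        uint32_t val = 0x80000000u | L3_GENERAL_PRIO_CREDITS(30);

        val &= ~L3_PRIO_CREDITS_MASK;          /* clear only the credit fields */
        val |= L3_GENERAL_PRIO_CREDITS(62);    /* general credits -> field value 31 */
        val |= L3_HIGH_PRIO_CREDITS(2);        /* high-prio credits -> field value 1 */

        /* Bit 31 from the original value survives: prints 0x80f84000. */
        printf("new GEN8_L3SQCREG1 value: 0x%08x\n", val);
        return 0;
    }
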
index f7707849bb538697009ca3b68c8296100a67b2a4..2b12d82aac1509f7023b24e15d5f04fe7ecc8290 100644 (file)
@@ -223,7 +223,7 @@ void
 nouveau_fbcon_accel_save_disable(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon) {
+       if (drm->fbcon && drm->fbcon->helper.fbdev) {
                drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
                drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
        }
@@ -233,7 +233,7 @@ void
 nouveau_fbcon_accel_restore(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon) {
+       if (drm->fbcon && drm->fbcon->helper.fbdev) {
                drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
        }
 }
@@ -245,7 +245,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
        struct nouveau_fbdev *fbcon = drm->fbcon;
        if (fbcon && drm->channel) {
                console_lock();
-               fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+               if (fbcon->helper.fbdev)
+                       fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
                console_unlock();
                nouveau_channel_idle(drm->channel);
                nvif_object_fini(&fbcon->twod);
index 2dbf62a2ac413081f7a15cb7c8779667447015d4..e4751f92b342d60f44c7d1a73a981940f15cb400 100644 (file)
@@ -3265,11 +3265,14 @@ nv50_mstm = {
 void
 nv50_mstm_service(struct nv50_mstm *mstm)
 {
-       struct drm_dp_aux *aux = mstm->mgr.aux;
+       struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
        bool handled = true;
        int ret;
        u8 esi[8] = {};
 
+       if (!aux)
+               return;
+
        while (handled) {
                ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
                if (ret != 8) {
index 8e2e24a7477458d0ad2361885e79fca4f936068a..44e116f7880dd02e6754d3d328f1d909a0a7041a 100644 (file)
@@ -39,5 +39,5 @@ int
 g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
 {
        return nvkm_xtensa_new_(&g84_bsp, device, index,
-                               true, 0x103000, pengine);
+                               device->chipset != 0x92, 0x103000, pengine);
 }
index d06ad2c372bf30efb6b8ecc5978776def5721222..455da298227f65c2b4c2cfc6a2cedebe12661877 100644 (file)
@@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
                        mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
                }
 
+               mmu->func->flush(vm);
+
                nvkm_memory_del(&pgt);
        }
 }
index 54a47b40546f69c7ea0d3dbf033c22c95f106516..f96830ffd9f1c1456965810fad723ab365a7f263 100644 (file)
@@ -1021,7 +1021,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
        }
 
        dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n",
-                       rinfo->sda_gpio, rinfo->scl_gpio);
+                       rinfo->scl_gpio, rinfo->sda_gpio);
 
        rinfo->prepare_recovery = i2c_imx_prepare_recovery;
        rinfo->unprepare_recovery = i2c_imx_unprepare_recovery;
@@ -1100,7 +1100,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
        }
 
        /* Request IRQ */
-       ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0,
+       ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED,
                                pdev->name, i2c_imx);
        if (ret) {
                dev_err(&pdev->dev, "can't claim irq %d\n", irq);
index 22ffcb73c185f592d8b4e6bdbb1ede45cbeb4951..b51adffa484109efb842bbe75afec593ac8a1731 100644 (file)
@@ -340,12 +340,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
                        data->word = dma_buffer[0] | (dma_buffer[1] << 8);
                        break;
                case I2C_SMBUS_BLOCK_DATA:
-               case I2C_SMBUS_I2C_BLOCK_DATA:
                        if (desc->rxbytes != dma_buffer[0] + 1)
                                return -EMSGSIZE;
 
                        memcpy(data->block, dma_buffer, desc->rxbytes);
                        break;
+               case I2C_SMBUS_I2C_BLOCK_DATA:
+                       memcpy(&data->block[1], dma_buffer, desc->rxbytes);
+                       data->block[0] = desc->rxbytes;
+                       break;
                }
                return 0;
        }
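
The new I2C_SMBUS_I2C_BLOCK_DATA case is deliberately different from the SMBus block case above it: for an I2C block read the DMA buffer holds payload only, so the driver copies it to data->block[1] onward and writes the byte count into data->block[0] itself, whereas an SMBus block read already carries the length in its first byte. A standalone illustration of the two buffer layouts (contents invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define I2C_SMBUS_BLOCK_MAX 32

    int main(void)
    {
        uint8_t block[I2C_SMBUS_BLOCK_MAX + 2];

        /* SMBus BLOCK_DATA: the device prepends the length, copy verbatim. */
        uint8_t smbus_dma[] = { 3, 0xaa, 0xbb, 0xcc };   /* rxbytes == dma[0] + 1 */
        memcpy(block, smbus_dma, sizeof(smbus_dma));
        printf("SMBus block: len=%d first=0x%02x\n", block[0], block[1]);

        /* I2C_BLOCK_DATA: raw payload only, the driver supplies the length. */
        uint8_t i2c_dma[] = { 0xaa, 0xbb, 0xcc };
        size_t rxbytes = sizeof(i2c_dma);
        memcpy(&block[1], i2c_dma, rxbytes);
        block[0] = (uint8_t)rxbytes;
        printf("I2C block:   len=%d first=0x%02x\n", block[0], block[1]);
        return 0;
    }
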
index 1ebb5e947e0b6625fcf0cda7a71f51e79ac29178..23c2ea2baedc07ee15dfab3e9ea0ce0629587374 100644 (file)
@@ -360,6 +360,7 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
        unsigned long fclk_rate = 12000000;
        unsigned long internal_clk = 0;
        struct clk *fclk;
+       int error;
 
        if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) {
                /*
@@ -378,6 +379,13 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
                 * do this bit unconditionally.
                 */
                fclk = clk_get(omap->dev, "fck");
+               if (IS_ERR(fclk)) {
+                       error = PTR_ERR(fclk);
+                       dev_err(omap->dev, "could not get fck: %i\n", error);
+
+                       return error;
+               }
+
                fclk_rate = clk_get_rate(fclk);
                clk_put(fclk);
 
@@ -410,6 +418,12 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
                else
                        internal_clk = 4000;
                fclk = clk_get(omap->dev, "fck");
+               if (IS_ERR(fclk)) {
+                       error = PTR_ERR(fclk);
+                       dev_err(omap->dev, "could not get fck: %i\n", error);
+
+                       return error;
+               }
                fclk_rate = clk_get_rate(fclk) / 1000;
                clk_put(fclk);
 
index 0ecdb47a23abcbf9691bf809b126d72d6c3a46f8..174579d32e5f39ecdc44d2c230b55fbfb5d073e2 100644 (file)
@@ -85,6 +85,9 @@
 /* SB800 constants */
 #define SB800_PIIX4_SMB_IDX            0xcd6
 
+#define KERNCZ_IMC_IDX                 0x3e
+#define KERNCZ_IMC_DATA                        0x3f
+
 /*
  * SB800 port is selected by bits 2:1 of the smb_en register (0x2c)
  * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f.
 #define SB800_PIIX4_PORT_IDX_ALT       0x2e
 #define SB800_PIIX4_PORT_IDX_SEL       0x2f
 #define SB800_PIIX4_PORT_IDX_MASK      0x06
+#define SB800_PIIX4_PORT_IDX_SHIFT     1
+
+/* On kerncz, SmBus0Sel is at bits 20:19 of PMx00 DecodeEn */
+#define SB800_PIIX4_PORT_IDX_KERNCZ            0x02
+#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ       0x18
+#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ      3
 
 /* insmod parameters */
 
@@ -149,6 +158,8 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
  */
 static DEFINE_MUTEX(piix4_mutex_sb800);
 static u8 piix4_port_sel_sb800;
+static u8 piix4_port_mask_sb800;
+static u8 piix4_port_shift_sb800;
 static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
        " port 0", " port 2", " port 3", " port 4"
 };
@@ -159,6 +170,7 @@ struct i2c_piix4_adapdata {
 
        /* SB800 */
        bool sb800_main;
+       bool notify_imc;
        u8 port;                /* Port number, shifted */
 };
 
@@ -347,7 +359,19 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
 
        /* Find which register is used for port selection */
        if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
-               piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+               switch (PIIX4_dev->device) {
+               case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+                       piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
+                       piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
+                       piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
+                       break;
+               case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
+               default:
+                       piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+                       piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+                       piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
+                       break;
+               }
        } else {
                mutex_lock(&piix4_mutex_sb800);
                outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
@@ -355,6 +379,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
                piix4_port_sel_sb800 = (port_sel & 0x01) ?
                                       SB800_PIIX4_PORT_IDX_ALT :
                                       SB800_PIIX4_PORT_IDX;
+               piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+               piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
                mutex_unlock(&piix4_mutex_sb800);
        }
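
The switch above is only about where the port-select field lives: on the Hudson-2/SB800 layout it is bits 2:1 of the smb_en byte (mask 0x06, shift 1), while on KERNCZ the SmBus0Sel field sits at bits 20:19 of DecodeEn, which within the indexed byte the driver reads corresponds to mask 0x18 and shift 3. A hedged sketch of how a pre-shifted port value and a per-chip mask combine when updating that byte (the indexed register access is replaced by a plain variable):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the read-modify-write done on the indexed smb_en byte. */
    static uint8_t select_port(uint8_t smba_en_lo, unsigned int port,
                               uint8_t mask, unsigned int shift)
    {
        uint8_t shifted = (uint8_t)(port << shift);  /* adapdata->port is stored pre-shifted */

        if ((smba_en_lo & mask) != shifted)
            smba_en_lo = (uint8_t)((smba_en_lo & ~mask) | shifted);
        return smba_en_lo;
    }

    int main(void)
    {
        /* SB800/Hudson-2 layout: bits 2:1.  KERNCZ layout: bits 4:3 of the byte. */
        printf("SB800  select port 2: 0x%02x\n", select_port(0xd1, 2, 0x06, 1));
        printf("KERNCZ select port 2: 0x%02x\n", select_port(0xd1, 2, 0x18, 3));
        return 0;
    }
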
 
@@ -572,6 +598,67 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
        return 0;
 }
 
+static uint8_t piix4_imc_read(uint8_t idx)
+{
+       outb_p(idx, KERNCZ_IMC_IDX);
+       return inb_p(KERNCZ_IMC_DATA);
+}
+
+static void piix4_imc_write(uint8_t idx, uint8_t value)
+{
+       outb_p(idx, KERNCZ_IMC_IDX);
+       outb_p(value, KERNCZ_IMC_DATA);
+}
+
+static int piix4_imc_sleep(void)
+{
+       int timeout = MAX_TIMEOUT;
+
+       if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
+               return -EBUSY;
+
+       /* clear response register */
+       piix4_imc_write(0x82, 0x00);
+       /* request ownership flag */
+       piix4_imc_write(0x83, 0xB4);
+       /* kick off IMC Mailbox command 96 */
+       piix4_imc_write(0x80, 0x96);
+
+       while (timeout--) {
+               if (piix4_imc_read(0x82) == 0xfa) {
+                       release_region(KERNCZ_IMC_IDX, 2);
+                       return 0;
+               }
+               usleep_range(1000, 2000);
+       }
+
+       release_region(KERNCZ_IMC_IDX, 2);
+       return -ETIMEDOUT;
+}
+
+static void piix4_imc_wakeup(void)
+{
+       int timeout = MAX_TIMEOUT;
+
+       if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
+               return;
+
+       /* clear response register */
+       piix4_imc_write(0x82, 0x00);
+       /* release ownership flag */
+       piix4_imc_write(0x83, 0xB5);
+       /* kick off IMC Mailbox command 96 */
+       piix4_imc_write(0x80, 0x96);
+
+       while (timeout--) {
+               if (piix4_imc_read(0x82) == 0xfa)
+                       break;
+               usleep_range(1000, 2000);
+       }
+
+       release_region(KERNCZ_IMC_IDX, 2);
+}
+
 /*
  * Handles access to multiple SMBus ports on the SB800.
  * The port is selected by bits 2:1 of the smb_en register (0x2c).
@@ -612,12 +699,47 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
                return -EBUSY;
        }
 
+       /*
+        * Notify the IMC (Integrated Micro Controller) if required.
+        * Among other responsibilities, the IMC is in charge of monitoring
+        * the system fans and temperature sensors, and acting accordingly.
+        * All of this is done through SMBus and can collide
+        * with our transactions if they are long (BLOCK_DATA).
+        * Therefore we need to request the ownership flag during those
+        * transactions.
+        */
+       if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) {
+               int ret;
+
+               ret = piix4_imc_sleep();
+               switch (ret) {
+               case -EBUSY:
+                       dev_warn(&adap->dev,
+                                "IMC base address index region 0x%x already in use.\n",
+                                KERNCZ_IMC_IDX);
+                       break;
+               case -ETIMEDOUT:
+                       dev_warn(&adap->dev,
+                                "Failed to communicate with the IMC.\n");
+                       break;
+               default:
+                       break;
+               }
+
+               /* If IMC communication fails, do not retry */
+               if (ret) {
+                       dev_warn(&adap->dev,
+                                "Continuing without IMC notification.\n");
+                       adapdata->notify_imc = false;
+               }
+       }
+
        outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
        smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
 
        port = adapdata->port;
-       if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port)
-               outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port,
+       if ((smba_en_lo & piix4_port_mask_sb800) != port)
+               outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
                       SB800_PIIX4_SMB_IDX + 1);
 
        retval = piix4_access(adap, addr, flags, read_write,
@@ -628,6 +750,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
        /* Release the semaphore */
        outb_p(smbslvcnt | 0x20, SMBSLVCNT);
 
+       if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc)
+               piix4_imc_wakeup();
+
        mutex_unlock(&piix4_mutex_sb800);
 
        return retval;
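
Taken together, the IMC additions are a simple bracketing protocol around long transfers: request SMBus ownership from the IMC before a BLOCK_DATA transaction, release it afterwards, and if the request ever fails, warn once and stop asking. The condensed model below mirrors only that ordering; the mailbox details (ports 0x3e/0x3f, command 0x96) stay in the piix4_imc_* helpers above and are not repeated here:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs: the real helpers talk to the IMC mailbox at ports 0x3e/0x3f. */
    static int  imc_sleep(void)         { puts("IMC: ownership requested"); return 0; }
    static void imc_wakeup(void)        { puts("IMC: ownership released"); }
    static int  do_block_transfer(void) { puts("SMBus: block transfer");   return 0; }

    int main(void)
    {
        bool notify_imc = true;     /* set at probe time when the IMC is active */
        bool is_block = true;       /* only BLOCK_DATA transfers need the hand-off */
        int ret;

        if (is_block && notify_imc && imc_sleep() != 0) {
            puts("continuing without IMC notification");
            notify_imc = false;     /* failed once: never ask again */
        }

        ret = do_block_transfer();  /* the transfer proceeds either way */

        if (is_block && notify_imc)
            imc_wakeup();

        return ret;
    }
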
@@ -679,7 +804,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
 static struct i2c_adapter *piix4_aux_adapter;
 
 static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
-                            bool sb800_main, u8 port,
+                            bool sb800_main, u8 port, bool notify_imc,
                             const char *name, struct i2c_adapter **padap)
 {
        struct i2c_adapter *adap;
@@ -706,7 +831,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
 
        adapdata->smba = smba;
        adapdata->sb800_main = sb800_main;
-       adapdata->port = port << 1;
+       adapdata->port = port << piix4_port_shift_sb800;
+       adapdata->notify_imc = notify_imc;
 
        /* set up the sysfs linkage to our parent device */
        adap->dev.parent = &dev->dev;
@@ -728,14 +854,15 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
        return 0;
 }
 
-static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba)
+static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba,
+                                   bool notify_imc)
 {
        struct i2c_piix4_adapdata *adapdata;
        int port;
        int retval;
 
        for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) {
-               retval = piix4_add_adapter(dev, smba, true, port,
+               retval = piix4_add_adapter(dev, smba, true, port, notify_imc,
                                           piix4_main_port_names_sb800[port],
                                           &piix4_main_adapters[port]);
                if (retval < 0)
@@ -769,6 +896,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
             dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
             dev->revision >= 0x40) ||
            dev->vendor == PCI_VENDOR_ID_AMD) {
+               bool notify_imc = false;
                is_sb800 = true;
 
                if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) {
@@ -778,6 +906,20 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        return -EBUSY;
                }
 
+               if (dev->vendor == PCI_VENDOR_ID_AMD &&
+                   dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) {
+                       u8 imc;
+
+                       /*
+                        * Detect whether the IMC is active; this method is
+                        * described in coreboot's AMD IMC notes.
+                        */
+                       pci_bus_read_config_byte(dev->bus, PCI_DEVFN(0x14, 3),
+                                                0x40, &imc);
+                       if (imc & 0x80)
+                               notify_imc = true;
+               }
+
                /* base address location etc changed in SB800 */
                retval = piix4_setup_sb800(dev, id, 0);
                if (retval < 0) {
@@ -789,7 +931,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
                 * Try to register multiplexed main SMBus adapter,
                 * give up if we can't
                 */
-               retval = piix4_add_adapters_sb800(dev, retval);
+               retval = piix4_add_adapters_sb800(dev, retval, notify_imc);
                if (retval < 0) {
                        release_region(SB800_PIIX4_SMB_IDX, 2);
                        return retval;
@@ -800,7 +942,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        return retval;
 
                /* Try to register main SMBus adapter, give up if we can't */
-               retval = piix4_add_adapter(dev, retval, false, 0, "",
+               retval = piix4_add_adapter(dev, retval, false, 0, false, "",
                                           &piix4_main_adapters[0]);
                if (retval < 0)
                        return retval;
@@ -827,7 +969,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (retval > 0) {
                /* Try to add the aux adapter if it exists,
                 * piix4_add_adapter will clean up if this fails */
-               piix4_add_adapter(dev, retval, false, 0,
+               piix4_add_adapter(dev, retval, false, 0, false,
                                  is_sb800 ? piix4_aux_port_name_sb800 : "",
                                  &piix4_aux_adapter);
        }
index d268fdc23c6419e2540cc941edb66231be3a900d..762bfb9487dc961cf1c7d12a18a0d10dd3386b4c 100644 (file)
@@ -933,58 +933,52 @@ int input_set_keycode(struct input_dev *dev,
 }
 EXPORT_SYMBOL(input_set_keycode);
 
+bool input_match_device_id(const struct input_dev *dev,
+                          const struct input_device_id *id)
+{
+       if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
+               if (id->bustype != dev->id.bustype)
+                       return false;
+
+       if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
+               if (id->vendor != dev->id.vendor)
+                       return false;
+
+       if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
+               if (id->product != dev->id.product)
+                       return false;
+
+       if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
+               if (id->version != dev->id.version)
+                       return false;
+
+       if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
+           !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
+           !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
+           !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
+           !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
+           !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
+           !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
+           !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
+           !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
+           !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
+               return false;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL(input_match_device_id);
+
 static const struct input_device_id *input_match_device(struct input_handler *handler,
                                                        struct input_dev *dev)
 {
        const struct input_device_id *id;
 
        for (id = handler->id_table; id->flags || id->driver_info; id++) {
-
-               if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
-                       if (id->bustype != dev->id.bustype)
-                               continue;
-
-               if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
-                       if (id->vendor != dev->id.vendor)
-                               continue;
-
-               if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
-                       if (id->product != dev->id.product)
-                               continue;
-
-               if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
-                       if (id->version != dev->id.version)
-                               continue;
-
-               if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->keybit, dev->keybit, KEY_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->relbit, dev->relbit, REL_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->absbit, dev->absbit, ABS_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->ledbit, dev->ledbit, LED_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->sndbit, dev->sndbit, SND_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->ffbit, dev->ffbit, FF_MAX))
-                       continue;
-
-               if (!bitmap_subset(id->swbit, dev->swbit, SW_MAX))
-                       continue;
-
-               if (!handler->match || handler->match(handler, dev))
+               if (input_match_device_id(dev, id) &&
+                   (!handler->match || handler->match(handler, dev))) {
                        return id;
+               }
        }
 
        return NULL;
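
input_match_device_id() is exported so that handlers (joydev, below) can apply the same matching rules to an arbitrary id table, not just the handler's own. The semantics are: each field is compared only when its INPUT_DEVICE_ID_MATCH_* flag is set, and every capability bitmap in the id must be a subset of the device's. The toy program below models just those two rules with invented structures; it is not the kernel API, only an illustration of the subset test (the Sony vendor/product values are the ones used in the joydev hunk):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy structures, NOT the kernel's: just enough to show the two rules. */
    #define MATCH_VENDOR  0x1
    #define MATCH_PRODUCT 0x2

    struct toy_id  { unsigned int flags; uint16_t vendor, product; uint64_t evbit; };
    struct toy_dev { uint16_t vendor, product; uint64_t evbit; };

    static bool toy_match(const struct toy_dev *dev, const struct toy_id *id)
    {
        if ((id->flags & MATCH_VENDOR) && id->vendor != dev->vendor)
            return false;
        if ((id->flags & MATCH_PRODUCT) && id->product != dev->product)
            return false;
        /* Every bit requested by the id must be present on the device. */
        return (id->evbit & ~dev->evbit) == 0;
    }

    int main(void)
    {
        struct toy_dev pad   = { 0x054c, 0x05c4, 0xa };  /* EV_KEY | EV_ABS */
        struct toy_id  sony  = { MATCH_VENDOR | MATCH_PRODUCT, 0x054c, 0x05c4, 0 };
        struct toy_id  keys  = { 0, 0, 0, 0x2 };          /* wants EV_KEY only */
        struct toy_id  other = { MATCH_VENDOR, 0x045e, 0, 0 };

        printf("sony id:   %d\n", toy_match(&pad, &sony));   /* 1 */
        printf("EV_KEY id: %d\n", toy_match(&pad, &keys));   /* 1 */
        printf("other id:  %d\n", toy_match(&pad, &other));  /* 0 */
        return 0;
    }
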
index 29d677c714d25fa2fca5d41713f32a5a9fe3e64f..7b29a894403981c233120fc282f62d93f3d5f71f 100644 (file)
@@ -747,6 +747,68 @@ static void joydev_cleanup(struct joydev *joydev)
                input_close_device(handle);
 }
 
+/*
+ * These codes are copied from hid-ids.h; unfortunately there is no common
+ * usb_ids/bt_ids.h header.
+ */
+#define USB_VENDOR_ID_SONY                     0x054c
+#define USB_DEVICE_ID_SONY_PS3_CONTROLLER              0x0268
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER              0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2            0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE       0x0ba0
+
+#define USB_VENDOR_ID_THQ                      0x20d6
+#define USB_DEVICE_ID_THQ_PS3_UDRAW                    0xcb17
+
+#define ACCEL_DEV(vnd, prd)                                            \
+       {                                                               \
+               .flags = INPUT_DEVICE_ID_MATCH_VENDOR |                 \
+                               INPUT_DEVICE_ID_MATCH_PRODUCT |         \
+                               INPUT_DEVICE_ID_MATCH_PROPBIT,          \
+               .vendor = (vnd),                                        \
+               .product = (prd),                                       \
+               .propbit = { BIT_MASK(INPUT_PROP_ACCELEROMETER) },      \
+       }
+
+static const struct input_device_id joydev_blacklist[] = {
+       /* Avoid touchpads and touchscreens */
+       {
+               .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+                               INPUT_DEVICE_ID_MATCH_KEYBIT,
+               .evbit = { BIT_MASK(EV_KEY) },
+               .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+       },
+       /* Avoid tablets, digitisers and similar devices */
+       {
+               .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+                               INPUT_DEVICE_ID_MATCH_KEYBIT,
+               .evbit = { BIT_MASK(EV_KEY) },
+               .keybit = { [BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_DIGI) },
+       },
+       /* Disable accelerometers on composite devices */
+       ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER),
+       ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
+       ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+       ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+       ACCEL_DEV(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW),
+       { /* sentinel */ }
+};
+
+static bool joydev_dev_is_blacklisted(struct input_dev *dev)
+{
+       const struct input_device_id *id;
+
+       for (id = joydev_blacklist; id->flags; id++) {
+               if (input_match_device_id(dev, id)) {
+                       dev_dbg(&dev->dev,
+                               "joydev: blacklisting '%s'\n", dev->name);
+                       return true;
+               }
+       }
+
+       return false;
+}
+
 static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
 {
        DECLARE_BITMAP(jd_scratch, KEY_CNT);
@@ -807,12 +869,8 @@ static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
 
 static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
 {
-       /* Avoid touchpads and touchscreens */
-       if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_TOUCH, dev->keybit))
-               return false;
-
-       /* Avoid tablets, digitisers and similar devices */
-       if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit))
+       /* Disable blacklisted devices */
+       if (joydev_dev_is_blacklisted(dev))
                return false;
 
        /* Avoid absolute mice */
index e37e335e406ffbb13d4fa323705373fa2fa58d16..6da607d3b81172a38f95c672e37976bedc8d097d 100644 (file)
@@ -234,14 +234,7 @@ static irqreturn_t tca8418_irq_handler(int irq, void *dev_id)
 static int tca8418_configure(struct tca8418_keypad *keypad_data,
                             u32 rows, u32 cols)
 {
-       int reg, error;
-
-       /* Write config register, if this fails assume device not present */
-       error = tca8418_write_byte(keypad_data, REG_CFG,
-                               CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
-       if (error < 0)
-               return -ENODEV;
-
+       int reg, error = 0;
 
        /* Assemble a mask for row and column registers */
        reg  =  ~(~0 << rows);
@@ -257,6 +250,12 @@ static int tca8418_configure(struct tca8418_keypad *keypad_data,
        error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8);
        error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16);
 
+       if (error)
+               return error;
+
+       error = tca8418_write_byte(keypad_data, REG_CFG,
+                               CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
+
        return error;
 }
 
@@ -268,6 +267,7 @@ static int tca8418_keypad_probe(struct i2c_client *client,
        struct input_dev *input;
        u32 rows = 0, cols = 0;
        int error, row_shift, max_keys;
+       u8 reg;
 
        /* Check i2c driver capabilities */
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
@@ -301,10 +301,10 @@ static int tca8418_keypad_probe(struct i2c_client *client,
        keypad_data->client = client;
        keypad_data->row_shift = row_shift;
 
-       /* Initialize the chip or fail if chip isn't present */
-       error = tca8418_configure(keypad_data, rows, cols);
-       if (error < 0)
-               return error;
+       /* Read key lock register; if this fails, assume device not present */
+       error = tca8418_read_byte(keypad_data, REG_KEY_LCK_EC, &reg);
+       if (error)
+               return -ENODEV;
 
        /* Configure input device */
        input = devm_input_allocate_device(dev);
@@ -340,6 +340,11 @@ static int tca8418_keypad_probe(struct i2c_client *client,
                return error;
        }
 
+       /* Initialize the chip */
+       error = tca8418_configure(keypad_data, rows, cols);
+       if (error < 0)
+               return error;
+
        error = input_register_device(input);
        if (error) {
                dev_err(dev, "Unable to register input device, error: %d\n",
index 6cee5adc3b5cfa1ec322db10c49d84f1bcbc2ae3..debeeaeb88127baae0a65dba44875084af034945 100644 (file)
@@ -403,6 +403,7 @@ static const struct platform_device_id axp_pek_id_match[] = {
        },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(platform, axp_pek_id_match);
 
 static struct platform_driver axp20x_pek_driver = {
        .probe          = axp20x_pek_probe,
@@ -417,4 +418,3 @@ module_platform_driver(axp20x_pek_driver);
 MODULE_DESCRIPTION("axp20x Power Button");
 MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:axp20x-pek");
index 6bf82ea8c918adcbbba700100ead7d270a60695c..ae473123583bb22bedbb33426f11eb8f802ea368 100644 (file)
@@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
                return NULL;
        }
 
-       while (buflen > 0) {
+       while (buflen >= sizeof(*union_desc)) {
                union_desc = (struct usb_cdc_union_desc *)buf;
 
+               if (union_desc->bLength > buflen) {
+                       dev_err(&intf->dev, "Too large descriptor\n");
+                       return NULL;
+               }
+
                if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
                    union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
                        dev_dbg(&intf->dev, "Found union header\n");
-                       return union_desc;
+
+                       if (union_desc->bLength >= sizeof(*union_desc))
+                               return union_desc;
+
+                       dev_err(&intf->dev,
+                               "Union descriptor too short (%d vs %zd)\n",
+                               union_desc->bLength, sizeof(*union_desc));
+                       return NULL;
                }
 
                buflen -= union_desc->bLength;
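
The hardened walk above now checks two things before trusting a descriptor: the remaining buffer must be large enough to hold a header at all, and the descriptor's own bLength must not run past the end of the buffer. The standalone sketch below walks a fake descriptor blob with the same bounds discipline; it additionally rejects a bLength smaller than the header, purely to guarantee forward progress in the example:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct desc_hdr { uint8_t bLength, bDescriptorType; };

    static const struct desc_hdr *find_desc(const uint8_t *buf, size_t buflen,
                                            uint8_t wanted_type, size_t min_len)
    {
        while (buflen >= sizeof(struct desc_hdr)) {
            const struct desc_hdr *d = (const struct desc_hdr *)buf;

            /* Reject lengths that would leave the buffer or stall the walk. */
            if (d->bLength < sizeof(struct desc_hdr) || d->bLength > buflen)
                return NULL;
            if (d->bDescriptorType == wanted_type)
                return d->bLength >= min_len ? d : NULL;

            buflen -= d->bLength;
            buf    += d->bLength;
        }
        return NULL;
    }

    int main(void)
    {
        /* Fake "extra" blob: a 3-byte descriptor of type 0x05, then a
         * 5-byte descriptor of type 0x24 (the one we are looking for). */
        const uint8_t blob[] = { 3, 0x05, 0x00, 5, 0x24, 0x06, 0x00, 0x01 };
        const struct desc_hdr *d = find_desc(blob, sizeof(blob), 0x24, 5);

        printf("found: %s (len=%d)\n", d ? "yes" : "no", d ? d->bLength : 0);
        return 0;
    }
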
index 5af0b7d200bc23ff0f2287d6e0b69a631fb8bb22..ee5466a374bf0f370000cf002d94bf2514d02318 100644 (file)
@@ -1709,8 +1709,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
                .sensor_pdata = {
                        .sensor_type = rmi_sensor_touchpad,
                        .axis_align.flip_y = true,
-                       /* to prevent cursors jumps: */
-                       .kernel_tracking = true,
+                       .kernel_tracking = false,
                        .topbuttonpad = topbuttonpad,
                },
                .f30_data = {
index 32d2762448aa24168c9dd0ad38c8c9eb1ec13ff3..b3bbad7d228296118f35a2d4bff7c295b5e9839c 100644 (file)
@@ -72,6 +72,9 @@ struct goodix_ts_data {
 #define GOODIX_REG_CONFIG_DATA         0x8047
 #define GOODIX_REG_ID                  0x8140
 
+#define GOODIX_BUFFER_STATUS_READY     BIT(7)
+#define GOODIX_BUFFER_STATUS_TIMEOUT   20
+
 #define RESOLUTION_LOC         1
 #define MAX_CONTACTS_LOC       5
 #define TRIGGER_LOC            6
@@ -195,35 +198,53 @@ static int goodix_get_cfg_len(u16 id)
 
 static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
 {
+       unsigned long max_timeout;
        int touch_num;
        int error;
 
-       error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, data,
-                               GOODIX_CONTACT_SIZE + 1);
-       if (error) {
-               dev_err(&ts->client->dev, "I2C transfer error: %d\n", error);
-               return error;
-       }
+       /*
+        * The 'buffer status' bit, which indicates that the data is valid, is
+        * not set as soon as the interrupt is raised, but slightly after.
+        * This takes around 10 ms to happen, so we poll for 20 ms.
+        */
+       max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT);
+       do {
+               error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR,
+                                       data, GOODIX_CONTACT_SIZE + 1);
+               if (error) {
+                       dev_err(&ts->client->dev, "I2C transfer error: %d\n",
+                                       error);
+                       return error;
+               }
 
-       if (!(data[0] & 0x80))
-               return -EAGAIN;
+               if (data[0] & GOODIX_BUFFER_STATUS_READY) {
+                       touch_num = data[0] & 0x0f;
+                       if (touch_num > ts->max_touch_num)
+                               return -EPROTO;
+
+                       if (touch_num > 1) {
+                               data += 1 + GOODIX_CONTACT_SIZE;
+                               error = goodix_i2c_read(ts->client,
+                                               GOODIX_READ_COOR_ADDR +
+                                                       1 + GOODIX_CONTACT_SIZE,
+                                               data,
+                                               GOODIX_CONTACT_SIZE *
+                                                       (touch_num - 1));
+                               if (error)
+                                       return error;
+                       }
+
+                       return touch_num;
+               }
 
-       touch_num = data[0] & 0x0f;
-       if (touch_num > ts->max_touch_num)
-               return -EPROTO;
-
-       if (touch_num > 1) {
-               data += 1 + GOODIX_CONTACT_SIZE;
-               error = goodix_i2c_read(ts->client,
-                                       GOODIX_READ_COOR_ADDR +
-                                               1 + GOODIX_CONTACT_SIZE,
-                                       data,
-                                       GOODIX_CONTACT_SIZE * (touch_num - 1));
-               if (error)
-                       return error;
-       }
+               usleep_range(1000, 2000); /* Poll every 1 - 2 ms */
+       } while (time_before(jiffies, max_timeout));
 
-       return touch_num;
+       /*
+        * The Goodix panel will send spurious interrupts after a
+        * 'finger up' event, which will always cause a timeout.
+        */
+       return 0;
 }
 
 static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
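
The comment in the hunk above carries the key fact: the buffer-status bit lags the interrupt by roughly 10 ms, so the read routine now polls the status byte every 1-2 ms for up to 20 ms and treats a timeout as zero touches (spurious finger-up interrupts). The userspace-flavoured sketch below shows the same poll-with-deadline shape; the clock source and the fake read_status() are stand-ins, not driver code:

    #define _POSIX_C_SOURCE 199309L
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define STATUS_READY 0x80
    #define TIMEOUT_MS   20

    static uint8_t read_status(void)
    {
        static int calls;
        return (uint8_t)(++calls >= 3 ? (STATUS_READY | 2) : 0);  /* "ready" on 3rd read */
    }

    static long now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    int main(void)
    {
        long deadline = now_ms() + TIMEOUT_MS;
        int touches = 0;                    /* a timeout simply means "no touch" */

        do {
            uint8_t status = read_status();
            if (status & STATUS_READY) {
                touches = status & 0x0f;
                break;
            }
            nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);  /* ~1 ms */
        } while (now_ms() < deadline);

        printf("touches reported: %d\n", touches);
        return 0;
    }
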
index 157fdb4bb2e8ba931697d18281afb3887beceaca..8c6c6178ec12fd26ed9832311419da243ed4c154 100644 (file)
@@ -663,12 +663,10 @@ static int stmfts_probe(struct i2c_client *client,
        sdata->input->open = stmfts_input_open;
        sdata->input->close = stmfts_input_close;
 
+       input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_X);
+       input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_Y);
        touchscreen_parse_properties(sdata->input, true, &sdata->prop);
 
-       input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0,
-                                               sdata->prop.max_x, 0, 0);
-       input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0,
-                                               sdata->prop.max_y, 0, 0);
        input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
        input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0);
        input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0);
index 7953381d939ab49f26f460bf7bac808600cb2549..f1043ae71dccb5c4a10126279ff0b1a3c6142089 100644 (file)
@@ -161,7 +161,7 @@ static void titsc_step_config(struct titsc *ts_dev)
                break;
        case 5:
                config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 |
-                               ts_dev->bit_xn | ts_dev->bit_yp;
+                               STEPCONFIG_XNP | STEPCONFIG_YPN;
                break;
        case 8:
                config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp);
index e8d89343d6139424e2caf0baa112955b7005b2e4..e88395605e32dc32960974a4e8707a30037b107b 100644 (file)
@@ -107,6 +107,10 @@ struct its_node {
 
 #define ITS_ITT_ALIGN          SZ_256
 
+/* The maximum number of VPEID bits supported by VLPI commands */
+#define ITS_MAX_VPEID_BITS     (16)
+#define ITS_MAX_VPEID          (1 << (ITS_MAX_VPEID_BITS))
+
 /* Convert page order to size in bytes */
 #define PAGE_ORDER_TO_SIZE(o)  (PAGE_SIZE << (o))
 
@@ -308,7 +312,7 @@ static void its_encode_size(struct its_cmd_block *cmd, u8 size)
 
 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
 {
-       its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
+       its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
 }
 
 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
@@ -318,7 +322,7 @@ static void its_encode_valid(struct its_cmd_block *cmd, int valid)
 
 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
 {
-       its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
+       its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
 }
 
 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
@@ -358,7 +362,7 @@ static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
 
 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
 {
-       its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
+       its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
 }
 
 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
@@ -1478,9 +1482,9 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
        u64 val = its_read_baser(its, baser);
        u64 esz = GITS_BASER_ENTRY_SIZE(val);
        u64 type = GITS_BASER_TYPE(val);
+       u64 baser_phys, tmp;
        u32 alloc_pages;
        void *base;
-       u64 tmp;
 
 retry_alloc_baser:
        alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
@@ -1496,8 +1500,24 @@ retry_alloc_baser:
        if (!base)
                return -ENOMEM;
 
+       baser_phys = virt_to_phys(base);
+
+       /* Check if the physical address of the memory is above 48bits */
+       if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
+
+               /* 52bit PA is supported only when PageSize=64K */
+               if (psz != SZ_64K) {
+                       pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
+                       free_pages((unsigned long)base, order);
+                       return -ENXIO;
+               }
+
+               /* Convert 52bit PA to 48bit field */
+               baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
+       }
+
 retry_baser:
-       val = (virt_to_phys(base)                                |
+       val = (baser_phys                                        |
                (type << GITS_BASER_TYPE_SHIFT)                  |
                ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)       |
                ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)    |
@@ -1582,13 +1602,12 @@ retry_baser:
 
 static bool its_parse_indirect_baser(struct its_node *its,
                                     struct its_baser *baser,
-                                    u32 psz, u32 *order)
+                                    u32 psz, u32 *order, u32 ids)
 {
        u64 tmp = its_read_baser(its, baser);
        u64 type = GITS_BASER_TYPE(tmp);
        u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
        u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
-       u32 ids = its->device_ids;
        u32 new_order = *order;
        bool indirect = false;
 
@@ -1680,9 +1699,13 @@ static int its_alloc_tables(struct its_node *its)
                        continue;
 
                case GITS_BASER_TYPE_DEVICE:
+                       indirect = its_parse_indirect_baser(its, baser,
+                                                           psz, &order,
+                                                           its->device_ids);
                case GITS_BASER_TYPE_VCPU:
                        indirect = its_parse_indirect_baser(its, baser,
-                                                           psz, &order);
+                                                           psz, &order,
+                                                           ITS_MAX_VPEID_BITS);
                        break;
                }
 
@@ -2551,7 +2574,7 @@ static struct irq_chip its_vpe_irq_chip = {
 
 static int its_vpe_id_alloc(void)
 {
-       return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL);
+       return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
 }
 
 static void its_vpe_id_free(u16 id)
@@ -2851,7 +2874,7 @@ static int its_init_vpe_domain(void)
                return -ENOMEM;
        }
 
-       BUG_ON(entries != vpe_proxy.dev->nr_ites);
+       BUG_ON(entries > vpe_proxy.dev->nr_ites);
 
        raw_spin_lock_init(&vpe_proxy.lock);
        vpe_proxy.next_victim = 0;
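
The 52-bit handling added to its_setup_baser() earlier in this file rests on one observation: the GITS_BASER physical-address field is 48 bits wide, but with 64K pages the low 16 bits of a table address are zero, so the extra address bits can be folded into the otherwise-unused low bits of the field. The sketch below assumes the common GICv3.1-style packing of PA[51:48] into field bits [15:12]; the actual GITS_BASER_PHYS_52_to_48() definition is not part of this hunk, so treat the exact shifts as an assumption:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed packing: PA[47:16] stays in place, PA[51:48] moves to bits [15:12]. */
    static uint64_t phys_52_to_48(uint64_t pa)
    {
        return (pa & 0x0000ffffffff0000ULL) | (((pa >> 48) & 0xf) << 12);
    }

    static uint64_t field_to_phys_52(uint64_t field)
    {
        return (field & 0x0000ffffffff0000ULL) | (((field >> 12) & 0xf) << 48);
    }

    int main(void)
    {
        uint64_t pa = 0x000a123456780000ULL;   /* a 52-bit, 64K-aligned address */
        uint64_t field = phys_52_to_48(pa);

        printf("PA      = 0x%013" PRIx64 "\n", pa);
        printf("field   = 0x%013" PRIx64 "\n", field);
        printf("back    = 0x%013" PRIx64 "\n", field_to_phys_52(field));
        return 0;
    }
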
index bdbb5c0ff7fe3bd956c1c9d12364aee4cd8c5eba..0c085303a58302cc982f69e52b0a5ea8e5c60c25 100644 (file)
@@ -141,7 +141,7 @@ static void __init tangox_irq_init_chip(struct irq_chip_generic *gc,
        for (i = 0; i < 2; i++) {
                ct[i].chip.irq_ack = irq_gc_ack_set_bit;
                ct[i].chip.irq_mask = irq_gc_mask_disable_reg;
-               ct[i].chip.irq_mask_ack = irq_gc_mask_disable_reg_and_ack;
+               ct[i].chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
                ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg;
                ct[i].chip.irq_set_type = tangox_irq_set_type;
                ct[i].chip.name = gc->domain->name;
index eed6c397d8400b0a25c57feb6ef23dc49eac71df..f8a808d45034e048f9b0bc2c764c51d95e22bdc9 100644 (file)
@@ -1797,12 +1797,19 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
         */
        switch (msg->msg[1]) {
        case CEC_MSG_GET_CEC_VERSION:
-       case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
        case CEC_MSG_ABORT:
        case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
-       case CEC_MSG_GIVE_PHYSICAL_ADDR:
        case CEC_MSG_GIVE_OSD_NAME:
+               /*
+                * These messages reply with a directed message, so ignore if
+                * the initiator is Unregistered.
+                */
+               if (!adap->passthrough && from_unregistered)
+                       return 0;
+               /* Fall through */
+       case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
        case CEC_MSG_GIVE_FEATURES:
+       case CEC_MSG_GIVE_PHYSICAL_ADDR:
                /*
                 * Skip processing these messages if the passthrough mode
                 * is on.
@@ -1810,7 +1817,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
                if (adap->passthrough)
                        goto skip_processing;
                /* Ignore if addressing is wrong */
-               if (is_broadcast || from_unregistered)
+               if (is_broadcast)
                        return 0;
                break;
 
index 2fcba16161685888164565bb24e0e917024092eb..9139d01ba7ed6c9470896dea8500a433d9c05240 100644 (file)
@@ -141,22 +141,39 @@ struct dvb_frontend_private {
 static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
                                        void (*release)(struct dvb_frontend *fe));
 
-static void dvb_frontend_free(struct kref *ref)
+static void __dvb_frontend_free(struct dvb_frontend *fe)
 {
-       struct dvb_frontend *fe =
-               container_of(ref, struct dvb_frontend, refcount);
        struct dvb_frontend_private *fepriv = fe->frontend_priv;
 
+       if (!fepriv)
+               return;
+
        dvb_free_device(fepriv->dvbdev);
 
        dvb_frontend_invoke_release(fe, fe->ops.release);
 
        kfree(fepriv);
+       fe->frontend_priv = NULL;
+}
+
+static void dvb_frontend_free(struct kref *ref)
+{
+       struct dvb_frontend *fe =
+               container_of(ref, struct dvb_frontend, refcount);
+
+       __dvb_frontend_free(fe);
 }
 
 static void dvb_frontend_put(struct dvb_frontend *fe)
 {
-       kref_put(&fe->refcount, dvb_frontend_free);
+       /*
+        * Check if the frontend was registered, as otherwise
+        * kref was not initialized yet.
+        */
+       if (fe->frontend_priv)
+               kref_put(&fe->refcount, dvb_frontend_free);
+       else
+               __dvb_frontend_free(fe);
 }
 
 static void dvb_frontend_get(struct dvb_frontend *fe)
index 224283fe100a8fe6f6a3f17f52cfbff2a723b15e..4d086a7248e9b2508905cd026038793dc7882241 100644 (file)
@@ -55,29 +55,57 @@ struct dib3000mc_state {
 
 static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg)
 {
-       u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff };
-       u8 rb[2];
        struct i2c_msg msg[2] = {
-               { .addr = state->i2c_addr >> 1, .flags = 0,        .buf = wb, .len = 2 },
-               { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 },
+               { .addr = state->i2c_addr >> 1, .flags = 0,        .len = 2 },
+               { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .len = 2 },
        };
+       u16 word;
+       u8 *b;
+
+       b = kmalloc(4, GFP_KERNEL);
+       if (!b)
+               return 0;
+
+       b[0] = (reg >> 8) | 0x80;
+       b[1] = reg;
+       b[2] = 0;
+       b[3] = 0;
+
+       msg[0].buf = b;
+       msg[1].buf = b + 2;
 
        if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
                dprintk("i2c read error on %d\n",reg);
 
-       return (rb[0] << 8) | rb[1];
+       word = (b[2] << 8) | b[3];
+       kfree(b);
+
+       return word;
 }
 
 static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val)
 {
-       u8 b[4] = {
-               (reg >> 8) & 0xff, reg & 0xff,
-               (val >> 8) & 0xff, val & 0xff,
-       };
        struct i2c_msg msg = {
-               .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
+               .addr = state->i2c_addr >> 1, .flags = 0, .len = 4
        };
-       return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+       int rc;
+       u8 *b;
+
+       b = kmalloc(4, GFP_KERNEL);
+       if (!b)
+               return -ENOMEM;
+
+       b[0] = reg >> 8;
+       b[1] = reg;
+       b[2] = val >> 8;
+       b[3] = val;
+
+       msg.buf = b;
+
+       rc = i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+       kfree(b);
+
+       return rc;
 }
 
 static int dib3000mc_identify(struct dib3000mc_state *state)
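
This hunk, like the dvb-pll and mt2060 hunks later in the series, swaps stack-resident i2c_msg buffers for kmalloc()'d ones. The log does not state the motivation; a plausible reading (an assumption, not taken from this merge) is that I2C transfer buffers can end up being used for DMA, which stack memory cannot safely back. Whatever the reason, the resulting pattern is uniform: allocate, fill, point msg.buf at the allocation, transfer, extract the result, and free on every exit path. A condensed standalone illustration of that shape (fake_transfer() stands in for i2c_transfer()):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for i2c_transfer(): pretends the device returned 0x01b3. */
    static int fake_transfer(const uint8_t *wr, size_t wlen, uint8_t *rd, size_t rlen)
    {
        (void)wr; (void)wlen;
        if (rlen == 2) { rd[0] = 0x01; rd[1] = 0xb3; }
        return 0;
    }

    static int read_word(uint16_t reg, uint16_t *out)
    {
        uint8_t *b = malloc(4);       /* heap buffer, never the stack */
        int rc;

        if (!b)
            return -1;

        b[0] = (uint8_t)((reg >> 8) | 0x80);
        b[1] = (uint8_t)reg;

        rc = fake_transfer(b, 2, b + 2, 2);
        if (rc == 0)
            *out = (uint16_t)((b[2] << 8) | b[3]);

        free(b);                      /* freed on success and failure alike */
        return rc;
    }

    int main(void)
    {
        uint16_t val = 0;
        int rc = read_word(0x0010, &val);

        printf("rc=%d val=0x%04x\n", rc, val);
        return 0;
    }
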
index 7bec3e028beec10e188fea4d9f53cc40556f8ddf..5553b89b804e7d4219d6c767b96fbdb5006cce1b 100644 (file)
@@ -753,13 +753,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
                                    struct i2c_adapter *i2c,
                                    unsigned int pll_desc_id)
 {
-       u8 b1 [] = { 0 };
-       struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD,
-                              .buf = b1, .len = 1 };
+       u8 *b1;
+       struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .len = 1 };
        struct dvb_pll_priv *priv = NULL;
        int ret;
        const struct dvb_pll_desc *desc;
 
+       b1 = kmalloc(1, GFP_KERNEL);
+       if (!b1)
+               return NULL;
+
+       b1[0] = 0;
+       msg.buf = b1;
+
        if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
            (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
                pll_desc_id = id[dvb_pll_devcount];
@@ -773,15 +779,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
                        fe->ops.i2c_gate_ctrl(fe, 1);
 
                ret = i2c_transfer (i2c, &msg, 1);
-               if (ret != 1)
+               if (ret != 1) {
+                       kfree(b1);
                        return NULL;
+               }
                if (fe->ops.i2c_gate_ctrl)
                             fe->ops.i2c_gate_ctrl(fe, 0);
        }
 
        priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
-       if (priv == NULL)
+       if (!priv) {
+               kfree(b1);
                return NULL;
+       }
 
        priv->pll_i2c_address = pll_addr;
        priv->i2c = i2c;
@@ -811,6 +821,8 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
                                "insmod option" : "autodetected");
        }
 
+       kfree(b1);
+
        return fe;
 }
 EXPORT_SYMBOL(dvb_pll_attach);
index 7e7cc49b867400093ba038baf9e14773970f15f4..3c4f7fa7b9d8ea06e7b1455ce3e0172d17322483 100644 (file)
@@ -112,7 +112,7 @@ config VIDEO_PXA27x
 
 config VIDEO_QCOM_CAMSS
        tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver"
-       depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
        depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
        select VIDEOBUF2_DMA_SG
        select V4L2_FWNODE
index b21b3c2dc77f2bb12f617f198a928a58cc17b592..b22d2dfcd3c29ec85c474948f67d0805571db3ab 100644 (file)
@@ -2660,7 +2660,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
  *
  * Return -EINVAL or zero on success
  */
-int vfe_set_selection(struct v4l2_subdev *sd,
+static int vfe_set_selection(struct v4l2_subdev *sd,
                             struct v4l2_subdev_pad_config *cfg,
                             struct v4l2_subdev_selection *sel)
 {
index 68933d20806338629a89bdde9a5b05219ef2f5ac..9b2a401a4891c49e1388783cbf4111bf6df26af6 100644 (file)
@@ -682,6 +682,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
                        hfi_session_abort(inst);
 
                load_scale_clocks(core);
+               INIT_LIST_HEAD(&inst->registeredbufs);
        }
 
        venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
index 1edf667d562a4df64a4806e947f192aa2a24357f..146ae6f25cdbb1eee53837aa1ea28c503163ed28 100644 (file)
@@ -172,7 +172,8 @@ u32 s5p_cec_get_status(struct s5p_cec_dev *cec)
 {
        u32 status = 0;
 
-       status = readb(cec->reg + S5P_CEC_STATUS_0);
+       status = readb(cec->reg + S5P_CEC_STATUS_0) & 0xf;
+       status |= (readb(cec->reg + S5P_CEC_TX_STAT1) & 0xf) << 4;
        status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8;
        status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16;
        status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24;
index 58d200e7c8382de8edc841b17b1022a2c15ddb85..8837e2678bdeb3507e4ebce501e5ee58216334ad 100644 (file)
@@ -92,7 +92,10 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
        dev_dbg(cec->dev, "irq received\n");
 
        if (status & CEC_STATUS_TX_DONE) {
-               if (status & CEC_STATUS_TX_ERROR) {
+               if (status & CEC_STATUS_TX_NACK) {
+                       dev_dbg(cec->dev, "CEC_STATUS_TX_NACK set\n");
+                       cec->tx = STATE_NACK;
+               } else if (status & CEC_STATUS_TX_ERROR) {
                        dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n");
                        cec->tx = STATE_ERROR;
                } else {
@@ -135,6 +138,12 @@ static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv)
                cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
                cec->tx = STATE_IDLE;
                break;
+       case STATE_NACK:
+               cec_transmit_done(cec->adap,
+                       CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_NACK,
+                       0, 1, 0, 0);
+               cec->tx = STATE_IDLE;
+               break;
        case STATE_ERROR:
                cec_transmit_done(cec->adap,
                        CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR,
index 8bcd8dc1aeb9fb299d5b821511c9b1eba02d3c6a..86ded522ef27319a27fb72bd9f5285be6ac1ad8f 100644 (file)
@@ -35,6 +35,7 @@
 #define CEC_STATUS_TX_TRANSFERRING     (1 << 1)
 #define CEC_STATUS_TX_DONE             (1 << 2)
 #define CEC_STATUS_TX_ERROR            (1 << 3)
+#define CEC_STATUS_TX_NACK             (1 << 4)
 #define CEC_STATUS_TX_BYTES            (0xFF << 8)
 #define CEC_STATUS_RX_RUNNING          (1 << 16)
 #define CEC_STATUS_RX_RECEIVING                (1 << 17)
@@ -55,6 +56,7 @@ enum cec_state {
        STATE_IDLE,
        STATE_BUSY,
        STATE_DONE,
+       STATE_NACK,
        STATE_ERROR
 };
 
index 2e487f9a2cc3fb678aa173b93a9bb141cc23ba69..4983eeb39f369c961cfdf86b69a870d98727e736 100644 (file)
@@ -38,41 +38,74 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
 static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val)
 {
        struct i2c_msg msg[2] = {
-               { .addr = priv->cfg->i2c_address, .flags = 0,        .buf = &reg, .len = 1 },
-               { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val,  .len = 1 },
+               { .addr = priv->cfg->i2c_address, .flags = 0, .len = 1 },
+               { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .len = 1 },
        };
+       int rc = 0;
+       u8 *b;
+
+       b = kmalloc(2, GFP_KERNEL);
+       if (!b)
+               return -ENOMEM;
+
+       b[0] = reg;
+       b[1] = 0;
+
+       msg[0].buf = b;
+       msg[1].buf = b + 1;
 
        if (i2c_transfer(priv->i2c, msg, 2) != 2) {
                printk(KERN_WARNING "mt2060 I2C read failed\n");
-               return -EREMOTEIO;
+               rc = -EREMOTEIO;
        }
-       return 0;
+       *val = b[1];
+       kfree(b);
+
+       return rc;
 }
 
 // Writes a single register
 static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val)
 {
-       u8 buf[2] = { reg, val };
        struct i2c_msg msg = {
-               .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2
+               .addr = priv->cfg->i2c_address, .flags = 0, .len = 2
        };
+       u8 *buf;
+       int rc = 0;
+
+       buf = kmalloc(2, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       buf[0] = reg;
+       buf[1] = val;
+
+       msg.buf = buf;
 
        if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
                printk(KERN_WARNING "mt2060 I2C write failed\n");
-               return -EREMOTEIO;
+               rc = -EREMOTEIO;
        }
-       return 0;
+       kfree(buf);
+       return rc;
 }
 
 // Writes a set of consecutive registers
 static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
 {
        int rem, val_len;
-       u8 xfer_buf[16];
+       u8 *xfer_buf;
+       int rc = 0;
        struct i2c_msg msg = {
-               .addr = priv->cfg->i2c_address, .flags = 0, .buf = xfer_buf
+               .addr = priv->cfg->i2c_address, .flags = 0
        };
 
+       xfer_buf = kmalloc(16, GFP_KERNEL);
+       if (!xfer_buf)
+               return -ENOMEM;
+
+       msg.buf = xfer_buf;
+
        for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) {
                val_len = min_t(int, rem, priv->i2c_max_regs);
                msg.len = 1 + val_len;
@@ -81,11 +114,13 @@ static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
 
                if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
                        printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len);
-                       return -EREMOTEIO;
+                       rc = -EREMOTEIO;
+                       break;
                }
        }
 
-       return 0;
+       kfree(xfer_buf);
+       return rc;
 }
 
 // Initialisation sequences
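
The mt2060 rework above replaces on-stack I2C buffers with kmalloc'd ones: buffers handed to i2c_transfer() may be used for DMA, and with CONFIG_VMAP_STACK the stack is not even guaranteed to be physically contiguous, so DMA to a stack array is unsafe. A generic sketch of the same pattern, independent of this tuner and assuming only the standard i2c_msg/i2c_transfer API:

/* Sketch: DMA-safe single-register read over I2C.  The heap buffer is
 * physically contiguous and outlives the transfer, unlike a stack array.
 */
static int dmasafe_readreg(struct i2c_adapter *adap, u8 addr, u8 reg, u8 *val)
{
        struct i2c_msg msg[2] = {
                { .addr = addr, .flags = 0,        .len = 1 },
                { .addr = addr, .flags = I2C_M_RD, .len = 1 },
        };
        u8 *b;
        int ret = 0;

        b = kmalloc(2, GFP_KERNEL);     /* [0] = register index, [1] = read-back value */
        if (!b)
                return -ENOMEM;

        b[0] = reg;
        msg[0].buf = b;
        msg[1].buf = b + 1;

        if (i2c_transfer(adap, msg, 2) != 2)
                ret = -EREMOTEIO;
        else
                *val = b[1];

        kfree(b);
        return ret;
}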
index d0ccc6729fd29e734996b28538cd6e8e1745bf2b..67d787fa330651738ce8c2bf4e58fd8eebcf6e2a 100644 (file)
@@ -448,6 +448,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
        int err;
        u32 val;
 
+       intel_host->d3_retune = true;
+
        err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
        if (err) {
                pr_debug("%s: DSM not supported, error %d\n",
index 13f0f219d8aa83ab0ce52823f6ddb697d58c316b..a13a4896a8bddad19ae48f8c58bbaf2f3c8dce84 100644 (file)
 /* FLEXCAN hardware feature flags
  *
  * Below is some version info we got:
- *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT Memory err RTR re-
- *                                Filter? connected?  detection  ception in MB
- *   MX25  FlexCAN2  03.00.00.00     no        no         no        no
- *   MX28  FlexCAN2  03.00.04.00    yes       yes         no        no
- *   MX35  FlexCAN2  03.00.00.00     no        no         no        no
- *   MX53  FlexCAN2  03.00.00.00    yes        no         no        no
- *   MX6s  FlexCAN3  10.00.12.00    yes       yes         no       yes
- *   VF610 FlexCAN3  ?               no       yes        yes       yes?
+ *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
+ *                                Filter? connected?  Passive detection  ception in MB
+ *   MX25  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX28  FlexCAN2  03.00.04.00    yes       yes        no       no        no
+ *   MX35  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX53  FlexCAN2  03.00.00.00    yes        no        no       no        no
+ *   MX6s  FlexCAN3  10.00.12.00    yes       yes        no       no       yes
+ *   VF610 FlexCAN3  ?               no       yes         ?      yes       yes?
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
-#define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_BROKEN_WERR_STATE        BIT(1) /* [TR]WRN_INT not connected */
 #define FLEXCAN_QUIRK_DISABLE_RXFG     BIT(2) /* Disable RX FIFO Global mask */
 #define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
 #define FLEXCAN_QUIRK_DISABLE_MECR     BIT(4) /* Disable Memory error detection */
 #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP        BIT(5) /* Use timestamp based offloading */
+#define FLEXCAN_QUIRK_BROKEN_PERR_STATE        BIT(6) /* No interrupt for error passive */
 
 /* Structure of the message buffer */
 struct flexcan_mb {
@@ -281,14 +282,17 @@ struct flexcan_priv {
 };
 
 static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
-       .quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE,
+       .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+               FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
 
-static const struct flexcan_devtype_data fsl_imx28_devtype_data;
+static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
+       .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+};
 
 static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
        .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
-               FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+               FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
 
 static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
@@ -335,6 +339,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
 }
 #endif
 
+static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
+{
+       struct flexcan_regs __iomem *regs = priv->regs;
+       u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
+
+       flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
+static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
+{
+       struct flexcan_regs __iomem *regs = priv->regs;
+       u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
+
+       flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
 static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
 {
        if (!priv->reg_xceiver)
@@ -713,6 +733,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
        struct flexcan_regs __iomem *regs = priv->regs;
        irqreturn_t handled = IRQ_NONE;
        u32 reg_iflag1, reg_esr;
+       enum can_state last_state = priv->can.state;
 
        reg_iflag1 = flexcan_read(&regs->iflag1);
 
@@ -765,8 +786,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
                flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
        }
 
-       /* state change interrupt */
-       if (reg_esr & FLEXCAN_ESR_ERR_STATE)
+       /* state change interrupt or broken error state quirk fix is enabled */
+       if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
+           (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+                                          FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
                flexcan_irq_state(dev, reg_esr);
 
        /* bus error IRQ - handle if bus error reporting is activated */
@@ -774,6 +797,44 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
            (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
                flexcan_irq_bus_err(dev, reg_esr);
 
+       /* availability of error interrupt among state transitions in case
+        * bus error reporting is de-activated and
+        * FLEXCAN_QUIRK_BROKEN_PERR_STATE is enabled:
+        *  +--------------------------------------------------------------+
+        *  | +----------------------------------------------+ [stopped /  |
+        *  | |                                              |  sleeping] -+
+        *  +-+-> active <-> warning <-> passive -> bus off -+
+        *        ___________^^^^^^^^^^^^_______________________________
+        *        disabled(1)  enabled             disabled
+        *
+        * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled
+        */
+       if ((last_state != priv->can.state) &&
+           (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
+           !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
+               switch (priv->can.state) {
+               case CAN_STATE_ERROR_ACTIVE:
+                       if (priv->devtype_data->quirks &
+                           FLEXCAN_QUIRK_BROKEN_WERR_STATE)
+                               flexcan_error_irq_enable(priv);
+                       else
+                               flexcan_error_irq_disable(priv);
+                       break;
+
+               case CAN_STATE_ERROR_WARNING:
+                       flexcan_error_irq_enable(priv);
+                       break;
+
+               case CAN_STATE_ERROR_PASSIVE:
+               case CAN_STATE_BUS_OFF:
+                       flexcan_error_irq_disable(priv);
+                       break;
+
+               default:
+                       break;
+               }
+       }
+
        return handled;
 }
 
@@ -887,7 +948,7 @@ static int flexcan_chip_start(struct net_device *dev)
         * on most Flexcan cores, too. Otherwise we don't get
         * any error warning or passive interrupts.
         */
-       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_ERR_STATE ||
+       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
            priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
                reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
        else
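
The flexcan change splits the old "broken error state" quirk in two: FLEXCAN_QUIRK_BROKEN_WERR_STATE (the [TR]WRN_INT lines are not wired up) and the new FLEXCAN_QUIRK_BROKEN_PERR_STATE (no interrupt for the error-passive transition). When bus-error reporting is off, the IRQ handler now toggles FLEXCAN_CTRL_ERR_MSK at run time, per the state diagram in the hunk, so the controller only raises the extra bus-error interrupts while they are needed to observe the next state change. For context, per-SoC quirk data like this is normally selected through the OF match table; the sketch below uses real flexcan compatible strings but is illustrative, not part of the patch:

/* Sketch: how devtype quirks reach priv->devtype_data. */
static const struct of_device_id flexcan_of_match_sketch[] = {
        { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data },
        { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data },
        { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data },
        { /* sentinel */ },
};

/* in probe():
 *      const struct of_device_id *of_id =
 *              of_match_device(flexcan_of_match_sketch, &pdev->dev);
 *      priv->devtype_data = of_id->data;
 * feature checks then reduce to a bit test:
 *      if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE)
 *              ...
 */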
index be928ce62d32e01c9d6ad9ac1fbcb46916f492ec..9fdb0f0bfa06a00a2ade5e74daef15798ab7d4a1 100644 (file)
@@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
                }
 
                cf->can_id = id & ESD_IDMASK;
-               cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
+               cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
 
                if (id & ESD_EXTID)
                        cf->can_id |= CAN_EFF_FLAG;
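
The esd_usb2 fix masks the device's RTR flag out of the received DLC byte before passing it to get_can_dlc(); the flag shares the byte with the data length, so leaving it set inflates the reported length for remote frames. A tiny standalone check of the effect — the ESD_RTR value here is an assumption for illustration, the driver defines the real one:

#include <stdio.h>
#include <stdint.h>

#define ESD_RTR     0x10        /* assumed value, for illustration only */
#define CAN_MAX_DLC 8

static uint8_t get_can_dlc(uint8_t i)   /* same clamp the kernel helper applies */
{
        return i < CAN_MAX_DLC ? i : CAN_MAX_DLC;
}

int main(void)
{
        uint8_t raw = ESD_RTR | 3;      /* remote frame asking for a 3-byte reply */

        printf("unmasked: %u bytes, masked: %u bytes\n",
               get_can_dlc(raw),                        /* 8 -- RTR bit inflated the value */
               get_can_dlc(raw & (uint8_t)~ESD_RTR));   /* 3 -- correct */
        return 0;
}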
index afcc1312dbaf8f67bce640e17dbc20c06592dc45..68ac3e88a8cecbe5b4a58da8491756ad5c26039a 100644 (file)
@@ -375,6 +375,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
 
                gs_free_tx_context(txc);
 
+               atomic_dec(&dev->active_tx_urbs);
+
                netif_wake_queue(netdev);
        }
 
@@ -463,14 +465,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
                          urb->transfer_buffer_length,
                          urb->transfer_buffer,
                          urb->transfer_dma);
-
-       atomic_dec(&dev->active_tx_urbs);
-
-       if (!netif_device_present(netdev))
-               return;
-
-       if (netif_queue_stopped(netdev))
-               netif_wake_queue(netdev);
 }
 
 static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
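
The gs_usb change moves the active_tx_urbs decrement and the queue wake-up out of the URB completion callback and into the receive path that processes the device's TX echo and frees the TX context, so the in-flight counter only drops when a TX slot has actually become reusable. A generic sketch of that accounting pattern, with hypothetical names:

/* Sketch: bounded in-flight TX accounting for a USB CAN device. */
#define MAX_TX_URBS 10                          /* illustrative limit */

struct sketch_priv {                            /* hypothetical */
        struct net_device *netdev;
        atomic_t active_tx_urbs;
};

static void sketch_xmit_submitted(struct sketch_priv *priv)
{
        if (atomic_inc_return(&priv->active_tx_urbs) >= MAX_TX_URBS)
                netif_stop_queue(priv->netdev); /* no free echo slots left */
        /* ...the URB itself is submitted by the caller... */
}

static void sketch_tx_echo_received(struct sketch_priv *priv)
{
        /* only now has the device confirmed the frame and released its slot */
        atomic_dec(&priv->active_tx_urbs);
        netif_wake_queue(priv->netdev);
}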
index dce7fa57eb553a477faadd3f15d6f0e1dcf394af..f123ed57630d59815156c46b750d987bb0171966 100644 (file)
@@ -214,8 +214,14 @@ static int mv88e6060_setup(struct dsa_switch *ds)
 
 static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
 {
-       /* Use the same MAC Address as FD Pause frames for all ports */
-       REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]);
+       u16 val = addr[0] << 8 | addr[1];
+
+       /* The multicast bit is always transmitted as a zero, so the switch uses
+        * bit 8 for "DiffAddr", where 0 means all ports transmit the same SA.
+        */
+       val &= 0xfeff;
+
+       REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, val);
        REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
        REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
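
For the mv88e6060 address fix: GLOBAL_MAC_01 holds the first two octets of the station address, but since the multicast bit of octet 0 is always transmitted as zero, the switch re-uses the corresponding register bit (bit 8) as DiffAddr. The old code shifted addr[0] by 9, which both misplaced the octet and could set DiffAddr; the new code shifts by 8 and clears bit 8 so every port transmits the same source address. A small standalone check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t addr[2] = { 0xaa, 0xbb };       /* first two octets of the station address */

        uint16_t old_val = (uint16_t)((addr[0] << 9) | addr[1]);          /* former, broken */
        uint16_t new_val = (uint16_t)(((addr[0] << 8) | addr[1]) & 0xfeff); /* DiffAddr kept clear */

        printf("old GLOBAL_MAC_01 = 0x%04x (octet 0 mangled)\n", old_val); /* 0x54bb */
        printf("new GLOBAL_MAC_01 = 0x%04x\n", new_val);                   /* 0xaabb */
        return 0;
}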
 
index b1212debc2e1c993f662ee35dcbbf6b5e5e3ac54..967020fb26ee17a6995a7ae11df668e8ab4fe9f0 100644 (file)
@@ -742,8 +742,8 @@ static void ena_get_channels(struct net_device *netdev,
 {
        struct ena_adapter *adapter = netdev_priv(netdev);
 
-       channels->max_rx = ENA_MAX_NUM_IO_QUEUES;
-       channels->max_tx = ENA_MAX_NUM_IO_QUEUES;
+       channels->max_rx = adapter->num_queues;
+       channels->max_tx = adapter->num_queues;
        channels->max_other = 0;
        channels->max_combined = 0;
        channels->rx_count = adapter->num_queues;
index f7dc22f65d9f64ac50cd641d30e4d83f88fe5bba..c6bd5e24005d02a6c7098cdaf492e5152eac2c08 100644 (file)
@@ -966,7 +966,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
                u64_stats_update_begin(&rx_ring->syncp);
                rx_ring->rx_stats.bad_csum++;
                u64_stats_update_end(&rx_ring->syncp);
-               netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+               netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
                          "RX IPv4 header checksum error\n");
                return;
        }
@@ -979,7 +979,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
                        u64_stats_update_begin(&rx_ring->syncp);
                        rx_ring->rx_stats.bad_csum++;
                        u64_stats_update_end(&rx_ring->syncp);
-                       netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+                       netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
                                  "RX L4 checksum error\n");
                        skb->ip_summed = CHECKSUM_NONE;
                        return;
@@ -3064,7 +3064,8 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
        if (ena_dev->mem_bar)
                devm_iounmap(&pdev->dev, ena_dev->mem_bar);
 
-       devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+       if (ena_dev->reg_bar)
+               devm_iounmap(&pdev->dev, ena_dev->reg_bar);
 
        release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
        pci_release_selected_regions(pdev, release_bars);
index 0fdaaa643073afabda6d66c9ebe0ef9e3b7695ae..57e796870595bb9a305a7579154b0dbd1cbeec60 100644 (file)
 
 #define AQ_CFG_FORCE_LEGACY_INT 0U
 
-#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF   1U
-#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU
+#define AQ_CFG_INTERRUPT_MODERATION_OFF                0
+#define AQ_CFG_INTERRUPT_MODERATION_ON         1
+#define AQ_CFG_INTERRUPT_MODERATION_AUTO       0xFFFFU
+
+#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2)
+
 #define AQ_CFG_IRQ_MASK                      0x1FFU
 
 #define AQ_CFG_VECS_MAX   8U
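
In this aq_cfg.h hunk the single on/off default is replaced by three explicit moderation modes — OFF = 0, ON = 1 and AUTO = 0xFFFF (the old "rate" magic value) — plus a ceiling for user-supplied timings. The ceiling follows directly from the hardware: the per-ring maximum-timer field is 9 bits wide (0x1FF = 511 ticks, see HW_ATL_INTR_MODER_MAX further down) and, as the B0 hunk notes, the timers tick in 2 µs units, so the largest expressible coalescing interval is 511 * 2 = 1022 µs, which is exactly what AQ_CFG_INTERRUPT_MODERATION_USEC_MAX encodes.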
index a761e91471dfd264f3aba944e5324f86d4cd9e12..d5e99b46887061a216652ca150897bba49f16223 100644 (file)
@@ -56,10 +56,6 @@ aq_ethtool_set_link_ksettings(struct net_device *ndev,
        return aq_nic_set_link_ksettings(aq_nic, cmd);
 }
 
-/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */
-static const unsigned int aq_ethtool_stat_queue_lines = 5U;
-static const unsigned int aq_ethtool_stat_queue_chars =
-       5U * ETH_GSTRING_LEN;
 static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
        "InPackets",
        "InUCast",
@@ -83,56 +79,26 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
        "InOctetsDma",
        "OutOctetsDma",
        "InDroppedDma",
-       "Queue[0] InPackets",
-       "Queue[0] OutPackets",
-       "Queue[0] InJumboPackets",
-       "Queue[0] InLroPackets",
-       "Queue[0] InErrors",
-       "Queue[1] InPackets",
-       "Queue[1] OutPackets",
-       "Queue[1] InJumboPackets",
-       "Queue[1] InLroPackets",
-       "Queue[1] InErrors",
-       "Queue[2] InPackets",
-       "Queue[2] OutPackets",
-       "Queue[2] InJumboPackets",
-       "Queue[2] InLroPackets",
-       "Queue[2] InErrors",
-       "Queue[3] InPackets",
-       "Queue[3] OutPackets",
-       "Queue[3] InJumboPackets",
-       "Queue[3] InLroPackets",
-       "Queue[3] InErrors",
-       "Queue[4] InPackets",
-       "Queue[4] OutPackets",
-       "Queue[4] InJumboPackets",
-       "Queue[4] InLroPackets",
-       "Queue[4] InErrors",
-       "Queue[5] InPackets",
-       "Queue[5] OutPackets",
-       "Queue[5] InJumboPackets",
-       "Queue[5] InLroPackets",
-       "Queue[5] InErrors",
-       "Queue[6] InPackets",
-       "Queue[6] OutPackets",
-       "Queue[6] InJumboPackets",
-       "Queue[6] InLroPackets",
-       "Queue[6] InErrors",
-       "Queue[7] InPackets",
-       "Queue[7] OutPackets",
-       "Queue[7] InJumboPackets",
-       "Queue[7] InLroPackets",
-       "Queue[7] InErrors",
+};
+
+static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
+       "Queue[%d] InPackets",
+       "Queue[%d] OutPackets",
+       "Queue[%d] Restarts",
+       "Queue[%d] InJumboPackets",
+       "Queue[%d] InLroPackets",
+       "Queue[%d] InErrors",
 };
 
 static void aq_ethtool_stats(struct net_device *ndev,
                             struct ethtool_stats *stats, u64 *data)
 {
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
 
-/* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */
-       BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8);
-       memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64));
+       memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
+                               ARRAY_SIZE(aq_ethtool_queue_stat_names) *
+                               cfg->vecs) * sizeof(u64));
        aq_nic_get_stats(aq_nic, data);
 }
 
@@ -154,8 +120,8 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
 
        strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
                sizeof(drvinfo->bus_info));
-       drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) -
-               (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines;
+       drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
+               cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
        drvinfo->testinfo_len = 0;
        drvinfo->regdump_len = regs_count;
        drvinfo->eedump_len = 0;
@@ -164,14 +130,25 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
 static void aq_ethtool_get_strings(struct net_device *ndev,
                                   u32 stringset, u8 *data)
 {
+       int i, si;
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
        struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
-
-       if (stringset == ETH_SS_STATS)
-               memcpy(data, *aq_ethtool_stat_names,
-                      sizeof(aq_ethtool_stat_names) -
-                      (AQ_CFG_VECS_MAX - cfg->vecs) *
-                      aq_ethtool_stat_queue_chars);
+       u8 *p = data;
+
+       if (stringset == ETH_SS_STATS) {
+               memcpy(p, *aq_ethtool_stat_names,
+                      sizeof(aq_ethtool_stat_names));
+               p = p + sizeof(aq_ethtool_stat_names);
+               for (i = 0; i < cfg->vecs; i++) {
+                       for (si = 0;
+                               si < ARRAY_SIZE(aq_ethtool_queue_stat_names);
+                               si++) {
+                               snprintf(p, ETH_GSTRING_LEN,
+                                        aq_ethtool_queue_stat_names[si], i);
+                               p += ETH_GSTRING_LEN;
+                       }
+               }
+       }
 }
 
 static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
@@ -182,9 +159,8 @@ static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
 
        switch (stringset) {
        case ETH_SS_STATS:
-               ret = ARRAY_SIZE(aq_ethtool_stat_names) -
-                       (AQ_CFG_VECS_MAX - cfg->vecs) *
-                       aq_ethtool_stat_queue_lines;
+               ret = ARRAY_SIZE(aq_ethtool_stat_names) +
+                       cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
                break;
        default:
                ret = -EOPNOTSUPP;
@@ -245,6 +221,69 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
        return err;
 }
 
+int aq_ethtool_get_coalesce(struct net_device *ndev,
+                           struct ethtool_coalesce *coal)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+       if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON ||
+           cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) {
+               coal->rx_coalesce_usecs = cfg->rx_itr;
+               coal->tx_coalesce_usecs = cfg->tx_itr;
+               coal->rx_max_coalesced_frames = 0;
+               coal->tx_max_coalesced_frames = 0;
+       } else {
+               coal->rx_coalesce_usecs = 0;
+               coal->tx_coalesce_usecs = 0;
+               coal->rx_max_coalesced_frames = 1;
+               coal->tx_max_coalesced_frames = 1;
+       }
+       return 0;
+}
+
+int aq_ethtool_set_coalesce(struct net_device *ndev,
+                           struct ethtool_coalesce *coal)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+       /* This is not yet supported
+        */
+       if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce)
+               return -EOPNOTSUPP;
+
+       /* Atlantic only supports timing based coalescing
+        */
+       if (coal->rx_max_coalesced_frames > 1 ||
+           coal->rx_coalesce_usecs_irq ||
+           coal->rx_max_coalesced_frames_irq)
+               return -EOPNOTSUPP;
+
+       if (coal->tx_max_coalesced_frames > 1 ||
+           coal->tx_coalesce_usecs_irq ||
+           coal->tx_max_coalesced_frames_irq)
+               return -EOPNOTSUPP;
+
+       /* We do not support frame counting. Check this
+        */
+       if (!(coal->rx_max_coalesced_frames == !coal->rx_coalesce_usecs))
+               return -EOPNOTSUPP;
+       if (!(coal->tx_max_coalesced_frames == !coal->tx_coalesce_usecs))
+               return -EOPNOTSUPP;
+
+       if (coal->rx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX ||
+           coal->tx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX)
+               return -EINVAL;
+
+       cfg->itr = AQ_CFG_INTERRUPT_MODERATION_ON;
+
+       cfg->rx_itr = coal->rx_coalesce_usecs;
+       cfg->tx_itr = coal->tx_coalesce_usecs;
+
+       return aq_nic_update_interrupt_moderation_settings(aq_nic);
+}
+
 const struct ethtool_ops aq_ethtool_ops = {
        .get_link            = aq_ethtool_get_link,
        .get_regs_len        = aq_ethtool_get_regs_len,
@@ -259,4 +298,6 @@ const struct ethtool_ops aq_ethtool_ops = {
        .get_ethtool_stats   = aq_ethtool_stats,
        .get_link_ksettings  = aq_ethtool_get_link_ksettings,
        .set_link_ksettings  = aq_ethtool_set_link_ksettings,
+       .get_coalesce        = aq_ethtool_get_coalesce,
+       .set_coalesce        = aq_ethtool_set_coalesce,
 };
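
Two things happen in the aq_ethtool.c hunk: the forty hard-coded "Queue[N] ..." strings are replaced by printf-style templates expanded once per active vector, and get/set_coalesce handlers are added so the new per-queue ITR settings become reachable through ethtool -c / ethtool -C. The ordinary userspace program below only demonstrates what the template expansion produces for a two-queue configuration; the template array is copied from the hunk, everything else is illustrative.

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32

static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
        "Queue[%d] InPackets",
        "Queue[%d] OutPackets",
        "Queue[%d] Restarts",
        "Queue[%d] InJumboPackets",
        "Queue[%d] InLroPackets",
        "Queue[%d] InErrors",
};

int main(void)
{
        char name[ETH_GSTRING_LEN];
        int vecs = 2;   /* pretend two RX/TX vectors are configured */

        for (int i = 0; i < vecs; i++)
                for (size_t si = 0;
                     si < sizeof(aq_ethtool_queue_stat_names) / ETH_GSTRING_LEN; si++) {
                        snprintf(name, sizeof(name),
                                 aq_ethtool_queue_stat_names[si], i);
                        puts(name);     /* "Queue[0] InPackets" ... "Queue[1] InErrors" */
                }
        return 0;
}

With the driver loaded, ethtool -S would then list exactly cfg->vecs * 6 queue lines in addition to the global counters, matching the counts returned by get_sset_count() and get_drvinfo() above.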
index bf9b3f020e106cb07fd7630073f146d7f54ccfa9..0207927dc8a6ab4ac76c46fb17669b7e50e7ae1e 100644 (file)
@@ -151,8 +151,7 @@ struct aq_hw_ops {
                                     [ETH_ALEN],
                                     u32 count);
 
-       int (*hw_interrupt_moderation_set)(struct aq_hw_s *self,
-                                          bool itr_enabled);
+       int (*hw_interrupt_moderation_set)(struct aq_hw_s *self);
 
        int (*hw_rss_set)(struct aq_hw_s *self,
                          struct aq_rss_parameters *rss_params);
@@ -163,6 +162,8 @@ struct aq_hw_ops {
        int (*hw_get_regs)(struct aq_hw_s *self,
                           struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
 
+       int (*hw_update_stats)(struct aq_hw_s *self);
+
        int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
                               unsigned int *p_count);
 
index 0a5bb4114eb4ca7529bc7a8164d3d12ffa4e78c8..483e97691eeae2de4604e49cdb8fd8d60fb0dda4 100644 (file)
@@ -16,6 +16,7 @@
 #include "aq_pci_func.h"
 #include "aq_nic_internal.h"
 
+#include <linux/moduleparam.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/timer.h>
 #include <linux/tcp.h>
 #include <net/ip.h>
 
+static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
+module_param_named(aq_itr, aq_itr, uint, 0644);
+MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");
+
+static unsigned int aq_itr_tx;
+module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
+MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");
+
+static unsigned int aq_itr_rx;
+module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
+MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
+
 static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
 {
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -61,9 +74,9 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
 
        cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
 
-       cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF;
-       cfg->itr = cfg->is_interrupt_moderation ?
-               AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U;
+       cfg->itr = aq_itr;
+       cfg->tx_itr = aq_itr_tx;
+       cfg->rx_itr = aq_itr_rx;
 
        cfg->is_rss = AQ_CFG_IS_RSS_DEF;
        cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
@@ -126,10 +139,12 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
        if (err)
                return err;
 
-       if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps)
+       if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
                pr_info("%s: link change old %d new %d\n",
                        AQ_CFG_DRV_NAME, self->link_status.mbps,
                        self->aq_hw->aq_link_status.mbps);
+               aq_nic_update_interrupt_moderation_settings(self);
+       }
 
        self->link_status = self->aq_hw->aq_link_status;
        if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
@@ -164,8 +179,8 @@ static void aq_nic_service_timer_cb(unsigned long param)
        if (err)
                goto err_exit;
 
-       self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
-                   self->aq_nic_cfg.is_interrupt_moderation);
+       if (self->aq_hw_ops.hw_update_stats)
+               self->aq_hw_ops.hw_update_stats(self->aq_hw);
 
        memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
        memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
@@ -334,6 +349,7 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
        }
        if (netif_running(ndev))
                netif_tx_disable(ndev);
+       netif_carrier_off(self->ndev);
 
        for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
                self->aq_vecs++) {
@@ -421,9 +437,8 @@ int aq_nic_start(struct aq_nic_s *self)
        if (err < 0)
                goto err_exit;
 
-       err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
-                           self->aq_nic_cfg.is_interrupt_moderation);
-       if (err < 0)
+       err = aq_nic_update_interrupt_moderation_settings(self);
+       if (err)
                goto err_exit;
        setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
                    (unsigned long)self);
@@ -645,6 +660,11 @@ err_exit:
        return err;
 }
 
+int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
+{
+       return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw);
+}
+
 int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
 {
        int err = 0;
@@ -899,6 +919,7 @@ int aq_nic_stop(struct aq_nic_s *self)
        unsigned int i = 0U;
 
        netif_tx_disable(self->ndev);
+       netif_carrier_off(self->ndev);
 
        del_timer_sync(&self->service_timer);
 
index 0ddd556ff901c25682739a059673fa3a8398aed3..4309983acdd6f7502fa05869f79336fd459dc2fe 100644 (file)
@@ -40,6 +40,8 @@ struct aq_nic_cfg_s {
        u32 vecs;               /* vecs==allocated irqs */
        u32 irq_type;
        u32 itr;
+       u16 rx_itr;
+       u16 tx_itr;
        u32 num_rss_queues;
        u32 mtu;
        u32 ucp_0x364;
@@ -49,7 +51,6 @@ struct aq_nic_cfg_s {
        u16 is_mc_list_enabled;
        u16 mc_list_count;
        bool is_autoneg;
-       bool is_interrupt_moderation;
        bool is_polling;
        bool is_rss;
        bool is_lro;
@@ -104,5 +105,6 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
 struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
 u32 aq_nic_get_fw_version(struct aq_nic_s *self);
 int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
+int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
 
 #endif /* AQ_NIC_H */
index 4c6c882c6a1c424238473ea40ecf9f0ebf7cee28..cadaa646c89f4b741382b4beee72c6ec3e3bfc18 100644 (file)
@@ -85,6 +85,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
        int err = 0;
        unsigned int bar = 0U;
        unsigned int port = 0U;
+       unsigned int numvecs = 0U;
 
        err = pci_enable_device(self->pdev);
        if (err < 0)
@@ -142,10 +143,12 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
                }
        }
 
-       /*enable interrupts */
+       numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs);
+       numvecs = min(numvecs, num_online_cpus());
+
+       /* enable interrupts */
 #if !AQ_CFG_FORCE_LEGACY_INT
-       err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs,
-                             self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX);
+       err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX);
 
        if (err < 0) {
                err = pci_alloc_irq_vectors(self->pdev, 1, 1,
@@ -153,7 +156,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
                if (err < 0)
                        goto err_exit;
        }
-#endif
+#endif /* AQ_CFG_FORCE_LEGACY_INT */
 
        /* net device init */
        for (port = 0; port < self->ports; ++port) {
@@ -265,6 +268,9 @@ void aq_pci_func_free(struct aq_pci_func_s *self)
                aq_nic_ndev_free(self->port[port]);
        }
 
+       if (self->mmio)
+               iounmap(self->mmio);
+
        kfree(self);
 
 err_exit:;
index 305ff8ffac2c9acc9633aa4fadde4a33d0392f8d..5fecc9a099ef7fd34d3a36b9ee1eb9c01a3f9fcd 100644 (file)
@@ -373,8 +373,11 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
        memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
        aq_vec_add_stats(self, &stats_rx, &stats_tx);
 
+       /* This data should mimic aq_ethtool_queue_stat_names structure
+        */
        data[count] += stats_rx.packets;
        data[++count] += stats_tx.packets;
+       data[++count] += stats_tx.queue_restarts;
        data[++count] += stats_rx.jumbo_packets;
        data[++count] += stats_rx.lro_packets;
        data[++count] += stats_rx.errors;
index c5a02df7a48b719a65b169938746d777f3f0b5a0..07b3c49a16a4266b4fb312bb79198f9ba0c60f04 100644 (file)
@@ -765,24 +765,23 @@ err_exit:
        return err;
 }
 
-static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
-                                                bool itr_enabled)
+static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 {
        unsigned int i = 0U;
+       u32 itr_rx;
 
-       if (itr_enabled && self->aq_nic_cfg->itr) {
-               if (self->aq_nic_cfg->itr != 0xFFFFU) {
+       if (self->aq_nic_cfg->itr) {
+               if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
                        u32 itr_ = (self->aq_nic_cfg->itr >> 1);
 
                        itr_ = min(AQ_CFG_IRQ_MASK, itr_);
 
-                       PHAL_ATLANTIC_A0->itr_rx = 0x80000000U |
-                                       (itr_ << 0x10);
+                       itr_rx = 0x80000000U | (itr_ << 0x10);
                } else  {
                        u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);
 
                        if (n < self->aq_link_status.mbps) {
-                               PHAL_ATLANTIC_A0->itr_rx = 0U;
+                               itr_rx = 0U;
                        } else {
                                static unsigned int hw_timers_tbl_[] = {
                                        0x01CU, /* 10Gbit */
@@ -797,8 +796,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
                                        hw_atl_utils_mbps_2_speed_index(
                                                self->aq_link_status.mbps);
 
-                               PHAL_ATLANTIC_A0->itr_rx =
-                                       0x80000000U |
+                               itr_rx = 0x80000000U |
                                        (hw_timers_tbl_[speed_index] << 0x10U);
                        }
 
@@ -806,11 +804,11 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
                        aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
                }
        } else {
-               PHAL_ATLANTIC_A0->itr_rx = 0U;
+               itr_rx = 0U;
        }
 
        for (i = HW_ATL_A0_RINGS_MAX; i--;)
-               reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i);
+               reg_irq_thr_set(self, itr_rx, i);
 
        return aq_hw_err_from_flags(self);
 }
@@ -885,6 +883,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
        .hw_rss_set                  = hw_atl_a0_hw_rss_set,
        .hw_rss_hash_set             = hw_atl_a0_hw_rss_hash_set,
        .hw_get_regs                 = hw_atl_utils_hw_get_regs,
+       .hw_update_stats             = hw_atl_utils_update_stats,
        .hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
        .hw_get_fw_version           = hw_atl_utils_get_fw_version,
 };
index 21784cc39dabdb9005a0c4bff26c64b0ac7a5286..ec68c20efcbdb6079b9dba4b8200ad8f1f450233 100644 (file)
@@ -788,39 +788,45 @@ err_exit:
        return err;
 }
 
-static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
-                                                bool itr_enabled)
+static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 {
        unsigned int i = 0U;
+       u32 itr_tx = 2U;
+       u32 itr_rx = 2U;
 
-       if (itr_enabled && self->aq_nic_cfg->itr) {
+       switch (self->aq_nic_cfg->itr) {
+       case  AQ_CFG_INTERRUPT_MODERATION_ON:
+       case  AQ_CFG_INTERRUPT_MODERATION_AUTO:
                tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
                tdm_tdm_intr_moder_en_set(self, 1U);
                rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
                rdm_rdm_intr_moder_en_set(self, 1U);
 
-               PHAL_ATLANTIC_B0->itr_tx = 2U;
-               PHAL_ATLANTIC_B0->itr_rx = 2U;
+               if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
+                       /* HW timers are in 2us units */
+                       int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
+                       int tx_min_timer = tx_max_timer / 2;
 
-               if (self->aq_nic_cfg->itr != 0xFFFFU) {
-                       unsigned int max_timer = self->aq_nic_cfg->itr / 2U;
-                       unsigned int min_timer = self->aq_nic_cfg->itr / 32U;
+                       int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
+                       int rx_min_timer = rx_max_timer / 2;
 
-                       max_timer = min(0x1FFU, max_timer);
-                       min_timer = min(0xFFU, min_timer);
+                       tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
+                       tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
+                       rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
+                       rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);
 
-                       PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U;
-                       PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U;
-                       PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U;
-                       PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U;
+                       itr_tx |= tx_min_timer << 0x8U;
+                       itr_tx |= tx_max_timer << 0x10U;
+                       itr_rx |= rx_min_timer << 0x8U;
+                       itr_rx |= rx_max_timer << 0x10U;
                } else {
                        static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
-                               {0xffU, 0xffU}, /* 10Gbit */
-                               {0xffU, 0x1ffU}, /* 5Gbit */
-                               {0xffU, 0x1ffU}, /* 5Gbit 5GS */
-                               {0xffU, 0x1ffU}, /* 2.5Gbit */
-                               {0xffU, 0x1ffU}, /* 1Gbit */
-                               {0xffU, 0x1ffU}, /* 100Mbit */
+                               {0xfU, 0xffU}, /* 10Gbit */
+                               {0xfU, 0x1ffU}, /* 5Gbit */
+                               {0xfU, 0x1ffU}, /* 5Gbit 5GS */
+                               {0xfU, 0x1ffU}, /* 2.5Gbit */
+                               {0xfU, 0x1ffU}, /* 1Gbit */
+                               {0xfU, 0x1ffU}, /* 100Mbit */
                        };
 
                        static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
@@ -836,34 +842,36 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
                                        hw_atl_utils_mbps_2_speed_index(
                                                self->aq_link_status.mbps);
 
-                       PHAL_ATLANTIC_B0->itr_tx |=
-                               hw_atl_b0_timers_table_tx_[speed_index]
-                               [0] << 0x8U; /* set min timer value */
-                       PHAL_ATLANTIC_B0->itr_tx |=
-                               hw_atl_b0_timers_table_tx_[speed_index]
-                               [1] << 0x10U; /* set max timer value */
-
-                       PHAL_ATLANTIC_B0->itr_rx |=
-                               hw_atl_b0_timers_table_rx_[speed_index]
-                               [0] << 0x8U; /* set min timer value */
-                       PHAL_ATLANTIC_B0->itr_rx |=
-                               hw_atl_b0_timers_table_rx_[speed_index]
-                               [1] << 0x10U; /* set max timer value */
+                       /* Update user visible ITR settings */
+                       self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
+                                                       [speed_index][1] * 2;
+                       self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
+                                                       [speed_index][1] * 2;
+
+                       itr_tx |= hw_atl_b0_timers_table_tx_
+                                               [speed_index][0] << 0x8U;
+                       itr_tx |= hw_atl_b0_timers_table_tx_
+                                               [speed_index][1] << 0x10U;
+
+                       itr_rx |= hw_atl_b0_timers_table_rx_
+                                               [speed_index][0] << 0x8U;
+                       itr_rx |= hw_atl_b0_timers_table_rx_
+                                               [speed_index][1] << 0x10U;
                }
-       } else {
+               break;
+       case AQ_CFG_INTERRUPT_MODERATION_OFF:
                tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
                tdm_tdm_intr_moder_en_set(self, 0U);
                rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
                rdm_rdm_intr_moder_en_set(self, 0U);
-               PHAL_ATLANTIC_B0->itr_tx = 0U;
-               PHAL_ATLANTIC_B0->itr_rx = 0U;
+               itr_tx = 0U;
+               itr_rx = 0U;
+               break;
        }
 
        for (i = HW_ATL_B0_RINGS_MAX; i--;) {
-               reg_tx_intr_moder_ctrl_set(self,
-                                          PHAL_ATLANTIC_B0->itr_tx, i);
-               reg_rx_intr_moder_ctrl_set(self,
-                                          PHAL_ATLANTIC_B0->itr_rx, i);
+               reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+               reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
        }
 
        return aq_hw_err_from_flags(self);
@@ -939,6 +947,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
        .hw_rss_set                  = hw_atl_b0_hw_rss_set,
        .hw_rss_hash_set             = hw_atl_b0_hw_rss_hash_set,
        .hw_get_regs                 = hw_atl_utils_hw_get_regs,
+       .hw_update_stats             = hw_atl_utils_update_stats,
        .hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
        .hw_get_fw_version           = hw_atl_utils_get_fw_version,
 };
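
The B0 moderation rewrite drops the itr_tx/itr_rx values cached in the PHAL structure and computes the per-ring control words locally. With moderation forced ON, the user's microsecond value is converted to the 2 µs hardware units, halved again for the minimum timer, clamped to the field widths, and packed into bits 8 (min) and 16 (max) of a control word whose low bits stay at 2. A standalone rendering of that packing, using the constants from hw_atl_b0_internal.h below:

#include <stdio.h>
#include <stdint.h>

#define HW_ATL_INTR_MODER_MAX 0x1FF     /* 9-bit max-timer field */
#define HW_ATL_INTR_MODER_MIN 0xFF      /* 8-bit min-timer field */

static uint32_t pack_moder(int usecs)
{
        int max_timer = usecs / 2;      /* hardware timers tick every 2 us */
        int min_timer = max_timer / 2;

        if (max_timer > HW_ATL_INTR_MODER_MAX)
                max_timer = HW_ATL_INTR_MODER_MAX;
        if (min_timer > HW_ATL_INTR_MODER_MIN)
                min_timer = HW_ATL_INTR_MODER_MIN;

        return 2U | (uint32_t)min_timer << 8 | (uint32_t)max_timer << 16;
}

int main(void)
{
        /* e.g. "ethtool -C eth0 rx-usecs 100" ends up as rx_itr = 100 */
        printf("control word for 100 us: 0x%08x\n", pack_moder(100));   /* 0x00321902 */
        return 0;
}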
index fcf89e25a773ee869b70160a444ab43af076fbf8..9aa2c6edfca23276335cd9c4bf23c8528f9305e1 100644 (file)
 
 #define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U
 
+#define HW_ATL_INTR_MODER_MAX  0x1FF
+#define HW_ATL_INTR_MODER_MIN  0xFF
+
 /* Hardware tx descriptor */
 struct __packed hw_atl_txd_s {
        u64 buf_addr;
index bf734b32e44b6b1638f043a75042e34fc7e8d7e2..1fe016fc4bc704361ca68ee39f3e443715505e8c 100644 (file)
@@ -255,6 +255,15 @@ err_exit:
        return err;
 }
 
+int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
+                              struct hw_aq_atl_utils_mbox_header *pmbox)
+{
+       return hw_atl_utils_fw_downld_dwords(self,
+                                     PHAL_ATLANTIC->mbox_addr,
+                                     (u32 *)(void *)pmbox,
+                                     sizeof(*pmbox) / sizeof(u32));
+}
+
 void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
                                 struct hw_aq_atl_utils_mbox *pmbox)
 {
@@ -267,9 +276,6 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
        if (err < 0)
                goto err_exit;
 
-       if (pmbox != &PHAL_ATLANTIC->mbox)
-               memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox));
-
        if (IS_CHIP_FEATURE(REVISION_A0)) {
                unsigned int mtu = self->aq_nic_cfg ?
                                        self->aq_nic_cfg->mtu : 1514U;
@@ -299,17 +305,17 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
 {
        int err = 0;
        u32 transaction_id = 0;
+       struct hw_aq_atl_utils_mbox_header mbox;
 
        if (state == MPI_RESET) {
-               hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
+               hw_atl_utils_mpi_read_mbox(self, &mbox);
 
-               transaction_id = PHAL_ATLANTIC->mbox.transaction_id;
+               transaction_id = mbox.transaction_id;
 
                AQ_HW_WAIT_FOR(transaction_id !=
-                               (hw_atl_utils_mpi_read_stats
-                                       (self, &PHAL_ATLANTIC->mbox),
-                                       PHAL_ATLANTIC->mbox.transaction_id),
-                                       1000U, 100U);
+                               (hw_atl_utils_mpi_read_mbox(self, &mbox),
+                                mbox.transaction_id),
+                              1000U, 100U);
                if (err < 0)
                        goto err_exit;
        }
@@ -492,16 +498,51 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
        return 0;
 }
 
+int hw_atl_utils_update_stats(struct aq_hw_s *self)
+{
+       struct hw_atl_s *hw_self = PHAL_ATLANTIC;
+       struct hw_aq_atl_utils_mbox mbox;
+
+       if (!self->aq_link_status.mbps)
+               return 0;
+
+       hw_atl_utils_mpi_read_stats(self, &mbox);
+
+#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
+                       mbox.stats._N_ - hw_self->last_stats._N_)
+
+       AQ_SDELTA(uprc);
+       AQ_SDELTA(mprc);
+       AQ_SDELTA(bprc);
+       AQ_SDELTA(erpt);
+
+       AQ_SDELTA(uptc);
+       AQ_SDELTA(mptc);
+       AQ_SDELTA(bptc);
+       AQ_SDELTA(erpr);
+
+       AQ_SDELTA(ubrc);
+       AQ_SDELTA(ubtc);
+       AQ_SDELTA(mbrc);
+       AQ_SDELTA(mbtc);
+       AQ_SDELTA(bbrc);
+       AQ_SDELTA(bbtc);
+       AQ_SDELTA(dpc);
+
+#undef AQ_SDELTA
+
+       memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
+
+       return 0;
+}
+
 int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
                              u64 *data, unsigned int *p_count)
 {
-       struct hw_atl_stats_s *stats = NULL;
+       struct hw_atl_s *hw_self = PHAL_ATLANTIC;
+       struct hw_atl_stats_s *stats = &hw_self->curr_stats;
        int i = 0;
 
-       hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
-
-       stats = &PHAL_ATLANTIC->mbox.stats;
-
        data[i] = stats->uprc + stats->mprc + stats->bprc;
        data[++i] = stats->uprc;
        data[++i] = stats->mprc;
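
hw_atl_utils_update_stats() turns the firmware mailbox counters into monotonic driver counters: each call (skipped while the link is down) takes the difference between the fresh mailbox snapshot and the previously seen one (last_stats), adds it to an accumulator (curr_stats) via the AQ_SDELTA() macro, then remembers the snapshot. hw_atl_utils_get_hw_stats() now reads only the accumulator instead of hitting the mailbox on every ethtool query. A tiny userspace model of the delta accumulation, with a single counter standing in for the full stats structure:

#include <stdio.h>
#include <stdint.h>

static uint64_t curr, last_seen;        /* driver-side accumulator and last snapshot */

static void update_stats(uint64_t fw_counter)   /* value read from the firmware mailbox */
{
        curr += fw_counter - last_seen;         /* AQ_SDELTA(): accumulate the delta only */
        last_seen = fw_counter;
}

int main(void)
{
        update_stats(100);              /* first snapshot */
        update_stats(150);              /* +50 */
        update_stats(150);              /* idle interval, +0 */
        printf("accumulated = %llu\n", (unsigned long long)curr);       /* 150 */
        return 0;
}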
index e0360a6b2202ef5b4ac683a44edcde9bf20ebedc..c99cc690e425bb72907df675e04a196819cfec02 100644 (file)
@@ -115,19 +115,22 @@ struct __packed hw_aq_atl_utils_fw_rpc {
        };
 };
 
-struct __packed hw_aq_atl_utils_mbox {
+struct __packed hw_aq_atl_utils_mbox_header {
        u32 version;
        u32 transaction_id;
-       int error;
+       u32 error;
+};
+
+struct __packed hw_aq_atl_utils_mbox {
+       struct hw_aq_atl_utils_mbox_header header;
        struct hw_atl_stats_s stats;
 };
 
 struct __packed hw_atl_s {
        struct aq_hw_s base;
-       struct hw_aq_atl_utils_mbox mbox;
+       struct hw_atl_stats_s last_stats;
+       struct hw_atl_stats_s curr_stats;
        u64 speed;
-       u32 itr_tx;
-       u32 itr_rx;
        unsigned int chip_features;
        u32 fw_ver_actual;
        atomic_t dpc;
@@ -170,6 +173,9 @@ enum hal_atl_utils_fw_state_e {
 
 void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
 
+int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
+                              struct hw_aq_atl_utils_mbox_header *pmbox);
+
 void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
                                 struct hw_aq_atl_utils_mbox *pmbox);
 
@@ -199,6 +205,8 @@ int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
 
 int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
 
+int hw_atl_utils_update_stats(struct aq_hw_s *self);
+
 int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
                              u64 *data,
                              unsigned int *p_count);
index aacec8bc19d5fbf6fe0f007d8a6a59fe2df23c8d..dc5de275352a7f589b0d410b6127bee4ea4b815c 100644 (file)
@@ -214,6 +214,8 @@ static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
 };
 
+static struct workqueue_struct *bnxt_pf_wq;
+
 static bool bnxt_vf_pciid(enum board_idx idx)
 {
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
@@ -1024,12 +1026,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
        return 0;
 }
 
+static void bnxt_queue_sp_work(struct bnxt *bp)
+{
+       if (BNXT_PF(bp))
+               queue_work(bnxt_pf_wq, &bp->sp_task);
+       else
+               schedule_work(&bp->sp_task);
+}
+
+static void bnxt_cancel_sp_work(struct bnxt *bp)
+{
+       if (BNXT_PF(bp))
+               flush_workqueue(bnxt_pf_wq);
+       else
+               cancel_work_sync(&bp->sp_task);
+}
+
 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 {
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-               schedule_work(&bp->sp_task);
+               bnxt_queue_sp_work(bp);
        }
        rxr->rx_next_cons = 0xffff;
 }
@@ -1717,7 +1735,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
        default:
                goto async_event_process_exit;
        }
-       schedule_work(&bp->sp_task);
+       bnxt_queue_sp_work(bp);
 async_event_process_exit:
        bnxt_ulp_async_events(bp, cmpl);
        return 0;
@@ -1751,7 +1769,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
 
                set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
                set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
-               schedule_work(&bp->sp_task);
+               bnxt_queue_sp_work(bp);
                break;
 
        case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -3448,6 +3466,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
        return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
 }
 
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
+                             int timeout)
+{
+       return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
+}
+
 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
 {
        int rc;
@@ -6327,7 +6351,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
        }
 
        if (link_re_init) {
+               mutex_lock(&bp->link_lock);
                rc = bnxt_update_phy_setting(bp);
+               mutex_unlock(&bp->link_lock);
                if (rc)
                        netdev_warn(bp->dev, "failed to update phy settings\n");
        }
@@ -6647,7 +6673,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
                vnic->rx_mask = mask;
 
                set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
-               schedule_work(&bp->sp_task);
+               bnxt_queue_sp_work(bp);
        }
 }
 
@@ -6920,7 +6946,7 @@ static void bnxt_tx_timeout(struct net_device *dev)
 
        netdev_err(bp->dev,  "TX timeout detected, starting reset task!\n");
        set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-       schedule_work(&bp->sp_task);
+       bnxt_queue_sp_work(bp);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6952,7 +6978,7 @@ static void bnxt_timer(unsigned long data)
        if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
            bp->stats_coal_ticks) {
                set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
-               schedule_work(&bp->sp_task);
+               bnxt_queue_sp_work(bp);
        }
 bnxt_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
@@ -7025,30 +7051,28 @@ static void bnxt_sp_task(struct work_struct *work)
        if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
                bnxt_hwrm_port_qstats(bp);
 
-       /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
-        * must be the last functions to be called before exiting.
-        */
        if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
-               int rc = 0;
+               int rc;
 
+               mutex_lock(&bp->link_lock);
                if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
                                       &bp->sp_event))
                        bnxt_hwrm_phy_qcaps(bp);
 
-               bnxt_rtnl_lock_sp(bp);
-               if (test_bit(BNXT_STATE_OPEN, &bp->state))
-                       rc = bnxt_update_link(bp, true);
-               bnxt_rtnl_unlock_sp(bp);
+               rc = bnxt_update_link(bp, true);
+               mutex_unlock(&bp->link_lock);
                if (rc)
                        netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
                                   rc);
        }
        if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
-               bnxt_rtnl_lock_sp(bp);
-               if (test_bit(BNXT_STATE_OPEN, &bp->state))
-                       bnxt_get_port_module_status(bp);
-               bnxt_rtnl_unlock_sp(bp);
+               mutex_lock(&bp->link_lock);
+               bnxt_get_port_module_status(bp);
+               mutex_unlock(&bp->link_lock);
        }
+       /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
+        * must be the last functions to be called before exiting.
+        */
        if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
                bnxt_reset(bp, false);
 
@@ -7433,7 +7457,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
        spin_unlock_bh(&bp->ntp_fltr_lock);
 
        set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
-       schedule_work(&bp->sp_task);
+       bnxt_queue_sp_work(bp);
 
        return new_fltr->sw_id;
 
@@ -7516,7 +7540,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
                if (bp->vxlan_port_cnt == 1) {
                        bp->vxlan_port = ti->port;
                        set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
-                       schedule_work(&bp->sp_task);
+                       bnxt_queue_sp_work(bp);
                }
                break;
        case UDP_TUNNEL_TYPE_GENEVE:
@@ -7533,7 +7557,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
                return;
        }
 
-       schedule_work(&bp->sp_task);
+       bnxt_queue_sp_work(bp);
 }
 
 static void bnxt_udp_tunnel_del(struct net_device *dev,
@@ -7572,7 +7596,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
                return;
        }
 
-       schedule_work(&bp->sp_task);
+       bnxt_queue_sp_work(bp);
 }
 
 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -7720,7 +7744,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
        pci_disable_pcie_error_reporting(pdev);
        unregister_netdev(dev);
        bnxt_shutdown_tc(bp);
-       cancel_work_sync(&bp->sp_task);
+       bnxt_cancel_sp_work(bp);
        bp->sp_event = 0;
 
        bnxt_clear_int_mode(bp);
@@ -7748,6 +7772,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
                           rc);
                return rc;
        }
+       mutex_init(&bp->link_lock);
 
        rc = bnxt_update_link(bp, false);
        if (rc) {
@@ -7946,7 +7971,7 @@ static void bnxt_parse_log_pcie_link(struct bnxt *bp)
        enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
        enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
 
-       if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
+       if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
            speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
                netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
        else
@@ -8138,8 +8163,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        else
                device_set_wakeup_capable(&pdev->dev, false);
 
-       if (BNXT_PF(bp))
+       if (BNXT_PF(bp)) {
+               if (!bnxt_pf_wq) {
+                       bnxt_pf_wq =
+                               create_singlethread_workqueue("bnxt_pf_wq");
+                       if (!bnxt_pf_wq) {
+                               dev_err(&pdev->dev, "Unable to create workqueue.\n");
+                               goto init_err_pci_clean;
+                       }
+               }
                bnxt_init_tc(bp);
+       }
 
        rc = register_netdev(dev);
        if (rc)
@@ -8375,4 +8409,17 @@ static struct pci_driver bnxt_pci_driver = {
 #endif
 };
 
-module_pci_driver(bnxt_pci_driver);
+static int __init bnxt_init(void)
+{
+       return pci_register_driver(&bnxt_pci_driver);
+}
+
+static void __exit bnxt_exit(void)
+{
+       pci_unregister_driver(&bnxt_pci_driver);
+       if (bnxt_pf_wq)
+               destroy_workqueue(bnxt_pf_wq);
+}
+
+module_init(bnxt_init);
+module_exit(bnxt_exit);
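
The bnxt change routes all PF slow-path work through a driver-private single-threaded workqueue (bnxt_pf_wq) via bnxt_queue_sp_work(), while VFs keep using schedule_work(); teardown flushes or cancels accordingly. That in turn forces the switch from module_pci_driver() to explicit module_init/module_exit so the workqueue can be destroyed after the PCI driver is unregistered. A condensed sketch of that lifecycle — simplified in that it creates the queue in module init, whereas the patch creates it lazily in probe for the first PF device:

/* Sketch of the dedicated-workqueue lifecycle introduced above. */
static struct workqueue_struct *pf_wq;

static int __init sketch_init(void)
{
        pf_wq = create_singlethread_workqueue("bnxt_pf_wq");
        if (!pf_wq)
                return -ENOMEM;
        return pci_register_driver(&bnxt_pci_driver);   /* probe may queue_work(pf_wq, ...) */
}

static void __exit sketch_exit(void)
{
        pci_unregister_driver(&bnxt_pci_driver);        /* no new work queued past this point */
        destroy_workqueue(pf_wq);                       /* flushes and frees the queue */
}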
index 7b888d4b2b552a406b8157b3f037a299dc7645a4..c911e69ff25f67d0402b1f06e48de7245cea8a88 100644 (file)
@@ -1290,6 +1290,10 @@ struct bnxt {
        unsigned long           *ntp_fltr_bmap;
        int                     ntp_fltr_count;
 
+       /* To protect link related settings during link changes and
+        * ethtool settings changes.
+        */
+       struct mutex            link_lock;
        struct bnxt_link_info   link_info;
        struct ethtool_eee      eee;
        u32                     lpi_tmr_lo;
@@ -1358,6 +1362,7 @@ void bnxt_set_ring_params(struct bnxt *);
 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
 void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
 int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
 int hwrm_send_message(struct bnxt *, void *, u32, int);
 int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
index aa1f3a2c7a7865cce7970e2e7b75f74b218d5df7..fed37cd9ae1d464af02335c072b4b3676b024e0b 100644 (file)
@@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
        req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
-       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                u8 *pri2cos = &resp->pri0_cos_queue_id;
                int i, j;
@@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
                        }
                }
        }
+       mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
 }
 
@@ -119,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
        int rc, i;
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
-       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-       if (rc)
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc) {
+               mutex_unlock(&bp->hwrm_cmd_lock);
                return rc;
+       }
 
        data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
        for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
@@ -143,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
                        }
                }
        }
+       mutex_unlock(&bp->hwrm_cmd_lock);
        return 0;
 }
 
@@ -240,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
        int rc;
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
-       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-       if (rc)
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc) {
+               mutex_unlock(&bp->hwrm_cmd_lock);
                return rc;
+       }
 
        pri_mask = le32_to_cpu(resp->flags);
        pfc->pfc_en = pri_mask;
+       mutex_unlock(&bp->hwrm_cmd_lock);
        return 0;
 }
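
Note on the pattern in the DCB hunks above: hwrm_send_message() takes and drops bp->hwrm_cmd_lock internally, so by the time the caller inspects the response buffer the firmware channel may already have been reused. The fix switches to _hwrm_send_message() under an explicit lock and only unlocks after the reply has been parsed. A minimal sketch of the shape (resp->some_field is a placeholder, not a real HWRM field):

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		val = le32_to_cpu(resp->some_field);	/* copy out while locked */
	mutex_unlock(&bp->hwrm_cmd_lock);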
 
index 8eff05a3e0e4b748c3a0d35661ce7b1901ddef61..3cbe771b335296ce526122e892349e741ddb43b9 100644 (file)
@@ -1052,6 +1052,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
        u32 ethtool_speed;
 
        ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
+       mutex_lock(&bp->link_lock);
        bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
 
        ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
@@ -1099,6 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
                        base->port = PORT_FIBRE;
        }
        base->phy_address = link_info->phy_addr;
+       mutex_unlock(&bp->link_lock);
 
        return 0;
 }
@@ -1190,6 +1192,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
        if (!BNXT_SINGLE_PF(bp))
                return -EOPNOTSUPP;
 
+       mutex_lock(&bp->link_lock);
        if (base->autoneg == AUTONEG_ENABLE) {
                BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
                                        advertising);
@@ -1234,6 +1237,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
                rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
 
 set_setting_exit:
+       mutex_unlock(&bp->link_lock);
        return rc;
 }
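
The new bp->link_lock introduced in bnxt.h serializes these ethtool paths against asynchronous link-change handling, so bp->link_info is never observed half-updated. A sketch of the other side of that contract (the handler name and body here are illustrative, not the driver's exact code):

	/* illustrative link-change path taking the same mutex */
	static void bnxt_handle_link_change(struct bnxt *bp)
	{
		mutex_lock(&bp->link_lock);
		/* ... refresh bp->link_info from firmware ... */
		mutex_unlock(&bp->link_lock);
	}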
 
@@ -1805,7 +1809,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
        req.dir_ordinal = cpu_to_le16(ordinal);
        req.dir_ext = cpu_to_le16(ext);
        req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
-       rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc == 0) {
                if (index)
                        *index = le16_to_cpu(output->dir_idx);
@@ -1814,6 +1819,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
                if (data_length)
                        *data_length = le32_to_cpu(output->dir_data_length);
        }
+       mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
 }
 
index d37925a8a65b6d79d96063d0eabcac9bae8dadd9..5ee18660bc33a2572320ac3210029e883841b044 100644 (file)
@@ -502,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
        int rc = 0, vfs_supported;
        int min_rx_rings, min_tx_rings, min_rss_ctxs;
        int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+       int avail_cp, avail_stat;
 
        /* Check if we can enable requested num of vf's. At a minimum
         * we require 1 RX 1 TX rings for each VF. In this minimum conf
@@ -509,6 +510,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
         */
        vfs_supported = *num_vfs;
 
+       avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
+       avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
+       avail_cp = min_t(int, avail_cp, avail_stat);
+
        while (vfs_supported) {
                min_rx_rings = vfs_supported;
                min_tx_rings = vfs_supported;
@@ -523,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
                            min_rx_rings)
                                rx_ok = 1;
                }
-               if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings)
+               if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
+                   avail_cp < min_rx_rings)
                        rx_ok = 0;
 
-               if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+               if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
+                   avail_cp >= min_tx_rings)
                        tx_ok = 1;
 
                if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
index e7f54948173f7e6764b57ba5b99636ba206ba323..5b19826a7e16c731799a4b1cfc4c66d78d91f72d 100644 (file)
@@ -1847,7 +1847,7 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
        struct lio *lio = container_of(ptp, struct lio, ptp_info);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
 
-       ns = timespec_to_ns(ts);
+       ns = timespec64_to_ns(ts);
 
        spin_lock_irqsave(&lio->ptp_lock, flags);
        lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
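
The liquidio change above follows the PTP core's move to struct timespec64 in the settime callback; timespec64_to_ns() is effectively the usual seconds-to-nanoseconds conversion:

	/* effectively what timespec64_to_ns(ts) computes */
	s64 ns = (s64)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;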
index cb8182f4fdfa1725f8d4024741e00219376d7326..c66abd476023a401b14c53abf7026e3147f13f62 100644 (file)
@@ -1093,11 +1093,12 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
  * places them in a descriptor array, scrq_arr
  */
 
-static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
-                            union sub_crq *scrq_arr)
+static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+                           union sub_crq *scrq_arr)
 {
        union sub_crq hdr_desc;
        int tmp_len = len;
+       int num_descs = 0;
        u8 *data, *cur;
        int tmp;
 
@@ -1126,7 +1127,10 @@ static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
                tmp_len -= tmp;
                *scrq_arr = hdr_desc;
                scrq_arr++;
+               num_descs++;
        }
+
+       return num_descs;
 }
 
 /**
@@ -1144,16 +1148,12 @@ static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
                                int *num_entries, u8 hdr_field)
 {
        int hdr_len[3] = {0, 0, 0};
-       int tot_len, len;
+       int tot_len;
        u8 *hdr_data = txbuff->hdr_data;
 
        tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
                                 txbuff->hdr_data);
-       len = tot_len;
-       len -= 24;
-       if (len > 0)
-               num_entries += len % 29 ? len / 29 + 1 : len / 29;
-       create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
+       *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
                         txbuff->indir_arr + 1);
 }
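
The ibmvnic fix matters because the old code did pointer arithmetic on the num_entries parameter instead of updating the value it points to, so the caller's descriptor count never grew. A minimal illustration of the distinction (toy functions, not driver code):

	static void wrong(int *num_entries)
	{
		num_entries += 2;	/* moves the local pointer; caller sees nothing */
	}

	static void right(int *num_entries)
	{
		*num_entries += 2;	/* updates the caller's counter */
	}

Returning the count from create_hdr_descs() and adding it via *num_entries += also removes the duplicated size arithmetic in the caller.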
 
index 57505b1df98dfb65e5033808c05c4cf9954600de..d591b3e6bd7c511d974c20e2a2e27e44816fa84f 100644 (file)
@@ -298,7 +298,7 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
 }
 
 /**
- * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking
+ * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  * @data: word read from the Shadow RAM
index 1519dfb851d01a3628c3ad8411be96e3c2e0643e..2756131495f07f269f4c7f2e223dda4dcb7153f0 100644 (file)
@@ -1037,6 +1037,32 @@ reset_latency:
        return false;
 }
 
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+                              struct i40e_rx_buffer *old_buff)
+{
+       struct i40e_rx_buffer *new_buff;
+       u16 nta = rx_ring->next_to_alloc;
+
+       new_buff = &rx_ring->rx_bi[nta];
+
+       /* update, and store next to alloc */
+       nta++;
+       rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+       /* transfer page from old buffer to new buffer */
+       new_buff->dma           = old_buff->dma;
+       new_buff->page          = old_buff->page;
+       new_buff->page_offset   = old_buff->page_offset;
+       new_buff->pagecnt_bias  = old_buff->pagecnt_bias;
+}
+
 /**
  * i40e_rx_is_programming_status - check for programming status descriptor
  * @qw: qword representing status_error_len in CPU ordering
@@ -1071,15 +1097,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
                                          union i40e_rx_desc *rx_desc,
                                          u64 qw)
 {
-       u32 ntc = rx_ring->next_to_clean + 1;
+       struct i40e_rx_buffer *rx_buffer;
+       u32 ntc = rx_ring->next_to_clean;
        u8 id;
 
        /* fetch, update, and store next to clean */
+       rx_buffer = &rx_ring->rx_bi[ntc++];
        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;
 
        prefetch(I40E_RX_DESC(rx_ring, ntc));
 
+       /* place unused page back on the ring */
+       i40e_reuse_rx_page(rx_ring, rx_buffer);
+       rx_ring->rx_stats.page_reuse_count++;
+
+       /* clear contents of buffer_info */
+       rx_buffer->page = NULL;
+
        id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
                  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
 
@@ -1638,32 +1673,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
        return false;
 }
 
-/**
- * i40e_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
-                              struct i40e_rx_buffer *old_buff)
-{
-       struct i40e_rx_buffer *new_buff;
-       u16 nta = rx_ring->next_to_alloc;
-
-       new_buff = &rx_ring->rx_bi[nta];
-
-       /* update, and store next to alloc */
-       nta++;
-       rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
-       /* transfer page from old buffer to new buffer */
-       new_buff->dma           = old_buff->dma;
-       new_buff->page          = old_buff->page;
-       new_buff->page_offset   = old_buff->page_offset;
-       new_buff->pagecnt_bias  = old_buff->pagecnt_bias;
-}
-
 /**
  * i40e_page_is_reusable - check if any reuse is possible
  * @page: page struct to check
index 9d5e7cf288bef2b1eb603843c7b57945918aeb72..f3315bc874adf8228f6277936c961d7afca95353 100644 (file)
@@ -96,6 +96,7 @@ struct mlxsw_core {
        const struct mlxsw_bus *bus;
        void *bus_priv;
        const struct mlxsw_bus_info *bus_info;
+       struct workqueue_struct *emad_wq;
        struct list_head rx_listener_list;
        struct list_head event_listener_list;
        struct {
@@ -465,7 +466,7 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
 {
        unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
 
-       mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
+       queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
 }
 
 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
@@ -587,12 +588,18 @@ static const struct mlxsw_listener mlxsw_emad_rx_listener =
 
 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 {
+       struct workqueue_struct *emad_wq;
        u64 tid;
        int err;
 
        if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
                return 0;
 
+       emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+       if (!emad_wq)
+               return -ENOMEM;
+       mlxsw_core->emad_wq = emad_wq;
+
        /* Set the upper 32 bits of the transaction ID field to a random
         * number. This allows us to discard EMADs addressed to other
         * devices.
@@ -619,6 +626,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 err_emad_trap_set:
        mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
                                   mlxsw_core);
+       destroy_workqueue(mlxsw_core->emad_wq);
        return err;
 }
 
@@ -631,6 +639,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
        mlxsw_core->emad.use_emad = false;
        mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
                                   mlxsw_core);
+       destroy_workqueue(mlxsw_core->emad_wq);
 }
 
 static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
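
The mlxsw change above moves EMAD timeout handling off the system workqueue onto a driver-owned queue created with WQ_MEM_RECLAIM, so register transactions can still time out and complete while the system is reclaiming memory through this device. Condensed, the lifecycle is the standard one (error handling trimmed):

	wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
	if (!wq)
		return -ENOMEM;

	queue_delayed_work(wq, &trans->timeout_dw,
			   msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));

	/* on fini and on the init error path */
	destroy_workqueue(wq);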
index cc27c5de5a1dd83060508910a9e99b8353a7c2be..4afc8486eb9a7ee5242d58393fbba643e2b75d74 100644 (file)
@@ -6401,6 +6401,36 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
        mlxsw_reg_mgpc_opcode_set(payload, opcode);
 }
 
+/* TIGCR - Tunneling IPinIP General Configuration Register
+ * -------------------------------------------------------
+ * The TIGCR register is used for setting up the IPinIP Tunnel configuration.
+ */
+#define MLXSW_REG_TIGCR_ID 0xA801
+#define MLXSW_REG_TIGCR_LEN 0x10
+
+MLXSW_REG_DEFINE(tigcr, MLXSW_REG_TIGCR_ID, MLXSW_REG_TIGCR_LEN);
+
+/* reg_tigcr_ipip_ttlc
+ * For IPinIP Tunnel encapsulation: whether to copy the ttl from the packet
+ * header.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tigcr, ttlc, 0x04, 8, 1);
+
+/* reg_tigcr_ipip_ttl_uc
+ * The TTL for IPinIP Tunnel encapsulation of unicast packets if
+ * reg_tigcr_ipip_ttlc is unset.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tigcr, ttl_uc, 0x04, 0, 8);
+
+static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc)
+{
+       MLXSW_REG_ZERO(tigcr, payload);
+       mlxsw_reg_tigcr_ttlc_set(payload, ttlc);
+       mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc);
+}
+
 /* SBPR - Shared Buffer Pools Register
  * -----------------------------------
  * The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -6881,6 +6911,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
        MLXSW_REG(mcc),
        MLXSW_REG(mcda),
        MLXSW_REG(mgpc),
+       MLXSW_REG(tigcr),
        MLXSW_REG(sbpr),
        MLXSW_REG(sbcm),
        MLXSW_REG(sbpm),
index c16718d296d389b0330b8aa7a75f130a5eb4bdbf..5189022a1c8c335c42901b5d288bbdf830512f46 100644 (file)
@@ -5896,11 +5896,20 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
        kfree(mlxsw_sp->router->rifs);
 }
 
+static int
+mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
+{
+       char tigcr_pl[MLXSW_REG_TIGCR_LEN];
+
+       mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
+}
+
 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
 {
        mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
        INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
-       return 0;
+       return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
 }
 
 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
index 1c0187f0af51f87b070c21a825b00e4a48887428..e118b5f2399669f172aaf6f80be6f6c221a235d0 100644 (file)
@@ -1180,10 +1180,14 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
 {
        void *frag;
 
-       if (!dp->xdp_prog)
+       if (!dp->xdp_prog) {
                frag = netdev_alloc_frag(dp->fl_bufsz);
-       else
-               frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
+       } else {
+               struct page *page;
+
+               page = alloc_page(GFP_KERNEL | __GFP_COLD);
+               frag = page ? page_address(page) : NULL;
+       }
        if (!frag) {
                nn_dp_warn(dp, "Failed to alloc receive page frag\n");
                return NULL;
@@ -1203,10 +1207,14 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
 {
        void *frag;
 
-       if (!dp->xdp_prog)
+       if (!dp->xdp_prog) {
                frag = napi_alloc_frag(dp->fl_bufsz);
-       else
-               frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
+       } else {
+               struct page *page;
+
+               page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+               frag = page ? page_address(page) : NULL;
+       }
        if (!frag) {
                nn_dp_warn(dp, "Failed to alloc receive page frag\n");
                return NULL;
index 07969f06df102706ebae26ff052fae4ffa132d80..dc016dfec64d653946d0f5be3314d597b189f9ff 100644 (file)
@@ -464,7 +464,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 
                do {
                        start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
-                       *data++ = nn->r_vecs[i].rx_pkts;
+                       data[0] = nn->r_vecs[i].rx_pkts;
                        tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
                        tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
                        tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
@@ -472,14 +472,16 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 
                do {
                        start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
-                       *data++ = nn->r_vecs[i].tx_pkts;
-                       *data++ = nn->r_vecs[i].tx_busy;
+                       data[1] = nn->r_vecs[i].tx_pkts;
+                       data[2] = nn->r_vecs[i].tx_busy;
                        tmp[3] = nn->r_vecs[i].hw_csum_tx;
                        tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
                        tmp[5] = nn->r_vecs[i].tx_gather;
                        tmp[6] = nn->r_vecs[i].tx_lso;
                } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
 
+               data += 3;
+
                for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
                        gathered_stats[j] += tmp[j];
        }
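
The nfp ethtool fix works because everything inside a u64_stats_fetch_begin()/u64_stats_fetch_retry() loop may execute more than once; post-incrementing the output pointer inside the loop therefore skewed the stats layout whenever a retry happened. Writing to fixed offsets and advancing data once after the loop keeps retries idempotent. The general shape of such a read loop (field names illustrative):

	unsigned int start;
	u64 pkts;

	do {
		start = u64_stats_fetch_begin(&r->syncp);
		pkts = r->pkts;		/* only re-writes the same destination */
	} while (u64_stats_fetch_retry(&r->syncp, start));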
index e03fcf914690c9a9e8fae548c4702f402d698f47..a3c949ea7d1a24bd8d04bf4ff75805359d838f0e 100644 (file)
@@ -8491,8 +8491,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                rtl8168_driver_start(tp);
        }
 
-       device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
-
        if (pci_dev_run_wake(pdev))
                pm_runtime_put_noidle(&pdev->dev);
 
index e0ef02f9503bae027268b1b058f9875b6c1365a7..4b286e27c4ca5cdbbb7c457e31bef1b2e9e7bd94 100644 (file)
@@ -275,7 +275,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
                                        goto exit;
                                i++;
 
-                       } while ((ret == 1) || (i < 10));
+                       } while ((ret == 1) && (i < 10));
 
                        if (i == 10)
                                ret = -EBUSY;
index 67af0bdd7f10f01ecd3adf87b59105cdd07ab40e..7516ca210855b49b30378fb0200e32949ca58e49 100644 (file)
@@ -34,7 +34,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
 
        err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
                                 !(value & DMA_BUS_MODE_SFT_RESET),
-                                100000, 10000);
+                                10000, 100000);
        if (err)
                return -EBUSY;
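
The dwmac_lib fix above is purely about argument order: in <linux/iopoll.h> the signature is readl_poll_timeout(addr, val, cond, sleep_us, timeout_us), so the old call slept 100ms between reads against a 10ms total budget. Corrected, it polls every 10ms with a 100ms timeout. Generic usage for reference (STATUS_REG and READY_BIT are placeholders):

	err = readl_poll_timeout(base + STATUS_REG, val, val & READY_BIT,
				 10000, 100000);	/* 10ms poll, 100ms timeout */
	if (err)
		return err;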
 
index 1763e48c84e2090678f4ffc1b55cf1d7a62382e4..16bd509290844b7854437ecb97507126b6153413 100644 (file)
@@ -473,19 +473,18 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                                   struct dma_desc *np, struct sk_buff *skb)
 {
        struct skb_shared_hwtstamps *shhwtstamp = NULL;
+       struct dma_desc *desc = p;
        u64 ns;
 
        if (!priv->hwts_rx_en)
                return;
+       /* For GMAC4, the valid timestamp is from CTX next desc. */
+       if (priv->plat->has_gmac4)
+               desc = np;
 
        /* Check if timestamp is available */
-       if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
-               /* For GMAC4, the valid timestamp is from CTX next desc. */
-               if (priv->plat->has_gmac4)
-                       ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
-               else
-                       ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
-
+       if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
+               ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
@@ -1800,12 +1799,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
 {
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        unsigned int bytes_compl = 0, pkts_compl = 0;
-       unsigned int entry = tx_q->dirty_tx;
+       unsigned int entry;
 
        netif_tx_lock(priv->dev);
 
        priv->xstats.tx_clean++;
 
+       entry = tx_q->dirty_tx;
        while (entry != tx_q->cur_tx) {
                struct sk_buff *skb = tx_q->tx_skbuff[entry];
                struct dma_desc *p;
@@ -3333,6 +3333,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                                 * them in stmmac_rx_refill() function so that
                                 * device can reuse it.
                                 */
+                               dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
                                rx_q->rx_skbuff[entry] = NULL;
                                dma_unmap_single(priv->device,
                                                 rx_q->rx_skbuff_dma[entry],
index f6404074b7b053e2af682c7e25ad36cec21ebe3b..ed51018a813e7ba6354d296e0d6c9fba3a1f76a1 100644 (file)
@@ -113,13 +113,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
 
 static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
 {
-#ifdef __BIG_ENDIAN
-       return (vni[0] == tun_id[2]) &&
-              (vni[1] == tun_id[1]) &&
-              (vni[2] == tun_id[0]);
-#else
        return !memcmp(vni, &tun_id[5], 3);
-#endif
 }
 
 static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
index 98e4deaa3a6a1c2f89d55e8f2db54b6fc93380be..5ab1b8849c30496966be05115f752bfcf3385950 100644 (file)
@@ -742,6 +742,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
        sg_init_table(sg, ret);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(ret < 0)) {
+               aead_request_free(req);
                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
                return ERR_PTR(ret);
@@ -954,6 +955,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
        sg_init_table(sg, ret);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(ret < 0)) {
+               aead_request_free(req);
                kfree_skb(skb);
                return ERR_PTR(ret);
        }
index 5ce580f413b93f01cdfb80ab8c3d90fec0ad897d..e21bf90b819f8f77e4886a6d1ff7c7b5ca12bad0 100644 (file)
@@ -2027,6 +2027,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
                if (!dev)
                        return -ENOMEM;
+               err = dev_get_valid_name(net, dev, name);
+               if (err)
+                       goto err_free_dev;
 
                dev_net_set(dev, net);
                dev->rtnl_link_ops = &tun_link_ops;
index c9c711dcd0e6bb9d7ce988f42bc7e6dd7877a37b..a89b5685e68b36d5735bc9591f97f621440cb424 100644 (file)
@@ -652,7 +652,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
        struct device *dev = i2400m_dev(i2400m);
        struct {
                struct i2400m_bootrom_header cmd;
-               u8 cmd_payload[chunk_len];
+               u8 cmd_payload[];
        } __packed *buf;
        struct i2400m_bootrom_header ack;
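
The i2400m change replaces a variable-length array inside an on-stack struct with a C99 flexible array member; the buffer then has to come from the heap with the payload size added explicitly. A sketch of the allocation that pairs with such a declaration (flags and error handling here are illustrative, not necessarily the driver's exact code):

	buf = kzalloc(sizeof(*buf) + chunk_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memcpy(buf->cmd_payload, chunk, chunk_len);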
 
index 4eb1e1ce9aceccc9214657fa6081ba5327c021ca..ef72baf6dd969c2478a2138ea57b9f691fdf12d8 100644 (file)
@@ -429,7 +429,8 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
        if (code != BRCMF_E_IF && !fweh->evt_handler[code])
                return;
 
-       if (datalen > BRCMF_DCMD_MAXLEN)
+       if (datalen > BRCMF_DCMD_MAXLEN ||
+           datalen + sizeof(*event_packet) > packet_len)
                return;
 
        if (in_interrupt())
index b3aab2fe96eb79f751d46c0f309f33eb6042d025..ef685465f80ad6f7ae84b0763ca8afd4d8f1ea73 100644 (file)
@@ -14764,8 +14764,8 @@ static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
 }
 
 static void
-wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys,
-                      u8 len)
+wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, const u8 *events,
+                      const u8 *dlys, u8 len)
 {
        u32 t1_offset, t2_offset;
        u8 ctr;
@@ -15240,16 +15240,16 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
 static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
 {
        u16 currband;
-       s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
-       s8 *lna1_gain_db = NULL;
-       s8 *lna1_gain_db_2 = NULL;
-       s8 *lna2_gain_db = NULL;
-       s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
-       s8 *tia_gain_db;
-       s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
-       s8 *tia_gainbits;
-       u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
-       u16 *rfseq_init_gain;
+       static const s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
+       const s8 *lna1_gain_db = NULL;
+       const s8 *lna1_gain_db_2 = NULL;
+       const s8 *lna2_gain_db = NULL;
+       static const s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
+       const s8 *tia_gain_db;
+       static const s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
+       const s8 *tia_gainbits;
+       static const u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
+       const u16 *rfseq_init_gain;
        u16 init_gaincode;
        u16 clip1hi_gaincode;
        u16 clip1md_gaincode = 0;
@@ -15310,10 +15310,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
 
                        if ((freq <= 5080) || (freq == 5825)) {
 
-                               s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
-                               s8 lna1A_gain_db_2_rev7[] = {
-                                       11, 17, 22, 25};
-                               s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+                               static const s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
+                               static const s8 lna1A_gain_db_2_rev7[] = { 11, 17, 22, 25};
+                               static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
 
                                crsminu_th = 0x3e;
                                lna1_gain_db = lna1A_gain_db_rev7;
@@ -15321,10 +15320,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
                                lna2_gain_db = lna2A_gain_db_rev7;
                        } else if ((freq >= 5500) && (freq <= 5700)) {
 
-                               s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
-                               s8 lna1A_gain_db_2_rev7[] = {
-                                       12, 18, 22, 26};
-                               s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
+                               static const s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
+                               static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+                               static const s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
 
                                crsminu_th = 0x45;
                                clip1md_gaincode_B = 0x14;
@@ -15335,10 +15333,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
                                lna2_gain_db = lna2A_gain_db_rev7;
                        } else {
 
-                               s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
-                               s8 lna1A_gain_db_2_rev7[] = {
-                                       12, 18, 22, 26};
-                               s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+                               static const s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
+                               static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+                               static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
 
                                crsminu_th = 0x41;
                                lna1_gain_db = lna1A_gain_db_rev7;
@@ -15450,65 +15447,65 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
                NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
                NPHY_RFSEQ_CMD_SET_HPF_BW
        };
-       u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
-       s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
-       s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
-       s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
-       s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
-       s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
-       s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
-       s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
-       s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
-       s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
-       s8 *lna1_gain_db = NULL;
-       s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
-       s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
-       s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
-       s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
-       s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
-       s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
-       s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
-       s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
-       s8 *lna2_gain_db = NULL;
-       s8 tiaG_gain_db[] = {
+       static const u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
+       static const s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
+       static const s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
+       static const s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
+       static const s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
+       static const s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
+       static const s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
+       static const s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
+       static const s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
+       static const s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
+       const s8 *lna1_gain_db = NULL;
+       static const s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
+       static const s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
+       static const s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
+       static const s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
+       static const s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
+       static const s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
+       static const s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
+       static const s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
+       const s8 *lna2_gain_db = NULL;
+       static const s8 tiaG_gain_db[] = {
                0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
-       s8 tiaA_gain_db[] = {
+       static const s8 tiaA_gain_db[] = {
                0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
-       s8 tiaA_gain_db_rev4[] = {
+       static const s8 tiaA_gain_db_rev4[] = {
                0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-       s8 tiaA_gain_db_rev5[] = {
+       static const s8 tiaA_gain_db_rev5[] = {
                0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-       s8 tiaA_gain_db_rev6[] = {
+       static const s8 tiaA_gain_db_rev6[] = {
                0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-       s8 *tia_gain_db;
-       s8 tiaG_gainbits[] = {
+       const s8 *tia_gain_db;
+       static const s8 tiaG_gainbits[] = {
                0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
-       s8 tiaA_gainbits[] = {
+       static const s8 tiaA_gainbits[] = {
                0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
-       s8 tiaA_gainbits_rev4[] = {
+       static const s8 tiaA_gainbits_rev4[] = {
                0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-       s8 tiaA_gainbits_rev5[] = {
+       static const s8 tiaA_gainbits_rev5[] = {
                0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-       s8 tiaA_gainbits_rev6[] = {
+       static const s8 tiaA_gainbits_rev6[] = {
                0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-       s8 *tia_gainbits;
-       s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
-       s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
-       u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
-       u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
-       u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
-       u16 rfseqG_init_gain_rev5_elna[] = {
+       const s8 *tia_gainbits;
+       static const s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
+       static const s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
+       static const u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
+       static const u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
+       static const u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
+       static const u16 rfseqG_init_gain_rev5_elna[] = {
                0x013f, 0x013f, 0x013f, 0x013f };
-       u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
-       u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
-       u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
-       u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
-       u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
-       u16 rfseqA_init_gain_rev4_elna[] = {
+       static const u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
+       static const u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
+       static const u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
+       static const u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
+       static const u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
+       static const u16 rfseqA_init_gain_rev4_elna[] = {
                0x314f, 0x314f, 0x314f, 0x314f };
-       u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
-       u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
-       u16 *rfseq_init_gain;
+       static const u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
+       static const u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
+       const u16 *rfseq_init_gain;
        u16 initG_gaincode = 0x627e;
        u16 initG_gaincode_rev4 = 0x527e;
        u16 initG_gaincode_rev5 = 0x427e;
@@ -15538,10 +15535,10 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
        u16 clip1mdA_gaincode_rev6 = 0x2084;
        u16 clip1md_gaincode = 0;
        u16 clip1loG_gaincode = 0x0074;
-       u16 clip1loG_gaincode_rev5[] = {
+       static const u16 clip1loG_gaincode_rev5[] = {
                0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
        };
-       u16 clip1loG_gaincode_rev6[] = {
+       static const u16 clip1loG_gaincode_rev6[] = {
                0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
        };
        u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
@@ -16066,7 +16063,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
 
 static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
 {
-       u8 rfseq_rx2tx_events[] = {
+       static const u8 rfseq_rx2tx_events[] = {
                NPHY_RFSEQ_CMD_NOP,
                NPHY_RFSEQ_CMD_RXG_FBW,
                NPHY_RFSEQ_CMD_TR_SWITCH,
@@ -16076,7 +16073,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
                NPHY_RFSEQ_CMD_EXT_PA
        };
        u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
-       u8 rfseq_tx2rx_events[] = {
+       static const u8 rfseq_tx2rx_events[] = {
                NPHY_RFSEQ_CMD_NOP,
                NPHY_RFSEQ_CMD_EXT_PA,
                NPHY_RFSEQ_CMD_TX_GAIN,
@@ -16085,8 +16082,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
                NPHY_RFSEQ_CMD_RXG_FBW,
                NPHY_RFSEQ_CMD_CLR_HIQ_DIS
        };
-       u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
-       u8 rfseq_tx2rx_events_rev3[] = {
+       static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
+       static const u8 rfseq_tx2rx_events_rev3[] = {
                NPHY_REV3_RFSEQ_CMD_EXT_PA,
                NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
                NPHY_REV3_RFSEQ_CMD_TX_GAIN,
@@ -16096,7 +16093,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
                NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
                NPHY_REV3_RFSEQ_CMD_END
        };
-       u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+       static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
        u8 rfseq_rx2tx_events_rev3[] = {
                NPHY_REV3_RFSEQ_CMD_NOP,
                NPHY_REV3_RFSEQ_CMD_RXG_FBW,
@@ -16110,7 +16107,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
        };
        u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
 
-       u8 rfseq_rx2tx_events_rev3_ipa[] = {
+       static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
                NPHY_REV3_RFSEQ_CMD_NOP,
                NPHY_REV3_RFSEQ_CMD_RXG_FBW,
                NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
@@ -16121,15 +16118,15 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
                NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
                NPHY_REV3_RFSEQ_CMD_END
        };
-       u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
-       u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
+       static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+       static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
 
        s16 alpha0, alpha1, alpha2;
        s16 beta0, beta1, beta2;
        u32 leg_data_weights, ht_data_weights, nss1_data_weights,
            stbc_data_weights;
        u8 chan_freq_range = 0;
-       u16 dac_control = 0x0002;
+       static const u16 dac_control = 0x0002;
        u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 };
        u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 };
        u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
@@ -16139,8 +16136,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
        u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
        u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
        u16 *aux_adc_gain;
-       u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
-       u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
+       static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
+       static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
        s32 min_nvar_val = 0x18d;
        s32 min_nvar_offset_6mbps = 20;
        u8 pdetrange;
@@ -16151,9 +16148,9 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
        u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77;
        u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77;
        u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77;
-       u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
-       u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
-       u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+       static const u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
+       static const u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+       static const u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
        u16 ipalvlshift_3p3_war_en = 0;
        u16 rccal_bcap_val, rccal_scap_val;
        u16 rccal_tx20_11b_bcap = 0;
@@ -24291,13 +24288,13 @@ static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
        u16 bbmult;
        u16 tblentry;
 
-       struct nphy_txiqcal_ladder ladder_lo[] = {
+       static const struct nphy_txiqcal_ladder ladder_lo[] = {
                {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
                {25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
                {25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
        };
 
-       struct nphy_txiqcal_ladder ladder_iq[] = {
+       static const struct nphy_txiqcal_ladder ladder_iq[] = {
                {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
                {25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
                {100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
@@ -25773,67 +25770,67 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
        u16 cal_gain[2];
        struct nphy_iqcal_params cal_params[2];
        u32 tbl_len;
-       void *tbl_ptr;
+       const void *tbl_ptr;
        bool ladder_updated[2];
        u8 mphase_cal_lastphase = 0;
        int bcmerror = 0;
        bool phyhang_avoid_state = false;
 
-       u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+       static const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
                0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901,
                0x1902,
                0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607,
                0x6407
        };
 
-       u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+       static const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
                0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400,
                0x3200,
                0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406,
                0x6407
        };
 
-       u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+       static const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
                0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201,
                0x1202,
                0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207,
                0x4707
        };
 
-       u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+       static const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
                0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900,
                0x2300,
                0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706,
                0x4707
        };
 
-       u16 tbl_tx_iqlo_cal_startcoefs[] = {
+       static const u16 tbl_tx_iqlo_cal_startcoefs[] = {
                0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
                0x0000
        };
 
-       u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+       static const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
                0x8123, 0x8264, 0x8086, 0x8245, 0x8056,
                0x9123, 0x9264, 0x9086, 0x9245, 0x9056
        };
 
-       u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+       static const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
                0x8101, 0x8253, 0x8053, 0x8234, 0x8034,
                0x9101, 0x9253, 0x9053, 0x9234, 0x9034
        };
 
-       u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
+       static const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
                0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
                0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
                0x0000
        };
 
-       u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+       static const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
                0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234,
                0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234
        };
 
-       u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+       static const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
                0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223,
                0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223
        };
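
The long brcmsmac hunk above is one mechanical pattern: per-revision gain and calibration tables that used to be rebuilt on the kernel stack for every call become static const, moving the data into .rodata and shrinking some very large stack frames; the selector variables become pointers-to-const to match. Reduced to a before/after sketch (use_table() is a placeholder):

	/* before: array re-initialized on the stack each call */
	static void f_before(void)
	{
		u16 tbl[] = { 0x613f, 0x613f, 0x613f, 0x613f };

		use_table(tbl);
	}

	/* after: one read-only copy in .rodata, only a pointer locally */
	static void f_after(void)
	{
		static const u16 tbl[] = { 0x613f, 0x613f, 0x613f, 0x613f };
		const u16 *p = tbl;	/* selector pointers become const too */

		use_table(p);
	}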
index 45e2efc70d19e5f44c7a5e2a1cde2cf9a7448f91..ce741beec1fcf7c779cc993c4f6b240151257028 100644 (file)
@@ -309,6 +309,7 @@ const struct iwl_cfg iwl3168_2ac_cfg = {
        .nvm_calib_ver = IWL3168_TX_POWER_VERSION,
        .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
        .dccm_len = IWL7265_DCCM_LEN,
+       .nvm_type = IWL_NVM_SDP,
 };
 
 const struct iwl_cfg iwl7265_2ac_cfg = {
index 2e6c52664ceedddfcba3ad1a20954328348d121b..c2a5936ccede26dbbd10512a21ef5da162e4b2d7 100644 (file)
@@ -164,7 +164,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
        .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,       \
        .thermal_params = &iwl8000_tt_params,                           \
        .apmg_not_supported = true,                                     \
-       .ext_nvm = true,                                                \
+       .nvm_type = IWL_NVM_EXT,                                        \
        .dbgc_supported = true
 
 #define IWL_DEVICE_8000                                                        \
index 2babe0a1f18bcfb675fa21e089dff3b97f873b50..e8b5ff42f5a8f3711d0d27bcf64790d895384368 100644 (file)
@@ -148,7 +148,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
        .vht_mu_mimo_supported = true,                                  \
        .mac_addr_from_csr = true,                                      \
        .rf_id = true,                                                  \
-       .ext_nvm = true,                                                \
+       .nvm_type = IWL_NVM_EXT,                                        \
        .dbgc_supported = true
 
 const struct iwl_cfg iwl9160_2ac_cfg = {
index 76ba1f8bc72feb51e96ac060fe8d5afaf1bab4f2..a440140ed8dda3d889629238af0d1aff4af725b4 100644 (file)
@@ -133,7 +133,7 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
        .use_tfh = true,                                                \
        .rf_id = true,                                                  \
        .gen2 = true,                                                   \
-       .ext_nvm = true,                                                \
+       .nvm_type = IWL_NVM_EXT,                                        \
        .dbgc_supported = true
 
 const struct iwl_cfg iwla000_2ac_cfg_hr = {
index 00bc7a25dece29bd80392383eef7186aa8b961c0..3fd07bc80f54dd77410f4911e991b9460ca7ebb0 100644 (file)
@@ -108,6 +108,7 @@ enum iwl_nvm_access_target {
  * @NVM_SECTION_TYPE_REGULATORY: regulatory section
  * @NVM_SECTION_TYPE_CALIBRATION: calibration section
  * @NVM_SECTION_TYPE_PRODUCTION: production section
+ * @NVM_SECTION_TYPE_REGULATORY_SDP: regulatory section used by 3168 series
  * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section
  * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section
  * @NVM_MAX_NUM_SECTIONS: number of sections
@@ -117,6 +118,7 @@ enum iwl_nvm_section_type {
        NVM_SECTION_TYPE_REGULATORY = 3,
        NVM_SECTION_TYPE_CALIBRATION = 4,
        NVM_SECTION_TYPE_PRODUCTION = 5,
+       NVM_SECTION_TYPE_REGULATORY_SDP = 8,
        NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
        NVM_SECTION_TYPE_PHY_SKU = 12,
        NVM_MAX_NUM_SECTIONS = 13,
index 6afc7a799892f424c64934f67b949d4c9beaa48d..f5dd7d83cd0a8eb8da5fc150ebb3d035c2c03a97 100644 (file)
@@ -1086,7 +1086,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
 
        if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
                /* stop recording */
-               iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+               iwl_fw_dbg_stop_recording(fwrt);
 
                iwl_fw_error_dump(fwrt);
 
@@ -1104,10 +1104,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
                u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
                u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);
 
-               /* stop recording */
-               iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
-               udelay(100);
-               iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
+               iwl_fw_dbg_stop_recording(fwrt);
                /* wait before we collect the data till the DBGC stop */
                udelay(500);
 
index 0f810ea89d31fa6394e80b90bac0dab38754aaae..9c889a32fe2424941d9bceb89b8cd1f593e4f3ab 100644 (file)
@@ -68,6 +68,8 @@
 #include <linux/workqueue.h>
 #include <net/cfg80211.h>
 #include "runtime.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
 #include "file.h"
 #include "error-dump.h"
 
@@ -194,8 +196,21 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
                                        iwl_fw_dbg_get_trigger((fwrt)->fw,\
                                                               (trig)))
 
+static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
+{
+       if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+       } else {
+               iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
+               udelay(100);
+               iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
+       }
+}
+
 static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
 {
+       iwl_fw_dbg_stop_recording(fwrt);
+
        fwrt->dump.conf = FW_DBG_INVALID;
 }
 
index 3e057b539d5b76dede3bf18de696003e041ace7a..71cb1ecde0f72b4b73d236d3028d5dde8bda4457 100644 (file)
@@ -108,6 +108,18 @@ enum iwl_led_mode {
        IWL_LED_DISABLE,
 };
 
+/**
+ * enum iwl_nvm_type - nvm formats
+ * @IWL_NVM: the regular format
+ * @IWL_NVM_EXT: extended NVM format
+ * @IWL_NVM_SDP: NVM format used by 3168 series
+ */
+enum iwl_nvm_type {
+       IWL_NVM,
+       IWL_NVM_EXT,
+       IWL_NVM_SDP,
+};
+
 /*
  * This is the threshold value of plcp error rate per 100mSecs.  It is
  * used to set and check for the validity of plcp_delta.
@@ -320,7 +332,7 @@ struct iwl_pwr_tx_backoff {
  * @integrated: discrete or integrated
  * @gen2: a000 and on transport operation
  * @cdb: CDB support
- * @ext_nvm: extended NVM format
+ * @nvm_type: see &enum iwl_nvm_type
  *
  * We enable the driver to be backward compatible wrt. hardware features.
  * API differences in uCode shouldn't be handled here but through TLVs
@@ -342,6 +354,7 @@ struct iwl_cfg {
        const struct iwl_tt_params *thermal_params;
        enum iwl_device_family device_family;
        enum iwl_led_mode led_mode;
+       enum iwl_nvm_type nvm_type;
        u32 max_data_size;
        u32 max_inst_size;
        netdev_features_t features;
@@ -369,7 +382,6 @@ struct iwl_cfg {
            use_tfh:1,
            gen2:1,
            cdb:1,
-           ext_nvm:1,
            dbgc_supported:1;
        u8 valid_tx_ant;
        u8 valid_rx_ant;
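
Across the iwlwifi hunks the ext_nvm:1 flag becomes the three-valued enum iwl_nvm_type, because the 3168 series needs a third format (IWL_NVM_SDP) that is neither the regular nor the extended layout; boolean tests turn into explicit comparisons. Reduced illustration (helper names are placeholders):

	/* old: if (!cfg->ext_nvm) ... else ... */
	if (cfg->nvm_type != IWL_NVM_EXT)
		parse_regular_or_sdp_nvm(cfg);	/* IWL_NVM and IWL_NVM_SDP */
	else
		parse_ext_nvm(cfg);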
index 3014beef48730dbd077c55b453e8e14c4bfb4c22..c3a5d8ccc95e942443dbf74e2ac6e21b2f09dfcb 100644 (file)
@@ -77,7 +77,7 @@
 #include "iwl-csr.h"
 
 /* NVM offsets (in words) definitions */
-enum wkp_nvm_offsets {
+enum nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        SUBSYSTEM_ID = 0x0A,
        HW_ADDR = 0x15,
@@ -92,7 +92,10 @@ enum wkp_nvm_offsets {
 
        /* NVM calibration section offset (in words) definitions */
        NVM_CALIB_SECTION = 0x2B8,
-       XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
+       XTAL_CALIB = 0x316 - NVM_CALIB_SECTION,
+
+       /* NVM REGULATORY -Section offset (in words) definitions */
+       NVM_CHANNELS_SDP = 0,
 };
 
 enum ext_nvm_offsets {
@@ -206,8 +209,36 @@ enum iwl_nvm_channel_flags {
        NVM_CHANNEL_DC_HIGH             = BIT(12),
 };
 
+static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
+                                              int chan, u16 flags)
+{
 #define CHECK_AND_PRINT_I(x)   \
-       ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
+       ((flags & NVM_CHANNEL_##x) ? " " #x : "")
+
+       if (!(flags & NVM_CHANNEL_VALID)) {
+               IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n",
+                             chan, flags);
+               return;
+       }
+
+       /* Note: already can print up to 101 characters, 110 is the limit! */
+       IWL_DEBUG_DEV(dev, level,
+                     "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+                     chan, flags,
+                     CHECK_AND_PRINT_I(VALID),
+                     CHECK_AND_PRINT_I(IBSS),
+                     CHECK_AND_PRINT_I(ACTIVE),
+                     CHECK_AND_PRINT_I(RADAR),
+                     CHECK_AND_PRINT_I(INDOOR_ONLY),
+                     CHECK_AND_PRINT_I(GO_CONCURRENT),
+                     CHECK_AND_PRINT_I(UNIFORM),
+                     CHECK_AND_PRINT_I(20MHZ),
+                     CHECK_AND_PRINT_I(40MHZ),
+                     CHECK_AND_PRINT_I(80MHZ),
+                     CHECK_AND_PRINT_I(160MHZ),
+                     CHECK_AND_PRINT_I(DC_HIGH));
+#undef CHECK_AND_PRINT_I
+}
 
 static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
                                 u16 nvm_flags, const struct iwl_cfg *cfg)
@@ -215,7 +246,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
        u32 flags = IEEE80211_CHAN_NO_HT40;
        u32 last_5ghz_ht = LAST_5GHZ_HT;
 
-       if (cfg->ext_nvm)
+       if (cfg->nvm_type == IWL_NVM_EXT)
                last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
 
        if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
@@ -268,7 +299,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
        int num_of_ch, num_2ghz_channels;
        const u8 *nvm_chan;
 
-       if (!cfg->ext_nvm) {
+       if (cfg->nvm_type != IWL_NVM_EXT) {
                num_of_ch = IWL_NUM_CHANNELS;
                nvm_chan = &iwl_nvm_channels[0];
                num_2ghz_channels = NUM_2GHZ_CHANNELS;
@@ -302,12 +333,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                         * supported, hence we still want to add them to
                         * the list of supported channels to cfg80211.
                         */
-                       IWL_DEBUG_EEPROM(dev,
-                                        "Ch. %d Flags %x [%sGHz] - No traffic\n",
-                                        nvm_chan[ch_idx],
-                                        ch_flags,
-                                        (ch_idx >= num_2ghz_channels) ?
-                                        "5.2" : "2.4");
+                       iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
+                                                   nvm_chan[ch_idx], ch_flags);
                        continue;
                }
 
@@ -337,27 +364,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                else
                        channel->flags = 0;
 
-               IWL_DEBUG_EEPROM(dev,
-                                "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n",
-                                channel->hw_value,
-                                is_5ghz ? "5.2" : "2.4",
-                                ch_flags,
-                                CHECK_AND_PRINT_I(VALID),
-                                CHECK_AND_PRINT_I(IBSS),
-                                CHECK_AND_PRINT_I(ACTIVE),
-                                CHECK_AND_PRINT_I(RADAR),
-                                CHECK_AND_PRINT_I(INDOOR_ONLY),
-                                CHECK_AND_PRINT_I(GO_CONCURRENT),
-                                CHECK_AND_PRINT_I(UNIFORM),
-                                CHECK_AND_PRINT_I(20MHZ),
-                                CHECK_AND_PRINT_I(40MHZ),
-                                CHECK_AND_PRINT_I(80MHZ),
-                                CHECK_AND_PRINT_I(160MHZ),
-                                CHECK_AND_PRINT_I(DC_HIGH),
-                                channel->max_power,
-                                ((ch_flags & NVM_CHANNEL_IBSS) &&
-                                 !(ch_flags & NVM_CHANNEL_RADAR))
-                                       ? "" : "not ");
+               iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
+                                           channel->hw_value, ch_flags);
+               IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
+                                channel->hw_value, channel->max_power);
        }
 
        return n_channels;
@@ -484,7 +494,7 @@ IWL_EXPORT_SYMBOL(iwl_init_sbands);
 static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
                       const __le16 *phy_sku)
 {
-       if (!cfg->ext_nvm)
+       if (cfg->nvm_type != IWL_NVM_EXT)
                return le16_to_cpup(nvm_sw + SKU);
 
        return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
@@ -492,7 +502,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
 
 static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
 {
-       if (!cfg->ext_nvm)
+       if (cfg->nvm_type != IWL_NVM_EXT)
                return le16_to_cpup(nvm_sw + NVM_VERSION);
        else
                return le32_to_cpup((__le32 *)(nvm_sw +
@@ -502,7 +512,7 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
 static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
                             const __le16 *phy_sku)
 {
-       if (!cfg->ext_nvm)
+       if (cfg->nvm_type != IWL_NVM_EXT)
                return le16_to_cpup(nvm_sw + RADIO_CFG);
 
        return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
@@ -513,7 +523,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
 {
        int n_hw_addr;
 
-       if (!cfg->ext_nvm)
+       if (cfg->nvm_type != IWL_NVM_EXT)
                return le16_to_cpup(nvm_sw + N_HW_ADDRS);
 
        n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
@@ -525,7 +535,7 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
                              struct iwl_nvm_data *data,
                              u32 radio_cfg)
 {
-       if (!cfg->ext_nvm) {
+       if (cfg->nvm_type != IWL_NVM_EXT) {
                data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
                data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
                data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
@@ -634,7 +644,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
 {
        if (cfg->mac_addr_from_csr) {
                iwl_set_hw_address_from_csr(trans, data);
-       } else if (!cfg->ext_nvm) {
+       } else if (cfg->nvm_type != IWL_NVM_EXT) {
                const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
 
                /* The byte order is little endian 16 bit, meaning 214365 */
@@ -706,7 +716,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        u16 lar_config;
        const __le16 *ch_section;
 
-       if (!cfg->ext_nvm)
+       if (cfg->nvm_type != IWL_NVM_EXT)
                data = kzalloc(sizeof(*data) +
                               sizeof(struct ieee80211_channel) *
                               IWL_NUM_CHANNELS,
@@ -740,7 +750,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
 
-       if (!cfg->ext_nvm) {
+       if (cfg->nvm_type != IWL_NVM_EXT) {
                /* Checking for required sections */
                if (!nvm_calib) {
                        IWL_ERR(trans,
@@ -748,11 +758,15 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                        kfree(data);
                        return NULL;
                }
+
+               ch_section = cfg->nvm_type == IWL_NVM_SDP ?
+                            &regulatory[NVM_CHANNELS_SDP] :
+                            &nvm_sw[NVM_CHANNELS];
+
                /* in family 8000 Xtal calibration values moved to OTP */
                data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
                data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
                lar_enabled = true;
-               ch_section = &nvm_sw[NVM_CHANNELS];
        } else {
                u16 lar_offset = data->nvm_version < 0xE39 ?
                                 NVM_LAR_OFFSET_OLD :
@@ -786,7 +800,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
        u32 flags = NL80211_RRF_NO_HT40;
        u32 last_5ghz_ht = LAST_5GHZ_HT;
 
-       if (cfg->ext_nvm)
+       if (cfg->nvm_type == IWL_NVM_EXT)
                last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
 
        if (ch_idx < NUM_2GHZ_CHANNELS &&
@@ -834,7 +848,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        int ch_idx;
        u16 ch_flags;
        u32 reg_rule_flags, prev_reg_rule_flags = 0;
-       const u8 *nvm_chan = cfg->ext_nvm ?
+       const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
                             iwl_ext_nvm_channels : iwl_nvm_channels;
        struct ieee80211_regdomain *regd;
        int size_of_regd;
@@ -843,7 +857,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        int center_freq, prev_center_freq = 0;
        int valid_rules = 0;
        bool new_rule;
-       int max_num_ch = cfg->ext_nvm ?
+       int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
                         IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS;
 
        if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
@@ -873,12 +887,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                new_rule = false;
 
                if (!(ch_flags & NVM_CHANNEL_VALID)) {
-                       IWL_DEBUG_DEV(dev, IWL_DL_LAR,
-                                     "Ch. %d Flags %x [%sGHz] - No traffic\n",
-                                     nvm_chan[ch_idx],
-                                     ch_flags,
-                                     (ch_idx >= NUM_2GHZ_CHANNELS) ?
-                                     "5.2" : "2.4");
+                       iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
+                                                   nvm_chan[ch_idx], ch_flags);
                        continue;
                }
 
@@ -914,31 +924,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                prev_center_freq = center_freq;
                prev_reg_rule_flags = reg_rule_flags;
 
-               IWL_DEBUG_DEV(dev, IWL_DL_LAR,
-                             "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s%s%s%s(0x%02x)\n",
-                             center_freq,
-                             band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
-                             CHECK_AND_PRINT_I(VALID),
-                             CHECK_AND_PRINT_I(IBSS),
-                             CHECK_AND_PRINT_I(ACTIVE),
-                             CHECK_AND_PRINT_I(RADAR),
-                             CHECK_AND_PRINT_I(INDOOR_ONLY),
-                             CHECK_AND_PRINT_I(GO_CONCURRENT),
-                             CHECK_AND_PRINT_I(UNIFORM),
-                             CHECK_AND_PRINT_I(20MHZ),
-                             CHECK_AND_PRINT_I(40MHZ),
-                             CHECK_AND_PRINT_I(80MHZ),
-                             CHECK_AND_PRINT_I(160MHZ),
-                             CHECK_AND_PRINT_I(DC_HIGH),
-                             ch_flags);
-               IWL_DEBUG_DEV(dev, IWL_DL_LAR,
-                             "Ch. %d [%sGHz] reg_flags 0x%x: %s\n",
-                             center_freq,
-                             band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
-                             reg_rule_flags,
-                             ((ch_flags & NVM_CHANNEL_ACTIVE) &&
-                              !(ch_flags & NVM_CHANNEL_RADAR))
-                                        ? "Ad-Hoc" : "");
+               iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
+                                           nvm_chan[ch_idx], ch_flags);
        }
 
        regd->n_reg_rules = valid_rules;
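
Note: the hunks above fold three near-identical multi-line debug statements into a single iwl_nvm_print_channel_flags() helper; the helper body itself is not part of this excerpt. The stand-alone sketch below only illustrates the underlying check-and-print pattern, with made-up flag names and a hypothetical print_channel_flags() function, not the driver's actual code.

#include <stdio.h>

#define CH_VALID	(1 << 0)
#define CH_IBSS		(1 << 1)
#define CH_ACTIVE	(1 << 3)
#define CH_RADAR	(1 << 4)

/* expands to " VALID" when the bit is set, "" otherwise */
#define CHECK_AND_PRINT(flags, f)	(((flags) & CH_##f) ? " " #f : "")

static void print_channel_flags(int channel, unsigned int flags)
{
	printf("Ch. %d flags 0x%x:%s%s%s%s\n", channel, flags,
	       CHECK_AND_PRINT(flags, VALID),
	       CHECK_AND_PRINT(flags, IBSS),
	       CHECK_AND_PRINT(flags, ACTIVE),
	       CHECK_AND_PRINT(flags, RADAR));
}

int main(void)
{
	print_channel_flags(36, CH_VALID | CH_RADAR);	/* prints "Ch. 36 flags 0x11: VALID RADAR" */
	return 0;
}
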
index 3bcaa82f59b2d2f91a8c8f9159e056642475c577..a9ac872226fdf79d87d6bc8b9643d9d6d86cd793 100644 (file)
@@ -1077,6 +1077,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
        mvm->vif_count = 0;
        mvm->rx_ba_sessions = 0;
        mvm->fwrt.dump.conf = FW_DBG_INVALID;
+       mvm->monitor_on = false;
 
        /* keep statistics ticking */
        iwl_mvm_accu_radio_stats(mvm);
@@ -1437,6 +1438,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
                mvm->p2p_device_vif = vif;
        }
 
+       if (vif->type == NL80211_IFTYPE_MONITOR)
+               mvm->monitor_on = true;
+
        iwl_mvm_vif_dbgfs_register(mvm, vif);
        goto out_unlock;
 
@@ -1526,6 +1530,9 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
        iwl_mvm_power_update_mac(mvm);
        iwl_mvm_mac_ctxt_remove(mvm, vif);
 
+       if (vif->type == NL80211_IFTYPE_MONITOR)
+               mvm->monitor_on = false;
+
 out_release:
        mutex_unlock(&mvm->mutex);
 }
index 83303bac0e4babf6b1d3d41d1f0fc167b80c871e..949e6341829908a8e8cc684ac616121dfd267428 100644 (file)
@@ -1015,6 +1015,9 @@ struct iwl_mvm {
        bool drop_bcn_ap_mode;
 
        struct delayed_work cs_tx_unblock_dwork;
+
+       /* does a monitor vif exist (only one can exist hence bool) */
+       bool monitor_on;
 #ifdef CONFIG_ACPI
        struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM];
        struct iwl_mvm_geo_profile geo_profiles[IWL_NUM_GEO_PROFILES];
@@ -1159,7 +1162,7 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
         * Enable LAR only if it is supported by the FW (TLV) &&
         * enabled in the NVM
         */
-       if (mvm->cfg->ext_nvm)
+       if (mvm->cfg->nvm_type == IWL_NVM_EXT)
                return nvm_lar && tlv_lar;
        else
                return tlv_lar;
index 422aa6be99328b2f7dfe66db69c724bbe3824291..fb25b6f29323888b46789374077b4d4c1895ba05 100644 (file)
@@ -295,18 +295,24 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        const __be16 *hw;
        const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
        bool lar_enabled;
+       int regulatory_type;
 
        /* Checking for required sections */
-       if (!mvm->trans->cfg->ext_nvm) {
+       if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
                if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
                    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
                        IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
                        return NULL;
                }
        } else {
+               if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP)
+                       regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP;
+               else
+                       regulatory_type = NVM_SECTION_TYPE_REGULATORY;
+
                /* SW and REGULATORY sections are mandatory */
                if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
-                   !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
+                   !mvm->nvm_sections[regulatory_type].data) {
                        IWL_ERR(mvm,
                                "Can't parse empty family 8000 OTP/NVM sections\n");
                        return NULL;
@@ -330,11 +336,14 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data;
        sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
        calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
-       regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
        mac_override =
                (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
        phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
 
+       regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ?
+               (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
+               (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
+
        lar_enabled = !iwlwifi_mod_params.lar_disable &&
                      fw_has_capa(&mvm->fw->ucode_capa,
                                  IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
@@ -394,7 +403,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
        IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
 
        /* Maximal size depends on NVM version */
-       if (!mvm->trans->cfg->ext_nvm)
+       if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT)
                max_section_size = IWL_MAX_NVM_SECTION_SIZE;
        else
                max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
@@ -465,7 +474,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
                        break;
                }
 
-               if (!mvm->trans->cfg->ext_nvm) {
+               if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
                        section_size =
                                2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
                        section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
@@ -740,7 +749,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
        struct ieee80211_regdomain *regd;
        char mcc[3];
 
-       if (mvm->cfg->ext_nvm) {
+       if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
                tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
                                      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
                nvm_lar = mvm->nvm_data->lar_enabled;
index 184c749766f29455bc57bf71ecb84b905a6354a7..2d14a58cbdd7e1cc2e7eaeb20eebc7242e0e5993 100644 (file)
@@ -244,7 +244,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
                return 0;
 
        default:
-               IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
+               /* Expected in monitor mode, where we do not have the keys */
+               if (!mvm->monitor_on)
+                       IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
        }
 
        return 0;
index 77f77bc5d0834776947ac2fb5b1b6a13a44796f5..248699c2c4bff0981a680eb4e161ffd5da261795 100644 (file)
@@ -277,7 +277,9 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                stats->flag |= RX_FLAG_DECRYPTED;
                return 0;
        default:
-               IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
+               /* Expected in monitor mode, where we do not have the keys */
+               if (!mvm->monitor_on)
+                       IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
        }
 
        return 0;
index 4d907f60bce9f22ba0acf6169023aacd79f9bb75..1232f63278eb64167263e55255cb95b54c27258d 100644 (file)
@@ -631,7 +631,7 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
 
        if (!iwl_mvm_firmware_running(mvm) ||
            mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
-               ret = -EIO;
+               ret = -ENODATA;
                goto out;
        }
 
index 4f73012978e945869c6310d59996a2f7fc02b511..1d431d4bf6d26197516941d3de54ea1fffc9218b 100644 (file)
@@ -1122,7 +1122,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
        }
        if (0 == tmp) {
                read_addr = REG_DBI_RDATA + addr % 4;
-               ret = rtl_read_byte(rtlpriv, read_addr);
+               ret = rtl_read_word(rtlpriv, read_addr);
        }
        return ret;
 }
index ee8ed9da00ade809a70ac80800f9006edd8f535c..4491ca5aee906c1b29342bc12c692e175d857505 100644 (file)
@@ -486,7 +486,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 
        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
 
-       dev->min_mtu = 0;
+       dev->min_mtu = ETH_MIN_MTU;
        dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
 
        /*
index 523387e71a8021b81beb46407132def2638fdb48..8b8689c6d8877863dddad9f9c10215d311f5fa6f 100644 (file)
@@ -1316,7 +1316,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        netdev->features |= netdev->hw_features;
 
        netdev->ethtool_ops = &xennet_ethtool_ops;
-       netdev->min_mtu = 0;
+       netdev->min_mtu = ETH_MIN_MTU;
        netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
        SET_NETDEV_DEV(netdev, &dev->dev);
 
index d94dd8b77abd5140d52c6dfbdff249409225cbec..98258583abb0b40529056767c91401296e0013d4 100644 (file)
@@ -44,7 +44,7 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id)
        return -EINVAL;
 }
 
-static void of_mdiobus_register_phy(struct mii_bus *mdio,
+static int of_mdiobus_register_phy(struct mii_bus *mdio,
                                    struct device_node *child, u32 addr)
 {
        struct phy_device *phy;
@@ -60,9 +60,13 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
        else
                phy = get_phy_device(mdio, addr, is_c45);
        if (IS_ERR(phy))
-               return;
+               return PTR_ERR(phy);
 
-       rc = irq_of_parse_and_map(child, 0);
+       rc = of_irq_get(child, 0);
+       if (rc == -EPROBE_DEFER) {
+               phy_device_free(phy);
+               return rc;
+       }
        if (rc > 0) {
                phy->irq = rc;
                mdio->irq[addr] = rc;
@@ -84,22 +88,23 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
        if (rc) {
                phy_device_free(phy);
                of_node_put(child);
-               return;
+               return rc;
        }
 
        dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
                child->name, addr);
+       return 0;
 }
 
-static void of_mdiobus_register_device(struct mii_bus *mdio,
-                                      struct device_node *child, u32 addr)
+static int of_mdiobus_register_device(struct mii_bus *mdio,
+                                     struct device_node *child, u32 addr)
 {
        struct mdio_device *mdiodev;
        int rc;
 
        mdiodev = mdio_device_create(mdio, addr);
        if (IS_ERR(mdiodev))
-               return;
+               return PTR_ERR(mdiodev);
 
        /* Associate the OF node with the device structure so it
         * can be looked up later.
@@ -112,11 +117,12 @@ static void of_mdiobus_register_device(struct mii_bus *mdio,
        if (rc) {
                mdio_device_free(mdiodev);
                of_node_put(child);
-               return;
+               return rc;
        }
 
        dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
                child->name, addr);
+       return 0;
 }
 
 /* The following is a list of PHY compatible strings which appear in
@@ -219,9 +225,11 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
                }
 
                if (of_mdiobus_child_is_phy(child))
-                       of_mdiobus_register_phy(mdio, child, addr);
+                       rc = of_mdiobus_register_phy(mdio, child, addr);
                else
-                       of_mdiobus_register_device(mdio, child, addr);
+                       rc = of_mdiobus_register_device(mdio, child, addr);
+               if (rc)
+                       goto unregister;
        }
 
        if (!scanphys)
@@ -242,12 +250,19 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
                        dev_info(&mdio->dev, "scan phy %s at address %i\n",
                                 child->name, addr);
 
-                       if (of_mdiobus_child_is_phy(child))
-                               of_mdiobus_register_phy(mdio, child, addr);
+                       if (of_mdiobus_child_is_phy(child)) {
+                               rc = of_mdiobus_register_phy(mdio, child, addr);
+                               if (rc)
+                                       goto unregister;
+                       }
                }
        }
 
        return 0;
+
+unregister:
+       mdiobus_unregister(mdio);
+       return rc;
 }
 EXPORT_SYMBOL(of_mdiobus_register);
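
Note: the of_mdio change turns two void helpers into int-returning ones so a failed child registration (including -EPROBE_DEFER from of_irq_get()) unwinds the whole bus instead of being silently dropped. A minimal stand-alone sketch of that propagate-and-unwind shape, with invented names and a faked failure:

#include <stdio.h>

#define EPROBE_DEFER 517	/* illustrative; stands in for the kernel's errno value */

static int register_child(int addr)
{
	/* pretend the child at address 2 is not ready yet */
	return (addr == 2) ? -EPROBE_DEFER : 0;
}

static void unregister_bus(void)
{
	printf("tearing the bus back down\n");
}

static int register_bus(int nchildren)
{
	int addr, rc;

	for (addr = 0; addr < nchildren; addr++) {
		rc = register_child(addr);
		if (rc)
			goto unregister;	/* previously the error was ignored */
	}
	return 0;

unregister:
	unregister_bus();
	return rc;
}

int main(void)
{
	printf("register_bus() = %d\n", register_bus(4));
	return 0;
}
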
 
index c60904ff40b830358a966b5121a9876a90d5ebe0..3907bbc9c6cf7eafd4210358c923423a8fd23a9c 100644 (file)
@@ -40,8 +40,9 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
        struct socfpga_reset_data *data = container_of(rcdev,
                                                     struct socfpga_reset_data,
                                                     rcdev);
-       int bank = id / BITS_PER_LONG;
-       int offset = id % BITS_PER_LONG;
+       int reg_width = sizeof(u32);
+       int bank = id / (reg_width * BITS_PER_BYTE);
+       int offset = id % (reg_width * BITS_PER_BYTE);
        unsigned long flags;
        u32 reg;
 
@@ -61,8 +62,9 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
                                                     struct socfpga_reset_data,
                                                     rcdev);
 
-       int bank = id / BITS_PER_LONG;
-       int offset = id % BITS_PER_LONG;
+       int reg_width = sizeof(u32);
+       int bank = id / (reg_width * BITS_PER_BYTE);
+       int offset = id % (reg_width * BITS_PER_BYTE);
        unsigned long flags;
        u32 reg;
 
@@ -81,8 +83,9 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev,
 {
        struct socfpga_reset_data *data = container_of(rcdev,
                                                struct socfpga_reset_data, rcdev);
-       int bank = id / BITS_PER_LONG;
-       int offset = id % BITS_PER_LONG;
+       int reg_width = sizeof(u32);
+       int bank = id / (reg_width * BITS_PER_BYTE);
+       int offset = id % (reg_width * BITS_PER_BYTE);
        u32 reg;
 
        reg = readl(data->membase + (bank * BANK_INCREMENT));
@@ -132,7 +135,7 @@ static int socfpga_reset_probe(struct platform_device *pdev)
        spin_lock_init(&data->lock);
 
        data->rcdev.owner = THIS_MODULE;
-       data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG;
+       data->rcdev.nr_resets = NR_BANKS * (sizeof(u32) * BITS_PER_BYTE);
        data->rcdev.ops = &socfpga_reset_ops;
        data->rcdev.of_node = pdev->dev.of_node;
 
index 520325867e2b4c05528bd89a7eeaccea2f5c6f94..31d31aad3de1d3fd0f2ff58d2141cabddec474bf 100644 (file)
@@ -383,11 +383,11 @@ static void fc_rport_work(struct work_struct *work)
                                fc_rport_enter_flogi(rdata);
                                mutex_unlock(&rdata->rp_mutex);
                        } else {
+                               mutex_unlock(&rdata->rp_mutex);
                                FC_RPORT_DBG(rdata, "work delete\n");
                                mutex_lock(&lport->disc.disc_mutex);
                                list_del_rcu(&rdata->peers);
                                mutex_unlock(&lport->disc.disc_mutex);
-                               mutex_unlock(&rdata->rp_mutex);
                                kref_put(&rdata->kref, fc_rport_destroy);
                        }
                } else {
index c62e8d111fd9721e72374d1ed17db283f92564b6..f8dc1601efd5f1eb51b4d776087d6ea20534d09e 100644 (file)
@@ -1728,7 +1728,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 
        if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
                reason = FAILURE_SESSION_IN_RECOVERY;
-               sc->result = DID_REQUEUE;
+               sc->result = DID_REQUEUE << 16;
                goto fault;
        }
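
Note: the libiscsi one-liner shifts DID_REQUEUE into the host-byte field of the SCSI result word; unshifted, the value would land in the low status-byte field and the requeue request would be lost. A tiny illustration of the packing (the DID_REQUEUE value below is illustrative, not taken from this diff):

#include <stdio.h>

#define DID_REQUEUE 0x0d	/* illustrative value for the sketch */

int main(void)
{
	unsigned int result = DID_REQUEUE << 16;	/* host byte lives in bits 16..23 */

	printf("result    = 0x%08x\n", result);
	printf("host byte = 0x%02x\n", (result >> 16) & 0xff);
	return 0;
}
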
 
index 5b2437a5ea440a5d3a6836115bcf444595d8f7ef..937209805bafc9052a71aa20562188a2942c148d 100644 (file)
@@ -3175,6 +3175,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            host->can_queue, base_vha->req,
            base_vha->mgmt_svr_loop_id, host->sg_tablesize);
 
+       INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
+
        if (ha->mqenable) {
                bool mq = false;
                bool startit = false;
@@ -3223,7 +3225,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
         */
        qla2xxx_wake_dpc(base_vha);
 
-       INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
        INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
 
        if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
index bf53356f41f0639e7c0e70393d91337a2bed2880..f796bd61f3f06505069ad4e21fed202003f9df1c 100644 (file)
@@ -1376,13 +1376,19 @@ static void __scsi_remove_target(struct scsi_target *starget)
        spin_lock_irqsave(shost->host_lock, flags);
  restart:
        list_for_each_entry(sdev, &shost->__devices, siblings) {
+               /*
+                * We cannot call scsi_device_get() here, as
+                * we might've been called from rmmod() causing
+                * scsi_device_get() to fail the module_is_live()
+                * check.
+                */
                if (sdev->channel != starget->channel ||
                    sdev->id != starget->id ||
-                   scsi_device_get(sdev))
+                   !get_device(&sdev->sdev_gendev))
                        continue;
                spin_unlock_irqrestore(shost->host_lock, flags);
                scsi_remove_device(sdev);
-               scsi_device_put(sdev);
+               put_device(&sdev->sdev_gendev);
                spin_lock_irqsave(shost->host_lock, flags);
                goto restart;
        }
index cbd4495d0ff9dedb92852d029845418e1a173072..8c46a6d536af26a9e083bcf0a614a3eedd5374e6 100644 (file)
@@ -3320,6 +3320,9 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
 {
        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
 
+       if (WARN_ON_ONCE(!rport))
+               return FAST_IO_FAIL;
+
        return fc_block_rport(rport);
 }
 EXPORT_SYMBOL(fc_block_scsi_eh);
index d96f4512224ffb11fd13900422d6da1498ab8697..b55e5ebba8b4194df4392e259062e9b483be491d 100644 (file)
@@ -400,10 +400,10 @@ static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
                                        struct media_link, list);
                ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
                if (ret)
-                       break;
+                       return ret;
        }
 
-       return ret;
+       return 0;
 }
 
 /* async subdev complete notifier */
index 35a128acfbd157b302386f9890d2a1de160c7be7..161694b660385a3ecedd758c964c1690d3e39386 100644 (file)
@@ -1135,7 +1135,7 @@ static int btrfs_fill_super(struct super_block *sb,
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
        sb->s_flags |= MS_POSIXACL;
 #endif
-       sb->s_flags |= MS_I_VERSION;
+       sb->s_flags |= SB_I_VERSION;
        sb->s_iflags |= SB_I_CGROUPWB;
 
        err = super_setup_bdi(sb);
index 018c588c7ac3b7ac8fd78b4092332f771c6411c0..8e704d12a1cf2781087ec14d6ef6561ea9dd3236 100644 (file)
@@ -109,6 +109,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
                goto out;
        }
        ukp = user_key_payload_locked(keyring_key);
+       if (!ukp) {
+               /* key was revoked before we acquired its semaphore */
+               res = -EKEYREVOKED;
+               goto out;
+       }
        if (ukp->datalen != sizeof(struct fscrypt_key)) {
                res = -EINVAL;
                goto out;
index 96415c65bbdc102f6a5dff696231e944d4fdccdb..b53e66d9abd7030f6b05a6dac4847928c24bf1a0 100644 (file)
  */
 #define DIO_PAGES      64
 
+/*
+ * Flags for dio_complete()
+ */
+#define DIO_COMPLETE_ASYNC             0x01    /* This is async IO */
+#define DIO_COMPLETE_INVALIDATE                0x02    /* Can invalidate pages */
+
 /*
  * This code generally works in units of "dio_blocks".  A dio_block is
  * somewhere between the hard sector size and the filesystem block size.  it
@@ -225,7 +231,7 @@ static inline struct page *dio_get_page(struct dio *dio,
  * filesystems can use it to hold additional state between get_block calls and
  * dio_complete.
  */
-static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
+static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
 {
        loff_t offset = dio->iocb->ki_pos;
        ssize_t transferred = 0;
@@ -259,14 +265,27 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
        if (ret == 0)
                ret = transferred;
 
+       if (dio->end_io) {
+               // XXX: ki_pos??
+               err = dio->end_io(dio->iocb, offset, ret, dio->private);
+               if (err)
+                       ret = err;
+       }
+
        /*
         * Try again to invalidate clean pages which might have been cached by
         * non-direct readahead, or faulted in by get_user_pages() if the source
         * of the write was an mmap'ed region of the file we're writing.  Either
         * one is a pretty crazy thing to do, so we don't support it 100%.  If
         * this invalidation fails, tough, the write still worked...
+        *
+        * And this page cache invalidation has to be after dio->end_io(), as
+        * some filesystems convert unwritten extents to real allocations in
+        * end_io() when necessary, otherwise a racing buffer read would cache
+        * zeros from unwritten extents.
         */
-       if (ret > 0 && dio->op == REQ_OP_WRITE &&
+       if (flags & DIO_COMPLETE_INVALIDATE &&
+           ret > 0 && dio->op == REQ_OP_WRITE &&
            dio->inode->i_mapping->nrpages) {
                err = invalidate_inode_pages2_range(dio->inode->i_mapping,
                                        offset >> PAGE_SHIFT,
@@ -274,18 +293,10 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
                WARN_ON_ONCE(err);
        }
 
-       if (dio->end_io) {
-
-               // XXX: ki_pos??
-               err = dio->end_io(dio->iocb, offset, ret, dio->private);
-               if (err)
-                       ret = err;
-       }
-
        if (!(dio->flags & DIO_SKIP_DIO_COUNT))
                inode_dio_end(dio->inode);
 
-       if (is_async) {
+       if (flags & DIO_COMPLETE_ASYNC) {
                /*
                 * generic_write_sync expects ki_pos to have been updated
                 * already, but the submission path only does this for
@@ -306,7 +317,7 @@ static void dio_aio_complete_work(struct work_struct *work)
 {
        struct dio *dio = container_of(work, struct dio, complete_work);
 
-       dio_complete(dio, 0, true);
+       dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
 }
 
 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
@@ -348,7 +359,7 @@ static void dio_bio_end_aio(struct bio *bio)
                        queue_work(dio->inode->i_sb->s_dio_done_wq,
                                   &dio->complete_work);
                } else {
-                       dio_complete(dio, 0, true);
+                       dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
                }
        }
 }
@@ -1360,7 +1371,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
                dio_await_completion(dio);
 
        if (drop_refcount(dio) == 0) {
-               retval = dio_complete(dio, retval, false);
+               retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
        } else
                BUG_ON(retval != -EIOCBQUEUED);
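
Note: the direct-io rework does two things: it replaces the bool is_async argument with a flags word (DIO_COMPLETE_ASYNC / DIO_COMPLETE_INVALIDATE), and it moves the page-cache invalidation after ->end_io(), because some filesystems only convert unwritten extents to real allocations in end_io() and a racing buffered read could otherwise cache zeroes. A stripped-down sketch of the resulting control flow, with stubs standing in for the real helpers:

#include <stdio.h>

#define DIO_COMPLETE_ASYNC      0x01	/* completion runs from a worker/interrupt path */
#define DIO_COMPLETE_INVALIDATE 0x02	/* safe to drop clean cached pages */

static void dio_complete_sketch(unsigned int flags)
{
	/* 1. give the filesystem its callback first, so unwritten extents
	 *    are converted before anyone re-reads the range */
	printf("end_io()\n");

	/* 2. only then invalidate, and only when the caller allows it */
	if (flags & DIO_COMPLETE_INVALIDATE)
		printf("invalidate_inode_pages2_range()\n");

	/* 3. async completions still have to finish the aio */
	if (flags & DIO_COMPLETE_ASYNC)
		printf("complete the iocb / wake the waiter\n");
}

int main(void)
{
	dio_complete_sketch(DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
	return 0;
}
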
 
index 9c351bf757b20e037f39aeadf0fa0ed12f963db6..3fbc0ff79699dde5cca295262591b2cb5aafcced 100644 (file)
@@ -84,11 +84,16 @@ struct ecryptfs_page_crypt_context {
 static inline struct ecryptfs_auth_tok *
 ecryptfs_get_encrypted_key_payload_data(struct key *key)
 {
-       if (key->type == &key_type_encrypted)
-               return (struct ecryptfs_auth_tok *)
-                       (&((struct encrypted_key_payload *)key->payload.data[0])->payload_data);
-       else
+       struct encrypted_key_payload *payload;
+
+       if (key->type != &key_type_encrypted)
                return NULL;
+
+       payload = key->payload.data[0];
+       if (!payload)
+               return ERR_PTR(-EKEYREVOKED);
+
+       return (struct ecryptfs_auth_tok *)payload->payload_data;
 }
 
 static inline struct key *ecryptfs_get_encrypted_key(char *sig)
@@ -114,12 +119,17 @@ static inline struct ecryptfs_auth_tok *
 ecryptfs_get_key_payload_data(struct key *key)
 {
        struct ecryptfs_auth_tok *auth_tok;
+       struct user_key_payload *ukp;
 
        auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
-       if (!auth_tok)
-               return (struct ecryptfs_auth_tok *)user_key_payload_locked(key)->data;
-       else
+       if (auth_tok)
                return auth_tok;
+
+       ukp = user_key_payload_locked(key);
+       if (!ukp)
+               return ERR_PTR(-EKEYREVOKED);
+
+       return (struct ecryptfs_auth_tok *)ukp->data;
 }
 
 #define ECRYPTFS_MAX_KEYSET_SIZE 1024
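
Note: the fscrypt, fscache and eCryptfs hunks in this series all close the same hole: a key can be revoked between the permission check and the payload dereference, in which case user_key_payload_locked()/_rcu() returns NULL and the old code dereferenced it. A self-contained sketch of the pattern, with simplified types and an illustrative EKEYREVOKED value:

#include <stdio.h>

#define EKEYREVOKED 128		/* illustrative; stands in for the kernel errno */

struct payload { const char *data; };

/* NULL models "key revoked before we took the key semaphore" */
static struct payload *lookup_payload(int revoked)
{
	static struct payload p = { .data = "secret" };
	return revoked ? NULL : &p;
}

static int get_key_data(int revoked, const char **out)
{
	struct payload *p = lookup_payload(revoked);

	if (!p)
		return -EKEYREVOKED;	/* bail out instead of dereferencing NULL */
	*out = p->data;
	return 0;
}

int main(void)
{
	const char *data = NULL;
	int err = get_key_data(1, &data);

	printf("err=%d data=%s\n", err, data ? data : "(none)");
	return 0;
}
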
index 3cf1546dca8257677572db866f86e3f2190a918e..fa218cd64f746d2c924a786edc1c6052911c1732 100644 (file)
@@ -459,7 +459,8 @@ out:
  * @auth_tok_key: key containing the authentication token
  * @auth_tok: authentication token
  *
- * Returns zero on valid auth tok; -EINVAL otherwise
+ * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
+ * -EKEYREVOKED if the key was revoked before we acquired its semaphore.
  */
 static int
 ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
@@ -468,6 +469,12 @@ ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
        int rc = 0;
 
        (*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
+       if (IS_ERR(*auth_tok)) {
+               rc = PTR_ERR(*auth_tok);
+               *auth_tok = NULL;
+               goto out;
+       }
+
        if (ecryptfs_verify_version((*auth_tok)->version)) {
                printk(KERN_ERR "Data structure version mismatch. Userspace "
                       "tools must match eCryptfs kernel module with major "
index 5470d3c1892af7101dbb4c13d085d4000c558746..3e14ba25f678bf8869e005a34dc9742e117e90df 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1802,6 +1802,7 @@ static int do_execveat_common(int fd, struct filename *filename,
        /* execve succeeded */
        current->fs->in_exec = 0;
        current->in_execve = 0;
+       membarrier_execve(current);
        acct_update_integrals(current);
        task_numa_free(current);
        free_bprm(bprm);
index b104096fce9eeec4e6079d3d1b05dd8b83fe796b..b0915b734a3817a811b62b41f677f7a1a6228c9f 100644 (file)
@@ -1677,7 +1677,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
                return 1;
        case Opt_i_version:
-               sb->s_flags |= MS_I_VERSION;
+               sb->s_flags |= SB_I_VERSION;
                return 1;
        case Opt_lazytime:
                sb->s_flags |= MS_LAZYTIME;
@@ -2060,7 +2060,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
                SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
        if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
                SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
-       if (sb->s_flags & MS_I_VERSION)
+       if (sb->s_flags & SB_I_VERSION)
                SEQ_OPTS_PUTS("i_version");
        if (nodefs || sbi->s_stripe)
                SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
index b5ab06fabc60a3bd0a7a308f45e4cb5b850398c1..0438d4cd91ef74ee98f8990936d07c3bf8a67b30 100644 (file)
@@ -331,6 +331,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
        rcu_read_lock();
 
        confkey = user_key_payload_rcu(key);
+       if (!confkey) {
+               /* key was revoked */
+               rcu_read_unlock();
+               key_put(key);
+               goto no_config;
+       }
+
        buf = confkey->data;
 
        for (len = confkey->datalen - 1; len >= 0; len--) {
index 65c88379a3a14311cca68b8750d6bb0b9f107444..94a745acaef842eed799e45731bd12dd2ac0f7f7 100644 (file)
@@ -1059,7 +1059,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
        if (sb->s_flags & MS_MANDLOCK)
                goto err;
 
-       sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
+       sb->s_flags &= ~(MS_NOSEC | SB_I_VERSION);
 
        if (!parse_fuse_opt(data, &d, is_bdev))
                goto err;
index be61cf742b5ed1d0d201ff415e9355fa2684508c..d4801f8dd4fd55a111e647a810c3e0788829897d 100644 (file)
@@ -714,23 +714,9 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
 {
        struct kiocb *iocb = dio->iocb;
        struct inode *inode = file_inode(iocb->ki_filp);
+       loff_t offset = iocb->ki_pos;
        ssize_t ret;
 
-       /*
-        * Try again to invalidate clean pages which might have been cached by
-        * non-direct readahead, or faulted in by get_user_pages() if the source
-        * of the write was an mmap'ed region of the file we're writing.  Either
-        * one is a pretty crazy thing to do, so we don't support it 100%.  If
-        * this invalidation fails, tough, the write still worked...
-        */
-       if (!dio->error &&
-           (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
-               ret = invalidate_inode_pages2_range(inode->i_mapping,
-                               iocb->ki_pos >> PAGE_SHIFT,
-                               (iocb->ki_pos + dio->size - 1) >> PAGE_SHIFT);
-               WARN_ON_ONCE(ret);
-       }
-
        if (dio->end_io) {
                ret = dio->end_io(iocb,
                                dio->error ? dio->error : dio->size,
@@ -742,12 +728,33 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
        if (likely(!ret)) {
                ret = dio->size;
                /* check for short read */
-               if (iocb->ki_pos + ret > dio->i_size &&
+               if (offset + ret > dio->i_size &&
                    !(dio->flags & IOMAP_DIO_WRITE))
-                       ret = dio->i_size - iocb->ki_pos;
+                       ret = dio->i_size - offset;
                iocb->ki_pos += ret;
        }
 
+       /*
+        * Try again to invalidate clean pages which might have been cached by
+        * non-direct readahead, or faulted in by get_user_pages() if the source
+        * of the write was an mmap'ed region of the file we're writing.  Either
+        * one is a pretty crazy thing to do, so we don't support it 100%.  If
+        * this invalidation fails, tough, the write still worked...
+        *
+        * And this page cache invalidation has to be after dio->end_io(), as
+        * some filesystems convert unwritten extents to real allocations in
+        * end_io() when necessary, otherwise a racing buffer read would cache
+        * zeros from unwritten extents.
+        */
+       if (!dio->error &&
+           (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
+               int err;
+               err = invalidate_inode_pages2_range(inode->i_mapping,
+                               offset >> PAGE_SHIFT,
+                               (offset + dio->size - 1) >> PAGE_SHIFT);
+               WARN_ON_ONCE(err);
+       }
+
        inode_dio_end(file_inode(iocb->ki_filp));
        kfree(dio);
 
index 3b601f115b6cb72a9f02a82f5e8a41c458729b4b..d18deb4c410b24ed276c9b60c869f4c06b6ec20f 100644 (file)
@@ -2825,7 +2825,8 @@ long do_mount(const char *dev_name, const char __user *dir_name,
                            SB_MANDLOCK |
                            SB_DIRSYNC |
                            SB_SILENT |
-                           SB_POSIXACL);
+                           SB_POSIXACL |
+                           SB_I_VERSION);
 
        if (flags & MS_REMOUNT)
                retval = do_remount(&path, flags, sb_flags, mnt_flags,
index def32fa1c2253ee232d29a5d1cc9d6ebd4511728..89263797cf3253d905e1e1010625243a0ee9bf23 100644 (file)
@@ -3852,6 +3852,17 @@ xfs_trim_extent(
        }
 }
 
+/* trim extent to within eof */
+void
+xfs_trim_extent_eof(
+       struct xfs_bmbt_irec    *irec,
+       struct xfs_inode        *ip)
+
+{
+       xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
+                                             i_size_read(VFS_I(ip))));
+}
+
 /*
  * Trim the returned map to the required bounds
  */
index 851982a5dfbc54b347d5836898264f56b3b4f957..502e0d8fb4ff63e125328d31434e3ee1eaa39cbd 100644 (file)
@@ -208,6 +208,7 @@ void        xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
 
 void   xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
                xfs_filblks_t len);
+void   xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
 int    xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
 void   xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
 void   xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
index f18e5932aec4bfe5f1e1b3fb0d69d1b32c540b16..a3eeaba156c5ab8d7d34e6b4f217423c452b73dc 100644 (file)
@@ -446,6 +446,19 @@ xfs_imap_valid(
 {
        offset >>= inode->i_blkbits;
 
+       /*
+        * We have to make sure the cached mapping is within EOF to protect
+        * against eofblocks trimming on file release leaving us with a stale
+        * mapping. Otherwise, a page for a subsequent file extending buffered
+        * write could get picked up by this writeback cycle and written to the
+        * wrong blocks.
+        *
+        * Note that what we really want here is a generic mapping invalidation
+        * mechanism to protect us from arbitrary extent modifying contexts, not
+        * just eofblocks.
+        */
+       xfs_trim_extent_eof(imap, XFS_I(inode));
+
        return offset >= imap->br_startoff &&
                offset < imap->br_startoff + imap->br_blockcount;
 }
@@ -735,6 +748,14 @@ xfs_vm_invalidatepage(
 {
        trace_xfs_invalidatepage(page->mapping->host, page, offset,
                                 length);
+
+       /*
+        * If we are invalidating the entire page, clear the dirty state from it
+        * so that we can check for attempts to release dirty cached pages in
+        * xfs_vm_releasepage().
+        */
+       if (offset == 0 && length >= PAGE_SIZE)
+               cancel_dirty_page(page);
        block_invalidatepage(page, offset, length);
 }
 
@@ -1190,25 +1211,27 @@ xfs_vm_releasepage(
         * mm accommodates an old ext3 case where clean pages might not have had
         * the dirty bit cleared. Thus, it can send actual dirty pages to
         * ->releasepage() via shrink_active_list(). Conversely,
-        * block_invalidatepage() can send pages that are still marked dirty
-        * but otherwise have invalidated buffers.
+        * block_invalidatepage() can send pages that are still marked dirty but
+        * otherwise have invalidated buffers.
         *
         * We want to release the latter to avoid unnecessary buildup of the
-        * LRU, skip the former and warn if we've left any lingering
-        * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
-        * or unwritten buffers and warn if the page is not dirty. Otherwise
-        * try to release the buffers.
+        * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
+        * that are entirely invalidated and need to be released.  Hence the
+        * only time we should get dirty pages here is through
+        * shrink_active_list() and so we can simply skip those now.
+        *
+        * warn if we've left any lingering delalloc/unwritten buffers on clean
+        * or invalidated pages we are about to release.
         */
+       if (PageDirty(page))
+               return 0;
+
        xfs_count_page_state(page, &delalloc, &unwritten);
 
-       if (delalloc) {
-               WARN_ON_ONCE(!PageDirty(page));
+       if (WARN_ON_ONCE(delalloc))
                return 0;
-       }
-       if (unwritten) {
-               WARN_ON_ONCE(!PageDirty(page));
+       if (WARN_ON_ONCE(unwritten))
                return 0;
-       }
 
        return try_to_free_buffers(page);
 }
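
Note: the two XFS address-space hunks work as a pair: invalidatepage clears the dirty bit when the whole page is being thrown away, so releasepage can treat any page that is still dirty as one handed to it by shrink_active_list() and simply skip it. A toy model of that hand-off, using a plain struct instead of struct page:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct fake_page { bool dirty; };

static void invalidatepage(struct fake_page *p, unsigned int offset, unsigned int length)
{
	/* whole-page invalidation: the dirty state no longer means anything */
	if (offset == 0 && length >= PAGE_SIZE)
		p->dirty = false;
}

static int releasepage(struct fake_page *p)
{
	if (p->dirty)
		return 0;	/* still dirty: came via shrink_active_list(), skip */
	return 1;		/* clean or fully invalidated: free the buffers */
}

int main(void)
{
	struct fake_page p = { .dirty = true };

	invalidatepage(&p, 0, PAGE_SIZE);
	printf("releasepage() -> %d\n", releasepage(&p));
	return 0;
}
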
index 560e0b40ac1b10bdb505885900a9d1d95c8f65eb..43cfc07996a43ed1779e7ce6249df2113d87dc28 100644 (file)
@@ -367,29 +367,6 @@ xfs_getfsmap_datadev_helper(
        return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
 }
 
-/* Transform a rtbitmap "record" into a fsmap */
-STATIC int
-xfs_getfsmap_rtdev_rtbitmap_helper(
-       struct xfs_trans                *tp,
-       struct xfs_rtalloc_rec          *rec,
-       void                            *priv)
-{
-       struct xfs_mount                *mp = tp->t_mountp;
-       struct xfs_getfsmap_info        *info = priv;
-       struct xfs_rmap_irec            irec;
-       xfs_daddr_t                     rec_daddr;
-
-       rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
-
-       irec.rm_startblock = rec->ar_startblock;
-       irec.rm_blockcount = rec->ar_blockcount;
-       irec.rm_owner = XFS_RMAP_OWN_NULL;      /* "free" */
-       irec.rm_offset = 0;
-       irec.rm_flags = 0;
-
-       return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
-}
-
 /* Transform a bnobt irec into a fsmap */
 STATIC int
 xfs_getfsmap_datadev_bnobt_helper(
@@ -475,6 +452,30 @@ xfs_getfsmap_logdev(
        return xfs_getfsmap_helper(tp, info, &rmap, 0);
 }
 
+#ifdef CONFIG_XFS_RT
+/* Transform a rtbitmap "record" into a fsmap */
+STATIC int
+xfs_getfsmap_rtdev_rtbitmap_helper(
+       struct xfs_trans                *tp,
+       struct xfs_rtalloc_rec          *rec,
+       void                            *priv)
+{
+       struct xfs_mount                *mp = tp->t_mountp;
+       struct xfs_getfsmap_info        *info = priv;
+       struct xfs_rmap_irec            irec;
+       xfs_daddr_t                     rec_daddr;
+
+       rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
+
+       irec.rm_startblock = rec->ar_startblock;
+       irec.rm_blockcount = rec->ar_blockcount;
+       irec.rm_owner = XFS_RMAP_OWN_NULL;      /* "free" */
+       irec.rm_offset = 0;
+       irec.rm_flags = 0;
+
+       return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
+}
+
 /* Execute a getfsmap query against the realtime device. */
 STATIC int
 __xfs_getfsmap_rtdev(
@@ -521,7 +522,6 @@ __xfs_getfsmap_rtdev(
        return query_fn(tp, info);
 }
 
-#ifdef CONFIG_XFS_RT
 /* Actually query the realtime bitmap. */
 STATIC int
 xfs_getfsmap_rtdev_rtbitmap_query(
index 584cf2d573babe6b8331b856e617c4e6f08b5a53..f663022353c0d98b681e51fe8578096d0fbf57bf 100644 (file)
@@ -1637,7 +1637,7 @@ xfs_fs_fill_super(
 
        /* version 5 superblocks support inode version counters. */
        if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
-               sb->s_flags |= MS_I_VERSION;
+               sb->s_flags |= SB_I_VERSION;
 
        if (mp->m_flags & XFS_MOUNT_DAX) {
                xfs_warn(mp,
index d29e58fde364f01168c059f85faac389622a3fc3..818a0b26249ea2a836638204226cd73b106e7ccb 100644 (file)
@@ -728,7 +728,7 @@ void xdp_do_flush_map(void);
 void bpf_warn_invalid_xdp_action(u32 act);
 void bpf_warn_invalid_xdp_redirect(u32 ifindex);
 
-struct sock *do_sk_redirect_map(void);
+struct sock *do_sk_redirect_map(struct sk_buff *skb);
 
 #ifdef CONFIG_BPF_JIT
 extern int bpf_jit_enable;
index fb5e23c7ed98894e2833f2143e35cd5f703a9b09..7c7516eb7d76c64071e51c199fdb0fd0f4e3c934 100644 (file)
@@ -234,6 +234,10 @@ struct input_dev {
 #error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match"
 #endif
 
+#if INPUT_PROP_MAX != INPUT_DEVICE_ID_PROP_MAX
+#error "INPUT_PROP_MAX and INPUT_DEVICE_ID_PROP_MAX do not match"
+#endif
+
 #define INPUT_DEVICE_ID_MATCH_DEVICE \
        (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT)
 #define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \
@@ -469,6 +473,9 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke);
 int input_set_keycode(struct input_dev *dev,
                      const struct input_keymap_entry *ke);
 
+bool input_match_device_id(const struct input_dev *dev,
+                          const struct input_device_id *id);
+
 void input_enable_softrepeat(struct input_dev *dev, int delay, int period);
 
 extern struct class input_class;
index d4728bf6a537c802073592fcd6e440ccc628509e..5ad10948ea95241ed1ce4afe1d1da89b73ea2a39 100644 (file)
@@ -1009,7 +1009,7 @@ void irq_gc_mask_clr_bit(struct irq_data *d);
 void irq_gc_unmask_enable_reg(struct irq_data *d);
 void irq_gc_ack_set_bit(struct irq_data *d);
 void irq_gc_ack_clr_bit(struct irq_data *d);
-void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
+void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
 void irq_gc_eoi(struct irq_data *d);
 int irq_gc_set_wake(struct irq_data *d, unsigned int on);
 
index 1ea576c8126f8b8c467f1ea3ab2623826efde0af..14b74f22d43c147f2bc8eec9ef82e2a1461a6643 100644 (file)
 #define GITS_BASER_ENTRY_SIZE_SHIFT            (48)
 #define GITS_BASER_ENTRY_SIZE(r)       ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
 #define GITS_BASER_ENTRY_SIZE_MASK     GENMASK_ULL(52, 48)
+#define GITS_BASER_PHYS_52_to_48(phys)                                 \
+       (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
 #define GITS_BASER_SHAREABILITY_SHIFT  (10)
 #define GITS_BASER_InnerShareable                                      \
        GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
index e315e16b6ff8a0aaade2552ee404d35e27da5570..8a15cabe928d0ee282742f0c44be26e7af01851d 100644 (file)
@@ -138,6 +138,11 @@ struct key_restriction {
        struct key_type *keytype;
 };
 
+enum key_state {
+       KEY_IS_UNINSTANTIATED,
+       KEY_IS_POSITIVE,                /* Positively instantiated */
+};
+
 /*****************************************************************************/
 /*
  * authentication token / access credential / keyring
@@ -169,6 +174,7 @@ struct key {
                                                 * - may not match RCU dereferenced payload
                                                 * - payload should contain own length
                                                 */
+       short                   state;          /* Key state (+) or rejection error (-) */
 
 #ifdef KEY_DEBUGGING
        unsigned                magic;
@@ -176,18 +182,16 @@ struct key {
 #endif
 
        unsigned long           flags;          /* status flags (change with bitops) */
-#define KEY_FLAG_INSTANTIATED  0       /* set if key has been instantiated */
-#define KEY_FLAG_DEAD          1       /* set if key type has been deleted */
-#define KEY_FLAG_REVOKED       2       /* set if key had been revoked */
-#define KEY_FLAG_IN_QUOTA      3       /* set if key consumes quota */
-#define KEY_FLAG_USER_CONSTRUCT        4       /* set if key is being constructed in userspace */
-#define KEY_FLAG_NEGATIVE      5       /* set if key is negative */
-#define KEY_FLAG_ROOT_CAN_CLEAR        6       /* set if key can be cleared by root without permission */
-#define KEY_FLAG_INVALIDATED   7       /* set if key has been invalidated */
-#define KEY_FLAG_BUILTIN       8       /* set if key is built in to the kernel */
-#define KEY_FLAG_ROOT_CAN_INVAL        9       /* set if key can be invalidated by root without permission */
-#define KEY_FLAG_KEEP          10      /* set if key should not be removed */
-#define KEY_FLAG_UID_KEYRING   11      /* set if key is a user or user session keyring */
+#define KEY_FLAG_DEAD          0       /* set if key type has been deleted */
+#define KEY_FLAG_REVOKED       1       /* set if key had been revoked */
+#define KEY_FLAG_IN_QUOTA      2       /* set if key consumes quota */
+#define KEY_FLAG_USER_CONSTRUCT        3       /* set if key is being constructed in userspace */
+#define KEY_FLAG_ROOT_CAN_CLEAR        4       /* set if key can be cleared by root without permission */
+#define KEY_FLAG_INVALIDATED   5       /* set if key has been invalidated */
+#define KEY_FLAG_BUILTIN       6       /* set if key is built in to the kernel */
+#define KEY_FLAG_ROOT_CAN_INVAL        7       /* set if key can be invalidated by root without permission */
+#define KEY_FLAG_KEEP          8       /* set if key should not be removed */
+#define KEY_FLAG_UID_KEYRING   9       /* set if key is a user or user session keyring */
 
        /* the key type and key description string
         * - the desc is used to match a key against search criteria
@@ -213,7 +217,6 @@ struct key {
                        struct list_head name_link;
                        struct assoc_array keys;
                };
-               int reject_error;
        };
 
        /* This is set on a keyring to restrict the addition of a link to a key
@@ -353,17 +356,27 @@ extern void key_set_timeout(struct key *, unsigned);
 #define        KEY_NEED_SETATTR 0x20   /* Require permission to change attributes */
 #define        KEY_NEED_ALL    0x3f    /* All the above permissions */
 
+static inline short key_read_state(const struct key *key)
+{
+       /* Barrier versus mark_key_instantiated(). */
+       return smp_load_acquire(&key->state);
+}
+
 /**
- * key_is_instantiated - Determine if a key has been positively instantiated
+ * key_is_positive - Determine if a key has been positively instantiated
  * @key: The key to check.
  *
  * Return true if the specified key has been positively instantiated, false
  * otherwise.
  */
-static inline bool key_is_instantiated(const struct key *key)
+static inline bool key_is_positive(const struct key *key)
+{
+       return key_read_state(key) == KEY_IS_POSITIVE;
+}
+
+static inline bool key_is_negative(const struct key *key)
 {
-       return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
-               !test_bit(KEY_FLAG_NEGATIVE, &key->flags);
+       return key_read_state(key) < 0;
 }
 
 #define dereference_key_rcu(KEY)                                       \
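
Note: the key.h rework collapses KEY_FLAG_INSTANTIATED/KEY_FLAG_NEGATIVE and reject_error into one short state field read with smp_load_acquire(), so a reader that sees KEY_IS_POSITIVE is guaranteed to also see the payload written before the state was published. A user-space sketch of that pairing, using C11 atomics in place of the kernel primitives and simplified names:

#include <stdatomic.h>
#include <stdio.h>

enum { KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE };

struct key_sketch {
	const char	*payload;
	_Atomic short	state;
};

/* writer: fill in the payload, then publish the state (release) */
static void mark_key_instantiated(struct key_sketch *k, const char *data)
{
	k->payload = data;
	atomic_store_explicit(&k->state, KEY_IS_POSITIVE, memory_order_release);
}

/* reader: acquire-load the state before trusting the payload */
static const char *key_read(struct key_sketch *k)
{
	if (atomic_load_explicit(&k->state, memory_order_acquire) == KEY_IS_POSITIVE)
		return k->payload;
	return NULL;
}

int main(void)
{
	struct key_sketch k = { .payload = NULL, .state = KEY_IS_UNINSTANTIATED };
	const char *p;

	mark_key_instantiated(&k, "instantiated payload");
	p = key_read(&k);
	printf("%s\n", p ? p : "(uninstantiated)");
	return 0;
}
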
index 0d3f14fd26217fcb2a472c411c2e9ee73a6c6c85..4773145246ed264f47e2a1c885dc7bd36654ddd3 100644 (file)
@@ -31,8 +31,8 @@ struct mbus_dram_target_info
        struct mbus_dram_window {
                u8      cs_index;
                u8      mbus_attr;
-               u32     base;
-               u32     size;
+               u64     base;
+               u64     size;
        } cs[4];
 };
 
index 46f4ecf5479adbb2829c26e63d5e56b20e779312..1861ea8dba775989dcf298cb77d92ef9429fa221 100644 (file)
@@ -445,6 +445,9 @@ struct mm_struct {
        unsigned long flags; /* Must use atomic bitops to access the bits */
 
        struct core_state *core_state; /* coredumping support */
+#ifdef CONFIG_MEMBARRIER
+       atomic_t membarrier_state;
+#endif
 #ifdef CONFIG_AIO
        spinlock_t                      ioctx_lock;
        struct kioctx_table __rcu       *ioctx_table;
index 694cebb50f72c7d1cb404e26416e5400545c114c..2657f9f51536c369188fb13e6c7943234bd272f3 100644 (file)
@@ -293,6 +293,7 @@ struct pcmcia_device_id {
 #define INPUT_DEVICE_ID_SND_MAX                0x07
 #define INPUT_DEVICE_ID_FF_MAX         0x7f
 #define INPUT_DEVICE_ID_SW_MAX         0x0f
+#define INPUT_DEVICE_ID_PROP_MAX       0x1f
 
 #define INPUT_DEVICE_ID_MATCH_BUS      1
 #define INPUT_DEVICE_ID_MATCH_VENDOR   2
@@ -308,6 +309,7 @@ struct pcmcia_device_id {
 #define INPUT_DEVICE_ID_MATCH_SNDBIT   0x0400
 #define INPUT_DEVICE_ID_MATCH_FFBIT    0x0800
 #define INPUT_DEVICE_ID_MATCH_SWBIT    0x1000
+#define INPUT_DEVICE_ID_MATCH_PROPBIT  0x2000
 
 struct input_device_id {
 
@@ -327,6 +329,7 @@ struct input_device_id {
        kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1];
        kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1];
        kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1];
+       kernel_ulong_t propbit[INPUT_DEVICE_ID_PROP_MAX / BITS_PER_LONG + 1];
 
        kernel_ulong_t driver_info;
 };
index f535779d9dc1dfe36934c2abba4e43d053ac5d6f..2eaac7d75af4f1bbdaf876acc55b4bd0d37a7f36 100644 (file)
@@ -3694,6 +3694,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
                                    unsigned char name_assign_type,
                                    void (*setup)(struct net_device *),
                                    unsigned int txqs, unsigned int rxqs);
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+                      const char *name);
+
 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
        alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
 
index b1fd8bf85fdc430eaaa2195cd6dc18417bb64585..2bea1d5e99302bd1b440d595f9df7ab30531e228 100644 (file)
@@ -276,7 +276,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
 #define list_entry_rcu(ptr, type, member) \
        container_of(lockless_dereference(ptr), type, member)
 
-/**
+/*
  * Where are list_empty_rcu() and list_first_entry_rcu()?
  *
  * Implementing those functions following their counterparts list_empty() and
index de50d8a4cf414121a9f83e50df7738a5f530494a..1a9f70d44af954ffe790dcb75872704beab152b2 100644 (file)
@@ -523,7 +523,7 @@ static inline void rcu_preempt_sleep_check(void) { }
  * Return the value of the specified RCU-protected pointer, but omit
  * both the smp_read_barrier_depends() and the READ_ONCE().  This
  * is useful in cases where update-side locks prevent the value of the
- * pointer from changing.  Please note that this primitive does -not-
+ * pointer from changing.  Please note that this primitive does *not*
  * prevent the compiler from repeating this reference or combining it
  * with other references, so it should not be used without protection
  * of appropriate locks.
@@ -568,7 +568,7 @@ static inline void rcu_preempt_sleep_check(void) { }
  * is handed off from RCU to some other synchronization mechanism, for
  * example, reference counting or locking.  In C11, it would map to
  * kill_dependency().  It could be used as follows:
- *
+ * ``
  *     rcu_read_lock();
  *     p = rcu_dereference(gp);
  *     long_lived = is_long_lived(p);
@@ -579,6 +579,7 @@ static inline void rcu_preempt_sleep_check(void) { }
  *                     p = rcu_pointer_handoff(p);
  *     }
  *     rcu_read_unlock();
+ *``
  */
 #define rcu_pointer_handoff(p) (p)
 
@@ -778,18 +779,21 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 
 /**
  * RCU_INIT_POINTER() - initialize an RCU protected pointer
+ * @p: The pointer to be initialized.
+ * @v: The value to initialize the pointer to.
  *
  * Initialize an RCU-protected pointer in special cases where readers
  * do not need ordering constraints on the CPU or the compiler.  These
  * special cases are:
  *
- * 1.  This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
+ * 1.  This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
  * 2.  The caller has taken whatever steps are required to prevent
- *     RCU readers from concurrently accessing this pointer -or-
+ *     RCU readers from concurrently accessing this pointer *or*
  * 3.  The referenced data structure has already been exposed to
- *     readers either at compile time or via rcu_assign_pointer() -and-
- *     a.      You have not made -any- reader-visible changes to
- *             this structure since then -or-
+ *     readers either at compile time or via rcu_assign_pointer() *and*
+ *
+ *     a.      You have not made *any* reader-visible changes to
+ *             this structure since then *or*
  *     b.      It is OK for readers accessing this structure from its
  *             new location to see the old state of the structure.  (For
  *             example, the changes were to statistical counters or to
@@ -805,7 +809,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
  * by a single external-to-structure RCU-protected pointer, then you may
  * use RCU_INIT_POINTER() to initialize the internal RCU-protected
  * pointers, but you must use rcu_assign_pointer() to initialize the
- * external-to-structure pointer -after- you have completely initialized
+ * external-to-structure pointer *after* you have completely initialized
  * the reader-accessible portions of the linked structure.
  *
  * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
@@ -819,6 +823,8 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 
 /**
  * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
+ * @p: The pointer to be initialized.
+ * @v: The value to initialize the pointer to.
  *
  * GCC-style initialization for an RCU-protected pointer in a structure field.
  */
index ae53e413fb1311ddc1a9c826e1634fa86f083599..ab9bf7b739545fc3526d94fcd7fa7cd3d6b11054 100644 (file)
@@ -211,4 +211,20 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
        current->flags = (current->flags & ~PF_MEMALLOC) | flags;
 }
 
+#ifdef CONFIG_MEMBARRIER
+enum {
+       MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY        = (1U << 0),
+       MEMBARRIER_STATE_SWITCH_MM                      = (1U << 1),
+};
+
+static inline void membarrier_execve(struct task_struct *t)
+{
+       atomic_set(&t->mm->membarrier_state, 0);
+}
+#else
+static inline void membarrier_execve(struct task_struct *t)
+{
+}
+#endif
+
 #endif /* _LINUX_SCHED_MM_H */
index 39af9bc0f653ec97f739b2e803385cba669d1aa7..62be8966e8370535ac6cb9b15b7c154e23381dda 100644 (file)
@@ -78,6 +78,7 @@ void synchronize_srcu(struct srcu_struct *sp);
 
 /**
  * srcu_read_lock_held - might we be in SRCU read-side critical section?
+ * @sp: The srcu_struct structure to check
  *
  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
  * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
index aa95053dfc78d35d04aef276e2a5dce7343f72a0..425752f768d2f1a0efb13964204e07f27609e9db 100644 (file)
@@ -96,7 +96,7 @@ struct inet_request_sock {
        kmemcheck_bitfield_end(flags);
        u32                     ir_mark;
        union {
-               struct ip_options_rcu   *opt;
+               struct ip_options_rcu __rcu     *ireq_opt;
 #if IS_ENABLED(CONFIG_IPV6)
                struct {
                        struct ipv6_txoptions   *ipv6_opt;
index 89974c5286d8ae4db63def6822ae5b0dc93cddf9..b1ef98ebce53cd259a2894f4575cb18cc7755331 100644 (file)
@@ -840,6 +840,11 @@ struct tcp_skb_cb {
                        struct inet6_skb_parm   h6;
 #endif
                } header;       /* For incoming skbs */
+               struct {
+                       __u32 key;
+                       __u32 flags;
+                       struct bpf_map *map;
+               } bpf;
        };
 };
 
index bd7246de58e7c4d0cdca0cbd233aece13a311b35..a1f1152bc687613b87d7d4a1f73a3d139ef9f354 100644 (file)
@@ -248,6 +248,9 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
                             void *private_data);
 void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
 #define snd_ctl_sync_vmaster_hook(kctl)        snd_ctl_sync_vmaster(kctl, true)
+int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
+                                int (*func)(struct snd_kcontrol *, void *),
+                                void *arg);
 
 /*
  * Helper functions for jack-detection controls
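
snd_ctl_apply_vmaster_slaves() runs a callback for every slave of a virtual-master control. A hedged sketch of a caller, with an invented context structure and callback (the real users live in the HDA driver):

struct example_ctx {
	int value;	/* hypothetical state to push to each slave */
};

static int example_apply_one(struct snd_kcontrol *slave, void *arg)
{
	struct example_ctx *ctx = arg;

	/* ... apply ctx->value to this slave control ... */
	return 0;
}

static int example_apply_all(struct snd_kcontrol *vmaster,
			     struct example_ctx *ctx)
{
	return snd_ctl_apply_vmaster_slaves(vmaster, example_apply_one, ctx);
}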
index 6d47b3249d8ad84a2fcf2b38585338ccbf05a99f..4e01ad7ffe9831c63f90a46d40445e467365beec 100644 (file)
  *                          (non-running threads are de facto in such a
  *                          state). This only covers threads from the
  *                          same processes as the caller thread. This
- *                          command returns 0. The "expedited" commands
- *                          complete faster than the non-expedited ones,
- *                          they never block, but have the downside of
- *                          causing extra overhead.
+ *                          command returns 0 on success. The
+ *                          "expedited" commands complete faster than
+ *                          the non-expedited ones, they never block,
+ *                          but have the downside of causing extra
+ *                          overhead. A process needs to register its
+ *                          intent to use the private expedited command
+ *                          prior to using it, otherwise this command
+ *                          returns -EPERM.
+ * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
+ *                          Register the process intent to use
+ *                          MEMBARRIER_CMD_PRIVATE_EXPEDITED. Always
+ *                          returns 0.
  *
  * Command to be passed to the membarrier system call. The commands need to
  * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
  * the value 0.
  */
 enum membarrier_cmd {
-       MEMBARRIER_CMD_QUERY                    = 0,
-       MEMBARRIER_CMD_SHARED                   = (1 << 0),
+       MEMBARRIER_CMD_QUERY                            = 0,
+       MEMBARRIER_CMD_SHARED                           = (1 << 0),
        /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
        /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
-       MEMBARRIER_CMD_PRIVATE_EXPEDITED        = (1 << 3),
+       MEMBARRIER_CMD_PRIVATE_EXPEDITED                = (1 << 3),
+       MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED       = (1 << 4),
 };
 
 #endif /* _UAPI_LINUX_MEMBARRIER_H */
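
From userspace the new registration step looks roughly like the following sketch (assuming UAPI headers that already contain the new command; there is no glibc wrapper, so syscall(2) is used directly):

#define _GNU_SOURCE
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static int membarrier(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	/* Register once per process... */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
		perror("register");

	/* ...after which the expedited command no longer returns -EPERM. */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
		perror("barrier");

	return 0;
}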
index 98c0f00c3f5e05007287de1a1636c4e6722beed7..e2636737b69bd8bdd1a690e5453616effb9800d5 100644 (file)
@@ -98,7 +98,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
        array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
 
        if (array_size >= U32_MAX - PAGE_SIZE ||
-           elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
+           bpf_array_alloc_percpu(array)) {
                bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }
index e093d9a2c4dd3fb5f02503bf0c741f2af9cc7c48..e745d6a88224f5b5e9a8241dc7dee5d35e4832de 100644 (file)
@@ -69,7 +69,7 @@ static LIST_HEAD(dev_map_list);
 
 static u64 dev_map_bitmap_size(const union bpf_attr *attr)
 {
-       return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long);
+       return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
 }
 
 static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
@@ -78,6 +78,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
        int err = -EINVAL;
        u64 cost;
 
+       if (!capable(CAP_NET_ADMIN))
+               return ERR_PTR(-EPERM);
+
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
@@ -111,8 +114,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
        err = -ENOMEM;
 
        /* A per cpu bitfield with a bit per possible net device */
-       dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr),
-                                           __alignof__(unsigned long));
+       dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
+                                               __alignof__(unsigned long),
+                                               GFP_KERNEL | __GFP_NOWARN);
        if (!dtab->flush_needed)
                goto free_dtab;
 
index 431126f31ea3c90648366295e2b77fd3bb79b6e7..6533f08d1238e136895a5cf0665be31d7b23df51 100644 (file)
@@ -317,10 +317,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                 */
                goto free_htab;
 
-       if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
-               /* make sure the size for pcpu_alloc() is reasonable */
-               goto free_htab;
-
        htab->elem_size = sizeof(struct htab_elem) +
                          round_up(htab->map.key_size, 8);
        if (percpu)
index 6424ce0e49698abee1da6242a0a8494d8ba0f03f..2b6eb35ae5d39799a9ae33fe5bf07508d2b82a54 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/workqueue.h>
 #include <linux/list.h>
 #include <net/strparser.h>
+#include <net/tcp.h>
 
 struct bpf_stab {
        struct bpf_map map;
@@ -101,9 +102,16 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
                return SK_DROP;
 
        skb_orphan(skb);
+       /* We need to ensure that BPF metadata for maps is also cleared
+        * when we orphan the skb so that we don't have the possibility
+        * to reference a stale map.
+        */
+       TCP_SKB_CB(skb)->bpf.map = NULL;
        skb->sk = psock->sock;
        bpf_compute_data_end(skb);
+       preempt_disable();
        rc = (*prog->bpf_func)(skb, prog->insnsi);
+       preempt_enable();
        skb->sk = NULL;
 
        return rc;
@@ -114,17 +122,10 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
        struct sock *sk;
        int rc;
 
-       /* Because we use per cpu values to feed input from sock redirect
-        * in BPF program to do_sk_redirect_map() call we need to ensure we
-        * are not preempted. RCU read lock is not sufficient in this case
-        * with CONFIG_PREEMPT_RCU enabled so we must be explicit here.
-        */
-       preempt_disable();
        rc = smap_verdict_func(psock, skb);
        switch (rc) {
        case SK_REDIRECT:
-               sk = do_sk_redirect_map();
-               preempt_enable();
+               sk = do_sk_redirect_map(skb);
                if (likely(sk)) {
                        struct smap_psock *peer = smap_psock_sk(sk);
 
@@ -141,8 +142,6 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
        /* Fall through and free skb otherwise */
        case SK_DROP:
        default:
-               if (rc != SK_REDIRECT)
-                       preempt_enable();
                kfree_skb(skb);
        }
 }
@@ -487,6 +486,9 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
        int err = -EINVAL;
        u64 cost;
 
+       if (!capable(CAP_NET_ADMIN))
+               return ERR_PTR(-EPERM);
+
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
@@ -840,6 +842,12 @@ static int sock_map_update_elem(struct bpf_map *map,
                return -EINVAL;
        }
 
+       if (skops.sk->sk_type != SOCK_STREAM ||
+           skops.sk->sk_protocol != IPPROTO_TCP) {
+               fput(socket->file);
+               return -EOPNOTSUPP;
+       }
+
        err = sock_map_ctx_update_elem(&skops, map, key, flags);
        fput(socket->file);
        return err;
index 8b8d6ba39e238cb5eea09a71260ddc55630faa6f..c48ca2a34b5e131420f4795c4a0eaf9d9a64861d 100644 (file)
@@ -1116,7 +1116,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
                /* ctx accesses must be at a fixed offset, so that we can
                 * determine what type of data were returned.
                 */
-               if (!tnum_is_const(reg->var_off)) {
+               if (reg->off) {
+                       verbose("dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
+                               regno, reg->off, off - reg->off);
+                       return -EACCES;
+               }
+               if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
                        char tn_buf[48];
 
                        tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
@@ -1124,7 +1129,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
                                tn_buf, off, size);
                        return -EACCES;
                }
-               off += reg->var_off.value;
                err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
                if (!err && t == BPF_READ && value_regno >= 0) {
                        /* ctx access returns either a scalar, or a
@@ -2426,12 +2430,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 }
 
 static void find_good_pkt_pointers(struct bpf_verifier_state *state,
-                                  struct bpf_reg_state *dst_reg)
+                                  struct bpf_reg_state *dst_reg,
+                                  bool range_right_open)
 {
        struct bpf_reg_state *regs = state->regs, *reg;
+       u16 new_range;
        int i;
 
-       if (dst_reg->off < 0)
+       if (dst_reg->off < 0 ||
+           (dst_reg->off == 0 && range_right_open))
                /* This doesn't give us any range */
                return;
 
@@ -2442,9 +2449,13 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
                 */
                return;
 
-       /* LLVM can generate four kind of checks:
+       new_range = dst_reg->off;
+       if (range_right_open)
+               new_range--;
+
+       /* Examples for register markings:
         *
-        * Type 1/2:
+        * pkt_data in dst register:
         *
         *   r2 = r3;
         *   r2 += 8;
@@ -2461,7 +2472,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
         *     r2=pkt(id=n,off=8,r=0)
         *     r3=pkt(id=n,off=0,r=0)
         *
-        * Type 3/4:
+        * pkt_data in src register:
         *
         *   r2 = r3;
         *   r2 += 8;
@@ -2479,7 +2490,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
         *     r3=pkt(id=n,off=0,r=0)
         *
         * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
-        * so that range of bytes [r3, r3 + 8) is safe to access.
+        * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
+        * and [r3, r3 + 8-1) respectively is safe to access depending on
+        * the check.
         */
 
        /* If our ids match, then we must have the same max_value.  And we
@@ -2490,14 +2503,14 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
        for (i = 0; i < MAX_BPF_REG; i++)
                if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
                        /* keep the maximum range already checked */
-                       regs[i].range = max_t(u16, regs[i].range, dst_reg->off);
+                       regs[i].range = max(regs[i].range, new_range);
 
        for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
                if (state->stack_slot_type[i] != STACK_SPILL)
                        continue;
                reg = &state->spilled_regs[i / BPF_REG_SIZE];
                if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
-                       reg->range = max_t(u16, reg->range, dst_reg->off);
+                       reg->range = max(reg->range, new_range);
        }
 }
 
@@ -2861,19 +2874,43 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
        } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
                   dst_reg->type == PTR_TO_PACKET &&
                   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
-               find_good_pkt_pointers(this_branch, dst_reg);
+               /* pkt_data' > pkt_end */
+               find_good_pkt_pointers(this_branch, dst_reg, false);
+       } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
+                  dst_reg->type == PTR_TO_PACKET_END &&
+                  regs[insn->src_reg].type == PTR_TO_PACKET) {
+               /* pkt_end > pkt_data' */
+               find_good_pkt_pointers(other_branch, &regs[insn->src_reg], true);
        } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
                   dst_reg->type == PTR_TO_PACKET &&
                   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
-               find_good_pkt_pointers(other_branch, dst_reg);
+               /* pkt_data' < pkt_end */
+               find_good_pkt_pointers(other_branch, dst_reg, true);
+       } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
+                  dst_reg->type == PTR_TO_PACKET_END &&
+                  regs[insn->src_reg].type == PTR_TO_PACKET) {
+               /* pkt_end < pkt_data' */
+               find_good_pkt_pointers(this_branch, &regs[insn->src_reg], false);
+       } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
+                  dst_reg->type == PTR_TO_PACKET &&
+                  regs[insn->src_reg].type == PTR_TO_PACKET_END) {
+               /* pkt_data' >= pkt_end */
+               find_good_pkt_pointers(this_branch, dst_reg, true);
        } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
                   dst_reg->type == PTR_TO_PACKET_END &&
                   regs[insn->src_reg].type == PTR_TO_PACKET) {
-               find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);
+               /* pkt_end >= pkt_data' */
+               find_good_pkt_pointers(other_branch, &regs[insn->src_reg], false);
+       } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
+                  dst_reg->type == PTR_TO_PACKET &&
+                  regs[insn->src_reg].type == PTR_TO_PACKET_END) {
+               /* pkt_data' <= pkt_end */
+               find_good_pkt_pointers(other_branch, dst_reg, false);
        } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
                   dst_reg->type == PTR_TO_PACKET_END &&
                   regs[insn->src_reg].type == PTR_TO_PACKET) {
-               find_good_pkt_pointers(this_branch, &regs[insn->src_reg]);
+               /* pkt_end <= pkt_data' */
+               find_good_pkt_pointers(this_branch, &regs[insn->src_reg], true);
        } else if (is_pointer_value(env, insn->dst_reg)) {
                verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
                return -EACCES;
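
The added cases recognize the mirrored forms of the usual packet bounds checks. In restricted BPF C, the canonical pattern and its now-accepted mirror look like this sketch (the program and section name are illustrative; SEC() is defined inline to keep the fragment self-contained):

#include <linux/bpf.h>
#include <linux/if_ether.h>

#define SEC(name) __attribute__((section(name), used))

SEC("classifier")
int example_prog(struct __sk_buff *skb)
{
	void *data     = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct ethhdr *eth = data;

	/* Classic form: pkt_data + N > pkt_end. The mirrored test
	 * "data_end > data + sizeof(*eth)" is what the new branches
	 * above now mark packet ranges for as well.
	 */
	if (data + sizeof(*eth) > data_end)
		return 0;

	return eth->h_proto;
}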
index d851df22f5c5eef24b9f4e3ead6e986e4b87352f..04892a82f6ac36c92324806b66a1c1855880c8f7 100644 (file)
@@ -632,6 +632,11 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
                __cpuhp_kick_ap(st);
        }
 
+       /*
+        * Clean up the leftovers so the next hotplug operation won't use stale
+        * data.
+        */
+       st->node = st->last = NULL;
        return ret;
 }
 
index cf28528842bcf54f4125517fd6c7521b3c79685c..f6cad39f35dfbe441abc5fc740458a6574e7d529 100644 (file)
@@ -1611,7 +1611,7 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
                return err;
 
        if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
-               goto Efault;
+               return -EFAULT;
 
        user_access_begin();
        unsafe_put_user(signo, &infop->si_signo, Efault);
@@ -1739,7 +1739,7 @@ COMPAT_SYSCALL_DEFINE5(waitid,
                return err;
 
        if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
-               goto Efault;
+               return -EFAULT;
 
        user_access_begin();
        unsafe_put_user(signo, &infop->si_signo, Efault);
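
The point of returning -EFAULT directly is that user_access_end() must only run after a successful user_access_begin(); jumping to the Efault label before the begin would unbalance the pair. A hedged sketch of the corrected pattern with hypothetical names:

struct example_info {
	int si_signo;
};

static int example_fill_info(struct example_info __user *infop, int signo)
{
	if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
		return -EFAULT;	/* no user_access_begin() yet, so return */

	user_access_begin();
	unsafe_put_user(signo, &infop->si_signo, Efault);
	user_access_end();
	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}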
index 5270a54b9fa4dbf6a4cbdf3e990edc6a0156a915..c26c5bb6b491f75f76f1190cdc21989f79d17e09 100644 (file)
@@ -135,17 +135,26 @@ void irq_gc_ack_clr_bit(struct irq_data *d)
 }
 
 /**
- * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
+ * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
  * @d: irq_data
+ *
+ * This generic implementation of the irq_mask_ack method is for chips
+ * with separate enable/disable registers instead of a single mask
+ * register and where a pending interrupt is acknowledged by setting a
+ * bit.
+ *
+ * Note: This is the only permutation currently used.  Similar generic
+ * functions should be added here if other permutations are required.
  */
-void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
+void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
 {
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;
 
        irq_gc_lock(gc);
-       irq_reg_writel(gc, mask, ct->regs.mask);
+       irq_reg_writel(gc, mask, ct->regs.disable);
+       *ct->mask_cache &= ~mask;
        irq_reg_writel(gc, mask, ct->regs.ack);
        irq_gc_unlock(gc);
 }
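
A hedged sketch of how a driver with split enable/disable registers and a set-to-ack register would wire the renamed helper into its generic chip (register offsets and names are hypothetical):

#include <linux/irq.h>

#define EXAMPLE_REG_ENABLE	0x00	/* hypothetical register offsets */
#define EXAMPLE_REG_DISABLE	0x04
#define EXAMPLE_REG_ACK		0x08

static void example_setup_gc(struct irq_chip_generic *gc)
{
	struct irq_chip_type *ct = gc->chip_types;

	ct->regs.enable  = EXAMPLE_REG_ENABLE;
	ct->regs.disable = EXAMPLE_REG_DISABLE;
	ct->regs.ack     = EXAMPLE_REG_ACK;

	ct->chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
	ct->chip.irq_mask     = irq_gc_mask_disable_reg;
	ct->chip.irq_unmask   = irq_gc_unmask_enable_reg;
	ct->chip.irq_ack      = irq_gc_ack_set_bit;
}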
index 729a8706751db4230bf3e3275192475fb44f9c3a..6d5880089ff6b7785db69e0facb432cab4aab5fc 100644 (file)
@@ -854,7 +854,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 /**
  * call_srcu() - Queue a callback for invocation after an SRCU grace period
  * @sp: srcu_struct in queue the callback
- * @head: structure to be used for queueing the SRCU callback.
+ * @rhp: structure to be used for queueing the SRCU callback.
  * @func: function to be invoked after the SRCU grace period
  *
  * The callback function will be invoked some time after a full SRCU
index 50d1861f7759b40ff0248281e742a6a4fac1e852..3f943efcf61c1e3c31373799f475c80967ae3fd0 100644 (file)
@@ -85,6 +85,9 @@ void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
 }
 
 /**
+ * rcu_sync_enter_start - Force readers onto slow path for multiple updates
+ * @rsp: Pointer to rcu_sync structure to use for synchronization
+ *
  * Must be called after rcu_sync_init() and before first use.
  *
  * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
@@ -142,7 +145,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
 
 /**
  * rcu_sync_func() - Callback function managing reader access to fastpath
- * @rsp: Pointer to rcu_sync structure to use for synchronization
+ * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
  *
  * This function is passed to one of the call_rcu() functions by
  * rcu_sync_exit(), so that it is invoked after a grace period following the
@@ -158,9 +161,9 @@ void rcu_sync_enter(struct rcu_sync *rsp)
  * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
  * can again use their fastpaths.
  */
-static void rcu_sync_func(struct rcu_head *rcu)
+static void rcu_sync_func(struct rcu_head *rhp)
 {
-       struct rcu_sync *rsp = container_of(rcu, struct rcu_sync, cb_head);
+       struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
        unsigned long flags;
 
        BUG_ON(rsp->gp_state != GP_PASSED);
index b0ad62b0e7b8f8028d20694d79e144e4e07ec9c0..3e3650e94ae6b1dd26fea711a8961d627c16623b 100644 (file)
@@ -3097,9 +3097,10 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
  * read-side critical sections have completed. call_rcu_sched() assumes
  * that the read-side critical sections end on enabling of preemption
  * or on voluntary preemption.
- * RCU read-side critical sections are delimited by :
- *  - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
- *  - anything that disables preemption.
+ * RCU read-side critical sections are delimited by:
+ *
+ * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
+ * - anything that disables preemption.
  *
  *  These may be nested.
  *
@@ -3124,11 +3125,12 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
  * handler. This means that read-side critical sections in process
  * context must not be interrupted by softirqs. This interface is to be
  * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by :
- *  - rcu_read_lock() and  rcu_read_unlock(), if in interrupt context.
- *  OR
- *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
- *  These may be nested.
+ * RCU read-side critical sections are delimited by:
+ *
+ * - rcu_read_lock() and  rcu_read_unlock(), if in interrupt context, OR
+ * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
+ *
+ * These may be nested.
  *
  * See the description of call_rcu() for more detailed information on
  * memory ordering guarantees.
index a92fddc227471e05a77d9f8c4c585b84ba8f28dd..dd7908743dab696facd9dd32f4399ee952228151 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/membarrier.h>
 #include <linux/tick.h>
 #include <linux/cpumask.h>
+#include <linux/atomic.h>
 
 #include "sched.h"     /* for cpu_rq(). */
 
  * except MEMBARRIER_CMD_QUERY.
  */
 #define MEMBARRIER_CMD_BITMASK \
-       (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED)
+       (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED       \
+       | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED)
 
 static void ipi_mb(void *info)
 {
        smp_mb();       /* IPIs should be serializing but paranoid. */
 }
 
-static void membarrier_private_expedited(void)
+static int membarrier_private_expedited(void)
 {
        int cpu;
        bool fallback = false;
        cpumask_var_t tmpmask;
 
+       if (!(atomic_read(&current->mm->membarrier_state)
+                       & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
+               return -EPERM;
+
        if (num_online_cpus() == 1)
-               return;
+               return 0;
 
        /*
         * Matches memory barriers around rq->curr modification in
@@ -94,6 +100,24 @@ static void membarrier_private_expedited(void)
         * rq->curr modification in scheduler.
         */
        smp_mb();       /* exit from system call is not a mb */
+       return 0;
+}
+
+static void membarrier_register_private_expedited(void)
+{
+       struct task_struct *p = current;
+       struct mm_struct *mm = p->mm;
+
+       /*
+        * We need to consider threads belonging to different thread
+        * groups, which use the same mm. (CLONE_VM but not
+        * CLONE_THREAD).
+        */
+       if (atomic_read(&mm->membarrier_state)
+                       & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)
+               return;
+       atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
+                       &mm->membarrier_state);
 }
 
 /**
@@ -144,7 +168,9 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
                        synchronize_sched();
                return 0;
        case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
-               membarrier_private_expedited();
+               return membarrier_private_expedited();
+       case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
+               membarrier_register_private_expedited();
                return 0;
        default:
                return -EINVAL;
index 03d7c63837aecb36f74037212e9e7dd421e2e4c9..6ba6fcd92dd10cd2c78898b35c65a0540b19bfb3 100644 (file)
@@ -87,6 +87,12 @@ static int digsig_verify_rsa(struct key *key,
        down_read(&key->sem);
        ukp = user_key_payload_locked(key);
 
+       if (!ukp) {
+               /* key was revoked before we acquired its semaphore */
+               err = -EKEYREVOKED;
+               goto err1;
+       }
+
        if (ukp->datalen < sizeof(*pkh))
                goto err1;
 
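
The same pattern applies to any caller that takes key->sem and then dereferences the payload: user_key_payload_locked() returns NULL once the key has been revoked. A minimal sketch with an invented caller:

#include <linux/key.h>
#include <keys/user-type.h>

static int example_use_key(struct key *key)
{
	const struct user_key_payload *ukp;
	int err = 0;

	down_read(&key->sem);
	ukp = user_key_payload_locked(key);
	if (!ukp) {
		/* key was revoked before we acquired its semaphore */
		err = -EKEYREVOKED;
		goto out;
	}

	/* ... use ukp->data / ukp->datalen here ... */
out:
	up_read(&key->sem);
	return err;
}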
index 5696a35184e4a3a086573c6020b9f32fcede5fe2..69557c74ef9f8e4dbe1994dce8d1099456717cf2 100644 (file)
@@ -11,7 +11,7 @@
  * ==========================================================================
  *
  *   A finite state machine consists of n states (struct ts_fsm_token)
- *   representing the pattern as a finite automation. The data is read
+ *   representing the pattern as a finite automaton. The data is read
  *   sequentially on an octet basis. Every state token specifies the number
  *   of recurrences and the type of value accepted which can be either a
  *   specific character or ctype based set of characters. The available
index 632f783e65f1cfef531dba412f0568fd9f7849e8..ffbe66cbb0ed60ce9c602ed3d2dc6e4b643155e3 100644 (file)
@@ -27,7 +27,7 @@
  *
  *   [1] Cormen, Leiserson, Rivest, Stein
 *       Introduction to Algorithms, 2nd Edition, MIT Press
- *   [2] See finite automation theory
+ *   [2] See finite automaton theory
  */
 
 #include <linux/module.h>
index d5f3a62887cf958f6b657c0f542f0cf2c3e86e8d..661f046ad3181f65eccfd9bf3832e395e27aa226 100644 (file)
@@ -5828,21 +5828,6 @@ void mem_cgroup_sk_alloc(struct sock *sk)
        if (!mem_cgroup_sockets_enabled)
                return;
 
-       /*
-        * Socket cloning can throw us here with sk_memcg already
-        * filled. It won't however, necessarily happen from
-        * process context. So the test for root memcg given
-        * the current task's memcg won't help us in this case.
-        *
-        * Respecting the original socket's memcg is a better
-        * decision in this case.
-        */
-       if (sk->sk_memcg) {
-               BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
-               css_get(&sk->sk_memcg->css);
-               return;
-       }
-
        rcu_read_lock();
        memcg = mem_cgroup_from_task(current);
        if (memcg == root_mem_cgroup)
index aa121cef76de338441f3dbcf5e3e0d1de9f0aebf..a0e0c82c1e4cd22324e87a86a5df7301f35b8a83 100644 (file)
@@ -1329,7 +1329,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
  * @gfp: allocation flags
  *
  * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
- * contain %GFP_KERNEL, the allocation is atomic.
+ * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
+ * then no warning will be triggered on invalid or failed allocation
+ * requests.
  *
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
@@ -1337,10 +1339,11 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                                 gfp_t gfp)
 {
+       bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
+       bool do_warn = !(gfp & __GFP_NOWARN);
        static int warn_limit = 10;
        struct pcpu_chunk *chunk;
        const char *err;
-       bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
        int slot, off, cpu, ret;
        unsigned long flags;
        void __percpu *ptr;
@@ -1361,7 +1364,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
        if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
                     !is_power_of_2(align))) {
-               WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
+               WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
                     size, align);
                return NULL;
        }
@@ -1482,7 +1485,7 @@ fail_unlock:
 fail:
        trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
 
-       if (!is_atomic && warn_limit) {
+       if (!is_atomic && do_warn && warn_limit) {
                pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
                        size, align, is_atomic, err);
                dump_stack();
@@ -1507,7 +1510,9 @@ fail:
  *
  * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
  * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
- * be called from any context but is a lot more likely to fail.
+ * be called from any context but is a lot more likely to fail. If @gfp
+ * has __GFP_NOWARN then no warning will be triggered on invalid or failed
+ * allocation requests.
  *
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
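
With __GFP_NOWARN honoured, a caller whose allocation size is user-controlled can probe the percpu allocator and handle failure itself rather than triggering the allocator's warning. A hedged sketch of that caller-side pattern (helper name invented):

#include <linux/percpu.h>
#include <linux/bitops.h>

static unsigned long __percpu *example_alloc_bitmap(unsigned int nr_entries)
{
	return __alloc_percpu_gfp(BITS_TO_LONGS(nr_entries) *
				  sizeof(unsigned long),
				  __alignof__(unsigned long),
				  GFP_KERNEL | __GFP_NOWARN);
}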
index 3bc890716c89cb5fdeafcbd3e5062ed9bb6a9882..de21527308093240614481966bed7f43d7af07f3 100644 (file)
@@ -573,7 +573,7 @@ static int br_process_vlan_info(struct net_bridge *br,
                }
                *vinfo_last = NULL;
 
-               return 0;
+               return err;
        }
 
        return br_vlan_info(br, p, cmd, vinfo_curr);
index 88edac0f3e366398d0c1e0de023b90b0669498f9..ecd5c703d11e85c32eaee87321af56da03e7e07d 100644 (file)
@@ -78,7 +78,7 @@ MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
 static struct kmem_cache *rcv_cache __read_mostly;
 
 /* table of registered CAN protocols */
-static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
+static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
 static DEFINE_MUTEX(proto_tab_lock);
 
 static atomic_t skbcounter = ATOMIC_INIT(0);
@@ -788,7 +788,7 @@ int can_proto_register(const struct can_proto *cp)
 
        mutex_lock(&proto_tab_lock);
 
-       if (proto_tab[proto]) {
+       if (rcu_access_pointer(proto_tab[proto])) {
                pr_err("can: protocol %d already registered\n", proto);
                err = -EBUSY;
        } else
@@ -812,7 +812,7 @@ void can_proto_unregister(const struct can_proto *cp)
        int proto = cp->protocol;
 
        mutex_lock(&proto_tab_lock);
-       BUG_ON(proto_tab[proto] != cp);
+       BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
        RCU_INIT_POINTER(proto_tab[proto], NULL);
        mutex_unlock(&proto_tab_lock);
 
@@ -875,9 +875,14 @@ static int can_pernet_init(struct net *net)
        spin_lock_init(&net->can.can_rcvlists_lock);
        net->can.can_rx_alldev_list =
                kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
-
+       if (!net->can.can_rx_alldev_list)
+               goto out;
        net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL);
+       if (!net->can.can_stats)
+               goto out_free_alldev_list;
        net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL);
+       if (!net->can.can_pstats)
+               goto out_free_can_stats;
 
        if (IS_ENABLED(CONFIG_PROC_FS)) {
                /* the statistics are updated every second (timer triggered) */
@@ -892,6 +897,13 @@ static int can_pernet_init(struct net *net)
        }
 
        return 0;
+
+ out_free_can_stats:
+       kfree(net->can.can_stats);
+ out_free_alldev_list:
+       kfree(net->can.can_rx_alldev_list);
+ out:
+       return -ENOMEM;
 }
 
 static void can_pernet_exit(struct net *net)
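
With proto_tab[] annotated __rcu, a pure existence test can use rcu_access_pointer() (no dereference, so no RCU read lock is required), while readers that actually follow the pointer keep using rcu_dereference() under rcu_read_lock(). A small sketch of the former (function name invented):

static bool example_proto_registered(int proto)
{
	/* Existence check only: the pointer value is not dereferenced. */
	return rcu_access_pointer(proto_tab[proto]) != NULL;
}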
index 47a8748d953afbf460238804d3d7d3d8b087f474..13690334efa31b978cff2ed6432af626f00cab76 100644 (file)
@@ -1493,13 +1493,14 @@ static int bcm_init(struct sock *sk)
 static int bcm_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
-       struct net *net = sock_net(sk);
+       struct net *net;
        struct bcm_sock *bo;
        struct bcm_op *op, *next;
 
-       if (sk == NULL)
+       if (!sk)
                return 0;
 
+       net = sock_net(sk);
        bo = bcm_sk(sk);
 
        /* remove bcm_ops, timer, rx_unregister(), etc. */
index 588b473194a8a8ce23bde368b2b0350a3317db8b..11596a302a265212cb5dfe40f51b5b01fb20d0ce 100644 (file)
@@ -1147,9 +1147,8 @@ static int dev_alloc_name_ns(struct net *net,
        return ret;
 }
 
-static int dev_get_valid_name(struct net *net,
-                             struct net_device *dev,
-                             const char *name)
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+                      const char *name)
 {
        BUG_ON(!net);
 
@@ -1165,6 +1164,7 @@ static int dev_get_valid_name(struct net *net,
 
        return 0;
 }
+EXPORT_SYMBOL(dev_get_valid_name);
 
 /**
  *     dev_change_name - change name of a device
index 709a4e6fb447fda886046308de5b613a88ff9dfa..f9c7a88cd98183fd10e4b00bf84e8ec5452c88a8 100644 (file)
@@ -303,7 +303,18 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
        case SIOCSIFTXQLEN:
                if (ifr->ifr_qlen < 0)
                        return -EINVAL;
-               dev->tx_queue_len = ifr->ifr_qlen;
+               if (dev->tx_queue_len ^ ifr->ifr_qlen) {
+                       unsigned int orig_len = dev->tx_queue_len;
+
+                       dev->tx_queue_len = ifr->ifr_qlen;
+                       err = call_netdevice_notifiers(
+                                       NETDEV_CHANGE_TX_QUEUE_LEN, dev);
+                       err = notifier_to_errno(err);
+                       if (err) {
+                               dev->tx_queue_len = orig_len;
+                               return err;
+                       }
+               }
                return 0;
 
        case SIOCSIFNAME:
index 3228411ada0fa77e2796b733a695826282d82df9..9a9a3d77e3274fc3e115fe73470f18bc93be6364 100644 (file)
@@ -436,7 +436,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
 EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32);
 
 /* return false if legacy contained non-0 deprecated fields
- * transceiver/maxtxpkt/maxrxpkt. rest of ksettings always updated
+ * maxtxpkt/maxrxpkt. rest of ksettings always updated
  */
 static bool
 convert_legacy_settings_to_link_ksettings(
@@ -451,8 +451,7 @@ convert_legacy_settings_to_link_ksettings(
         * deprecated legacy fields, and they should not use
         * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS
         */
-       if (legacy_settings->transceiver ||
-           legacy_settings->maxtxpkt ||
+       if (legacy_settings->maxtxpkt ||
            legacy_settings->maxrxpkt)
                retval = false;
 
index 74b8c91fb5f4461da58c73568976bc9834c4612b..aa0265997f930c86229ee1658063ba9718c6dc79 100644 (file)
@@ -1839,31 +1839,31 @@ static const struct bpf_func_proto bpf_redirect_proto = {
        .arg2_type      = ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_sk_redirect_map, struct bpf_map *, map, u32, key, u64, flags)
+BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
+          struct bpf_map *, map, u32, key, u64, flags)
 {
-       struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+       struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 
        if (unlikely(flags))
                return SK_ABORTED;
 
-       ri->ifindex = key;
-       ri->flags = flags;
-       ri->map = map;
+       tcb->bpf.key = key;
+       tcb->bpf.flags = flags;
+       tcb->bpf.map = map;
 
        return SK_REDIRECT;
 }
 
-struct sock *do_sk_redirect_map(void)
+struct sock *do_sk_redirect_map(struct sk_buff *skb)
 {
-       struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+       struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
        struct sock *sk = NULL;
 
-       if (ri->map) {
-               sk = __sock_map_lookup_elem(ri->map, ri->ifindex);
+       if (tcb->bpf.map) {
+               sk = __sock_map_lookup_elem(tcb->bpf.map, tcb->bpf.key);
 
-               ri->ifindex = 0;
-               ri->map = NULL;
-               /* we do not clear flags for future lookup */
+               tcb->bpf.key = 0;
+               tcb->bpf.map = NULL;
        }
 
        return sk;
@@ -1873,9 +1873,10 @@ static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
        .func           = bpf_sk_redirect_map,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
-       .arg1_type      = ARG_CONST_MAP_PTR,
-       .arg2_type      = ARG_ANYTHING,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
+       .arg4_type      = ARG_ANYTHING,
 };
 
 BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
@@ -3683,7 +3684,6 @@ static bool sk_skb_is_valid_access(int off, int size,
 {
        if (type == BPF_WRITE) {
                switch (off) {
-               case bpf_ctx_range(struct __sk_buff, mark):
                case bpf_ctx_range(struct __sk_buff, tc_index):
                case bpf_ctx_range(struct __sk_buff, priority):
                        break;
@@ -3693,6 +3693,7 @@ static bool sk_skb_is_valid_access(int off, int size,
        }
 
        switch (off) {
+       case bpf_ctx_range(struct __sk_buff, mark):
        case bpf_ctx_range(struct __sk_buff, tc_classid):
                return false;
        case bpf_ctx_range(struct __sk_buff, data):
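
With the helper now taking the skb as its context argument, an SK_SKB verdict program looks roughly like the following sketch (map layout, key choice and section name are illustrative; bpf_map_def, SEC() and the bpf_sk_redirect_map() wrapper are assumed to come from the kernel's selftests bpf_helpers.h):

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") sock_map = {
	.type		= BPF_MAP_TYPE_SOCKMAP,
	.key_size	= sizeof(int),
	.value_size	= sizeof(int),
	.max_entries	= 16,
};

SEC("sk_skb")
int example_verdict(struct __sk_buff *skb)
{
	/* Redirect every skb to the socket stored at key 0; the skb is
	 * the first argument, so the map/key now travel in the skb's cb.
	 */
	return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
}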
index d4bcdcc68e9268f0c6b4bbf918d5af2e6b71be4e..5ace48926b196666265a7f95b77779cbdd1ff848 100644 (file)
@@ -1483,7 +1483,10 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_LINKINFO]         = { .type = NLA_NESTED },
        [IFLA_NET_NS_PID]       = { .type = NLA_U32 },
        [IFLA_NET_NS_FD]        = { .type = NLA_U32 },
-       [IFLA_IFALIAS]          = { .type = NLA_STRING, .len = IFALIASZ-1 },
+       /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
+        * allow 0-length string (needed to remove an alias).
+        */
+       [IFLA_IFALIAS]          = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
        [IFLA_VFINFO_LIST]      = {. type = NLA_NESTED },
        [IFLA_VF_PORTS]         = { .type = NLA_NESTED },
        [IFLA_PORT_SELF]        = { .type = NLA_NESTED },
@@ -2093,7 +2096,7 @@ static int do_setlink(const struct sk_buff *skb,
                                dev->tx_queue_len = orig_len;
                                goto errout;
                        }
-                       status |= DO_SETLINK_NOTIFY;
+                       status |= DO_SETLINK_MODIFIED;
                }
        }
 
@@ -2248,7 +2251,7 @@ static int do_setlink(const struct sk_buff *skb,
 
 errout:
        if (status & DO_SETLINK_MODIFIED) {
-               if (status & DO_SETLINK_NOTIFY)
+               if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
                        netdev_state_change(dev);
 
                if (err < 0)
@@ -4279,13 +4282,17 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
 
        switch (event) {
        case NETDEV_REBOOT:
+       case NETDEV_CHANGEMTU:
        case NETDEV_CHANGEADDR:
        case NETDEV_CHANGENAME:
        case NETDEV_FEAT_CHANGE:
        case NETDEV_BONDING_FAILOVER:
+       case NETDEV_POST_TYPE_CHANGE:
        case NETDEV_NOTIFY_PEERS:
+       case NETDEV_CHANGEUPPER:
        case NETDEV_RESEND_IGMP:
        case NETDEV_CHANGEINFODATA:
+       case NETDEV_CHANGE_TX_QUEUE_LEN:
                rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
                                   GFP_KERNEL);
                break;
index 16982de649b97b92423a4f9f5eac1e98ca803370..24656076906d2a0f3b70b030c977e854caba2487 100644 (file)
@@ -1124,9 +1124,13 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
 
        err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
        if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
+               struct sock *save_sk = skb->sk;
+
                /* Streams do not free skb on error. Reset to prev state. */
                msg->msg_iter = orig_iter;
+               skb->sk = sk;
                ___pskb_trim(skb, orig_len);
+               skb->sk = save_sk;
                return err;
        }
 
@@ -1896,7 +1900,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
        }
 
        /* If we need update frag list, we are in troubles.
-        * Certainly, it possible to add an offset to skb data,
+        * Certainly, it is possible to add an offset to skb data,
         * but taking into account that pulling is expected to
         * be very rare operation, it is worth to fight against
         * further bloating skb head and crucify ourselves here instead.
index 23953b741a41fbcf4a6ffb0dd5bf05bd5266b99d..415f441c63b9e2ff8feb010f44ca27303c72aaa1 100644 (file)
@@ -1677,12 +1677,17 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                newsk->sk_dst_pending_confirm = 0;
                newsk->sk_wmem_queued   = 0;
                newsk->sk_forward_alloc = 0;
+
+               /* sk->sk_memcg will be populated at accept() time */
+               newsk->sk_memcg = NULL;
+
                atomic_set(&newsk->sk_drops, 0);
                newsk->sk_send_head     = NULL;
                newsk->sk_userlocks     = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
                atomic_set(&newsk->sk_zckey, 0);
 
                sock_reset_flag(newsk, SOCK_DONE);
+               cgroup_sk_alloc(&newsk->sk_cgrp_data);
 
                rcu_read_lock();
                filter = rcu_dereference(sk->sk_filter);
@@ -1714,9 +1719,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                newsk->sk_incoming_cpu = raw_smp_processor_id();
                atomic64_set(&newsk->sk_cookie, 0);
 
-               mem_cgroup_sk_alloc(newsk);
-               cgroup_sk_alloc(&newsk->sk_cgrp_data);
-
                /*
                 * Before updating sk_refcnt, we must commit prior changes to memory
                 * (Documentation/RCU/rculist_nulls.txt for details)
index eed1ebf7f29d0fac552074b127e5636fecede65f..b1e0dbea1e8cac4283aa2e659ac4f23d9c291a36 100644 (file)
@@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
         * soft irq of receive path or setsockopt from process context
         */
        spin_lock_bh(&reuseport_lock);
-       WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
-                                           lockdep_is_held(&reuseport_lock)),
-                 "multiple allocations for the same socket");
+
+       /* Allocation attempts can occur concurrently via the setsockopt path
+        * and the bind/hash path.  Nothing to do when we lose the race.
+        */
+       if (rcu_dereference_protected(sk->sk_reuseport_cb,
+                                     lockdep_is_held(&reuseport_lock)))
+               goto out;
+
        reuse = __reuseport_alloc(INIT_SOCKS);
        if (!reuse) {
                spin_unlock_bh(&reuseport_lock);
@@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
        reuse->num_socks = 1;
        rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
 
+out:
        spin_unlock_bh(&reuseport_lock);
 
        return 0;
index 001c08696334bba0ceb896c116e595b814af0667..0490916864f93d5466e87f5b97dc524b3ee57a2e 100644 (file)
@@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
        sk_daddr_set(newsk, ireq->ir_rmt_addr);
        sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
        newinet->inet_saddr     = ireq->ir_loc_addr;
-       newinet->inet_opt       = ireq->opt;
-       ireq->opt          = NULL;
+       RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
        newinet->mc_index  = inet_iif(skb);
        newinet->mc_ttl    = ip_hdr(skb)->ttl;
        newinet->inet_id   = jiffies;
@@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
        if (__inet_inherit_port(sk, newsk) < 0)
                goto put_and_exit;
        *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
-
+       if (*own_req)
+               ireq->ireq_opt = NULL;
+       else
+               newinet->inet_opt = NULL;
        return newsk;
 
 exit_overflow:
@@ -441,6 +443,7 @@ exit:
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
 put_and_exit:
+       newinet->inet_opt = NULL;
        inet_csk_prepare_forced_close(newsk);
        dccp_done(newsk);
        goto exit;
@@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
                                                              ireq->ir_rmt_addr);
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
-                                           ireq->opt);
+                                           rcu_dereference(ireq->ireq_opt));
                err = net_xmit_eval(err);
        }
 
@@ -548,7 +551,7 @@ out:
 static void dccp_v4_reqsk_destructor(struct request_sock *req)
 {
        dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
-       kfree(inet_rsk(req)->opt);
+       kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
 }
 
 void dccp_syn_ack_timeout(const struct request_sock *req)
index 8737412c7b27f33125c14c8aa6e64c4ce5cd2dc8..e1d4d898a007df9ba0911fd612caac4a48a50dc2 100644 (file)
@@ -224,7 +224,7 @@ static int dns_resolver_match_preparse(struct key_match_data *match_data)
 static void dns_resolver_describe(const struct key *key, struct seq_file *m)
 {
        seq_puts(m, key->description);
-       if (key_is_instantiated(key)) {
+       if (key_is_positive(key)) {
                int err = PTR_ERR(key->payload.data[dns_key_error]);
 
                if (err)
index 91a2557942fa8533564943f1f8e8d9df4d7df141..f48fe6fc7e8c413d7d7e4d7d37d1d859a566e8fb 100644 (file)
@@ -70,11 +70,9 @@ config IP_MULTIPLE_TABLES
          address into account. Furthermore, the TOS (Type-Of-Service) field
          of the packet can be used for routing decisions as well.
 
-         If you are interested in this, please see the preliminary
-         documentation at <http://www.compendium.com.ar/policy-routing.txt>
-         and <ftp://post.tepkom.ru/pub/vol2/Linux/docs/advanced-routing.tex>.
-         You will need supporting software from
-         <ftp://ftp.tux.org/pub/net/ip-routing/>.
+         If you need more information, see the Linux Advanced
+         Routing and Traffic Control documentation at
+         <http://lartc.org/howto/lartc.rpdb.html>
 
          If unsure, say N.
 
index 2ae8f54cb32148f2499f78ecbf29259db36bd207..82178cc69c9618bae69c096290a7a96a8b8bade0 100644 (file)
@@ -1951,7 +1951,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
        buf = NULL;
 
        req_inet = inet_rsk(req);
-       opt = xchg(&req_inet->opt, opt);
+       opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
        if (opt)
                kfree_rcu(opt, rcu);
 
@@ -1973,11 +1973,13 @@ req_setattr_failure:
  * values on failure.
  *
  */
-static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
+static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
 {
+       struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
        int hdr_delta = 0;
-       struct ip_options_rcu *opt = *opt_ptr;
 
+       if (!opt || opt->opt.cipso == 0)
+               return 0;
        if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
                u8 cipso_len;
                u8 cipso_off;
@@ -2039,14 +2041,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
  */
 void cipso_v4_sock_delattr(struct sock *sk)
 {
-       int hdr_delta;
-       struct ip_options_rcu *opt;
        struct inet_sock *sk_inet;
+       int hdr_delta;
 
        sk_inet = inet_sk(sk);
-       opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
-       if (!opt || opt->opt.cipso == 0)
-               return;
 
        hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
        if (sk_inet->is_icsk && hdr_delta > 0) {
@@ -2066,15 +2064,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
  */
 void cipso_v4_req_delattr(struct request_sock *req)
 {
-       struct ip_options_rcu *opt;
-       struct inet_request_sock *req_inet;
-
-       req_inet = inet_rsk(req);
-       opt = req_inet->opt;
-       if (!opt || opt->opt.cipso == 0)
-               return;
-
-       cipso_v4_delopt(&req_inet->opt);
+       cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
 }
 
 /**
index c039c937ba90c7aec39ba2687bceb8253ead70aa..5ec9136a7c36933cb36e5cd50058eb6cf189a7c3 100644 (file)
@@ -475,6 +475,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
                }
                spin_unlock_bh(&queue->fastopenq.lock);
        }
+       mem_cgroup_sk_alloc(newsk);
 out:
        release_sock(sk);
        if (req)
@@ -539,9 +540,10 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct net *net = read_pnet(&ireq->ireq_net);
-       struct ip_options_rcu *opt = ireq->opt;
+       struct ip_options_rcu *opt;
        struct rtable *rt;
 
+       opt = rcu_dereference(ireq->ireq_opt);
        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -575,10 +577,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
        struct flowi4 *fl4;
        struct rtable *rt;
 
+       opt = rcu_dereference(ireq->ireq_opt);
        fl4 = &newinet->cork.fl.u.ip4;
 
-       rcu_read_lock();
-       opt = rcu_dereference(newinet->inet_opt);
        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -591,13 +592,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
-       rcu_read_unlock();
        return &rt->dst;
 
 route_err:
        ip_rt_put(rt);
 no_route:
-       rcu_read_unlock();
        __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
 }
index 597bb4cfe805281a5a7cb1f8d1334c2103d34dc8..e7d15fb0d94d9790675356d3144d204b91eab984 100644 (file)
@@ -456,10 +456,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
                        return reuseport_add_sock(sk, sk2);
        }
 
-       /* Initial allocation may have already happened via setsockopt */
-       if (!rcu_access_pointer(sk->sk_reuseport_cb))
-               return reuseport_alloc(sk);
-       return 0;
+       return reuseport_alloc(sk);
 }
 
 int __inet_hash(struct sock *sk, struct sock *osk)
index b1bb1b3a108232d56aa82383422d68b5ff9da3ed..77cf32a80952fcf3ceff4ada946cc2d0df2411d9 100644 (file)
@@ -355,7 +355,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
        /* We threw the options of the initial SYN away, so we hope
         * the ACK carries the same options again (see RFC1122 4.2.3.8)
         */
-       ireq->opt = tcp_v4_save_options(sock_net(sk), skb);
+       RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(sock_net(sk), skb));
 
        if (security_inet_conn_request(sk, skb, req)) {
                reqsk_free(req);
index c5d7656beeee29b3c92e1c8824dbf00d3fa32d28..7eec3383702bbab497a12095b55d255532ad5f60 100644 (file)
@@ -6196,7 +6196,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
                struct inet_request_sock *ireq = inet_rsk(req);
 
                kmemcheck_annotate_bitfield(ireq, flags);
-               ireq->opt = NULL;
+               ireq->ireq_opt = NULL;
 #if IS_ENABLED(CONFIG_IPV6)
                ireq->pktopts = NULL;
 #endif
index 85164d4d3e537537c87d74c00172592c860d4dfb..4c43365c374c8bf868fc0b862333244ca26d5016 100644 (file)
@@ -877,7 +877,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
-                                           ireq->opt);
+                                           rcu_dereference(ireq->ireq_opt));
                err = net_xmit_eval(err);
        }
 
@@ -889,7 +889,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
  */
 static void tcp_v4_reqsk_destructor(struct request_sock *req)
 {
-       kfree(inet_rsk(req)->opt);
+       kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
 }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -1265,10 +1265,11 @@ static void tcp_v4_init_req(struct request_sock *req,
                            struct sk_buff *skb)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
+       struct net *net = sock_net(sk_listener);
 
        sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
        sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
-       ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb);
+       RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
 }
 
 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
@@ -1355,10 +1356,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        sk_daddr_set(newsk, ireq->ir_rmt_addr);
        sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
        newsk->sk_bound_dev_if = ireq->ir_iif;
-       newinet->inet_saddr           = ireq->ir_loc_addr;
-       inet_opt              = ireq->opt;
-       rcu_assign_pointer(newinet->inet_opt, inet_opt);
-       ireq->opt             = NULL;
+       newinet->inet_saddr   = ireq->ir_loc_addr;
+       inet_opt              = rcu_dereference(ireq->ireq_opt);
+       RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
        newinet->mc_index     = inet_iif(skb);
        newinet->mc_ttl       = ip_hdr(skb)->ttl;
        newinet->rcv_tos      = ip_hdr(skb)->tos;
@@ -1403,9 +1403,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        if (__inet_inherit_port(sk, newsk) < 0)
                goto put_and_exit;
        *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
-       if (*own_req)
+       if (likely(*own_req)) {
                tcp_move_syn(newtp, req);
-
+               ireq->ireq_opt = NULL;
+       } else {
+               newinet->inet_opt = NULL;
+       }
        return newsk;
 
 exit_overflow:
@@ -1416,6 +1419,7 @@ exit:
        tcp_listendrop(sk);
        return NULL;
 put_and_exit:
+       newinet->inet_opt = NULL;
        inet_csk_prepare_forced_close(newsk);
        tcp_done(newsk);
        goto exit;
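The ireq_opt handling above is an ownership hand-off: on successful hashing the request drops its pointer, on failure the new socket does, so exactly one owner frees the options. A small user-space sketch of that pattern, with illustrative names not taken from the patch:

	#include <stdlib.h>
	#include <string.h>

	struct req  { char *opt; };
	struct sock { char *opt; };

	/* Hand the option blob from the request to the new socket; exactly one
	 * owner remains, so exactly one free() runs later. */
	static int adopt_options(struct req *req, struct sock *sk, int hashed)
	{
		sk->opt = req->opt;
		if (hashed) {
			req->opt = NULL;	/* success: request drops ownership */
			return 0;
		}
		sk->opt = NULL;			/* failure: ownership stays with req */
		return -1;
	}

	int main(void)
	{
		struct req r = { .opt = strdup("srr") };
		struct sock s = { 0 };

		adopt_options(&r, &s, 1);
		free(s.opt);			/* r.opt is NULL, no double free */
		return 0;
	}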
index e45177ceb0ee514ed173f2a899d35e0bc1807f77..ebfbccae62fde187ec5863670c03cd5b5c96258b 100644 (file)
@@ -231,10 +231,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
                }
        }
 
-       /* Initial allocation may have already happened via setsockopt */
-       if (!rcu_access_pointer(sk->sk_reuseport_cb))
-               return reuseport_alloc(sk);
-       return 0;
+       return reuseport_alloc(sk);
 }
 
 /**
@@ -1061,7 +1058,7 @@ back_from_confirm:
                /* ... which is an evident application bug. --ANK */
                release_sock(sk);
 
-               net_dbg_ratelimited("cork app bug 2\n");
+               net_dbg_ratelimited("socket already corked\n");
                err = -EINVAL;
                goto out;
        }
@@ -1144,7 +1141,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
        if (unlikely(!up->pending)) {
                release_sock(sk);
 
-               net_dbg_ratelimited("udp cork app bug 3\n");
+               net_dbg_ratelimited("cork failed\n");
                return -EINVAL;
        }
 
index 8081bafe441b83f60f414114bfdc3529d6ea0a09..15535ee327c5780e80feb050c2ab4e0d1cc3e99c 100644 (file)
@@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
        }
        opt_space->dst1opt = fopt->dst1opt;
        opt_space->opt_flen = fopt->opt_flen;
+       opt_space->tot_len = fopt->tot_len;
        return opt_space;
 }
 EXPORT_SYMBOL_GPL(fl6_merge_options);
index 43ca864327c73015f1724879d7ee8268a0de513b..5110a418cc4d0c1040506394460cb482698d8c15 100644 (file)
@@ -1161,11 +1161,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
                if (WARN_ON(v6_cork->opt))
                        return -EINVAL;
 
-               v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
+               v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
                if (unlikely(!v6_cork->opt))
                        return -ENOBUFS;
 
-               v6_cork->opt->tot_len = opt->tot_len;
+               v6_cork->opt->tot_len = sizeof(*opt);
                v6_cork->opt->opt_flen = opt->opt_flen;
                v6_cork->opt->opt_nflen = opt->opt_nflen;
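The fix above allocates sizeof(*opt) instead of opt->tot_len, since tot_len describes option payload and can be smaller than the structure being copied into. A standalone sketch of the safe pattern (illustrative struct and function names):

	#include <stdlib.h>

	struct txopts { int tot_len; int opt_flen; int opt_nflen; };

	static struct txopts *cork_opts(const struct txopts *opt)
	{
		/* size the allocation from the type, not from a caller-supplied length */
		struct txopts *copy = calloc(1, sizeof(*copy));

		if (!copy)
			return NULL;
		copy->tot_len   = sizeof(*copy);
		copy->opt_flen  = opt->opt_flen;
		copy->opt_nflen = opt->opt_nflen;
		return copy;
	}

	int main(void)
	{
		struct txopts opt = { .tot_len = 4, .opt_flen = 8 };
		struct txopts *c = cork_opts(&opt);

		free(c);
		return 0;
	}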
 
index bc6e8bfc5be4997e421c0b5326a8b37e2ffc9901..f50452b919d5ece9d70e2c25634bc8fef9962e6b 100644 (file)
@@ -988,6 +988,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
                 session->name, cmd, arg);
 
        sk = ps->sock;
+       if (!sk)
+               return -EBADR;
+
        sock_hold(sk);
 
        switch (cmd) {
index a98fc2b5e0dc94664a19ba319099385276212c44..ae995c8480db9639e1070a6f4743f1e6d6213e81 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright 2015      Intel Deutschland GmbH
+ * Copyright 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -620,9 +620,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
 
        pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
        idx = key->conf.keyidx;
-       key->local = sdata->local;
-       key->sdata = sdata;
-       key->sta = sta;
 
        mutex_lock(&sdata->local->key_mtx);
 
@@ -633,6 +630,21 @@ int ieee80211_key_link(struct ieee80211_key *key,
        else
                old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
 
+       /*
+        * Silently accept key re-installation without really installing the
+        * new version of the key to avoid nonce reuse or replay issues.
+        */
+       if (old_key && key->conf.keylen == old_key->conf.keylen &&
+           !memcmp(key->conf.key, old_key->conf.key, key->conf.keylen)) {
+               ieee80211_key_free_unused(key);
+               ret = 0;
+               goto out;
+       }
+
+       key->local = sdata->local;
+       key->sdata = sdata;
+       key->sta = sta;
+
        increment_tailroom_need_count(sdata);
 
        ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
@@ -648,6 +660,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
                ret = 0;
        }
 
+ out:
        mutex_unlock(&sdata->local->key_mtx);
 
        return ret;
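The mac80211 change above accepts a byte-identical key without reinstalling it, because a reinstall would reset packet counters and allow nonce reuse. A minimal user-space sketch of that comparison (names are illustrative; the real code also swaps and frees the old key under key_mtx):

	#include <string.h>
	#include <stdio.h>

	struct wkey { size_t len; unsigned char data[32]; };

	static int key_link_sketch(struct wkey *installed, const struct wkey *new)
	{
		if (installed->len == new->len &&
		    !memcmp(installed->data, new->data, new->len))
			return 0;		/* silently accept, nothing to do */
		*installed = *new;		/* otherwise install the new key */
		return 1;
	}

	int main(void)
	{
		struct wkey a = { .len = 4, .data = "abc" };

		printf("%d\n", key_link_sketch(&a, &a));	/* prints 0 */
		return 0;
	}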
index af3d636534efb8b81fc47547f2fb027f61f5be34..d30f7bd741d0610cd261b4514baf0b1cbf20c607 100644 (file)
@@ -286,6 +286,7 @@ struct ncsi_dev_priv {
        struct work_struct  work;            /* For channel management     */
        struct packet_type  ptype;           /* NCSI packet Rx handler     */
        struct list_head    node;            /* Form NCSI device list      */
+#define NCSI_MAX_VLAN_VIDS     15
        struct list_head    vlan_vids;       /* List of active VLAN IDs */
 };
 
index 6898e7229285a6720115a37d58673f569d4a9bff..f135938bf781eb9c135648599ce06e535c780390 100644 (file)
@@ -187,7 +187,7 @@ static struct ncsi_aen_handler {
 } ncsi_aen_handlers[] = {
        { NCSI_PKT_AEN_LSC,    12, ncsi_aen_handler_lsc    },
        { NCSI_PKT_AEN_CR,      4, ncsi_aen_handler_cr     },
-       { NCSI_PKT_AEN_HNCDSC,  4, ncsi_aen_handler_hncdsc }
+       { NCSI_PKT_AEN_HNCDSC,  8, ncsi_aen_handler_hncdsc }
 };
 
 int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb)
index 3fd3c39e627836117f250fc7d5037415491ee6f5..28c42b22b7489efa1d7fffd51eee2a146ecad636 100644 (file)
@@ -189,6 +189,7 @@ static void ncsi_channel_monitor(unsigned long data)
        struct ncsi_channel *nc = (struct ncsi_channel *)data;
        struct ncsi_package *np = nc->package;
        struct ncsi_dev_priv *ndp = np->ndp;
+       struct ncsi_channel_mode *ncm;
        struct ncsi_cmd_arg nca;
        bool enabled, chained;
        unsigned int monitor_state;
@@ -202,11 +203,15 @@ static void ncsi_channel_monitor(unsigned long data)
        monitor_state = nc->monitor.state;
        spin_unlock_irqrestore(&nc->lock, flags);
 
-       if (!enabled || chained)
+       if (!enabled || chained) {
+               ncsi_stop_channel_monitor(nc);
                return;
+       }
        if (state != NCSI_CHANNEL_INACTIVE &&
-           state != NCSI_CHANNEL_ACTIVE)
+           state != NCSI_CHANNEL_ACTIVE) {
+               ncsi_stop_channel_monitor(nc);
                return;
+       }
 
        switch (monitor_state) {
        case NCSI_CHANNEL_MONITOR_START:
@@ -217,28 +222,28 @@ static void ncsi_channel_monitor(unsigned long data)
                nca.type = NCSI_PKT_CMD_GLS;
                nca.req_flags = 0;
                ret = ncsi_xmit_cmd(&nca);
-               if (ret) {
+               if (ret)
                        netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
                                   ret);
-                       return;
-               }
-
                break;
        case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
                break;
        default:
-               if (!(ndp->flags & NCSI_DEV_HWA) &&
-                   state == NCSI_CHANNEL_ACTIVE) {
+               if (!(ndp->flags & NCSI_DEV_HWA)) {
                        ncsi_report_link(ndp, true);
                        ndp->flags |= NCSI_DEV_RESHUFFLE;
                }
 
+               ncsi_stop_channel_monitor(nc);
+
+               ncm = &nc->modes[NCSI_MODE_LINK];
                spin_lock_irqsave(&nc->lock, flags);
                nc->state = NCSI_CHANNEL_INVISIBLE;
+               ncm->data[2] &= ~0x1;
                spin_unlock_irqrestore(&nc->lock, flags);
 
                spin_lock_irqsave(&ndp->lock, flags);
-               nc->state = NCSI_CHANNEL_INACTIVE;
+               nc->state = NCSI_CHANNEL_ACTIVE;
                list_add_tail_rcu(&nc->link, &ndp->channel_queue);
                spin_unlock_irqrestore(&ndp->lock, flags);
                ncsi_process_next_channel(ndp);
@@ -732,6 +737,10 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
        if (index < 0) {
                netdev_err(ndp->ndev.dev,
                           "Failed to add new VLAN tag, error %d\n", index);
+               if (index == -ENOSPC)
+                       netdev_err(ndp->ndev.dev,
+                                  "Channel %u already has all VLAN filters set\n",
+                                  nc->id);
                return -1;
        }
 
@@ -998,12 +1007,15 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
        struct ncsi_package *np;
        struct ncsi_channel *nc;
        unsigned int cap;
+       bool has_channel = false;
 
        /* The hardware arbitration is disabled if any one channel
         * doesn't explicitly support it.
         */
        NCSI_FOR_EACH_PACKAGE(ndp, np) {
                NCSI_FOR_EACH_CHANNEL(np, nc) {
+                       has_channel = true;
+
                        cap = nc->caps[NCSI_CAP_GENERIC].cap;
                        if (!(cap & NCSI_CAP_GENERIC_HWA) ||
                            (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
@@ -1014,8 +1026,13 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
                }
        }
 
-       ndp->flags |= NCSI_DEV_HWA;
-       return true;
+       if (has_channel) {
+               ndp->flags |= NCSI_DEV_HWA;
+               return true;
+       }
+
+       ndp->flags &= ~NCSI_DEV_HWA;
+       return false;
 }
 
 static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
@@ -1403,7 +1420,6 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
 
 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
-       struct ncsi_channel_filter *ncf;
        struct ncsi_dev_priv *ndp;
        unsigned int n_vids = 0;
        struct vlan_vid *vlan;
@@ -1420,7 +1436,6 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
        }
 
        ndp = TO_NCSI_DEV_PRIV(nd);
-       ncf = ndp->hot_channel->filters[NCSI_FILTER_VLAN];
 
        /* Add the VLAN id to our internal list */
        list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
@@ -1431,12 +1446,11 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
                        return 0;
                }
        }
-
-       if (n_vids >= ncf->total) {
-               netdev_info(dev,
-                           "NCSI Channel supports up to %u VLAN tags but %u are already set\n",
-                           ncf->total, n_vids);
-               return -EINVAL;
+       if (n_vids >= NCSI_MAX_VLAN_VIDS) {
+               netdev_warn(dev,
+                           "tried to add vlan id %u but NCSI max already registered (%u)\n",
+                           vid, NCSI_MAX_VLAN_VIDS);
+               return -ENOSPC;
        }
 
        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
index 265b9a892d4171bc74572891edb631dabac98c41..927dad4759d1c9b23e4037b218152c267bafa7d9 100644 (file)
@@ -959,7 +959,7 @@ static struct ncsi_rsp_handler {
        { NCSI_PKT_RSP_EGMF,    4, ncsi_rsp_handler_egmf    },
        { NCSI_PKT_RSP_DGMF,    4, ncsi_rsp_handler_dgmf    },
        { NCSI_PKT_RSP_SNFC,    4, ncsi_rsp_handler_snfc    },
-       { NCSI_PKT_RSP_GVI,    36, ncsi_rsp_handler_gvi     },
+       { NCSI_PKT_RSP_GVI,    40, ncsi_rsp_handler_gvi     },
        { NCSI_PKT_RSP_GC,     32, ncsi_rsp_handler_gc      },
        { NCSI_PKT_RSP_GP,     -1, ncsi_rsp_handler_gp      },
        { NCSI_PKT_RSP_GCPS,  172, ncsi_rsp_handler_gcps    },
index f34750691c5c0e24aef976f59041d44e8342d2a0..b93148e8e9fb2dc9a22cccf34d168e99b55042de 100644 (file)
@@ -2307,6 +2307,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
        size_t tlvlen = 0;
        struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
        unsigned int flags = 0;
+       bool nlk_has_extack = nlk->flags & NETLINK_F_EXT_ACK;
 
        /* Error messages get the original request appended, unless the user
         * requests to cap the error message, and get extra error data if
@@ -2317,7 +2318,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
                        payload += nlmsg_len(nlh);
                else
                        flags |= NLM_F_CAPPED;
-               if (nlk->flags & NETLINK_F_EXT_ACK && extack) {
+               if (nlk_has_extack && extack) {
                        if (extack->_msg)
                                tlvlen += nla_total_size(strlen(extack->_msg) + 1);
                        if (extack->bad_attr)
@@ -2326,8 +2327,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
        } else {
                flags |= NLM_F_CAPPED;
 
-               if (nlk->flags & NETLINK_F_EXT_ACK &&
-                   extack && extack->cookie_len)
+               if (nlk_has_extack && extack && extack->cookie_len)
                        tlvlen += nla_total_size(extack->cookie_len);
        }
 
@@ -2355,7 +2355,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
        errmsg->error = err;
        memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
 
-       if (nlk->flags & NETLINK_F_EXT_ACK && extack) {
+       if (nlk_has_extack && extack) {
                if (err) {
                        if (extack->_msg)
                                WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG,
index bec01a3daf5b02bd716dbff5c9efef8d6a7982be..2986941164b1952b3b6014ff81d2986b504c334a 100644 (file)
@@ -1769,7 +1769,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 
 out:
        if (err && rollover) {
-               kfree(rollover);
+               kfree_rcu(rollover, rcu);
                po->rollover = NULL;
        }
        mutex_unlock(&fanout_mutex);
@@ -1796,8 +1796,10 @@ static struct packet_fanout *fanout_release(struct sock *sk)
                else
                        f = NULL;
 
-               if (po->rollover)
+               if (po->rollover) {
                        kfree_rcu(po->rollover, rcu);
+                       po->rollover = NULL;
+               }
        }
        mutex_unlock(&fanout_mutex);
 
@@ -3851,6 +3853,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
        void *data = &val;
        union tpacket_stats_u st;
        struct tpacket_rollover_stats rstats;
+       struct packet_rollover *rollover;
 
        if (level != SOL_PACKET)
                return -ENOPROTOOPT;
@@ -3929,13 +3932,18 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
                       0);
                break;
        case PACKET_ROLLOVER_STATS:
-               if (!po->rollover)
+               rcu_read_lock();
+               rollover = rcu_dereference(po->rollover);
+               if (rollover) {
+                       rstats.tp_all = atomic_long_read(&rollover->num);
+                       rstats.tp_huge = atomic_long_read(&rollover->num_huge);
+                       rstats.tp_failed = atomic_long_read(&rollover->num_failed);
+                       data = &rstats;
+                       lv = sizeof(rstats);
+               }
+               rcu_read_unlock();
+               if (!rollover)
                        return -EINVAL;
-               rstats.tp_all = atomic_long_read(&po->rollover->num);
-               rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
-               rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
-               data = &rstats;
-               lv = sizeof(rstats);
                break;
        case PACKET_TX_HAS_OFF:
                val = po->tp_tx_has_off;
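The rollover-stats hunk copies the counters while the pointer is known valid and only reports the error after leaving the critical section. A user-space analogue of that shape (a plain pointer stands in for rcu_dereference() under rcu_read_lock(); names are illustrative):

	struct rollover_stats { long all, huge, failed; };
	struct rollover       { struct rollover_stats s; };

	static struct rollover *po_rollover;	/* may be cleared by fanout teardown */

	static int read_rollover_stats(struct rollover_stats *out)
	{
		struct rollover *r = po_rollover;	/* snapshot inside the read section */

		if (r)
			*out = r->s;		/* copy everything while r is valid */
		/* rcu_read_unlock() would go here in the kernel version */
		return r ? 0 : -1;		/* decide -EINVAL only after unlocking */
	}

	int main(void)
	{
		struct rollover_stats st;

		read_rollover_stats(&st);
		return 0;
	}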
index fb17552fd292ef5a67bff1c0da2a19e4ef06c6b8..4b0a8288c98a65195519f60f5162422b6040e9c6 100644 (file)
@@ -308,10 +308,11 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
        call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
                                     gfp);
        /* The socket has been unlocked. */
-       if (!IS_ERR(call))
+       if (!IS_ERR(call)) {
                call->notify_rx = notify_rx;
+               mutex_unlock(&call->user_mutex);
+       }
 
-       mutex_unlock(&call->user_mutex);
        _leave(" = %p", call);
        return call;
 }
index d230cb4c809454137968be4bb1537d92bc8ebc5d..b480d7c792ba03e26e2eece4998f1b82e9c75b05 100644 (file)
@@ -234,6 +234,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
        tc_cls_common_offload_init(&cls_flower.common, tp);
        cls_flower.command = TC_CLSFLOWER_DESTROY;
        cls_flower.cookie = (unsigned long) f;
+       cls_flower.egress_dev = f->hw_dev != tp->q->dev_queue->dev;
 
        dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower);
 }
@@ -289,6 +290,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
        cls_flower.command = TC_CLSFLOWER_STATS;
        cls_flower.cookie = (unsigned long) f;
        cls_flower.exts = &f->exts;
+       cls_flower.egress_dev = f->hw_dev != tp->q->dev_queue->dev;
 
        dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
                                      &cls_flower);
index 92a07141fd07396a816478569f6de18a2aa13ebb..34f10e75f3b951a6fce87092a9af58dddb1f891b 100644 (file)
@@ -421,7 +421,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
 {
        struct dst_entry *dst;
 
-       if (!t)
+       if (sock_owned_by_user(sk) || !t)
                return;
        dst = sctp_transport_dst_check(t);
        if (dst)
index d4730ada7f3233367be7a0e3bb10e286a25602c8..17841ab30798ecb2d7269296dcda3c62d434d622 100644 (file)
@@ -4906,6 +4906,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
        struct socket *sock;
        int err = 0;
 
+       /* Do not peel off from one netns to another one. */
+       if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
+               return -EINVAL;
+
        if (!asoc)
                return -EINVAL;
 
index 14ed5a344cdf302ba3f2d8e9dec4fb7c66fdd239..e21991fe883a7c5f03a3d6eefb52518ef9a67d6c 100644 (file)
@@ -310,11 +310,15 @@ static void hvs_close_connection(struct vmbus_channel *chan)
        struct sock *sk = get_per_channel_state(chan);
        struct vsock_sock *vsk = vsock_sk(sk);
 
+       lock_sock(sk);
+
        sk->sk_state = SS_UNCONNECTED;
        sock_set_flag(sk, SOCK_DONE);
        vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;
 
        sk->sk_state_change(sk);
+
+       release_sock(sk);
 }
 
 static void hvs_open_connection(struct vmbus_channel *chan)
@@ -344,6 +348,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
        if (!sk)
                return;
 
+       lock_sock(sk);
+
        if ((conn_from_host && sk->sk_state != VSOCK_SS_LISTEN) ||
            (!conn_from_host && sk->sk_state != SS_CONNECTING))
                goto out;
@@ -395,9 +401,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
 
                vsock_insert_connected(vnew);
 
-               lock_sock(sk);
                vsock_enqueue_accept(sk, new);
-               release_sock(sk);
        } else {
                sk->sk_state = SS_CONNECTED;
                sk->sk_socket->state = SS_CONNECTED;
@@ -410,6 +414,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
 out:
        /* Release refcnt obtained when we called vsock_find_bound_socket() */
        sock_put(sk);
+
+       release_sock(sk);
 }
 
 static u32 hvs_get_local_cid(void)
@@ -476,13 +482,21 @@ out:
 
 static void hvs_release(struct vsock_sock *vsk)
 {
+       struct sock *sk = sk_vsock(vsk);
        struct hvsock *hvs = vsk->trans;
-       struct vmbus_channel *chan = hvs->chan;
+       struct vmbus_channel *chan;
 
+       lock_sock(sk);
+
+       sk->sk_state = SS_DISCONNECTING;
+       vsock_remove_sock(vsk);
+
+       release_sock(sk);
+
+       chan = hvs->chan;
        if (chan)
                hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN);
 
-       vsock_remove_sock(vsk);
 }
 
 static void hvs_destruct(struct vsock_sock *vsk)
index f9b38ef82dc2449e56094b9c8c0c805673c0a7b0..52b0053274f425a6a07eb70dfbce89ca9c32d4fb 100644 (file)
@@ -62,7 +62,7 @@ int bpf_prog2(struct __sk_buff *skb)
                ret = 1;
 
        bpf_printk("sockmap: %d -> %d @ %d\n", lport, bpf_ntohl(rport), ret);
-       return bpf_sk_redirect_map(&sock_map, ret, 0);
+       return bpf_sk_redirect_map(skb, &sock_map, ret, 0);
 }
 
 SEC("sockops")
index bc7fcf010a5b4ccb7c25dcfd651e53c61ce80b1a..446beb7ac48dc47f7319dcb6bcf91272bd0317d8 100644 (file)
@@ -78,29 +78,37 @@ static int simple_thread_fn(void *arg)
 }
 
 static DEFINE_MUTEX(thread_mutex);
+static int simple_thread_cnt;
 
 int foo_bar_reg(void)
 {
+       mutex_lock(&thread_mutex);
+       if (simple_thread_cnt++)
+               goto out;
+
        pr_info("Starting thread for foo_bar_fn\n");
        /*
         * We shouldn't be able to start a trace when the module is
         * unloading (there's other locks to prevent that). But
         * for consistency sake, we still take the thread_mutex.
         */
-       mutex_lock(&thread_mutex);
        simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
+ out:
        mutex_unlock(&thread_mutex);
        return 0;
 }
 
 void foo_bar_unreg(void)
 {
-       pr_info("Killing thread for foo_bar_fn\n");
-       /* protect against module unloading */
        mutex_lock(&thread_mutex);
+       if (--simple_thread_cnt)
+               goto out;
+
+       pr_info("Killing thread for foo_bar_fn\n");
        if (simple_tsk_fn)
                kthread_stop(simple_tsk_fn);
        simple_tsk_fn = NULL;
+ out:
        mutex_unlock(&thread_mutex);
 }
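The sample-module change turns registration into reference counting: the worker is started only on the first register and stopped only on the last unregister. A user-space sketch of the same counting under a mutex (pthread plumbing and names are illustrative, not part of the patch):

	#include <pthread.h>

	static pthread_mutex_t thread_mutex = PTHREAD_MUTEX_INITIALIZER;
	static int simple_thread_refs;		/* how many tracers are registered */
	static int thread_running;

	void reg_sketch(void)
	{
		pthread_mutex_lock(&thread_mutex);
		if (simple_thread_refs++ == 0)
			thread_running = 1;	/* kthread_run() in the sample module */
		pthread_mutex_unlock(&thread_mutex);
	}

	void unreg_sketch(void)
	{
		pthread_mutex_lock(&thread_mutex);
		if (--simple_thread_refs == 0)
			thread_running = 0;	/* kthread_stop() in the sample module */
		pthread_mutex_unlock(&thread_mutex);
	}

	int main(void)
	{
		reg_sketch();
		reg_sketch();
		unreg_sketch();
		unreg_sketch();
		return thread_running;		/* 0: stopped exactly once, at the end */
	}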
 
index e4d90e50f6fece5db963af40a395d8ff4a4e8ecc..812657ab5aa3908322d58e461a4889001327bfde 100644 (file)
@@ -105,6 +105,7 @@ int main(void)
        DEVID_FIELD(input_device_id, sndbit);
        DEVID_FIELD(input_device_id, ffbit);
        DEVID_FIELD(input_device_id, swbit);
+       DEVID_FIELD(input_device_id, propbit);
 
        DEVID(eisa_device_id);
        DEVID_FIELD(eisa_device_id, sig);
index 29d6699d5a06c1eddb52e9aa4175766297754e4c..bc25898f6df0997b13f5752f97dd90879f4a0d37 100644 (file)
@@ -761,7 +761,7 @@ static void do_input(char *alias,
                        sprintf(alias + strlen(alias), "%X,*", i);
 }
 
-/* input:b0v0p0e0-eXkXrXaXmXlXsXfXwX where X is comma-separated %02X. */
+/* input:b0v0p0e0-eXkXrXaXmXlXsXfXwXprX where X is comma-separated %02X. */
 static int do_input_entry(const char *filename, void *symval,
                          char *alias)
 {
@@ -779,6 +779,7 @@ static int do_input_entry(const char *filename, void *symval,
        DEF_FIELD_ADDR(symval, input_device_id, sndbit);
        DEF_FIELD_ADDR(symval, input_device_id, ffbit);
        DEF_FIELD_ADDR(symval, input_device_id, swbit);
+       DEF_FIELD_ADDR(symval, input_device_id, propbit);
 
        sprintf(alias, "input:");
 
@@ -816,6 +817,9 @@ static int do_input_entry(const char *filename, void *symval,
        sprintf(alias + strlen(alias), "w*");
        if (flags & INPUT_DEVICE_ID_MATCH_SWBIT)
                do_input(alias, *swbit, 0, INPUT_DEVICE_ID_SW_MAX);
+       sprintf(alias + strlen(alias), "pr*");
+       if (flags & INPUT_DEVICE_ID_MATCH_PROPBIT)
+               do_input(alias, *propbit, 0, INPUT_DEVICE_ID_PROP_MAX);
        return 1;
 }
 ADD_TO_DEVTABLE("input", input_device_id, do_input_entry);
index c25e0d27537f87ea7d7d216b9448f47b83e26608..fc46f5b85251049265a53929e37d5822240a64e3 100644 (file)
@@ -585,13 +585,14 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
        struct vfs_ns_cap_data data, *nscaps = &data;
        struct vfs_cap_data *caps = (struct vfs_cap_data *) &data;
        kuid_t rootkuid;
-       struct user_namespace *fs_ns = inode->i_sb->s_user_ns;
+       struct user_namespace *fs_ns;
 
        memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));
 
        if (!inode)
                return -ENODATA;
 
+       fs_ns = inode->i_sb->s_user_ns;
        size = __vfs_getxattr((struct dentry *)dentry, inode,
                              XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ);
        if (size == -ENODATA || size == -EOPNOTSUPP)
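The capability fix above is the classic "dereference before NULL check" repair: the i_sb access now happens only after inode has been validated. A tiny sketch of the corrected ordering (struct layout and names are illustrative):

	#include <errno.h>

	struct sb    { int user_ns; };
	struct inode { struct sb *i_sb; };

	static int get_caps_sketch(const struct inode *inode)
	{
		int fs_ns;			/* don't touch inode in the declaration */

		if (!inode)
			return -ENODATA;

		fs_ns = inode->i_sb->user_ns;	/* safe: inode is known non-NULL here */
		return fs_ns;
	}

	int main(void)
	{
		return get_caps_sketch((void *)0) == -ENODATA ? 0 : 1;
	}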
index 91eafada3164ec9dc77074565c156988c4101554..6462e6654ccf46df67db56959f5405b2f7d4e4c2 100644 (file)
@@ -45,6 +45,7 @@ config BIG_KEYS
        bool "Large payload keys"
        depends on KEYS
        depends on TMPFS
+       select CRYPTO
        select CRYPTO_AES
        select CRYPTO_GCM
        help
index e607830b6154ce4c8dbcd5563fb0d3607f680c8f..929e14978c421b227e592e937d9157ca2235b2f7 100644 (file)
@@ -247,7 +247,7 @@ void big_key_revoke(struct key *key)
 
        /* clear the quota */
        key_payload_reserve(key, 0);
-       if (key_is_instantiated(key) &&
+       if (key_is_positive(key) &&
            (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
                vfs_truncate(path, 0);
 }
@@ -279,7 +279,7 @@ void big_key_describe(const struct key *key, struct seq_file *m)
 
        seq_puts(m, key->description);
 
-       if (key_is_instantiated(key))
+       if (key_is_positive(key))
                seq_printf(m, ": %zu [%s]",
                           datalen,
                           datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
index 69855ba0d3b3fcba2190844ae5665ead90dd38f9..d92cbf9687c33f090865f6d0caa99d9936f49e3a 100644 (file)
@@ -309,6 +309,13 @@ static struct key *request_user_key(const char *master_desc, const u8 **master_k
 
        down_read(&ukey->sem);
        upayload = user_key_payload_locked(ukey);
+       if (!upayload) {
+               /* key was revoked before we acquired its semaphore */
+               up_read(&ukey->sem);
+               key_put(ukey);
+               ukey = ERR_PTR(-EKEYREVOKED);
+               goto error;
+       }
        *master_key = upayload->data;
        *master_keylen = upayload->datalen;
 error:
@@ -847,7 +854,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
        size_t datalen = prep->datalen;
        int ret = 0;
 
-       if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+       if (key_is_negative(key))
                return -ENOKEY;
        if (datalen <= 0 || datalen > 32767 || !prep->data)
                return -EINVAL;
index 87cb260e4890f3ac464e8d3f3244077653510b74..f01d48cb3de1aac09266b59bcfa732597c6534b4 100644 (file)
@@ -129,15 +129,15 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
        while (!list_empty(keys)) {
                struct key *key =
                        list_entry(keys->next, struct key, graveyard_link);
+               short state = key->state;
+
                list_del(&key->graveyard_link);
 
                kdebug("- %u", key->serial);
                key_check(key);
 
                /* Throw away the key data if the key is instantiated */
-               if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
-                   !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
-                   key->type->destroy)
+               if (state == KEY_IS_POSITIVE && key->type->destroy)
                        key->type->destroy(key);
 
                security_key_free(key);
@@ -151,7 +151,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
                }
 
                atomic_dec(&key->user->nkeys);
-               if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+               if (state != KEY_IS_UNINSTANTIATED)
                        atomic_dec(&key->user->nikeys);
 
                key_user_put(key->user);
index eb914a838840df416f26af2c0cd73f87c60a5032..83bf4b4afd49d24ba80209a9bff71a558bd01b6c 100644 (file)
@@ -401,6 +401,18 @@ int key_payload_reserve(struct key *key, size_t datalen)
 }
 EXPORT_SYMBOL(key_payload_reserve);
 
+/*
+ * Change the key state to being instantiated.
+ */
+static void mark_key_instantiated(struct key *key, int reject_error)
+{
+       /* Commit the payload before setting the state; barrier versus
+        * key_read_state().
+        */
+       smp_store_release(&key->state,
+                         (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
+}
+
 /*
  * Instantiate a key and link it into the target keyring atomically.  Must be
  * called with the target keyring's semaphore writelocked.  The target key's
@@ -424,14 +436,14 @@ static int __key_instantiate_and_link(struct key *key,
        mutex_lock(&key_construction_mutex);
 
        /* can't instantiate twice */
-       if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+       if (key->state == KEY_IS_UNINSTANTIATED) {
                /* instantiate the key */
                ret = key->type->instantiate(key, prep);
 
                if (ret == 0) {
                        /* mark the key as being instantiated */
                        atomic_inc(&key->user->nikeys);
-                       set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
+                       mark_key_instantiated(key, 0);
 
                        if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                                awaken = 1;
@@ -577,13 +589,10 @@ int key_reject_and_link(struct key *key,
        mutex_lock(&key_construction_mutex);
 
        /* can't instantiate twice */
-       if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+       if (key->state == KEY_IS_UNINSTANTIATED) {
                /* mark the key as being negatively instantiated */
                atomic_inc(&key->user->nikeys);
-               key->reject_error = -error;
-               smp_wmb();
-               set_bit(KEY_FLAG_NEGATIVE, &key->flags);
-               set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
+               mark_key_instantiated(key, -error);
                now = current_kernel_time();
                key->expiry = now.tv_sec + timeout;
                key_schedule_gc(key->expiry + key_gc_delay);
@@ -752,8 +761,8 @@ static inline key_ref_t __key_update(key_ref_t key_ref,
 
        ret = key->type->update(key, prep);
        if (ret == 0)
-               /* updating a negative key instantiates it */
-               clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+               /* Updating a negative key positively instantiates it */
+               mark_key_instantiated(key, 0);
 
        up_write(&key->sem);
 
@@ -936,6 +945,16 @@ error:
         */
        __key_link_end(keyring, &index_key, edit);
 
+       key = key_ref_to_ptr(key_ref);
+       if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
+               ret = wait_for_key_construction(key, true);
+               if (ret < 0) {
+                       key_ref_put(key_ref);
+                       key_ref = ERR_PTR(ret);
+                       goto error_free_prep;
+               }
+       }
+
        key_ref = __key_update(key_ref, &prep);
        goto error_free_prep;
 }
@@ -986,8 +1005,8 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
 
        ret = key->type->update(key, &prep);
        if (ret == 0)
-               /* updating a negative key instantiates it */
-               clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+               /* Updating a negative key positively instantiates it */
+               mark_key_instantiated(key, 0);
 
        up_write(&key->sem);
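mark_key_instantiated() above publishes the payload first and then the state with a release store, paired with an acquire-style read in key_read_state(). A user-space sketch of that release/acquire pairing using C11 atomics (the stdatomic plumbing and names are illustrative, not the kernel API):

	#include <stdatomic.h>

	#define KEY_IS_UNINSTANTIATED	0
	#define KEY_IS_POSITIVE		1	/* negative values carry the reject error */

	struct mkey { int payload; _Atomic short state; };

	static void mark_instantiated_sketch(struct mkey *key, int reject_error)
	{
		key->payload = 42;		/* commit the payload first ...          */
		atomic_store_explicit(&key->state,
				      reject_error < 0 ? reject_error : KEY_IS_POSITIVE,
				      memory_order_release);	/* ... then the state */
	}

	static short read_state_sketch(const struct mkey *key)
	{
		return atomic_load_explicit(&key->state, memory_order_acquire);
	}

	int main(void)
	{
		struct mkey k = { .state = KEY_IS_UNINSTANTIATED };

		mark_instantiated_sketch(&k, 0);
		return read_state_sketch(&k) == KEY_IS_POSITIVE ? 0 : 1;
	}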
 
index 365ff85d7e27122db220fad6d633f8e0b352209f..76d22f726ae49d7e112c648c59e9c0d3124063f1 100644 (file)
@@ -766,10 +766,9 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
 
        key = key_ref_to_ptr(key_ref);
 
-       if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
-               ret = -ENOKEY;
-               goto error2;
-       }
+       ret = key_read_state(key);
+       if (ret < 0)
+               goto error2; /* Negatively instantiated */
 
        /* see if we can read it directly */
        ret = key_permission(key_ref, KEY_NEED_READ);
@@ -901,7 +900,7 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
                atomic_dec(&key->user->nkeys);
                atomic_inc(&newowner->nkeys);
 
-               if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+               if (key->state != KEY_IS_UNINSTANTIATED) {
                        atomic_dec(&key->user->nikeys);
                        atomic_inc(&newowner->nikeys);
                }
index 4fa82a8a9c0e6cb778a08874724ea546cd602ade..a7e51f7938671c76fa917ea66a7a1d6524f21364 100644 (file)
@@ -414,7 +414,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
        else
                seq_puts(m, "[anon]");
 
-       if (key_is_instantiated(keyring)) {
+       if (key_is_positive(keyring)) {
                if (keyring->keys.nr_leaves_on_tree != 0)
                        seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
                else
@@ -553,7 +553,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
 {
        struct keyring_search_context *ctx = iterator_data;
        const struct key *key = keyring_ptr_to_key(object);
-       unsigned long kflags = key->flags;
+       unsigned long kflags = READ_ONCE(key->flags);
+       short state = READ_ONCE(key->state);
 
        kenter("{%d}", key->serial);
 
@@ -565,6 +566,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
 
        /* skip invalidated, revoked and expired keys */
        if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+               time_t expiry = READ_ONCE(key->expiry);
+
                if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
                              (1 << KEY_FLAG_REVOKED))) {
                        ctx->result = ERR_PTR(-EKEYREVOKED);
@@ -572,7 +575,7 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
                        goto skipped;
                }
 
-               if (key->expiry && ctx->now.tv_sec >= key->expiry) {
+               if (expiry && ctx->now.tv_sec >= expiry) {
                        if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
                                ctx->result = ERR_PTR(-EKEYEXPIRED);
                        kleave(" = %d [expire]", ctx->skipped_ret);
@@ -597,9 +600,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
 
        if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
                /* we set a different error code if we pass a negative key */
-               if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
-                       smp_rmb();
-                       ctx->result = ERR_PTR(key->reject_error);
+               if (state < 0) {
+                       ctx->result = ERR_PTR(state);
                        kleave(" = %d [neg]", ctx->skipped_ret);
                        goto skipped;
                }
index 732cc0beffdfc74b74eddbf173ee30bd9d0455db..a72b4dd70c8abfab5d05503415c4a5fcef7b6df3 100644 (file)
@@ -88,7 +88,8 @@ EXPORT_SYMBOL(key_task_permission);
  */
 int key_validate(const struct key *key)
 {
-       unsigned long flags = key->flags;
+       unsigned long flags = READ_ONCE(key->flags);
+       time_t expiry = READ_ONCE(key->expiry);
 
        if (flags & (1 << KEY_FLAG_INVALIDATED))
                return -ENOKEY;
@@ -99,9 +100,9 @@ int key_validate(const struct key *key)
                return -EKEYREVOKED;
 
        /* check it hasn't expired */
-       if (key->expiry) {
+       if (expiry) {
                struct timespec now = current_kernel_time();
-               if (now.tv_sec >= key->expiry)
+               if (now.tv_sec >= expiry)
                        return -EKEYEXPIRED;
        }
 
index de834309d100206bedca18c84493af6fdb665d7a..6d1fcbba1e0961927c9b3a6027fcd8084a21f80a 100644 (file)
@@ -179,9 +179,12 @@ static int proc_keys_show(struct seq_file *m, void *v)
        struct rb_node *_p = v;
        struct key *key = rb_entry(_p, struct key, serial_node);
        struct timespec now;
+       time_t expiry;
        unsigned long timo;
+       unsigned long flags;
        key_ref_t key_ref, skey_ref;
        char xbuf[16];
+       short state;
        int rc;
 
        struct keyring_search_context ctx = {
@@ -217,12 +220,13 @@ static int proc_keys_show(struct seq_file *m, void *v)
        rcu_read_lock();
 
        /* come up with a suitable timeout value */
-       if (key->expiry == 0) {
+       expiry = READ_ONCE(key->expiry);
+       if (expiry == 0) {
                memcpy(xbuf, "perm", 5);
-       } else if (now.tv_sec >= key->expiry) {
+       } else if (now.tv_sec >= expiry) {
                memcpy(xbuf, "expd", 5);
        } else {
-               timo = key->expiry - now.tv_sec;
+               timo = expiry - now.tv_sec;
 
                if (timo < 60)
                        sprintf(xbuf, "%lus", timo);
@@ -236,18 +240,21 @@ static int proc_keys_show(struct seq_file *m, void *v)
                        sprintf(xbuf, "%luw", timo / (60*60*24*7));
        }
 
-#define showflag(KEY, LETTER, FLAG) \
-       (test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')
+       state = key_read_state(key);
 
+#define showflag(FLAGS, LETTER, FLAG) \
+       ((FLAGS & (1 << FLAG)) ? LETTER : '-')
+
+       flags = READ_ONCE(key->flags);
        seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
                   key->serial,
-                  showflag(key, 'I', KEY_FLAG_INSTANTIATED),
-                  showflag(key, 'R', KEY_FLAG_REVOKED),
-                  showflag(key, 'D', KEY_FLAG_DEAD),
-                  showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
-                  showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
-                  showflag(key, 'N', KEY_FLAG_NEGATIVE),
-                  showflag(key, 'i', KEY_FLAG_INVALIDATED),
+                  state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
+                  showflag(flags, 'R', KEY_FLAG_REVOKED),
+                  showflag(flags, 'D', KEY_FLAG_DEAD),
+                  showflag(flags, 'Q', KEY_FLAG_IN_QUOTA),
+                  showflag(flags, 'U', KEY_FLAG_USER_CONSTRUCT),
+                  state < 0 ? 'N' : '-',
+                  showflag(flags, 'i', KEY_FLAG_INVALIDATED),
                   refcount_read(&key->usage),
                   xbuf,
                   key->perm,
index 293d3598153bf0c8611f319f4c1d7c50f364e600..740affd65ee98464e19fc32f830f86a1ad3f24f4 100644 (file)
@@ -730,7 +730,7 @@ try_again:
 
        ret = -EIO;
        if (!(lflags & KEY_LOOKUP_PARTIAL) &&
-           !test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+           key_read_state(key) == KEY_IS_UNINSTANTIATED)
                goto invalid_key;
 
        /* check the permissions */
index 63e63a42db3c0aa4b1ed069e03e8e1f8e9139a32..e8036cd0ad5430a87ec2e2ea1496e921ae941b3d 100644 (file)
@@ -595,10 +595,9 @@ int wait_for_key_construction(struct key *key, bool intr)
                          intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
        if (ret)
                return -ERESTARTSYS;
-       if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
-               smp_rmb();
-               return key->reject_error;
-       }
+       ret = key_read_state(key);
+       if (ret < 0)
+               return ret;
        return key_validate(key);
 }
 EXPORT_SYMBOL(wait_for_key_construction);
index 6ebf1af8fce963eeb0cc4c423c153318096e65a6..424e1d90412ea0c40e4e20453f47b566b0b25834 100644 (file)
@@ -73,7 +73,7 @@ static void request_key_auth_describe(const struct key *key,
 
        seq_puts(m, "key:");
        seq_puts(m, key->description);
-       if (key_is_instantiated(key))
+       if (key_is_positive(key))
                seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
 }
 
index ddfaebf60fc8649cf6159d2cba18e916931a3b4e..bd85315cbfeb3e1bad3b01e7d9a6538bbb5d3577 100644 (file)
@@ -1066,7 +1066,7 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
        char *datablob;
        int ret = 0;
 
-       if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+       if (key_is_negative(key))
                return -ENOKEY;
        p = key->payload.data[0];
        if (!p->migratable)
index 3d8c68eba5160286fa7af79c8da1ead6e6b05236..9f558bedba23a338da5980ab11dd1b716c7b0b67 100644 (file)
@@ -114,7 +114,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
 
        /* attach the new data, displacing the old */
        key->expiry = prep->expiry;
-       if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+       if (key_is_positive(key))
                zap = dereference_key_locked(key);
        rcu_assign_keypointer(key, prep->payload.data[0]);
        prep->payload.data[0] = NULL;
@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(user_destroy);
 void user_describe(const struct key *key, struct seq_file *m)
 {
        seq_puts(m, key->description);
-       if (key_is_instantiated(key))
+       if (key_is_positive(key))
                seq_printf(m, ": %u", key->datalen);
 }
 
index 0ff7926a5a69ad8dfb2f18a768bf4d9a391c23ee..cda64b489e4200563de5e0bd2f43ea692b8abbce 100644 (file)
@@ -23,8 +23,6 @@
 #include <sound/core.h>
 #include "seq_lock.h"
 
-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
-
 /* wait until all locks are released */
 void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
 {
@@ -41,5 +39,3 @@ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
        }
 }
 EXPORT_SYMBOL(snd_use_lock_sync_helper);
-
-#endif
index 54044bc2c9ef24892a7e6747dcd67e92fb01b0f1..ac38031c370e681984da86f2b8b6981aad1e9ccc 100644 (file)
@@ -3,8 +3,6 @@
 
 #include <linux/sched.h>
 
-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
-
 typedef atomic_t snd_use_lock_t;
 
 /* initialize lock */
@@ -20,14 +18,4 @@ typedef atomic_t snd_use_lock_t;
 void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
 #define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)
 
-#else /* SMP || CONFIG_SND_DEBUG */
-
-typedef spinlock_t snd_use_lock_t;     /* dummy */
-#define snd_use_lock_init(lockp) /**/
-#define snd_use_lock_use(lockp) /**/
-#define snd_use_lock_free(lockp) /**/
-#define snd_use_lock_sync(lockp) /**/
-
-#endif /* SMP || CONFIG_SND_DEBUG */
-
 #endif /* __SND_SEQ_LOCK_H */
index 6c58e6f73a013bd33e47cc9a7ea8c1b46b7f17df..e43af18d43836367e263356eb2377cc4e08e8368 100644 (file)
@@ -484,3 +484,34 @@ void snd_ctl_sync_vmaster(struct snd_kcontrol *kcontrol, bool hook_only)
                master->hook(master->hook_private_data, master->val);
 }
 EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster);
+
+/**
+ * snd_ctl_apply_vmaster_slaves - Apply function to each vmaster slave
+ * @kctl: vmaster kctl element
+ * @func: function to apply
+ * @arg: optional function argument
+ *
+ * Apply the function @func to each slave kctl of the given vmaster kctl.
+ * Returns 0 if successful, or a negative error code.
+ */
+int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
+                                int (*func)(struct snd_kcontrol *, void *),
+                                void *arg)
+{
+       struct link_master *master;
+       struct link_slave *slave;
+       int err;
+
+       master = snd_kcontrol_chip(kctl);
+       err = master_init(master);
+       if (err < 0)
+               return err;
+       list_for_each_entry(slave, &master->slaves, list) {
+               err = func(&slave->slave, arg);
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_apply_vmaster_slaves);
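The new helper above walks every slave of a vmaster control and applies a caller-supplied callback, aborting on the first negative return. A user-space sketch of that iterate-and-apply contract (list layout and names are illustrative, not the ALSA API):

	struct slave { int volume; struct slave *next; };

	static int apply_to_slaves_sketch(struct slave *head,
					  int (*func)(struct slave *, void *), void *arg)
	{
		int err;

		for (; head; head = head->next) {
			err = func(head, arg);
			if (err < 0)
				return err;	/* stop on the first failure */
		}
		return 0;
	}

	static int set_volume(struct slave *s, void *arg)
	{
		s->volume = *(int *)arg;
		return 0;
	}

	int main(void)
	{
		struct slave b = { 0, 0 }, a = { 0, &b };
		int vol = 10;

		return apply_to_slaves_sketch(&a, set_volume, &vol);
	}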
index 978dc1801b3aceb8e2245b819097954c76595d77..f6d2985b2520cf1130a9a6c7d05781b6eafd44fb 100644 (file)
@@ -284,6 +284,11 @@ int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus)
                dev_dbg(bus->dev, "HDA capability ID: 0x%x\n",
                        (cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF);
 
+               if (cur_cap == -1) {
+                       dev_dbg(bus->dev, "Invalid capability reg read\n");
+                       break;
+               }
+
                switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) {
                case AZX_ML_CAP_ID:
                        dev_dbg(bus->dev, "Found ML capability\n");
index 3db26c451837cf0e2893d1071c1a699465d89e1b..a0989d231fd00fa94259ccea2ddc397336d633b4 100644 (file)
@@ -1803,36 +1803,6 @@ static int check_slave_present(struct hda_codec *codec,
        return 1;
 }
 
-/* guess the value corresponding to 0dB */
-static int get_kctl_0dB_offset(struct hda_codec *codec,
-                              struct snd_kcontrol *kctl, int *step_to_check)
-{
-       int _tlv[4];
-       const int *tlv = NULL;
-       int val = -1;
-
-       if ((kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
-           kctl->tlv.c == snd_hda_mixer_amp_tlv) {
-               get_ctl_amp_tlv(kctl, _tlv);
-               tlv = _tlv;
-       } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
-               tlv = kctl->tlv.p;
-       if (tlv && tlv[0] == SNDRV_CTL_TLVT_DB_SCALE) {
-               int step = tlv[3];
-               step &= ~TLV_DB_SCALE_MUTE;
-               if (!step)
-                       return -1;
-               if (*step_to_check && *step_to_check != step) {
-                       codec_err(codec, "Mismatching dB step for vmaster slave (%d!=%d)\n",
-                                 *step_to_check, step);
-                       return -1;
-               }
-               *step_to_check = step;
-               val = -tlv[2] / step;
-       }
-       return val;
-}
-
 /* call kctl->put with the given value(s) */
 static int put_kctl_with_value(struct snd_kcontrol *kctl, int val)
 {
@@ -1847,19 +1817,58 @@ static int put_kctl_with_value(struct snd_kcontrol *kctl, int val)
        return 0;
 }
 
-/* initialize the slave volume with 0dB */
-static int init_slave_0dB(struct hda_codec *codec,
-                         void *data, struct snd_kcontrol *slave)
+struct slave_init_arg {
+       struct hda_codec *codec;
+       int step;
+};
+
+/* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */
+static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
 {
-       int offset = get_kctl_0dB_offset(codec, slave, data);
-       if (offset > 0)
-               put_kctl_with_value(slave, offset);
+       struct slave_init_arg *arg = _arg;
+       int _tlv[4];
+       const int *tlv = NULL;
+       int step;
+       int val;
+
+       if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
+               if (kctl->tlv.c != snd_hda_mixer_amp_tlv) {
+                       codec_err(arg->codec,
+                                 "Unexpected TLV callback for slave %s:%d\n",
+                                 kctl->id.name, kctl->id.index);
+                       return 0; /* ignore */
+               }
+               get_ctl_amp_tlv(kctl, _tlv);
+               tlv = _tlv;
+       } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
+               tlv = kctl->tlv.p;
+
+       if (!tlv || tlv[0] != SNDRV_CTL_TLVT_DB_SCALE)
+               return 0;
+
+       step = tlv[3];
+       step &= ~TLV_DB_SCALE_MUTE;
+       if (!step)
+               return 0;
+       if (arg->step && arg->step != step) {
+               codec_err(arg->codec,
+                         "Mismatching dB step for vmaster slave (%d!=%d)\n",
+                         arg->step, step);
+               return 0;
+       }
+
+       arg->step = step;
+       val = -tlv[2] / step;
+       if (val > 0) {
+               put_kctl_with_value(kctl, val);
+               return val;
+       }
+
        return 0;
 }
 
-/* unmute the slave */
-static int init_slave_unmute(struct hda_codec *codec,
-                            void *data, struct snd_kcontrol *slave)
+/* unmute the slave via snd_ctl_apply_vmaster_slaves() */
+static int init_slave_unmute(struct snd_kcontrol *slave, void *_arg)
 {
        return put_kctl_with_value(slave, 1);
 }
@@ -1919,9 +1928,13 @@ int __snd_hda_add_vmaster(struct hda_codec *codec, char *name,
        /* init with master mute & zero volume */
        put_kctl_with_value(kctl, 0);
        if (init_slave_vol) {
-               int step = 0;
-               map_slaves(codec, slaves, suffix,
-                          tlv ? init_slave_0dB : init_slave_unmute, &step);
+               struct slave_init_arg arg = {
+                       .codec = codec,
+                       .step = 0,
+               };
+               snd_ctl_apply_vmaster_slaves(kctl,
+                                            tlv ? init_slave_0dB : init_slave_unmute,
+                                            &arg);
        }
 
        if (ctl_ret)
index 9ddaae3784f5312a135733f5e7f1019609080073..4f5f18f22974ef8e2e7b95e4c0ea261e7a04a388 100644 (file)
@@ -1354,6 +1354,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
        case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
        case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
+       case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
                if (fp->altsetting == 2)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
index f90860d1f8979e03b10063a04179bed44c137e5a..24b35a1fd4d67eebf65e4dc83faaaacca9d2e1fb 100644 (file)
@@ -569,9 +569,10 @@ union bpf_attr {
  *     @flags: reserved for future use
  *     Return: 0 on success or negative error code
  *
- * int bpf_sk_redirect_map(map, key, flags)
+ * int bpf_sk_redirect_map(skb, map, key, flags)
  *     Redirect skb to a sock in map using key as a lookup key for the
  *     sock in map.
+ *     @skb: pointer to skb
  *     @map: pointer to sockmap
  *     @key: key to lookup sock in map
  *     @flags: reserved for future use
index a0c518ecf085135711e8b06488381f53127d9fee..c0e26ad1fa7e3dbd212b3a4fd02fbcc4d8afc74c 100644 (file)
@@ -267,12 +267,13 @@ static int decode_instructions(struct objtool_file *file)
                                                      &insn->immediate,
                                                      &insn->stack_op);
                        if (ret)
-                               return ret;
+                               goto err;
 
                        if (!insn->type || insn->type > INSN_LAST) {
                                WARN_FUNC("invalid instruction type %d",
                                          insn->sec, insn->offset, insn->type);
-                               return -1;
+                               ret = -1;
+                               goto err;
                        }
 
                        hash_add(file->insn_hash, &insn->hash, insn->offset);
@@ -296,6 +297,10 @@ static int decode_instructions(struct objtool_file *file)
        }
 
        return 0;
+
+err:
+       free(insn);
+       return ret;
 }
 
 /*
index e397453e5a465513af8d84103fd3feccb510d793..63526f4416ea4fb81292fafcfe6fdd55d9955639 100644 (file)
@@ -8,8 +8,8 @@ perf-record - Run a command and record its profile into perf.data
 SYNOPSIS
 --------
 [verse]
-'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] <command>
-'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] -- <command> [<options>]
+'perf record' [-e <EVENT> | --event=EVENT] [-a] <command>
+'perf record' [-e <EVENT> | --event=EVENT] [-a] -- <command> [<options>]
 
 DESCRIPTION
 -----------
index 462fc755092e0b95cdf9ffd2ecf2317f4a4cf5df..7a84d73324e3c1209781296dcc54260745150b53 100755 (executable)
@@ -10,6 +10,9 @@
 
 . $(dirname $0)/lib/probe.sh
 
+ld=$(realpath /lib64/ld*.so.* | uniq)
+libc=$(echo $ld | sed 's/ld/libc/g')
+
 trace_libc_inet_pton_backtrace() {
        idx=0
        expected[0]="PING.*bytes"
@@ -18,8 +21,8 @@ trace_libc_inet_pton_backtrace() {
        expected[3]=".*packets transmitted.*"
        expected[4]="rtt min.*"
        expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
-       expected[6]=".*inet_pton[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$"
-       expected[7]="getaddrinfo[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$"
+       expected[6]=".*inet_pton[[:space:]]\($libc\)$"
+       expected[7]="getaddrinfo[[:space:]]\($libc\)$"
        expected[8]=".*\(.*/bin/ping.*\)$"
 
        perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
@@ -35,7 +38,7 @@ trace_libc_inet_pton_backtrace() {
 }
 
 skip_if_no_perf_probe && \
-perf probe -q /lib64/libc-*.so inet_pton && \
+perf probe -q $libc inet_pton && \
 trace_libc_inet_pton_backtrace
 err=$?
 rm -f ${file}
index ddb2c6fbdf919e8124ffb40eee47c801b43bd6c6..db79017a6e56fc1b836fbbeb178e9eb7c57a7a89 100644 (file)
@@ -532,7 +532,7 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
 
 void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
 {
-       list_del(&format->list);
+       list_del_init(&format->list);
 }
 
 void perf_hpp__cancel_cumulate(void)
@@ -606,6 +606,13 @@ next:
 
 static void fmt_free(struct perf_hpp_fmt *fmt)
 {
+       /*
+        * At this point fmt should be completely
+        * unhooked, if not it's a bug.
+        */
+       BUG_ON(!list_empty(&fmt->list));
+       BUG_ON(!list_empty(&fmt->sort_list));
+
        if (fmt->free)
                fmt->free(fmt);
 }
index c42edeac451fc809c2581f492cdc2e1741774bf7..dcfdafdc2f1c2f91b0090fc1f573fbdafde236de 100644 (file)
@@ -8,6 +8,9 @@
 
 %{
 #include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
 #include "../perf.h"
 #include "parse-events.h"
 #include "parse-events-bison.h"
@@ -53,9 +56,8 @@ static int str(yyscan_t scanner, int token)
        return token;
 }
 
-static bool isbpf(yyscan_t scanner)
+static bool isbpf_suffix(char *text)
 {
-       char *text = parse_events_get_text(scanner);
        int len = strlen(text);
 
        if (len < 2)
@@ -68,6 +70,17 @@ static bool isbpf(yyscan_t scanner)
        return false;
 }
 
+static bool isbpf(yyscan_t scanner)
+{
+       char *text = parse_events_get_text(scanner);
+       struct stat st;
+
+       if (!isbpf_suffix(text))
+               return false;
+
+       return stat(text, &st) == 0;
+}
+
 /*
  * This function is called when the parser gets two kind of input:
  *
index a7ebd9fe8e40ee56f79a108c247d528571d8ff64..76ab0709a20cbd2e6fd9b4d566188b37329d4163 100644 (file)
@@ -374,6 +374,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
+       if (tool->namespaces == NULL)
+               tool->namespaces = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
index 4ba726c9087072184b445ddc4e788d7000ade01d..54af604621304a6eaed9dc67a6c43a17c902df65 100644 (file)
@@ -23,12 +23,12 @@ static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
 
 static inline int xyarray__max_y(struct xyarray *xy)
 {
-       return xy->max_x;
+       return xy->max_y;
 }
 
 static inline int xyarray__max_x(struct xyarray *xy)
 {
-       return xy->max_y;
+       return xy->max_x;
 }
 
 #endif /* _PERF_XYARRAY_H_ */
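
A quick sanity check for the accessor swap fixed above; the xyarray struct here is a reduced stand-in assumed to carry just the two dimension fields.

#include <assert.h>

struct xyarray { int max_x, max_y; };

static int xyarray__max_x(struct xyarray *xy) { return xy->max_x; }
static int xyarray__max_y(struct xyarray *xy) { return xy->max_y; }

int main(void)
{
	struct xyarray xy = { .max_x = 4, .max_y = 8 };

	assert(xyarray__max_x(&xy) == 4);	/* previously returned max_y */
	assert(xyarray__max_y(&xy) == 8);	/* previously returned max_x */
	return 0;
}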
index 0dafba2c1e7d28c4eda6904274baf7537ee3062c..bd9c6b31a504df654e16a9389abbb9028cfa2bb7 100644 (file)
@@ -92,7 +92,6 @@ unsigned int do_ring_perf_limit_reasons;
 unsigned int crystal_hz;
 unsigned long long tsc_hz;
 int base_cpu;
-int do_migrate;
 double discover_bclk(unsigned int family, unsigned int model);
 unsigned int has_hwp;  /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
                        /* IA32_HWP_REQUEST, IA32_HWP_STATUS */
@@ -303,9 +302,6 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
 
 int cpu_migrate(int cpu)
 {
-       if (!do_migrate)
-               return 0;
-
        CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
        CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
        if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
@@ -5007,7 +5003,6 @@ void cmdline(int argc, char **argv)
                {"hide",        required_argument,      0, 'H'},        // meh, -h taken by --help
                {"Joules",      no_argument,            0, 'J'},
                {"list",        no_argument,            0, 'l'},
-               {"migrate",     no_argument,            0, 'm'},
                {"out",         required_argument,      0, 'o'},
                {"quiet",       no_argument,            0, 'q'},
                {"show",        required_argument,      0, 's'},
@@ -5019,7 +5014,7 @@ void cmdline(int argc, char **argv)
 
        progname = argv[0];
 
-       while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:Jmo:qST:v",
+       while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:qST:v",
                                long_options, &option_index)) != -1) {
                switch (opt) {
                case 'a':
@@ -5062,9 +5057,6 @@ void cmdline(int argc, char **argv)
                        list_header_only++;
                        quiet++;
                        break;
-               case 'm':
-                       do_migrate = 1;
-                       break;
                case 'o':
                        outf = fopen_or_die(optarg, "w");
                        break;
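
With the --migrate option gone, cpu_migrate() always pins the calling thread. A self-contained sketch of that unconditional pinning, using the same dynamic cpu_set_t glibc API (CPU_ALLOC/CPU_ZERO_S/CPU_SET_S); the CPU count and error handling are simplified.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int pin_to_cpu(int cpu, int ncpus)
{
	cpu_set_t *set = CPU_ALLOC(ncpus);
	size_t setsize = CPU_ALLOC_SIZE(ncpus);
	int ret;

	if (!set)
		return -1;
	CPU_ZERO_S(setsize, set);
	CPU_SET_S(cpu, setsize, set);
	ret = sched_setaffinity(0, setsize, set);	/* pid 0 = calling thread */
	CPU_FREE(set);
	return ret;
}

int main(void)
{
	if (pin_to_cpu(0, 64))
		perror("sched_setaffinity");
	else
		printf("pinned to CPU 0\n");
	return 0;
}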
index 36fb9161b34acf8c14a585f4778905db9185b163..b2e02bdcd098f6380c80bc190e3986acb1007188 100644 (file)
@@ -65,7 +65,7 @@ static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
 static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
                             int optlen) =
        (void *) BPF_FUNC_setsockopt;
-static int (*bpf_sk_redirect_map)(void *map, int key, int flags) =
+static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
        (void *) BPF_FUNC_sk_redirect_map;
 static int (*bpf_sock_map_update)(void *map, void *key, void *value,
                                  unsigned long long flags) =
index 9b99bd10807d8990a82ba1a390e9f1304e04d214..2cd2d552938b19211da94c4b51ea7895408d35b0 100644 (file)
@@ -61,8 +61,8 @@ int bpf_prog2(struct __sk_buff *skb)
        bpf_printk("verdict: data[0] = redir(%u:%u)\n", map, sk);
 
        if (!map)
-               return bpf_sk_redirect_map(&sock_map_rx, sk, 0);
-       return bpf_sk_redirect_map(&sock_map_tx, sk, 0);
+               return bpf_sk_redirect_map(skb, &sock_map_rx, sk, 0);
+       return bpf_sk_redirect_map(skb, &sock_map_tx, sk, 0);
 }
 
 char _license[] SEC("license") = "GPL";
index fe3a443a110228efb99bc893d2f0820cd03e6763..50ce52d2013d6feddadcb6a2f61cdc4667b3d78d 100644 (file)
@@ -466,7 +466,7 @@ static void test_sockmap(int tasks, void *data)
        int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc;
        struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break;
        int ports[] = {50200, 50201, 50202, 50204};
-       int err, i, fd, sfd[6] = {0xdeadbeef};
+       int err, i, fd, udp, sfd[6] = {0xdeadbeef};
        u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0};
        int parse_prog, verdict_prog;
        struct sockaddr_in addr;
@@ -548,6 +548,16 @@ static void test_sockmap(int tasks, void *data)
                goto out_sockmap;
        }
 
+       /* Test update with unsupported UDP socket */
+       udp = socket(AF_INET, SOCK_DGRAM, 0);
+       i = 0;
+       err = bpf_map_update_elem(fd, &i, &udp, BPF_ANY);
+       if (!err) {
+               printf("Failed socket SOCK_DGRAM allowed '%i:%i'\n",
+                      i, udp);
+               goto out_sockmap;
+       }
+
        /* Test update without programs */
        for (i = 0; i < 6; i++) {
                err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
index 26f3250bdcd2546c4f316eed8caffd9eb583e0d9..64ae21f644896a74bb624d6f873463b5b2d07312 100644 (file)
@@ -1130,15 +1130,27 @@ static struct bpf_test tests[] = {
                .errstr = "invalid bpf_context access",
        },
        {
-               "check skb->mark is writeable by SK_SKB",
+               "invalid access of skb->mark for SK_SKB",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_EXIT_INSN(),
+               },
+               .result =  REJECT,
+               .prog_type = BPF_PROG_TYPE_SK_SKB,
+               .errstr = "invalid bpf_context access",
+       },
+       {
+               "check skb->mark is not writeable by SK_SKB",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
                                    offsetof(struct __sk_buff, mark)),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
+               .result =  REJECT,
                .prog_type = BPF_PROG_TYPE_SK_SKB,
+               .errstr = "invalid bpf_context access",
        },
        {
                "check skb->tc_index is writeable by SK_SKB",
@@ -6645,6 +6657,500 @@ static struct bpf_test tests[] = {
                .errstr = "BPF_END uses reserved fields",
                .result = REJECT,
        },
+       {
+               "arithmetic ops make PTR_TO_CTX unusable",
+               .insns = {
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+                                     offsetof(struct __sk_buff, data) -
+                                     offsetof(struct __sk_buff, mark)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "XDP pkt read, pkt_end mangling, bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end mangling, bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' > pkt_end, good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' > pkt_end, bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_data' > pkt_end, bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end > pkt_data', good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_end > pkt_data', bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end > pkt_data', bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' < pkt_end, good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_data' < pkt_end, bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' < pkt_end, bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end < pkt_data', good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end < pkt_data', bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_end < pkt_data', bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' >= pkt_end, good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_end >= pkt_data', good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end >= pkt_data', bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_end >= pkt_data', bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' <= pkt_end, good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end <= pkt_data', good access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "XDP pkt read, pkt_end <= pkt_data', bad access 1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "XDP pkt read, pkt_end <= pkt_data', bad access 2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 offset is outside of the packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
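
The new XDP verifier cases above all revolve around comparing a derived packet pointer against data_end before dereferencing it. A restricted-C sketch of that bounds-check pattern, as it would look in an XDP program built with clang; the section names and the 0x45 byte test are illustrative only.

#include <linux/bpf.h>

__attribute__((section("xdp"), used))
int xdp_bounds_check(struct xdp_md *ctx)
{
	unsigned char *data     = (unsigned char *)(long)ctx->data;
	unsigned char *data_end = (unsigned char *)(long)ctx->data_end;

	/* "pkt_data' > pkt_end" means the first 8 bytes are not all inside the packet */
	if (data + 8 > data_end)
		return XDP_DROP;

	/* safe: the verifier has proven these bytes are within the packet */
	return data[0] == 0x45 ? XDP_PASS : XDP_DROP;
}

char _license[] __attribute__((section("license"), used)) = "GPL";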