Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 11 Sep 2018 18:46:11 +0000 (08:46 -1000)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 11 Sep 2018 18:46:11 +0000 (08:46 -1000)
Pull rdma fixes from Jason Gunthorpe:
 "This fixes one major regression with NFS and mlx4 due to the max_sg
  rework in this merge window, tidies a few minor error_path
  regressions, and various small fixes.

  The HFI1 driver is broken this cycle due to a regression caused by a
  PCI change, it is looking like Bjorn will merge a fix for this. Also,
  the lingering ipoib issue I mentioned earlier still remains unfixed.

  Summary:

   - Fix possible FD type confusion crash

   - Fix a user trigger-able crash in cxgb4

   - Fix bad handling of IOMMU resources causing user controlled leaking
     in bnxt

   - Add missing locking in ipoib to fix a rare 'stuck tx' situation

   - Add missing locking in cma

   - Add two missing uverbs cleanups on failure paths, regressions
     from this merge window

   - Fix a regression from this merge window that caused RDMA NFS to not
     work with the mlx4 driver due to the max_sg changes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/mlx4: Ensure that maximal send/receive SGE less than supported by HW
  RDMA/cma: Protect cma dev list with lock
  RDMA/uverbs: Fix error cleanup path of ib_uverbs_add_one()
  bnxt_re: Fix couple of memory leaks that could lead to IOMMU call traces
  IB/ipoib: Avoid a race condition between start_xmit and cm_rep_handler
  iw_cxgb4: only allow 1 flush on user qps
  IB/core: Release object lock if destroy failed
  RDMA/ucma: check fd type in ucma_migrate_id()

325 files changed:
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
Documentation/devicetree/bindings/net/cpsw.txt
Documentation/devicetree/bindings/net/sh_eth.txt
Documentation/process/changes.rst
Documentation/scsi/scsi-parameters.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/axc003.dtsi
arch/arc/boot/dts/axc003_idu.dtsi
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/boot/dts/hsdk.dts
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/configs/haps_hs_defconfig
arch/arc/configs/haps_hs_smp_defconfig
arch/arc/configs/hsdk_defconfig
arch/arc/configs/nps_defconfig
arch/arc/configs/nsim_700_defconfig
arch/arc/configs/nsim_hs_defconfig
arch/arc/configs/nsim_hs_smp_defconfig
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/configs/tb10x_defconfig
arch/arc/configs/vdk_hs38_defconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/include/asm/atomic.h
arch/arc/include/asm/dma-mapping.h [new file with mode: 0644]
arch/arc/kernel/troubleshoot.c
arch/arc/mm/cache.c
arch/arc/mm/dma.c
arch/arm/include/asm/kvm_host.h
arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/hyp/switch.c
arch/arm64/mm/mmu.c
arch/m68k/mm/mcfmmu.c
arch/mips/include/asm/kvm_host.h
arch/mips/kernel/vdso.c
arch/mips/kvm/mmu.c
arch/nds32/Kconfig
arch/nds32/Makefile
arch/nds32/include/asm/elf.h
arch/nds32/include/asm/ftrace.h [new file with mode: 0644]
arch/nds32/include/asm/nds32.h
arch/nds32/include/asm/uaccess.h
arch/nds32/kernel/Makefile
arch/nds32/kernel/atl2c.c
arch/nds32/kernel/ex-entry.S
arch/nds32/kernel/ex-exit.S
arch/nds32/kernel/ftrace.c [new file with mode: 0644]
arch/nds32/kernel/module.c
arch/nds32/kernel/stacktrace.c
arch/nds32/kernel/traps.c
arch/nds32/kernel/vmlinux.lds.S
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/s390/include/asm/mmu.h
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/priv.c
arch/s390/kvm/vsie.c
arch/sparc/kernel/of_device_32.c
arch/sparc/kernel/of_device_64.c
arch/x86/include/asm/atomic.h
arch/x86/include/asm/atomic64_32.h
arch/x86/include/asm/atomic64_64.h
arch/x86/include/asm/kdebug.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_64.h
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/tsc.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/mm/pgtable.c
block/bfq-cgroup.c
block/bio.c
block/blk-cgroup.c
block/blk-core.c
block/blk-throttle.c
drivers/acpi/acpi_lpss.c
drivers/acpi/bus.c
drivers/ata/libata-core.c
drivers/base/memory.c
drivers/block/nbd.c
drivers/block/rbd.c
drivers/char/Kconfig
drivers/char/random.c
drivers/dax/device.c
drivers/firmware/arm_scmi/perf.c
drivers/gpio/gpio-adp5588.c
drivers/gpio/gpio-dwapb.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/fb_decoder.c
drivers/gpu/drm/i915/gvt/fb_decoder.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/sched_policy.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/hwmon/raspberrypi-hwmon.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-imx-lpi2c.c
drivers/i2c/busses/i2c-uniphier-f.c
drivers/i2c/busses/i2c-uniphier.c
drivers/i2c/busses/i2c-xiic.c
drivers/irqchip/irq-gic-v3-its.c
drivers/md/md-cluster.c
drivers/md/raid10.c
drivers/md/raid5-log.h
drivers/md/raid5.c
drivers/memory/ti-aemif.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mellanox/mlx5/core/wq.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/cpsw-phy-sel.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/phy/sfp.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/mac80211_hwsim.c
drivers/of/platform.c
drivers/scsi/Kconfig
drivers/scsi/aacraid/aacraid.h
drivers/scsi/csiostor/csio_hw.c
drivers/scsi/csiostor/csio_hw.h
drivers/scsi/csiostor/csio_mb.c
drivers/scsi/hosts.c
drivers/scsi/hpsa.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/scsi_lib.c
drivers/target/iscsi/cxgbit/cxgbit_ddp.c
fs/afs/proc.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/tree-log.c
fs/btrfs/tree-log.h
fs/btrfs/volumes.c
fs/ceph/super.c
fs/cifs/cifs_unicode.c
fs/cifs/connect.c
fs/cifs/inode.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/nilfs2/alloc.c
fs/nilfs2/alloc.h
fs/nilfs2/bmap.c
fs/nilfs2/bmap.h
fs/nilfs2/btnode.c
fs/nilfs2/btnode.h
fs/nilfs2/btree.c
fs/nilfs2/btree.h
fs/nilfs2/cpfile.c
fs/nilfs2/cpfile.h
fs/nilfs2/dat.c
fs/nilfs2/dat.h
fs/nilfs2/dir.c
fs/nilfs2/direct.c
fs/nilfs2/direct.h
fs/nilfs2/file.c
fs/nilfs2/gcinode.c
fs/nilfs2/ifile.c
fs/nilfs2/ifile.h
fs/nilfs2/inode.c
fs/nilfs2/ioctl.c
fs/nilfs2/mdt.c
fs/nilfs2/mdt.h
fs/nilfs2/namei.c
fs/nilfs2/nilfs.h
fs/nilfs2/page.c
fs/nilfs2/page.h
fs/nilfs2/recovery.c
fs/nilfs2/segbuf.c
fs/nilfs2/segbuf.h
fs/nilfs2/segment.c
fs/nilfs2/segment.h
fs/nilfs2/sufile.c
fs/nilfs2/sufile.h
fs/nilfs2/super.c
fs/nilfs2/sysfs.c
fs/nilfs2/sysfs.h
fs/nilfs2/the_nilfs.c
fs/nilfs2/the_nilfs.h
fs/notify/fsnotify.c
include/linux/blk-cgroup.h
include/linux/pci_ids.h
include/linux/timekeeping.h
include/linux/tracepoint.h
include/net/cfg80211.h
include/net/regulatory.h
include/uapi/linux/keyctl.h
include/uapi/linux/rds.h
include/uapi/linux/vhost.h
ipc/shm.c
kernel/bpf/sockmap.c
kernel/cpu.c
kernel/dma/direct.c
kernel/fork.c
kernel/printk/printk_safe.c
kernel/time/clocksource.c
lib/Kconfig.debug
mm/backing-dev.c
mm/huge_memory.c
mm/kmemleak.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/oom_kill.c
mm/page_alloc.c
mm/util.c
net/core/filter.c
net/core/rtnetlink.c
net/dsa/dsa.c
net/ipv4/igmp.c
net/ipv4/ip_gre.c
net/ipv4/tcp_minisocks.c
net/ipv6/af_inet6.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/route.c
net/mac80211/ibss.c
net/mac80211/main.c
net/mac80211/mesh_hwmp.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mac80211/util.c
net/packet/af_packet.c
net/packet/internal.h
net/rds/Kconfig
net/rds/ib.c
net/rfkill/rfkill-gpio.c
net/sched/act_api.c
net/sched/act_ife.c
net/sched/act_pedit.c
net/sched/cls_api.c
net/sctp/proc.c
net/sctp/socket.c
net/tipc/bcast.c
net/tipc/diag.c
net/tipc/name_table.c
net/tipc/name_table.h
net/tipc/netlink.c
net/tipc/socket.c
net/tipc/socket.h
net/tipc/topsrv.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/util.c
scripts/checkpatch.pl
scripts/depmod.sh
scripts/kconfig/Makefile
scripts/kconfig/check-pkgconfig.sh [deleted file]
scripts/kconfig/gconf-cfg.sh
scripts/kconfig/mconf-cfg.sh
scripts/kconfig/mconf.c
scripts/kconfig/nconf-cfg.sh
scripts/kconfig/qconf-cfg.sh
scripts/recordmcount.pl
scripts/setlocalversion
security/apparmor/secid.c
security/keys/dh.c
sound/core/rawmidi.c
sound/hda/ext/hdac_ext_stream.c
sound/pci/hda/hda_codec.c
tools/bpf/bpftool/map.c
tools/kvm/kvm_stat/kvm_stat
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/tc-testing/tc-tests/actions/police.json
tools/vm/page-types.c
tools/vm/slabinfo.c
virt/kvm/arm/mmu.c
virt/kvm/arm/trace.h

index 9871e649ffeffe8798cd20a1450377a4f4777fca..64a3bf54b97492e3b2b25508a7785d1f1e9e4186 100644 (file)
        ramdisk_size=   [RAM] Sizes of RAM disks in kilobytes
                        See Documentation/blockdev/ramdisk.txt.
 
+       random.trust_cpu={on,off}
+                       [KNL] Enable or disable trusting the use of the
+                       CPU's random number generator (if available) to
+                       fully seed the kernel's CRNG. Default is controlled
+                       by CONFIG_RANDOM_TRUST_CPU.
+
        ras=option[,option,...] [KNL] RAS-specific options
 
                cec_disable     [X86]
index 00e4365d720685679824b8d6659fd59a58845ec2..091c8dfd322910e14712d4a818e9879538abf3d9 100644 (file)
@@ -3,7 +3,6 @@
 Required properties:
 - compatible :
   - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc
-  - "fsl,imx8dv-lpi2c" for LPI2C compatible with the one integrated on i.MX8DV soc
 - reg : address and length of the lpi2c master registers
 - interrupts : lpi2c interrupt
 - clocks : lpi2c clock specifier
@@ -11,7 +10,7 @@ Required properties:
 Examples:
 
 lpi2c7: lpi2c7@40a50000 {
-       compatible = "fsl,imx8dv-lpi2c";
+       compatible = "fsl,imx7ulp-lpi2c";
        reg = <0x40A50000 0x10000>;
        interrupt-parent = <&intc>;
        interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
index 41089369f89134b3fb42c98766a46986f1806cd4..b3acebe08eb0a9d4e3d4ac315cd513e340c65f16 100644 (file)
@@ -19,6 +19,10 @@ Required properties:
 - slaves               : Specifies number for slaves
 - active_slave         : Specifies the slave to use for time stamping,
                          ethtool and SIOCGMIIPHY
+- cpsw-phy-sel         : Specifies the phandle to the CPSW phy mode selection
+                         device. See also cpsw-phy-sel.txt for it's binding.
+                         Note that in legacy cases cpsw-phy-sel may be
+                         a child device instead of a phandle.
 
 Optional properties:
 - ti,hwmods            : Must be "cpgmac0"
@@ -75,6 +79,7 @@ Examples:
                cpts_clock_mult = <0x80000000>;
                cpts_clock_shift = <29>;
                syscon = <&cm>;
+               cpsw-phy-sel = <&phy_sel>;
                cpsw_emac0: slave@0 {
                        phy_id = <&davinci_mdio>, <0>;
                        phy-mode = "rgmii-txid";
@@ -103,6 +108,7 @@ Examples:
                cpts_clock_mult = <0x80000000>;
                cpts_clock_shift = <29>;
                syscon = <&cm>;
+               cpsw-phy-sel = <&phy_sel>;
                cpsw_emac0: slave@0 {
                        phy_id = <&davinci_mdio>, <0>;
                        phy-mode = "rgmii-txid";
index 76db9f13ad96c08b77f5dae0af4ea616254ee811..abc36274227c7299bf5ec088e21ebdeed25a3950 100644 (file)
@@ -16,6 +16,7 @@ Required properties:
              "renesas,ether-r8a7794"  if the device is a part of R8A7794 SoC.
              "renesas,gether-r8a77980" if the device is a part of R8A77980 SoC.
              "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC.
+             "renesas,ether-r7s9210" if the device is a part of R7S9210 SoC.
              "renesas,rcar-gen1-ether" for a generic R-Car Gen1 device.
              "renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1
                                        device.
index 61f918b10a0c74d7c8369009ce0b6447d31f903a..d1bf143b446f3b60bc374b40dc7b6818d1c21aa8 100644 (file)
@@ -86,7 +86,7 @@ pkg-config
 
 The build system, as of 4.18, requires pkg-config to check for installed
 kconfig tools and to determine flags settings for use in
-'make {menu,n,g,x}config'.  Previously pkg-config was being used but not
+'make {g,x}config'.  Previously pkg-config was being used but not
 verified or documented.
 
 Flex
index 25a4b4cf04a6d462f4b640a396ae178b561984e2..92999d4e0cb800cfad588f413a0a23210fe92861 100644 (file)
@@ -97,6 +97,11 @@ parameters may be changed at runtime by the command
                        allowing boot to proceed.  none ignores them, expecting
                        user space to do the scan.
 
+       scsi_mod.use_blk_mq=
+                       [SCSI] use blk-mq I/O path by default
+                       See SCSI_MQ_DEFAULT in drivers/scsi/Kconfig.
+                       Format: <y/n>
+
        sim710=         [SCSI,HW]
                        See header of drivers/scsi/sim710.c.
 
index 9ad052aeac39be98b3cb1d390e043f2c7583fd67..d870cb57c887a1c5fa773a8fd9d614c54cb3cce0 100644 (file)
@@ -2311,6 +2311,7 @@ F:        drivers/clocksource/cadence_ttc_timer.c
 F:     drivers/i2c/busses/i2c-cadence.c
 F:     drivers/mmc/host/sdhci-of-arasan.c
 F:     drivers/edac/synopsys_edac.c
+F:     drivers/i2c/busses/i2c-xiic.c
 
 ARM64 PORT (AARCH64 ARCHITECTURE)
 M:     Catalin Marinas <catalin.marinas@arm.com>
index 19948e55694138bae4cfdc49775bb72486e12eb8..4d5c883a98e5a018b4ac301cd420a5745886d500 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
index 6d5eb8267e429fa3eac927b053c31944500a74da..b4441b0764d71aff87b67fba665163b57bdb20b6 100644 (file)
@@ -9,6 +9,7 @@
 config ARC
        def_bool y
        select ARC_TIMERS
+       select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_HAS_SG_CHAIN
@@ -28,8 +29,12 @@ config ARC
        select GENERIC_SMP_IDLE_THREAD
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_TRACEHOOK
+       select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_FUTEX_CMPXCHG if FUTEX
+       select HAVE_GENERIC_DMA_COHERENT
        select HAVE_IOREMAP_PROT
+       select HAVE_KERNEL_GZIP
+       select HAVE_KERNEL_LZMA
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_MEMBLOCK
@@ -44,11 +49,6 @@ config ARC
        select OF_EARLY_FLATTREE
        select OF_RESERVED_MEM
        select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
-       select HAVE_DEBUG_STACKOVERFLOW
-       select HAVE_GENERIC_DMA_COHERENT
-       select HAVE_KERNEL_GZIP
-       select HAVE_KERNEL_LZMA
-       select ARCH_HAS_PTE_SPECIAL
 
 config ARCH_HAS_CACHE_LINE_SIZE
        def_bool y
index fb026196aaabac06629621fdfb15a593356833ee..99cce77ab98f2d79c3dbef3130bff70b91ea076d 100644 (file)
@@ -43,10 +43,7 @@ ifdef CONFIG_ARC_CURR_IN_REG
 LINUXINCLUDE   +=  -include ${src}/arch/arc/include/asm/current.h
 endif
 
-upto_gcc44    :=  $(call cc-ifversion, -le, 0404, y)
-atleast_gcc44 :=  $(call cc-ifversion, -ge, 0404, y)
-
-cflags-$(atleast_gcc44)                        += -fsection-anchors
+cflags-y                               += -fsection-anchors
 
 cflags-$(CONFIG_ARC_HAS_LLSC)          += -mlock
 cflags-$(CONFIG_ARC_HAS_SWAPE)         += -mswape
@@ -82,11 +79,6 @@ cflags-$(disable_small_data)         += -mno-sdata -fcall-used-gp
 cflags-$(CONFIG_CPU_BIG_ENDIAN)                += -mbig-endian
 ldflags-$(CONFIG_CPU_BIG_ENDIAN)       += -EB
 
-# STAR 9000518362: (fixed with binutils shipping with gcc 4.8)
-# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
-# --build-id w/o "-marclinux". Default arc-elf32-ld is OK
-ldflags-$(upto_gcc44)                  += -marclinux
-
 LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
 
 # Modules with short calls might break for calls into builtin-kernel
index dc91c663bcc02e2cdc116f40ad31757d711ac485..d75d65ddf8e31db78c58fa9882b90c2e6be2ed4b 100644 (file)
                };
        };
 
+       /*
+        * Mark DMA peripherals connected via IOC port as dma-coherent. We do
+        * it via overlay because peripherals defined in axs10x_mb.dtsi are
+        * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so
+        * only AXS103 board has HW-coherent DMA peripherals)
+        * We don't need to mark pgu@17000 as dma-coherent because it uses
+        * external DMA buffer located outside of IOC aperture.
+        */
+       axs10x_mb {
+               ethernet@0x18000 {
+                       dma-coherent;
+               };
+
+               ehci@0x40000 {
+                       dma-coherent;
+               };
+
+               ohci@0x60000 {
+                       dma-coherent;
+               };
+
+               mmc@0x15000 {
+                       dma-coherent;
+               };
+       };
+
        /*
         * The DW APB ICTL intc on MB is connected to CPU intc via a
         * DT "invisible" DW APB GPIO block, configured to simply pass thru
index 69ff4895f2ba4b558f2bdfed547ef0ec27288174..a05bb737ea6392f5e77cd3830dceb8afe620943e 100644 (file)
                };
        };
 
+       /*
+        * Mark DMA peripherals connected via IOC port as dma-coherent. We do
+        * it via overlay because peripherals defined in axs10x_mb.dtsi are
+        * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so
+        * only AXS103 board has HW-coherent DMA peripherals)
+        * We don't need to mark pgu@17000 as dma-coherent because it uses
+        * external DMA buffer located outside of IOC aperture.
+        */
+       axs10x_mb {
+               ethernet@0x18000 {
+                       dma-coherent;
+               };
+
+               ehci@0x40000 {
+                       dma-coherent;
+               };
+
+               ohci@0x60000 {
+                       dma-coherent;
+               };
+
+               mmc@0x15000 {
+                       dma-coherent;
+               };
+       };
+
        /*
         * This INTC is actually connected to DW APB GPIO
         * which acts as a wire between MB INTC and CPU INTC.
index 47b74fbc403c21cc2f493f6f84d6216b7c5ef5c1..37bafd44e36d0fed9b85e80ea356cd78df0c1872 100644 (file)
@@ -9,6 +9,10 @@
  */
 
 / {
+       aliases {
+               ethernet = &gmac;
+       };
+
        axs10x_mb {
                compatible = "simple-bus";
                #address-cells = <1>;
@@ -68,7 +72,7 @@
                        };
                };
 
-               ethernet@0x18000 {
+               gmac: ethernet@0x18000 {
                        #interrupt-cells = <1>;
                        compatible = "snps,dwmac";
                        reg = < 0x18000 0x2000 >;
@@ -81,6 +85,7 @@
                        max-speed = <100>;
                        resets = <&creg_rst 5>;
                        reset-names = "stmmaceth";
+                       mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
                };
 
                ehci@0x40000 {
index 006aa3de5348f31c7462f52f173ad2e74434d062..ef149f59929ae394a30695fa0940060acef15817 100644 (file)
                bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
        };
 
+       aliases {
+               ethernet = &gmac;
+       };
+
        cpus {
                #address-cells = <1>;
                #size-cells = <0>;
                        #clock-cells = <0>;
                };
 
-               ethernet@8000 {
+               gmac: ethernet@8000 {
                        #interrupt-cells = <1>;
                        compatible = "snps,dwmac";
                        reg = <0x8000 0x2000>;
                        phy-handle = <&phy0>;
                        resets = <&cgu_rst HSDK_ETH_RESET>;
                        reset-names = "stmmaceth";
+                       mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
+                       dma-coherent;
 
                        mdio {
                                #address-cells = <1>;
                        compatible = "snps,hsdk-v1.0-ohci", "generic-ohci";
                        reg = <0x60000 0x100>;
                        interrupts = <15>;
+                       dma-coherent;
                };
 
                ehci@40000 {
                        compatible = "snps,hsdk-v1.0-ehci", "generic-ehci";
                        reg = <0x40000 0x100>;
                        interrupts = <15>;
+                       dma-coherent;
                };
 
                mmc@a000 {
                        clock-names = "biu", "ciu";
                        interrupts = <12>;
                        bus-width = <4>;
+                       dma-coherent;
                };
        };
 
index a635ea972304e3531b205c23a0e3ef814608e313..41bc08be6a3b4202bbe27f74fdc8e01a56e4c3cd 100644 (file)
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -63,7 +61,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_MOUSE_SERIAL=y
 CONFIG_MOUSE_SYNAPTICS_USB=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index aa507e423075b16be125d95fbb29b55b5b08683c..1e1c4a8011b523dc88b89fb39e90dfeab5a3154b 100644 (file)
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -64,7 +62,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_MOUSE_SERIAL=y
 CONFIG_MOUSE_SYNAPTICS_USB=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index eba07f4686545ed00383756ae53ba404b2b2b25e..6b0c0cfd5c304fd6ae58fc3fd92d9cb53e086d2d 100644 (file)
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -65,7 +63,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_MOUSE_SERIAL=y
 CONFIG_MOUSE_SYNAPTICS_USB=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index 098b19fbaa51f0116e7f0328eb3a17feb72f0123..240dd2cd514855ae96010f0e487c76b8bd37a6f9 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -57,7 +56,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 0104c404d8970ee44ecb0ced17fe137363e4cf5b..14ae7e5acc7c9cc381ebd9be04ad36a24b282a20 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -60,7 +59,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 6491be0ddbc9e9cfd457dccc452d5bebf28c1183..1dec2b4bc5e6ea70696249d6815dfe69e73eb21c 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_NO_HZ_IDLE=y
index 7c9c706ae7f66eb29d4cf48ca0b95d5dd44630f5..31ba224bbfb474985b49930dea193c6bbb1a5f37 100644 (file)
@@ -59,7 +59,6 @@ CONFIG_NETCONSOLE=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 99e05cf63fca2c6d953b952386b0cf1649ae7332..8e0b8b134cd9ed89652b88aea3bade03881e95c9 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -44,7 +43,6 @@ CONFIG_LXT_PHY=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
index 0dc4f9b737e7a4f48b41ae7caaedaa2ce89c5b40..739b90e5e8931f1e5f1443aa19b3e0fac93a155b 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -45,7 +44,6 @@ CONFIG_DEVTMPFS=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
index be3c30a15e54c09db51112ca88fd9b32d73a0d34..b5895bdf3a9393027d9fed716c4b83f86a9e4915 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
@@ -44,7 +43,6 @@ CONFIG_DEVTMPFS=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
index 3a74b9b217723d2c2c75a91510ef1aadeab7b89a..f14eeff7d3084948c16d8905677ec25a629ccdcc 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -48,7 +47,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index ea2834b4dc1dad187193549b7b146da413726c37..025298a483056b1ca782e83056f8b0a44d193809 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -47,7 +46,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 80a5a1b4924bcf086ed57c34d7778304288f35a2..df7b77b13b823dc0c8d41f543181b12a20212cbd 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -58,7 +57,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 2cc87f909747c1818385de9ba99c0bbeda6197b8..a7f65313f84a56a3ddc0307c669bbfbcf4c0386f 100644 (file)
@@ -57,7 +57,6 @@ CONFIG_STMMAC_ETH=y
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index f629493929ea620a50630ba49842383ecc3662a4..db47c3541f15931b2927fd1bd27749f2568e9761 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
@@ -53,7 +52,6 @@ CONFIG_NATIONAL_PHY=y
 CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index 21f0ca26a05d5cffeb58541aa127ed1154a3ec98..a8ac5e917d9a5895a4bc3ba30be01fd222ecec71 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
index 4e0072730241220c84ddc5019bba91c856de6f34..158af079838d007480f66b7d7ffe08d72b16c2d7 100644 (file)
@@ -84,7 +84,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)                       \
        "1:     llock   %[orig], [%[ctr]]               \n"             \
        "       " #asm_op " %[val], %[orig], %[i]       \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
-       "                                               \n"             \
+       "       bnz     1b                              \n"             \
        : [val] "=&r"   (val),                                          \
          [orig] "=&r" (orig)                                           \
        : [ctr] "r"     (&v->counter),                                  \
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644 (file)
index 0000000..c946c0a
--- /dev/null
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier:  GPL-2.0
+// (C) 2018 Synopsys, Inc. (www.synopsys.com)
+
+#ifndef ASM_ARC_DMA_MAPPING_H
+#define ASM_ARC_DMA_MAPPING_H
+
+#include <asm-generic/dma-mapping.h>
+
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+                       const struct iommu_ops *iommu, bool coherent);
+#define arch_setup_dma_ops arch_setup_dma_ops
+
+#endif
index 783b20354f8bf7889075ec4219310eb9b6f4fbf7..e8d9fb4523462a9807358fea19c4e7668cc1126d 100644 (file)
@@ -83,9 +83,6 @@ done:
 static void show_faulting_vma(unsigned long address, char *buf)
 {
        struct vm_area_struct *vma;
-       struct inode *inode;
-       unsigned long ino = 0;
-       dev_t dev = 0;
        char *nm = buf;
        struct mm_struct *active_mm = current->active_mm;
 
@@ -99,12 +96,10 @@ static void show_faulting_vma(unsigned long address, char *buf)
         * if the container VMA is not found
         */
        if (vma && (vma->vm_start <= address)) {
-               struct file *file = vma->vm_file;
-               if (file) {
-                       nm = file_path(file, buf, PAGE_SIZE - 1);
-                       inode = file_inode(vma->vm_file);
-                       dev = inode->i_sb->s_dev;
-                       ino = inode->i_ino;
+               if (vma->vm_file) {
+                       nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+                       if (IS_ERR(nm))
+                               nm = "?";
                }
                pr_info("    @off 0x%lx in [%s]\n"
                        "    VMA: 0x%08lx to 0x%08lx\n",
index 25c631942500ffe2802654f6690d9a223e2fbfaf..f2701c13a66b209571ff89b71ac6c93cabb9835d 100644 (file)
@@ -65,7 +65,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
 
        n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
                       perip_base,
-                      IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
+                      IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
 
        return buf;
 }
@@ -896,15 +896,6 @@ static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
        slc_op(start, sz, OP_FLUSH);
 }
 
-/*
- * DMA ops for systems with IOC
- * IOC hardware snoops all DMA traffic keeping the caches consistent with
- * memory - eliding need for any explicit cache maintenance of DMA buffers
- */
-static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
-static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
-static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
-
 /*
  * Exported DMA API
  */
@@ -1153,6 +1144,19 @@ noinline void __init arc_ioc_setup(void)
 {
        unsigned int ioc_base, mem_sz;
 
+       /*
+        * As for today we don't support both IOC and ZONE_HIGHMEM enabled
+        * simultaneously. This happens because as of today IOC aperture covers
+        * only ZONE_NORMAL (low mem) and any dma transactions outside this
+        * region won't be HW coherent.
+        * If we want to use both IOC and ZONE_HIGHMEM we can use
+        * bounce_buffer to handle dma transactions to HIGHMEM.
+        * Also it is possible to modify dma_direct cache ops or increase IOC
+        * aperture size if we are planning to use HIGHMEM without PAE.
+        */
+       if (IS_ENABLED(CONFIG_HIGHMEM))
+               panic("IOC and HIGHMEM can't be used simultaneously");
+
        /* Flush + invalidate + disable L1 dcache */
        __dc_disable();
 
@@ -1264,11 +1268,7 @@ void __init arc_cache_init_master(void)
        if (is_isa_arcv2() && ioc_enable)
                arc_ioc_setup();
 
-       if (is_isa_arcv2() && ioc_enable) {
-               __dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
-               __dma_cache_inv = __dma_cache_inv_ioc;
-               __dma_cache_wback = __dma_cache_wback_ioc;
-       } else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
+       if (is_isa_arcv2() && l2_line_sz && slc_enable) {
                __dma_cache_wback_inv = __dma_cache_wback_inv_slc;
                __dma_cache_inv = __dma_cache_inv_slc;
                __dma_cache_wback = __dma_cache_wback_slc;
@@ -1277,6 +1277,12 @@ void __init arc_cache_init_master(void)
                __dma_cache_inv = __dma_cache_inv_l1;
                __dma_cache_wback = __dma_cache_wback_l1;
        }
+       /*
+        * In case of IOC (say IOC+SLC case), pointers above could still be set
+        * but end up not being relevant as the first function in chain is not
+        * called at all for @dma_direct_ops
+        *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
+        */
 }
 
 void __ref arc_cache_init(void)
index ec47e6079f5d08371a65ea21277b2985bec989d5..c75d5c3470e3595ce7af09f00346d5f82fc92a4c 100644 (file)
@@ -6,20 +6,17 @@
  * published by the Free Software Foundation.
  */
 
-/*
- * DMA Coherent API Notes
- *
- * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessing it using a kernel virtual address, with
- * Cache bit off in the TLB entry.
- *
- * The default DMA address == Phy address which is 0x8000_0000 based.
- */
-
 #include <linux/dma-noncoherent.h>
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
 
+/*
+ * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
+ *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
+ *  - But still handle both coherent and non-coherent requests from caller
+ *
+ * For DMA coherent hardware (IOC) generic code suffices
+ */
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
 {
@@ -27,42 +24,29 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
        struct page *page;
        phys_addr_t paddr;
        void *kvaddr;
-       int need_coh = 1, need_kvaddr = 0;
-
-       page = alloc_pages(gfp, order);
-       if (!page)
-               return NULL;
+       bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);
 
        /*
-        * IOC relies on all data (even coherent DMA data) being in cache
-        * Thus allocate normal cached memory
-        *
-        * The gains with IOC are two pronged:
-        *   -For streaming data, elides need for cache maintenance, saving
-        *    cycles in flush code, and bus bandwidth as all the lines of a
-        *    buffer need to be flushed out to memory
-        *   -For coherent data, Read/Write to buffers terminate early in cache
-        *   (vs. always going to memory - thus are faster)
+        * __GFP_HIGHMEM flag is cleared by upper layer functions
+        * (in include/linux/dma-mapping.h) so we should never get a
+        * __GFP_HIGHMEM here.
         */
-       if ((is_isa_arcv2() && ioc_enable) ||
-           (attrs & DMA_ATTR_NON_CONSISTENT))
-               need_coh = 0;
+       BUG_ON(gfp & __GFP_HIGHMEM);
 
-       /*
-        * - A coherent buffer needs MMU mapping to enforce non-cachability
-        * - A highmem page needs a virtual handle (hence MMU mapping)
-        *   independent of cachability
-        */
-       if (PageHighMem(page) || need_coh)
-               need_kvaddr = 1;
+       page = alloc_pages(gfp, order);
+       if (!page)
+               return NULL;
 
        /* This is linear addr (0x8000_0000 based) */
        paddr = page_to_phys(page);
 
        *dma_handle = paddr;
 
-       /* This is kernel Virtual address (0x7000_0000 based) */
-       if (need_kvaddr) {
+       /*
+        * A coherent buffer needs MMU mapping to enforce non-cachability.
+        * kvaddr is kernel Virtual address (0x7000_0000 based).
+        */
+       if (need_coh) {
                kvaddr = ioremap_nocache(paddr, size);
                if (kvaddr == NULL) {
                        __free_pages(page, order);
@@ -93,12 +77,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 {
        phys_addr_t paddr = dma_handle;
        struct page *page = virt_to_page(paddr);
-       int is_non_coh = 1;
-
-       is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
-                       (is_isa_arcv2() && ioc_enable);
 
-       if (PageHighMem(page) || !is_non_coh)
+       if (!(attrs & DMA_ATTR_NON_CONSISTENT))
                iounmap((void __force __iomem *)vaddr);
 
        __free_pages(page, get_order(size));
@@ -185,3 +165,23 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                break;
        }
 }
+
+/*
+ * Plug in coherent or noncoherent dma ops
+ */
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+                       const struct iommu_ops *iommu, bool coherent)
+{
+       /*
+        * IOC hardware snoops all DMA traffic keeping the caches consistent
+        * with memory - eliding need for any explicit cache maintenance of
+        * DMA buffers - so we can use dma_direct cache ops.
+        */
+       if (is_isa_arcv2() && ioc_enable && coherent) {
+               set_dma_ops(dev, &dma_direct_ops);
+               dev_info(dev, "use dma_direct_ops cache ops\n");
+       } else {
+               set_dma_ops(dev, &dma_noncoherent_ops);
+               dev_info(dev, "use dma_noncoherent_ops cache ops\n");
+       }
+}
index 79906cecb091e4fbf8dafb850d9d4b7ea19c3c41..3ad482d2f1eb91c8bfe6b597788e1e70b9521234 100644 (file)
@@ -223,7 +223,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
index ceffc40810eec42e3655f80fcd39d66965617c73..48daec7f78ba7cf4e674687fca73ed976c3b5b4b 100644 (file)
@@ -46,6 +46,7 @@
        pinctrl-0 = <&mmc0_pins>;
        vmmc-supply = <&reg_cldo1>;
        cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
+       bus-width = <4>;
        status = "okay";
 };
 
@@ -56,6 +57,7 @@
        vqmmc-supply = <&reg_bldo2>;
        non-removable;
        cap-mmc-hw-reset;
+       bus-width = <8>;
        status = "okay";
 };
 
index f26055f2306e1f9a479417507c2edf428c9f99de..3d6d7336f871221fd29bcc3bc4faa2cee0a7765f 100644 (file)
@@ -61,8 +61,7 @@ struct kvm_arch {
        u64    vmid_gen;
        u32    vmid;
 
-       /* 1-level 2nd stage table and lock */
-       spinlock_t pgd_lock;
+       /* 1-level 2nd stage table, protected by kvm->mmu_lock */
        pgd_t *pgd;
 
        /* VTTBR value associated with above pgd and vmid */
@@ -357,7 +356,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
index d496ef579859627edd1ba98c1233d9584cd407e3..ca46153d79154bae1b0833231245129752484362 100644 (file)
@@ -98,8 +98,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
        val = read_sysreg(cpacr_el1);
        val |= CPACR_EL1_TTA;
        val &= ~CPACR_EL1_ZEN;
-       if (!update_fp_enabled(vcpu))
+       if (!update_fp_enabled(vcpu)) {
                val &= ~CPACR_EL1_FPEN;
+               __activate_traps_fpsimd32(vcpu);
+       }
 
        write_sysreg(val, cpacr_el1);
 
@@ -114,8 +116,10 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 
        val = CPTR_EL2_DEFAULT;
        val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
-       if (!update_fp_enabled(vcpu))
+       if (!update_fp_enabled(vcpu)) {
                val |= CPTR_EL2_TFP;
+               __activate_traps_fpsimd32(vcpu);
+       }
 
        write_sysreg(val, cptr_el2);
 }
@@ -129,7 +133,6 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
                write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
 
-       __activate_traps_fpsimd32(vcpu);
        if (has_vhe())
                activate_traps_vhe(vcpu);
        else
index 65f86271f02bc1bf71a44b07fba51aefee49d381..8080c9f489c3e43af385066514f3f60cca629141 100644 (file)
@@ -985,8 +985,9 @@ int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
 
        pmd = READ_ONCE(*pmdp);
 
-       /* No-op for empty entry and WARN_ON for valid entry */
-       if (!pmd_present(pmd) || !pmd_table(pmd)) {
+       if (!pmd_present(pmd))
+               return 1;
+       if (!pmd_table(pmd)) {
                VM_WARN_ON(!pmd_table(pmd));
                return 1;
        }
@@ -1007,8 +1008,9 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
 
        pud = READ_ONCE(*pudp);
 
-       /* No-op for empty entry and WARN_ON for valid entry */
-       if (!pud_present(pud) || !pud_table(pud)) {
+       if (!pud_present(pud))
+               return 1;
+       if (!pud_table(pud)) {
                VM_WARN_ON(!pud_table(pud));
                return 1;
        }
index 70dde040779b56feaeaa650a7c342a23750a1fce..f5453d944ff5e19d341925590cca753ffb4fcd7e 100644 (file)
@@ -172,7 +172,7 @@ void __init cf_bootmem_alloc(void)
        high_memory = (void *)_ramend;
 
        /* Reserve kernel text/data/bss */
-       memblock_reserve(memstart, memstart - _rambase);
+       memblock_reserve(_rambase, memstart - _rambase);
 
        m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
        module_fixup(NULL, __start_fixup, __stop_fixup);
index a9af1d2dcd699114d00a55689c29137cef384841..2c1c53d12179302140d3576dddd11a732a5b13d9 100644 (file)
@@ -931,7 +931,6 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
                                                   bool write);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
index 019035d7225c4fd942c96c6628b6605f8d2af1b4..8f845f6e5f4266568288969b9b19b7357b86598b 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
+#include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -20,6 +21,7 @@
 
 #include <asm/abi.h>
 #include <asm/mips-cps.h>
+#include <asm/page.h>
 #include <asm/vdso.h>
 
 /* Kernel-provided data used by the VDSO. */
@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
        vvar_size = gic_size + PAGE_SIZE;
        size = vvar_size + image->size;
 
+       /*
+        * Find a region that's large enough for us to perform the
+        * colour-matching alignment below.
+        */
+       if (cpu_has_dc_aliases)
+               size += shm_align_mask + 1;
+
        base = get_unmapped_area(NULL, 0, size, 0, 0);
        if (IS_ERR_VALUE(base)) {
                ret = base;
                goto out;
        }
 
+       /*
+        * If we suffer from dcache aliasing, ensure that the VDSO data page
+        * mapping is coloured the same as the kernel's mapping of that memory.
+        * This ensures that when the kernel updates the VDSO data userland
+        * will observe it without requiring cache invalidations.
+        */
+       if (cpu_has_dc_aliases) {
+               base = __ALIGN_MASK(base, shm_align_mask);
+               base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
+       }
+
        data_addr = base + gic_size;
        vdso_addr = data_addr + PAGE_SIZE;
 
index ee64db03279336db79ac5c98e7634074d47608ac..d8dcdb350405900928b83e7afa2112ecf3122518 100644 (file)
@@ -512,16 +512,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
        return 1;
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-       unsigned long end = hva + PAGE_SIZE;
-
-       handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
-
-       kvm_mips_callbacks->flush_shadow_all(kvm);
-       return 0;
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
index 1d4248fa55e995fdc7cba95f9209372ed6533b52..7068f341133d7eb038bb94a9953a3b9946d9bf51 100644 (file)
@@ -40,6 +40,10 @@ config NDS32
        select NO_IOPORT_MAP
        select RTC_LIB
        select THREAD_INFO_IN_TASK
+       select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_DYNAMIC_FTRACE
        help
          Andes(nds32) Linux support.
 
index 63f4f173e5f4b97afa7078a3d8600fbfb50c13a6..3509fac104919ff8d9727d4cdfe2185d10473746 100644 (file)
@@ -5,6 +5,10 @@ KBUILD_DEFCONFIG := defconfig
 
 comma = ,
 
+ifdef CONFIG_FUNCTION_TRACER
+arch-y += -malways-save-lp -mno-relax
+endif
+
 KBUILD_CFLAGS  += $(call cc-option, -mno-sched-prolog-epilog)
 KBUILD_CFLAGS  += -mcmodel=large
 
index 56c47905880256455a17943427651b0bb72e03ed..f5f9cf7e054401431f89b72af891c5a44b20baba 100644 (file)
@@ -121,9 +121,9 @@ struct elf32_hdr;
  */
 #define ELF_CLASS      ELFCLASS32
 #ifdef __NDS32_EB__
-#define ELF_DATA       ELFDATA2MSB;
+#define ELF_DATA       ELFDATA2MSB
 #else
-#define ELF_DATA       ELFDATA2LSB;
+#define ELF_DATA       ELFDATA2LSB
 #endif
 #define ELF_ARCH       EM_NDS32
 #define USE_ELF_CORE_DUMP
diff --git a/arch/nds32/include/asm/ftrace.h b/arch/nds32/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..2f96cc9
--- /dev/null
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_NDS32_FTRACE_H
+#define __ASM_NDS32_FTRACE_H
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
+/* mcount call is composed of three instructions:
+ * sethi + ori + jral
+ */
+#define MCOUNT_INSN_SIZE 12
+
+extern void _mcount(unsigned long parent_ip);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define FTRACE_ADDR ((unsigned long)_ftrace_caller)
+
+#ifdef __NDS32_EL__
+#define INSN_NOP               0x09000040
+#define INSN_SIZE(insn)                (((insn & 0x00000080) == 0) ? 4 : 2)
+#define IS_SETHI(insn)         ((insn & 0x000000fe) == 0x00000046)
+#define ENDIAN_CONVERT(insn)   be32_to_cpu(insn)
+#else /* __NDS32_EB__ */
+#define INSN_NOP               0x40000009
+#define INSN_SIZE(insn)                (((insn & 0x80000000) == 0) ? 4 : 2)
+#define IS_SETHI(insn)         ((insn & 0xfe000000) == 0x46000000)
+#define ENDIAN_CONVERT(insn)   (insn)
+#endif
+
+extern void _ftrace_caller(unsigned long parent_ip);
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+       return addr;
+}
+struct dyn_arch_ftrace {
+};
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* __ASM_NDS32_FTRACE_H */
index 19b19394a936cfdecaa51dba9159ee03473b76e7..68c38151c3e41c24c24081f92449a097bf077f02 100644 (file)
@@ -17,6 +17,7 @@
 #else
 #define FP_OFFSET (-2)
 #endif
+#define LP_OFFSET (-1)
 
 extern void __init early_trap_init(void);
 static inline void GIE_ENABLE(void)
index 18a009f3804d5e94ac23bd67b6eb4144ffa2c614..362a32d9bd16871e1db6c45d544eb2c4450bfeb5 100644 (file)
@@ -38,7 +38,7 @@ struct exception_table_entry {
 extern int fixup_exception(struct pt_regs *regs);
 
 #define KERNEL_DS      ((mm_segment_t) { ~0UL })
-#define USER_DS        ((mm_segment_t) {TASK_SIZE - 1})
+#define USER_DS                ((mm_segment_t) {TASK_SIZE - 1})
 
 #define get_ds()       (KERNEL_DS)
 #define get_fs()       (current_thread_info()->addr_limit)
@@ -49,11 +49,11 @@ static inline void set_fs(mm_segment_t fs)
        current_thread_info()->addr_limit = fs;
 }
 
-#define segment_eq(a, b)    ((a) == (b))
+#define segment_eq(a, b)       ((a) == (b))
 
 #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size))
 
-#define access_ok(type, addr, size)                 \
+#define access_ok(type, addr, size)    \
        __range_ok((unsigned long)addr, (unsigned long)size)
 /*
  * Single-value transfer routines.  They automatically use the right
@@ -75,70 +75,73 @@ static inline void set_fs(mm_segment_t fs)
  * versions are void (ie, don't return a value as such).
  */
 
-#define get_user(x,p)                                                  \
-({                                                                     \
-       long __e = -EFAULT;                                             \
-       if(likely(access_ok(VERIFY_READ,  p, sizeof(*p)))) {            \
-               __e = __get_user(x,p);                                  \
-       } else                                                          \
-               x = 0;                                                  \
-       __e;                                                            \
-})
-#define __get_user(x,ptr)                                              \
+#define get_user       __get_user                                      \
+
+#define __get_user(x, ptr)                                             \
 ({                                                                     \
        long __gu_err = 0;                                              \
-       __get_user_err((x),(ptr),__gu_err);                             \
+       __get_user_check((x), (ptr), __gu_err);                         \
        __gu_err;                                                       \
 })
 
-#define __get_user_error(x,ptr,err)                                    \
+#define __get_user_error(x, ptr, err)                                  \
 ({                                                                     \
-       __get_user_err((x),(ptr),err);                                  \
-       (void) 0;                                                       \
+       __get_user_check((x), (ptr), (err));                            \
+       (void)0;                                                        \
 })
 
-#define __get_user_err(x,ptr,err)                                      \
+#define __get_user_check(x, ptr, err)                                  \
+({                                                                     \
+       const __typeof__(*(ptr)) __user *__p = (ptr);                   \
+       might_fault();                                                  \
+       if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {                \
+               __get_user_err((x), __p, (err));                        \
+       } else {                                                        \
+               (x) = 0; (err) = -EFAULT;                               \
+       }                                                               \
+})
+
+#define __get_user_err(x, ptr, err)                                    \
 do {                                                                   \
-       unsigned long __gu_addr = (unsigned long)(ptr);                 \
        unsigned long __gu_val;                                         \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
-               __get_user_asm("lbi",__gu_val,__gu_addr,err);           \
+               __get_user_asm("lbi", __gu_val, (ptr), (err));          \
                break;                                                  \
        case 2:                                                         \
-               __get_user_asm("lhi",__gu_val,__gu_addr,err);           \
+               __get_user_asm("lhi", __gu_val, (ptr), (err));          \
                break;                                                  \
        case 4:                                                         \
-               __get_user_asm("lwi",__gu_val,__gu_addr,err);           \
+               __get_user_asm("lwi", __gu_val, (ptr), (err));          \
                break;                                                  \
        case 8:                                                         \
-               __get_user_asm_dword(__gu_val,__gu_addr,err);           \
+               __get_user_asm_dword(__gu_val, (ptr), (err));           \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
                break;                                                  \
        }                                                               \
-       (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
 } while (0)
 
-#define __get_user_asm(inst,x,addr,err)                                        \
-       asm volatile(                                                   \
-       "1:     "inst"  %1,[%2]\n"                                      \
-       "2:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "3:     move %0, %3\n"                                          \
-       "       move %1, #0\n"                                          \
-       "       b       2b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 3b\n"                                       \
-       "       .previous"                                              \
-       : "+r" (err), "=&r" (x)                                         \
-       : "r" (addr), "i" (-EFAULT)                                     \
-       : "cc")
+#define __get_user_asm(inst, x, addr, err)                             \
+       __asm__ __volatile__ (                                          \
+               "1:     "inst"  %1,[%2]\n"                              \
+               "2:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "3:     move %0, %3\n"                                  \
+               "       move %1, #0\n"                                  \
+               "       b       2b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 3b\n"                               \
+               "       .previous"                                      \
+               : "+r" (err), "=&r" (x)                                 \
+               : "r" (addr), "i" (-EFAULT)                             \
+               : "cc")
 
 #ifdef __NDS32_EB__
 #define __gu_reg_oper0 "%H1"
@@ -149,61 +152,66 @@ do {                                                                      \
 #endif
 
 #define __get_user_asm_dword(x, addr, err)                             \
-       asm volatile(                                                   \
-       "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n"                           \
-       "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n"                         \
-       "3:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "4:     move    %0, %3\n"                                       \
-       "       b       3b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 4b\n"                                       \
-       "       .long   2b, 4b\n"                                       \
-       "       .previous"                                              \
-       : "+r"(err), "=&r"(x)                                           \
-       : "r"(addr), "i"(-EFAULT)                                       \
-       : "cc")
-#define put_user(x,p)                                                  \
-({                                                                     \
-       long __e = -EFAULT;                                             \
-       if(likely(access_ok(VERIFY_WRITE,  p, sizeof(*p)))) {           \
-               __e = __put_user(x,p);                                  \
-       }                                                               \
-       __e;                                                            \
-})
-#define __put_user(x,ptr)                                              \
+       __asm__ __volatile__ (                                          \
+               "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n"                   \
+               "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n"                 \
+               "3:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "4:     move    %0, %3\n"                               \
+               "       b       3b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 4b\n"                               \
+               "       .long   2b, 4b\n"                               \
+               "       .previous"                                      \
+               : "+r"(err), "=&r"(x)                                   \
+               : "r"(addr), "i"(-EFAULT)                               \
+               : "cc")
+
+#define put_user       __put_user                                      \
+
+#define __put_user(x, ptr)                                             \
 ({                                                                     \
        long __pu_err = 0;                                              \
-       __put_user_err((x),(ptr),__pu_err);                             \
+       __put_user_err((x), (ptr), __pu_err);                           \
        __pu_err;                                                       \
 })
 
-#define __put_user_error(x,ptr,err)                                    \
+#define __put_user_error(x, ptr, err)                                  \
+({                                                                     \
+       __put_user_err((x), (ptr), (err));                              \
+       (void)0;                                                        \
+})
+
+#define __put_user_check(x, ptr, err)                                  \
 ({                                                                     \
-       __put_user_err((x),(ptr),err);                                  \
-       (void) 0;                                                       \
+       __typeof__(*(ptr)) __user *__p = (ptr);                         \
+       might_fault();                                                  \
+       if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {               \
+               __put_user_err((x), __p, (err));                        \
+       } else  {                                                       \
+               (err) = -EFAULT;                                        \
+       }                                                               \
 })
 
-#define __put_user_err(x,ptr,err)                                      \
+#define __put_user_err(x, ptr, err)                                    \
 do {                                                                   \
-       unsigned long __pu_addr = (unsigned long)(ptr);                 \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
-               __put_user_asm("sbi",__pu_val,__pu_addr,err);           \
+               __put_user_asm("sbi", __pu_val, (ptr), (err));          \
                break;                                                  \
        case 2:                                                         \
-               __put_user_asm("shi",__pu_val,__pu_addr,err);           \
+               __put_user_asm("shi", __pu_val, (ptr), (err));          \
                break;                                                  \
        case 4:                                                         \
-               __put_user_asm("swi",__pu_val,__pu_addr,err);           \
+               __put_user_asm("swi", __pu_val, (ptr), (err));          \
                break;                                                  \
        case 8:                                                         \
-               __put_user_asm_dword(__pu_val,__pu_addr,err);           \
+               __put_user_asm_dword(__pu_val, (ptr), (err));           \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
@@ -211,22 +219,22 @@ do {                                                                      \
        }                                                               \
 } while (0)
 
-#define __put_user_asm(inst,x,addr,err)                                        \
-       asm volatile(                                                   \
-       "1:     "inst"  %1,[%2]\n"                                      \
-       "2:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "3:     move    %0, %3\n"                                       \
-       "       b       2b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 3b\n"                                       \
-       "       .previous"                                              \
-       : "+r" (err)                                                    \
-       : "r" (x), "r" (addr), "i" (-EFAULT)                            \
-       : "cc")
+#define __put_user_asm(inst, x, addr, err)                             \
+       __asm__ __volatile__ (                                          \
+               "1:     "inst"  %1,[%2]\n"                              \
+               "2:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "3:     move    %0, %3\n"                               \
+               "       b       2b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 3b\n"                               \
+               "       .previous"                                      \
+               : "+r" (err)                                            \
+               : "r" (x), "r" (addr), "i" (-EFAULT)                    \
+               : "cc")
 
 #ifdef __NDS32_EB__
 #define __pu_reg_oper0 "%H2"
@@ -237,23 +245,24 @@ do {                                                                      \
 #endif
 
 #define __put_user_asm_dword(x, addr, err)                             \
-       asm volatile(                                                   \
-       "\n1:\tswi " __pu_reg_oper0 ",[%1]\n"                           \
-       "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n"                         \
-       "3:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "4:     move    %0, %3\n"                                       \
-       "       b       3b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 4b\n"                                       \
-       "       .long   2b, 4b\n"                                       \
-       "       .previous"                                              \
-       : "+r"(err)                                                     \
-       : "r"(addr), "r"(x), "i"(-EFAULT)                               \
-       : "cc")
+       __asm__ __volatile__ (                                          \
+               "\n1:\tswi " __pu_reg_oper0 ",[%1]\n"                   \
+               "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n"                 \
+               "3:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "4:     move    %0, %3\n"                               \
+               "       b       3b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 4b\n"                               \
+               "       .long   2b, 4b\n"                               \
+               "       .previous"                                      \
+               : "+r"(err)                                             \
+               : "r"(addr), "r"(x), "i"(-EFAULT)                       \
+               : "cc")
+
 extern unsigned long __arch_clear_user(void __user * addr, unsigned long n);
 extern long strncpy_from_user(char *dest, const char __user * src, long count);
 extern __must_check long strlen_user(const char __user * str);
index 42792743e8b953290b7cec0b4722684c72744df3..27cded39fa66266171a427afc988ad9db35fe75e 100644 (file)
@@ -21,3 +21,9 @@ extra-y := head.o vmlinux.lds
 
 
 obj-y                          += vdso/
+
+obj-$(CONFIG_FUNCTION_TRACER)   += ftrace.o
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+endif
index 0c6d031a1c4a9df00dc991c0f52dbdd7bddab51c..0c5386e72098e45f48b925efee3431bb275a1398 100644 (file)
@@ -9,7 +9,8 @@
 
 void __iomem *atl2c_base;
 static const struct of_device_id atl2c_ids[] __initconst = {
-       {.compatible = "andestech,atl2c",}
+       {.compatible = "andestech,atl2c",},
+       {}
 };
 
 static int __init atl2c_of_init(void)
index b8ae4e9a6b93db793d0b6b7d434adcdb47827743..21a144071566989af1daa55400a0c9fab98b34ee 100644 (file)
@@ -118,7 +118,7 @@ common_exception_handler:
        /* interrupt */
 2:
 #ifdef CONFIG_TRACE_IRQFLAGS
-       jal     trace_hardirqs_off
+       jal     __trace_hardirqs_off
 #endif
        move    $r0, $sp
        sethi   $lp, hi20(ret_from_intr)
index 03e4f7788a1882a15cf89e68aa325978cbc3b0ce..f00af92f7e22fde904fc6e54999a9ebcdde7e950 100644 (file)
@@ -138,8 +138,8 @@ no_work_pending:
 #ifdef CONFIG_TRACE_IRQFLAGS
        lwi     $p0, [$sp+(#IPSW_OFFSET)]
        andi    $p0, $p0, #0x1
-       la      $r10, trace_hardirqs_off
-       la      $r9, trace_hardirqs_on
+       la      $r10, __trace_hardirqs_off
+       la      $r9, __trace_hardirqs_on
        cmovz   $r9, $p0, $r10
        jral    $r9
 #endif
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
new file mode 100644 (file)
index 0000000..a0a9679
--- /dev/null
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+extern void (*ftrace_trace_function)(unsigned long, unsigned long,
+                                    struct ftrace_ops*, struct pt_regs*);
+extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
+extern void ftrace_graph_caller(void);
+
+noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
+                                 struct ftrace_ops *op, struct pt_regs *regs)
+{
+       __asm__ ("");  /* prevent optimization as a pure function */
+}
+
+noinline void _mcount(unsigned long parent_ip)
+{
+       /* save all state by the compiler prologue */
+
+       unsigned long ip = (unsigned long)__builtin_return_address(0);
+
+       if (ftrace_trace_function != ftrace_stub)
+               ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip,
+                                     NULL, NULL);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       if (ftrace_graph_return != (trace_func_graph_ret_t)ftrace_stub
+           || ftrace_graph_entry != ftrace_graph_entry_stub)
+               ftrace_graph_caller();
+#endif
+
+       /* restore all state by the compiler epilogue */
+}
+EXPORT_SYMBOL(_mcount);
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
+                                 struct ftrace_ops *op, struct pt_regs *regs)
+{
+       __asm__ ("");  /* prevent optimization as a pure function */
+}
+
+noinline void __naked _mcount(unsigned long parent_ip)
+{
+       __asm__ ("");  /* prevent optimization as a pure function */
+}
+EXPORT_SYMBOL(_mcount);
+
+#define XSTR(s) STR(s)
+#define STR(s) #s
+void _ftrace_caller(unsigned long parent_ip)
+{
+       /* save all state needed by the compiler prologue */
+
+       /*
+        * prepare arguments for real tracing function
+        * first  arg : __builtin_return_address(0) - MCOUNT_INSN_SIZE
+        * second arg : parent_ip
+        */
+       __asm__ __volatile__ (
+               "move $r1, %0                              \n\t"
+               "addi $r0, %1, #-" XSTR(MCOUNT_INSN_SIZE) "\n\t"
+               :
+               : "r" (parent_ip), "r" (__builtin_return_address(0)));
+
+       /* a placeholder for the call to a real tracing function */
+       __asm__ __volatile__ (
+               "ftrace_call:           \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t");
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       /* a placeholder for the call to ftrace_graph_caller */
+       __asm__ __volatile__ (
+               "ftrace_graph_call:     \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t");
+#endif
+       /* restore all state needed by the compiler epilogue */
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+       return 0;
+}
+
+int ftrace_arch_code_modify_prepare(void)
+{
+       set_all_modules_text_rw();
+       return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+       set_all_modules_text_ro();
+       return 0;
+}
+
+static unsigned long gen_sethi_insn(unsigned long addr)
+{
+       unsigned long opcode = 0x46000000;
+       unsigned long imm = addr >> 12;
+       unsigned long rt_num = 0xf << 20;
+
+       return ENDIAN_CONVERT(opcode | rt_num | imm);
+}
+
+static unsigned long gen_ori_insn(unsigned long addr)
+{
+       unsigned long opcode = 0x58000000;
+       unsigned long imm = addr & 0x0000fff;
+       unsigned long rt_num = 0xf << 20;
+       unsigned long ra_num = 0xf << 15;
+
+       return ENDIAN_CONVERT(opcode | rt_num | ra_num | imm);
+}
+
+static unsigned long gen_jral_insn(unsigned long addr)
+{
+       unsigned long opcode = 0x4a000001;
+       unsigned long rt_num = 0x1e << 20;
+       unsigned long rb_num = 0xf << 10;
+
+       return ENDIAN_CONVERT(opcode | rt_num | rb_num);
+}
+
+static void ftrace_gen_call_insn(unsigned long *call_insns,
+                                unsigned long addr)
+{
+       call_insns[0] = gen_sethi_insn(addr); /* sethi $r15, imm20u       */
+       call_insns[1] = gen_ori_insn(addr);   /* ori   $r15, $r15, imm15u */
+       call_insns[2] = gen_jral_insn(addr);  /* jral  $lp,  $r15         */
+}
+
+static int __ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
+                               unsigned long *new_insn, bool validate)
+{
+       unsigned long orig_insn[3];
+
+       if (validate) {
+               if (probe_kernel_read(orig_insn, (void *)pc, MCOUNT_INSN_SIZE))
+                       return -EFAULT;
+               if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE))
+                       return -EINVAL;
+       }
+
+       if (probe_kernel_write((void *)pc, new_insn, MCOUNT_INSN_SIZE))
+               return -EPERM;
+
+       return 0;
+}
+
+static int ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
+                             unsigned long *new_insn, bool validate)
+{
+       int ret;
+
+       ret = __ftrace_modify_code(pc, old_insn, new_insn, validate);
+       if (ret)
+               return ret;
+
+       flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+
+       return ret;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+       unsigned long pc = (unsigned long)&ftrace_call;
+       unsigned long old_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long new_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       if (func != ftrace_stub)
+               ftrace_gen_call_insn(new_insn, (unsigned long)func);
+
+       return ftrace_modify_code(pc, old_insn, new_insn, false);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned long pc = rec->ip;
+       unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       ftrace_gen_call_insn(call_insn, addr);
+
+       return ftrace_modify_code(pc, nop_insn, call_insn, true);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+                   unsigned long addr)
+{
+       unsigned long pc = rec->ip;
+       unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       ftrace_gen_call_insn(call_insn, addr);
+
+       return ftrace_modify_code(pc, call_insn, nop_insn, true);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+                          unsigned long frame_pointer)
+{
+       unsigned long return_hooker = (unsigned long)&return_to_handler;
+       struct ftrace_graph_ent trace;
+       unsigned long old;
+       int err;
+
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               return;
+
+       old = *parent;
+
+       trace.func = self_addr;
+       trace.depth = current->curr_ret_stack + 1;
+
+       /* Only trace if the calling function expects to */
+       if (!ftrace_graph_entry(&trace))
+               return;
+
+       err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+                                      frame_pointer, NULL);
+
+       if (err == -EBUSY)
+               return;
+
+       *parent = return_hooker;
+}
+
+noinline void ftrace_graph_caller(void)
+{
+       unsigned long *parent_ip =
+               (unsigned long *)(__builtin_frame_address(2) - 4);
+
+       unsigned long selfpc =
+               (unsigned long)(__builtin_return_address(1) - MCOUNT_INSN_SIZE);
+
+       unsigned long frame_pointer =
+               (unsigned long)__builtin_frame_address(3);
+
+       prepare_ftrace_return(parent_ip, selfpc, frame_pointer);
+}
+
+extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
+void __naked return_to_handler(void)
+{
+       __asm__ __volatile__ (
+               /* save state needed by the ABI     */
+               "smw.adm $r0,[$sp],$r1,#0x0  \n\t"
+
+               /* get original return address      */
+               "move $r0, $fp               \n\t"
+               "bal ftrace_return_to_handler\n\t"
+               "move $lp, $r0               \n\t"
+
+               /* restore state needed by the ABI  */
+               "lmw.bim $r0,[$sp],$r1,#0x0  \n\t");
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern unsigned long ftrace_graph_call;
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+       unsigned long pc = (unsigned long)&ftrace_graph_call;
+       unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       ftrace_gen_call_insn(call_insn, (unsigned long)ftrace_graph_caller);
+
+       if (enable)
+               return ftrace_modify_code(pc, nop_insn, call_insn, true);
+       else
+               return ftrace_modify_code(pc, call_insn, nop_insn, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+       return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+       return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+noinline void __trace_hardirqs_off(void)
+{
+       trace_hardirqs_off();
+}
+noinline void __trace_hardirqs_on(void)
+{
+       trace_hardirqs_on();
+}
+#endif /* CONFIG_TRACE_IRQFLAGS */
index 4167283d8293f16c65e5f1b100a40201265a1c26..1e31829cbc2a71ec4dd3c6be62874245cda010c2 100644 (file)
@@ -40,7 +40,7 @@ void do_reloc16(unsigned int val, unsigned int *loc, unsigned int val_mask,
 
        tmp2 = tmp & loc_mask;
        if (partial_in_place) {
-               tmp &= (!loc_mask);
+               tmp &= (~loc_mask);
                tmp =
                    tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask);
        } else {
@@ -70,7 +70,7 @@ void do_reloc32(unsigned int val, unsigned int *loc, unsigned int val_mask,
 
        tmp2 = tmp & loc_mask;
        if (partial_in_place) {
-               tmp &= (!loc_mask);
+               tmp &= (~loc_mask);
                tmp =
                    tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask);
        } else {
index 8b231e910ea68980dbd517be895200ad19e49f55..d974c0c1c65f34123af8c7d2a63e2fb3e390e9f9 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
+#include <linux/ftrace.h>
 
 void save_stack_trace(struct stack_trace *trace)
 {
@@ -16,6 +17,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
        unsigned long *fpn;
        int skip = trace->skip;
        int savesched;
+       int graph_idx = 0;
 
        if (tsk == current) {
                __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn));
@@ -29,10 +31,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
               && (fpn >= (unsigned long *)TASK_SIZE)) {
                unsigned long lpp, fpp;
 
-               lpp = fpn[-1];
+               lpp = fpn[LP_OFFSET];
                fpp = fpn[FP_OFFSET];
                if (!__kernel_text_address(lpp))
                        break;
+               else
+                       lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
 
                if (savesched || !in_sched_functions(lpp)) {
                        if (skip) {
index a6205fd4db521eaf83c606d399975a4fe275c250..1496aab48998817c00cb175dbd8b09b3453d73dd 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/kdebug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/uaccess.h>
+#include <linux/ftrace.h>
 
 #include <asm/proc-fns.h>
 #include <asm/unistd.h>
@@ -94,28 +95,6 @@ static void dump_instr(struct pt_regs *regs)
        set_fs(fs);
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#include <linux/ftrace.h>
-static void
-get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
-{
-       if (*addr == (unsigned long)return_to_handler) {
-               int index = tsk->curr_ret_stack;
-
-               if (tsk->ret_stack && index >= *graph) {
-                       index -= *graph;
-                       *addr = tsk->ret_stack[index].ret;
-                       (*graph)++;
-               }
-       }
-}
-#else
-static inline void
-get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
-{
-}
-#endif
-
 #define LOOP_TIMES (100)
 static void __dump(struct task_struct *tsk, unsigned long *base_reg)
 {
@@ -126,7 +105,8 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
                while (!kstack_end(base_reg)) {
                        ret_addr = *base_reg++;
                        if (__kernel_text_address(ret_addr)) {
-                               get_real_ret_addr(&ret_addr, tsk, &graph);
+                               ret_addr = ftrace_graph_ret_addr(
+                                               tsk, &graph, ret_addr, NULL);
                                print_ip_sym(ret_addr);
                        }
                        if (--cnt < 0)
@@ -137,15 +117,12 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
                       !((unsigned long)base_reg & 0x3) &&
                       ((unsigned long)base_reg >= TASK_SIZE)) {
                        unsigned long next_fp;
-#if !defined(NDS32_ABI_2)
-                       ret_addr = base_reg[0];
-                       next_fp = base_reg[1];
-#else
-                       ret_addr = base_reg[-1];
+                       ret_addr = base_reg[LP_OFFSET];
                        next_fp = base_reg[FP_OFFSET];
-#endif
                        if (__kernel_text_address(ret_addr)) {
-                               get_real_ret_addr(&ret_addr, tsk, &graph);
+
+                               ret_addr = ftrace_graph_ret_addr(
+                                               tsk, &graph, ret_addr, NULL);
                                print_ip_sym(ret_addr);
                        }
                        if (--cnt < 0)
@@ -196,11 +173,10 @@ void die(const char *str, struct pt_regs *regs, int err)
        pr_emerg("CPU: %i\n", smp_processor_id());
        show_regs(regs);
        pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
-                tsk->comm, tsk->pid, task_thread_info(tsk) + 1);
+                tsk->comm, tsk->pid, end_of_stack(tsk));
 
        if (!user_mode(regs) || in_interrupt()) {
-               dump_mem("Stack: ", regs->sp,
-                        THREAD_SIZE + (unsigned long)task_thread_info(tsk));
+               dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
                dump_instr(regs);
                dump_stack();
        }
index 288313b886efa269fb0e13eb9231d50879e72947..9e90f30a181d7d9c9b06dc04d230c02bfde67b78 100644 (file)
@@ -13,14 +13,26 @@ OUTPUT_ARCH(nds32)
 ENTRY(_stext_lma)
 jiffies = jiffies_64;
 
+#if defined(CONFIG_GCOV_KERNEL)
+#define NDS32_EXIT_KEEP(x)     x
+#else
+#define NDS32_EXIT_KEEP(x)
+#endif
+
 SECTIONS
 {
        _stext_lma = TEXTADDR - LOAD_OFFSET;
        . = TEXTADDR;
        __init_begin = .;
        HEAD_TEXT_SECTION
+       .exit.text : {
+               NDS32_EXIT_KEEP(EXIT_TEXT)
+       }
        INIT_TEXT_SECTION(PAGE_SIZE)
        INIT_DATA_SECTION(16)
+       .exit.data : {
+               NDS32_EXIT_KEEP(EXIT_DATA)
+       }
        PERCPU_SECTION(L1_CACHE_BYTES)
        __init_end = .;
 
index 3c0e8fb2b773ebaf3bdfe404b2909d2d5ed48b4a..68e14afecac85b1d1fd0eff661f6a3aabe0217a6 100644 (file)
@@ -358,7 +358,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
        unsigned long pp, key;
        unsigned long v, orig_v, gr;
        __be64 *hptep;
-       int index;
+       long int index;
        int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
 
        if (kvm_is_radix(vcpu->kvm))
index 0af1c0aea1fe659fca4723e17a02cc17eb8fa08f..fd6e8c13685f4c0223749647ad04c34a71ad4589 100644 (file)
@@ -725,10 +725,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                                              gpa, shift);
                kvmppc_radix_tlbie_page(kvm, gpa, shift);
                if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
-                       unsigned long npages = 1;
+                       unsigned long psize = PAGE_SIZE;
                        if (shift)
-                               npages = 1ul << (shift - PAGE_SHIFT);
-                       kvmppc_update_dirty_map(memslot, gfn, npages);
+                               psize = 1ul << shift;
+                       kvmppc_update_dirty_map(memslot, gfn, psize);
                }
        }
        return 0;                               
index f31a15044c24a56875661aa6d3e195d75bd9ef9c..a8418e1379eb7ee08c92acd034eae000cb19c695 100644 (file)
@@ -16,7 +16,13 @@ typedef struct {
        unsigned long asce;
        unsigned long asce_limit;
        unsigned long vdso_base;
-       /* The mmu context allocates 4K page tables. */
+       /*
+        * The following bitfields need a down_write on the mm
+        * semaphore when they are written to. As they are only
+        * written once, they can be read without a lock.
+        *
+        * The mmu context allocates 4K page tables.
+        */
        unsigned int alloc_pgste:1;
        /* The mmu context uses extended page tables. */
        unsigned int has_pgste:1;
index 91ad4a9425c0b74c024f07fe8347651c91f4bfb7..f69333fd2fa3818c5eeb8bff9240f67e29f37ef0 100644 (file)
@@ -695,7 +695,9 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
                        r = -EINVAL;
                else {
                        r = 0;
+                       down_write(&kvm->mm->mmap_sem);
                        kvm->mm->context.allow_gmap_hpage_1m = 1;
+                       up_write(&kvm->mm->mmap_sem);
                        /*
                         * We might have to create fake 4k page
                         * tables. To avoid that the hardware works on
index d68f10441a164f2c22236adaa143edf6b1d0f7d0..8679bd74d337a583a3dde940d0cef1f427373a4b 100644 (file)
@@ -280,9 +280,11 @@ retry:
                        goto retry;
                }
        }
-       if (rc)
-               return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        up_read(&current->mm->mmap_sem);
+       if (rc == -EFAULT)
+               return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+       if (rc < 0)
+               return rc;
        vcpu->run->s.regs.gprs[reg1] &= ~0xff;
        vcpu->run->s.regs.gprs[reg1] |= key;
        return 0;
@@ -324,9 +326,11 @@ retry:
                        goto retry;
                }
        }
-       if (rc < 0)
-               return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        up_read(&current->mm->mmap_sem);
+       if (rc == -EFAULT)
+               return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+       if (rc < 0)
+               return rc;
        kvm_s390_set_psw_cc(vcpu, rc);
        return 0;
 }
@@ -390,12 +394,12 @@ static int handle_sske(struct kvm_vcpu *vcpu)
                                              FAULT_FLAG_WRITE, &unlocked);
                        rc = !rc ? -EAGAIN : rc;
                }
+               up_read(&current->mm->mmap_sem);
                if (rc == -EFAULT)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
-               up_read(&current->mm->mmap_sem);
-               if (rc >= 0)
-                       start += PAGE_SIZE;
+               if (rc < 0)
+                       return rc;
+               start += PAGE_SIZE;
        }
 
        if (m3 & (SSKE_MC | SSKE_MR)) {
@@ -1002,13 +1006,15 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
                                                      FAULT_FLAG_WRITE, &unlocked);
                                rc = !rc ? -EAGAIN : rc;
                        }
+                       up_read(&current->mm->mmap_sem);
                        if (rc == -EFAULT)
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
-                       up_read(&current->mm->mmap_sem);
-                       if (rc >= 0)
-                               start += PAGE_SIZE;
+                       if (rc == -EAGAIN)
+                               continue;
+                       if (rc < 0)
+                               return rc;
                }
+               start += PAGE_SIZE;
        }
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
                if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
index 63844b95c22c9902df769928313b988ee7d7667b..a2b28cd1e3fedb2bdcc6dbb1cff530ba0e3371a7 100644 (file)
@@ -173,7 +173,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                return set_validity_icpt(scb_s, 0x0039U);
 
        /* copy only the wrapping keys */
-       if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
+       if (read_guest_real(vcpu, crycb_addr + 72,
+                           vsie_page->crycb.dea_wrapping_key_mask, 56))
                return set_validity_icpt(scb_s, 0x0035U);
 
        scb_s->ecb3 |= ecb3_flags;
index 3641a294ed5478ef3065d0cca08b6c54618fc0e2..e4abe9b8f97a60ba0304479e975b16442bfa9af7 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/irq.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
 #include <asm/leon.h>
 #include <asm/leon_amba.h>
 
@@ -381,6 +382,9 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
        else
                dev_set_name(&op->dev, "%08x", dp->phandle);
 
+       op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       op->dev.dma_mask = &op->dev.coherent_dma_mask;
+
        if (of_device_register(op)) {
                printk("%s: Could not register of device.\n",
                       dp->full_name);
index 44e4d4435bed78032b1b7a0d9935e577ec196538..6df6086968c6a6064b2fe35f3c5ff03e7083fbc2 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
+#include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/export.h>
 #include <linux/mod_devicetable.h>
@@ -675,6 +676,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
                dev_set_name(&op->dev, "root");
        else
                dev_set_name(&op->dev, "%08x", dp->phandle);
+       op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       op->dev.dma_mask = &op->dev.coherent_dma_mask;
 
        if (of_device_register(op)) {
                printk("%s: Could not register of device.\n",
index b143717b92b3447c480495e23be68470bc7bf463..ce84388e540c918a01e31599bc78881a5b9d2954 100644 (file)
@@ -80,11 +80,11 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic_sub_and_test arch_atomic_sub_and_test
 static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
 }
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
 
 /**
  * arch_atomic_inc - increment atomic variable
@@ -92,12 +92,12 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic_inc arch_atomic_inc
 static __always_inline void arch_atomic_inc(atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "incl %0"
                     : "+m" (v->counter));
 }
+#define arch_atomic_inc arch_atomic_inc
 
 /**
  * arch_atomic_dec - decrement atomic variable
@@ -105,12 +105,12 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic_dec arch_atomic_dec
 static __always_inline void arch_atomic_dec(atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "decl %0"
                     : "+m" (v->counter));
 }
+#define arch_atomic_dec arch_atomic_dec
 
 /**
  * arch_atomic_dec_and_test - decrement and test
@@ -120,11 +120,11 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-#define arch_atomic_dec_and_test arch_atomic_dec_and_test
 static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
 }
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
 
 /**
  * arch_atomic_inc_and_test - increment and test
@@ -134,11 +134,11 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic_inc_and_test arch_atomic_inc_and_test
 static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
 }
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
 
 /**
  * arch_atomic_add_negative - add and test if negative
@@ -149,11 +149,11 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-#define arch_atomic_add_negative arch_atomic_add_negative
 static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
 }
+#define arch_atomic_add_negative arch_atomic_add_negative
 
 /**
  * arch_atomic_add_return - add integer and return
index ef959f02d0702b8d16979f1dd3ad6c1f405363e9..6a5b0ec460da8c5c6f4fe83074c6b2ec916bae80 100644 (file)
@@ -205,12 +205,12 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic64_inc arch_atomic64_inc
 static inline void arch_atomic64_inc(atomic64_t *v)
 {
        __alternative_atomic64(inc, inc_return, /* no output */,
                               "S" (v) : "memory", "eax", "ecx", "edx");
 }
+#define arch_atomic64_inc arch_atomic64_inc
 
 /**
  * arch_atomic64_dec - decrement atomic64 variable
@@ -218,12 +218,12 @@ static inline void arch_atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic64_dec arch_atomic64_dec
 static inline void arch_atomic64_dec(atomic64_t *v)
 {
        __alternative_atomic64(dec, dec_return, /* no output */,
                               "S" (v) : "memory", "eax", "ecx", "edx");
 }
+#define arch_atomic64_dec arch_atomic64_dec
 
 /**
  * arch_atomic64_add_unless - add unless the number is a given value
@@ -245,7 +245,6 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
        return (int)a;
 }
 
-#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
 static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 {
        int r;
@@ -253,8 +252,8 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
                             "S" (v) : "ecx", "edx", "memory");
        return r;
 }
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
 
-#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        long long r;
@@ -262,6 +261,7 @@ static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
                             "S" (v) : "ecx", "memory");
        return r;
 }
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
 #undef alternative_atomic64
 #undef __alternative_atomic64
index 4343d9b4f30e32056d5f6d5d1d6a273b556a3bba..5f851d92eecd9ee8eaad86c6b002937633e9144f 100644 (file)
@@ -71,11 +71,11 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
 static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
 }
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
 
 /**
  * arch_atomic64_inc - increment atomic64 variable
@@ -83,13 +83,13 @@ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic64_inc arch_atomic64_inc
 static __always_inline void arch_atomic64_inc(atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "incq %0"
                     : "=m" (v->counter)
                     : "m" (v->counter));
 }
+#define arch_atomic64_inc arch_atomic64_inc
 
 /**
  * arch_atomic64_dec - decrement atomic64 variable
@@ -97,13 +97,13 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic64_dec arch_atomic64_dec
 static __always_inline void arch_atomic64_dec(atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "decq %0"
                     : "=m" (v->counter)
                     : "m" (v->counter));
 }
+#define arch_atomic64_dec arch_atomic64_dec
 
 /**
  * arch_atomic64_dec_and_test - decrement and test
@@ -113,11 +113,11 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
 static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
 }
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
 
 /**
  * arch_atomic64_inc_and_test - increment and test
@@ -127,11 +127,11 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
 static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
 }
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
 
 /**
  * arch_atomic64_add_negative - add and test if negative
@@ -142,11 +142,11 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-#define arch_atomic64_add_negative arch_atomic64_add_negative
 static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
 }
+#define arch_atomic64_add_negative arch_atomic64_add_negative
 
 /**
  * arch_atomic64_add_return - add and return
index 395c9631e000a3a17aa574c1b25fcc2cafd5b5fb..75f1e35e7c1537d8323cf7ce17d05c95759c350b 100644 (file)
@@ -22,10 +22,20 @@ enum die_val {
        DIE_NMIUNKNOWN,
 };
 
+enum show_regs_mode {
+       SHOW_REGS_SHORT,
+       /*
+        * For when userspace crashed, but we don't think it's our fault, and
+        * therefore don't print kernel registers.
+        */
+       SHOW_REGS_USER,
+       SHOW_REGS_ALL
+};
+
 extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_stack_regs(struct pt_regs *regs);
-extern void __show_regs(struct pt_regs *regs, int all);
+extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);
 extern void show_iret_regs(struct pt_regs *regs);
 extern unsigned long oops_begin(void);
 extern void oops_end(unsigned long, struct pt_regs *, int signr);
index 00ddb0c9e612a6e084298ef0f7372b924b76ba12..8e90488c3d56895f62080666e2141df1731092f3 100644 (file)
@@ -1237,19 +1237,12 @@ enum emulation_result {
 #define EMULTYPE_NO_DECODE         (1 << 0)
 #define EMULTYPE_TRAP_UD           (1 << 1)
 #define EMULTYPE_SKIP              (1 << 2)
-#define EMULTYPE_RETRY             (1 << 3)
-#define EMULTYPE_NO_REEXECUTE      (1 << 4)
-#define EMULTYPE_NO_UD_ON_FAIL     (1 << 5)
-#define EMULTYPE_VMWARE                    (1 << 6)
-int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
-                           int emulation_type, void *insn, int insn_len);
-
-static inline int emulate_instruction(struct kvm_vcpu *vcpu,
-                       int emulation_type)
-{
-       return x86_emulate_instruction(vcpu, 0,
-                       emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
-}
+#define EMULTYPE_ALLOW_RETRY       (1 << 3)
+#define EMULTYPE_NO_UD_ON_FAIL     (1 << 4)
+#define EMULTYPE_VMWARE                    (1 << 5)
+int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
+int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
+                                       void *insn, int insn_len);
 
 void kvm_enable_efer_bits(u64);
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
@@ -1450,7 +1443,6 @@ asmlinkage void kvm_spurious_fault(void);
        ____kvm_handle_fault_on_reboot(insn, "")
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -1463,7 +1455,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
 
 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
-                   unsigned long ipi_bitmap_high, int min,
+                   unsigned long ipi_bitmap_high, u32 min,
                    unsigned long icr, int op_64_bit);
 
 u64 kvm_get_arch_capabilities(void);
index e4ffa565a69f0633cab94325bd894eac4bbe6a19..690c0307afed0932974e5965ccf7bed98daa10f9 100644 (file)
@@ -1195,7 +1195,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
                return xchg(pmdp, pmd);
        } else {
                pmd_t old = *pmdp;
-               *pmdp = pmd;
+               WRITE_ONCE(*pmdp, pmd);
                return old;
        }
 }
index f773d5e6c8cc465da7069987f9f24d6507c259af..ce2b59047cb839053043508d75ea47f8a726c8b9 100644 (file)
@@ -55,15 +55,15 @@ struct mm_struct;
 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
 
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
-                                   pte_t *ptep)
+static inline void native_set_pte(pte_t *ptep, pte_t pte)
 {
-       *ptep = native_make_pte(0);
+       WRITE_ONCE(*ptep, pte);
 }
 
-static inline void native_set_pte(pte_t *ptep, pte_t pte)
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+                                   pte_t *ptep)
 {
-       *ptep = pte;
+       native_set_pte(ptep, native_make_pte(0));
 }
 
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
@@ -73,7 +73,7 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-       *pmdp = pmd;
+       WRITE_ONCE(*pmdp, pmd);
 }
 
 static inline void native_pmd_clear(pmd_t *pmd)
@@ -109,7 +109,7 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
-       *pudp = pud;
+       WRITE_ONCE(*pudp, pud);
 }
 
 static inline void native_pud_clear(pud_t *pud)
@@ -137,13 +137,13 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
        pgd_t pgd;
 
        if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
-               *p4dp = p4d;
+               WRITE_ONCE(*p4dp, p4d);
                return;
        }
 
        pgd = native_make_pgd(native_p4d_val(p4d));
        pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
-       *p4dp = native_make_p4d(native_pgd_val(pgd));
+       WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
 }
 
 static inline void native_p4d_clear(p4d_t *p4d)
@@ -153,7 +153,7 @@ static inline void native_p4d_clear(p4d_t *p4d)
 
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-       *pgdp = pti_set_user_pgtbl(pgdp, pgd);
+       WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
 }
 
 static inline void native_pgd_clear(pgd_t *pgd)
index 9f148e3d45b4fe2ec57eecf5bd13ba528f97fa37..7654febd510277bedf28f8cef6a5d599bcfe1f5c 100644 (file)
@@ -413,7 +413,7 @@ static int activate_managed(struct irq_data *irqd)
        if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
                /* Something in the core code broke! Survive gracefully */
                pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
-               return EINVAL;
+               return -EINVAL;
        }
 
        ret = assign_managed_vector(irqd, vector_searchmask);
index 0624957aa0681fe7a260389fe8b64740383ff7c9..07b5fc00b188047cdd2830f164626e19de9da8f9 100644 (file)
@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
        struct microcode_amd *mc_amd;
        struct ucode_cpu_info *uci;
        struct ucode_patch *p;
+       enum ucode_state ret;
        u32 rev, dummy;
 
        BUG_ON(raw_smp_processor_id() != cpu);
@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu)
 
        /* need to apply patch? */
        if (rev >= mc_amd->hdr.patch_id) {
-               c->microcode = rev;
-               uci->cpu_sig.rev = rev;
-               return UCODE_OK;
+               ret = UCODE_OK;
+               goto out;
        }
 
        if (__apply_microcode_amd(mc_amd)) {
@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu)
                        cpu, mc_amd->hdr.patch_id);
                return UCODE_ERROR;
        }
-       pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
-               mc_amd->hdr.patch_id);
 
-       uci->cpu_sig.rev = mc_amd->hdr.patch_id;
-       c->microcode = mc_amd->hdr.patch_id;
+       rev = mc_amd->hdr.patch_id;
+       ret = UCODE_UPDATED;
+
+       pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
 
-       return UCODE_UPDATED;
+out:
+       uci->cpu_sig.rev = rev;
+       c->microcode     = rev;
+
+       /* Update boot_cpu_data's revision too, if we're on the BSP: */
+       if (c->cpu_index == boot_cpu_data.cpu_index)
+               boot_cpu_data.microcode = rev;
+
+       return ret;
 }
 
 static int install_equiv_cpu_table(const u8 *buf)
index 97ccf4c3b45bec517605813b1f24518b10466002..16936a24795c8457b789f7b428216cfc5bd1fb4e 100644 (file)
@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct microcode_intel *mc;
+       enum ucode_state ret;
        static int prev_rev;
        u32 rev;
 
@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu)
         */
        rev = intel_get_microcode_revision();
        if (rev >= mc->hdr.rev) {
-               uci->cpu_sig.rev = rev;
-               c->microcode = rev;
-               return UCODE_OK;
+               ret = UCODE_OK;
+               goto out;
        }
 
        /*
@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu)
                prev_rev = rev;
        }
 
+       ret = UCODE_UPDATED;
+
+out:
        uci->cpu_sig.rev = rev;
-       c->microcode = rev;
+       c->microcode     = rev;
+
+       /* Update boot_cpu_data's revision too, if we're on the BSP: */
+       if (c->cpu_index == boot_cpu_data.cpu_index)
+               boot_cpu_data.microcode = rev;
 
-       return UCODE_UPDATED;
+       return ret;
 }
 
 static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
index f56895106ccf61d51b2c712f8194e85fcfdc8e6c..2b5886401e5f4eb2c221f813675927a53cc97d61 100644 (file)
@@ -146,7 +146,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
         * they can be printed in the right context.
         */
        if (!partial && on_stack(info, regs, sizeof(*regs))) {
-               __show_regs(regs, 0);
+               __show_regs(regs, SHOW_REGS_SHORT);
 
        } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
                                       IRET_FRAME_SIZE)) {
@@ -344,7 +344,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
        oops_exit();
 
        /* Executive summary in case the oops scrolled away */
-       __show_regs(&exec_summary_regs, true);
+       __show_regs(&exec_summary_regs, SHOW_REGS_ALL);
 
        if (!signr)
                return;
@@ -407,14 +407,9 @@ void die(const char *str, struct pt_regs *regs, long err)
 
 void show_regs(struct pt_regs *regs)
 {
-       bool all = true;
-
        show_regs_print_info(KERN_DEFAULT);
 
-       if (IS_ENABLED(CONFIG_X86_32))
-               all = !user_mode(regs);
-
-       __show_regs(regs, all);
+       __show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);
 
        /*
         * When in-kernel, we also print out the stack at the time of the fault..
index 2924fd447e617dd6b4a2aaf1741fefe31c94b676..5046a3c9dec2feaa6761e38c9947e90ad4030efb 100644 (file)
@@ -59,7 +59,7 @@
 #include <asm/intel_rdt_sched.h>
 #include <asm/proto.h>
 
-void __show_regs(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
        unsigned long d0, d1, d2, d3, d6, d7;
@@ -85,7 +85,7 @@ void __show_regs(struct pt_regs *regs, int all)
        printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
               (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);
 
-       if (!all)
+       if (mode != SHOW_REGS_ALL)
                return;
 
        cr0 = read_cr0();
index a451bc374b9b3e5001be9978995848946867759b..ea5ea850348da94cafb85010c4ba123d83fbba57 100644 (file)
@@ -62,7 +62,7 @@
 __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
 
 /* Prints also some state that isn't saved in the pt_regs */
-void __show_regs(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
@@ -87,9 +87,17 @@ void __show_regs(struct pt_regs *regs, int all)
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);
 
-       if (!all)
+       if (mode == SHOW_REGS_SHORT)
                return;
 
+       if (mode == SHOW_REGS_USER) {
+               rdmsrl(MSR_FS_BASE, fs);
+               rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+               printk(KERN_DEFAULT "FS:  %016lx GS:  %016lx\n",
+                      fs, shadowgs);
+               return;
+       }
+
        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
index 1463468ba9a0a5dc3914c92f88fffb491dc36d4f..6490f618e09696a7a407859037a0b1635cbb6f9f 100644 (file)
@@ -1415,7 +1415,7 @@ static bool __init determine_cpu_tsc_frequencies(bool early)
 
 static unsigned long __init get_loops_per_jiffy(void)
 {
-       unsigned long lpj = tsc_khz * KHZ;
+       u64 lpj = (u64)tsc_khz * KHZ;
 
        do_div(lpj, HZ);
        return lpj;
index 0cefba28c864a3a0925378ed9b438a05b554cff2..17c0472c5b344faaaac3153ff53c4600a3fcd81e 100644 (file)
@@ -548,7 +548,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 }
 
 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
-                   unsigned long ipi_bitmap_high, int min,
+                   unsigned long ipi_bitmap_high, u32 min,
                    unsigned long icr, int op_64_bit)
 {
        int i;
@@ -571,18 +571,31 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);
 
+       if (min > map->max_apic_id)
+               goto out;
        /* Bits above cluster_size are masked in the caller.  */
-       for_each_set_bit(i, &ipi_bitmap_low, BITS_PER_LONG) {
-               vcpu = map->phys_map[min + i]->vcpu;
-               count += kvm_apic_set_irq(vcpu, &irq, NULL);
+       for_each_set_bit(i, &ipi_bitmap_low,
+               min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+               if (map->phys_map[min + i]) {
+                       vcpu = map->phys_map[min + i]->vcpu;
+                       count += kvm_apic_set_irq(vcpu, &irq, NULL);
+               }
        }
 
        min += cluster_size;
-       for_each_set_bit(i, &ipi_bitmap_high, BITS_PER_LONG) {
-               vcpu = map->phys_map[min + i]->vcpu;
-               count += kvm_apic_set_irq(vcpu, &irq, NULL);
+
+       if (min > map->max_apic_id)
+               goto out;
+
+       for_each_set_bit(i, &ipi_bitmap_high,
+               min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+               if (map->phys_map[min + i]) {
+                       vcpu = map->phys_map[min + i]->vcpu;
+                       count += kvm_apic_set_irq(vcpu, &irq, NULL);
+               }
        }
 
+out:
        rcu_read_unlock();
        return count;
 }
index a282321329b51a10e7563b0d422bd9e38d57b530..e24ea7067373af69d258c46995007b0446a69fdc 100644 (file)
@@ -1853,11 +1853,6 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
        return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-       return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
        return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
@@ -5217,7 +5212,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
                       void *insn, int insn_len)
 {
-       int r, emulation_type = EMULTYPE_RETRY;
+       int r, emulation_type = 0;
        enum emulation_result er;
        bool direct = vcpu->arch.mmu.direct_map;
 
@@ -5230,10 +5225,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
        r = RET_PF_INVALID;
        if (unlikely(error_code & PFERR_RSVD_MASK)) {
                r = handle_mmio_page_fault(vcpu, cr2, direct);
-               if (r == RET_PF_EMULATE) {
-                       emulation_type = 0;
+               if (r == RET_PF_EMULATE)
                        goto emulate;
-               }
        }
 
        if (r == RET_PF_INVALID) {
@@ -5260,8 +5253,19 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
                return 1;
        }
 
-       if (mmio_info_in_cache(vcpu, cr2, direct))
-               emulation_type = 0;
+       /*
+        * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
+        * optimistically try to just unprotect the page and let the processor
+        * re-execute the instruction that caused the page fault.  Do not allow
+        * retrying MMIO emulation, as it's not only pointless but could also
+        * cause us to enter an infinite loop because the processor will keep
+        * faulting on the non-existent MMIO address.  Retrying an instruction
+        * from a nested guest is also pointless and dangerous as we are only
+        * explicitly shadowing L1's page tables, i.e. unprotecting something
+        * for L1 isn't going to magically fix whatever issue cause L2 to fail.
+        */
+       if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
+               emulation_type = EMULTYPE_ALLOW_RETRY;
 emulate:
        /*
         * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
index 6276140044d0848b58e6778c31cf4d247520ac44..89c4c5aa15f16c71af5404f302a627313a61fd96 100644 (file)
@@ -776,7 +776,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
        }
 
        if (!svm->next_rip) {
-               if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
+               if (kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) !=
                                EMULATE_DONE)
                        printk(KERN_DEBUG "%s: NOP\n", __func__);
                return;
@@ -2715,7 +2715,7 @@ static int gp_interception(struct vcpu_svm *svm)
 
        WARN_ON_ONCE(!enable_vmware_backdoor);
 
-       er = emulate_instruction(vcpu,
+       er = kvm_emulate_instruction(vcpu,
                EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
        if (er == EMULATE_USER_EXIT)
                return 0;
@@ -2819,7 +2819,7 @@ static int io_interception(struct vcpu_svm *svm)
        string = (io_info & SVM_IOIO_STR_MASK) != 0;
        in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
        if (string)
-               return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+               return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
@@ -3861,7 +3861,7 @@ static int iret_interception(struct vcpu_svm *svm)
 static int invlpg_interception(struct vcpu_svm *svm)
 {
        if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
-               return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+               return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 
        kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
        return kvm_skip_emulated_instruction(&svm->vcpu);
@@ -3869,13 +3869,13 @@ static int invlpg_interception(struct vcpu_svm *svm)
 
 static int emulate_on_interception(struct vcpu_svm *svm)
 {
-       return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+       return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 }
 
 static int rsm_interception(struct vcpu_svm *svm)
 {
-       return x86_emulate_instruction(&svm->vcpu, 0, 0,
-                                      rsm_ins_bytes, 2) == EMULATE_DONE;
+       return kvm_emulate_instruction_from_buffer(&svm->vcpu,
+                                       rsm_ins_bytes, 2) == EMULATE_DONE;
 }
 
 static int rdpmc_interception(struct vcpu_svm *svm)
@@ -4700,7 +4700,7 @@ static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
                ret = avic_unaccel_trap_write(svm);
        } else {
                /* Handling Fault */
-               ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
+               ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
        }
 
        return ret;
@@ -6747,7 +6747,7 @@ e_free:
 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 {
        unsigned long vaddr, vaddr_end, next_vaddr;
-       unsigned long dst_vaddr, dst_vaddr_end;
+       unsigned long dst_vaddr;
        struct page **src_p, **dst_p;
        struct kvm_sev_dbg debug;
        unsigned long n;
@@ -6763,7 +6763,6 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
        size = debug.len;
        vaddr_end = vaddr + size;
        dst_vaddr = debug.dst_uaddr;
-       dst_vaddr_end = dst_vaddr + size;
 
        for (; vaddr < vaddr_end; vaddr = next_vaddr) {
                int len, s_off, d_off;
index 1d26f3c4985ba6dd5fc88d72959cd49b084606fb..533a327372c876df0b1c2b99ea3558e2aaa92df4 100644 (file)
@@ -6983,7 +6983,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
         * Cause the #SS fault with 0 error code in VM86 mode.
         */
        if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
-               if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
+               if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) {
                        if (vcpu->arch.halt_request) {
                                vcpu->arch.halt_request = 0;
                                return kvm_vcpu_halt(vcpu);
@@ -7054,7 +7054,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 
        if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
                WARN_ON_ONCE(!enable_vmware_backdoor);
-               er = emulate_instruction(vcpu,
+               er = kvm_emulate_instruction(vcpu,
                        EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
                if (er == EMULATE_USER_EXIT)
                        return 0;
@@ -7157,7 +7157,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
        ++vcpu->stat.io_exits;
 
        if (string)
-               return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+               return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
        port = exit_qualification >> 16;
        size = (exit_qualification & 7) + 1;
@@ -7231,7 +7231,7 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
 static int handle_desc(struct kvm_vcpu *vcpu)
 {
        WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
-       return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+       return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_cr(struct kvm_vcpu *vcpu)
@@ -7480,7 +7480,7 @@ static int handle_vmcall(struct kvm_vcpu *vcpu)
 
 static int handle_invd(struct kvm_vcpu *vcpu)
 {
-       return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+       return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_invlpg(struct kvm_vcpu *vcpu)
@@ -7547,7 +7547,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
                        return kvm_skip_emulated_instruction(vcpu);
                }
        }
-       return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+       return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
@@ -7704,8 +7704,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
                if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
                        return kvm_skip_emulated_instruction(vcpu);
                else
-                       return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
-                                                      NULL, 0) == EMULATE_DONE;
+                       return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) ==
+                                                               EMULATE_DONE;
        }
 
        return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
@@ -7748,7 +7748,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                if (kvm_test_request(KVM_REQ_EVENT, vcpu))
                        return 1;
 
-               err = emulate_instruction(vcpu, 0);
+               err = kvm_emulate_instruction(vcpu, 0);
 
                if (err == EMULATE_USER_EXIT) {
                        ++vcpu->stat.mmio_exits;
@@ -12537,8 +12537,11 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        bool from_vmentry = !!exit_qual;
        u32 dummy_exit_qual;
+       u32 vmcs01_cpu_exec_ctrl;
        int r = 0;
 
+       vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+
        enter_guest_mode(vcpu);
 
        if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
@@ -12574,6 +12577,25 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
                kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
        }
 
+       /*
+        * If L1 had a pending IRQ/NMI until it executed
+        * VMLAUNCH/VMRESUME which wasn't delivered because it was
+        * disallowed (e.g. interrupts disabled), L0 needs to
+        * evaluate if this pending event should cause an exit from L2
+        * to L1 or delivered directly to L2 (e.g. In case L1 don't
+        * intercept EXTERNAL_INTERRUPT).
+        *
+        * Usually this would be handled by L0 requesting a
+        * IRQ/NMI window by setting VMCS accordingly. However,
+        * this setting was done on VMCS01 and now VMCS02 is active
+        * instead. Thus, we force L0 to perform pending event
+        * evaluation by requesting a KVM_REQ_EVENT.
+        */
+       if (vmcs01_cpu_exec_ctrl &
+               (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
+       }
+
        /*
         * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
         * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
@@ -13988,9 +14010,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
            check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
                return -EINVAL;
 
-       if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING)
-               vmx->nested.nested_run_pending = 1;
-
        vmx->nested.dirty_vmcs12 = true;
        ret = enter_vmx_non_root_mode(vcpu, NULL);
        if (ret)
index 506bd2b4b8bb76e21a959310d33f6de6180719f8..542f6315444d75aa365ca04dba4bddd7bfb369d3 100644 (file)
@@ -4987,7 +4987,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
                emul_type = 0;
        }
 
-       er = emulate_instruction(vcpu, emul_type);
+       er = kvm_emulate_instruction(vcpu, emul_type);
        if (er == EMULATE_USER_EXIT)
                return 0;
        if (er != EMULATE_DONE)
@@ -5870,7 +5870,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
        gpa_t gpa = cr2;
        kvm_pfn_t pfn;
 
-       if (emulation_type & EMULTYPE_NO_REEXECUTE)
+       if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+               return false;
+
+       if (WARN_ON_ONCE(is_guest_mode(vcpu)))
                return false;
 
        if (!vcpu->arch.mmu.direct_map) {
@@ -5958,7 +5961,10 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
         */
        vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
 
-       if (!(emulation_type & EMULTYPE_RETRY))
+       if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+               return false;
+
+       if (WARN_ON_ONCE(is_guest_mode(vcpu)))
                return false;
 
        if (x86_page_table_writing_insn(ctxt))
@@ -6276,7 +6282,19 @@ restart:
 
        return r;
 }
-EXPORT_SYMBOL_GPL(x86_emulate_instruction);
+
+int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
+{
+       return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_instruction);
+
+int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
+                                       void *insn, int insn_len)
+{
+       return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
 
 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
                            unsigned short port)
@@ -7734,7 +7752,7 @@ static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
 {
        int r;
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-       r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
+       r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        if (r != EMULATE_DONE)
                return 0;
index 257f27620bc272e3312295714a120de07963441f..67b9568613f34abdcacc208b4f23ef1414a512eb 100644 (file)
@@ -274,6 +274,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                                          int page_num);
 bool kvm_vector_hashing_enabled(void);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+                           int emulation_type, void *insn, int insn_len);
 
 #define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
                                | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
index e848a48117856c8e58dcf313339853563f5c6f7f..ae394552fb945def068d2c877a153e9d7f040228 100644 (file)
@@ -269,7 +269,7 @@ static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
        if (pgd_val(pgd) != 0) {
                pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
 
-               *pgdp = native_make_pgd(0);
+               pgd_clear(pgdp);
 
                paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                pmd_free(mm, pmd);
@@ -494,7 +494,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
        int changed = !pte_same(*ptep, entry);
 
        if (changed && dirty)
-               *ptep = entry;
+               set_pte(ptep, entry);
 
        return changed;
 }
@@ -509,7 +509,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
        if (changed && dirty) {
-               *pmdp = entry;
+               set_pmd(pmdp, entry);
                /*
                 * We had a write-protection fault here and changed the pmd
                 * to to more permissive. No need to flush the TLB for that,
@@ -529,7 +529,7 @@ int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
        VM_BUG_ON(address & ~HPAGE_PUD_MASK);
 
        if (changed && dirty) {
-               *pudp = entry;
+               set_pud(pudp, entry);
                /*
                 * We had a write-protection fault here and changed the pud
                 * to to more permissive. No need to flush the TLB for that,
index 58c6efa9f9a99970e7599b673df3f65f5f4a9810..9fe5952d117d553f12f32055fde8683c554b06a8 100644 (file)
@@ -275,9 +275,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)
 
 void bfqg_and_blkg_put(struct bfq_group *bfqg)
 {
-       bfqg_put(bfqg);
-
        blkg_put(bfqg_to_blkg(bfqg));
+
+       bfqg_put(bfqg);
 }
 
 /* @stats = 0 */
index b12966e415d3f807d0ac27bab9b5e010f771d0b0..8c680a776171c8c1bc7dcbefee2d4b6bb9cc5ebc 100644 (file)
@@ -2015,7 +2015,8 @@ int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
 {
        if (unlikely(bio->bi_blkg))
                return -EBUSY;
-       blkg_get(blkg);
+       if (!blkg_try_get(blkg))
+               return -ENODEV;
        bio->bi_blkg = blkg;
        return 0;
 }
index 694595b29b8fd2faac52ac79cf9e6e4540e7dcd6..c19f9078da1ed9b17295eb4fcce108b1d5658c93 100644 (file)
@@ -310,28 +310,11 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
        }
 }
 
-static void blkg_pd_offline(struct blkcg_gq *blkg)
-{
-       int i;
-
-       lockdep_assert_held(blkg->q->queue_lock);
-       lockdep_assert_held(&blkg->blkcg->lock);
-
-       for (i = 0; i < BLKCG_MAX_POLS; i++) {
-               struct blkcg_policy *pol = blkcg_policy[i];
-
-               if (blkg->pd[i] && !blkg->pd[i]->offline &&
-                   pol->pd_offline_fn) {
-                       pol->pd_offline_fn(blkg->pd[i]);
-                       blkg->pd[i]->offline = true;
-               }
-       }
-}
-
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
        struct blkcg *blkcg = blkg->blkcg;
        struct blkcg_gq *parent = blkg->parent;
+       int i;
 
        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);
@@ -340,6 +323,13 @@ static void blkg_destroy(struct blkcg_gq *blkg)
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 
+       for (i = 0; i < BLKCG_MAX_POLS; i++) {
+               struct blkcg_policy *pol = blkcg_policy[i];
+
+               if (blkg->pd[i] && pol->pd_offline_fn)
+                       pol->pd_offline_fn(blkg->pd[i]);
+       }
+
        if (parent) {
                blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
                blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
@@ -382,7 +372,6 @@ static void blkg_destroy_all(struct request_queue *q)
                struct blkcg *blkcg = blkg->blkcg;
 
                spin_lock(&blkcg->lock);
-               blkg_pd_offline(blkg);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }
@@ -1053,59 +1042,64 @@ static struct cftype blkcg_legacy_files[] = {
        { }     /* terminate */
 };
 
+/*
+ * blkcg destruction is a three-stage process.
+ *
+ * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
+ *    which offlines writeback.  Here we tie the next stage of blkg destruction
+ *    to the completion of writeback associated with the blkcg.  This lets us
+ *    avoid punting potentially large amounts of outstanding writeback to root
+ *    while maintaining any ongoing policies.  The next stage is triggered when
+ *    the nr_cgwbs count goes to zero.
+ *
+ * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
+ *    and handles the destruction of blkgs.  Here the css reference held by
+ *    the blkg is put back eventually allowing blkcg_css_free() to be called.
+ *    This work may occur in cgwb_release_workfn() on the cgwb_release
+ *    workqueue.  Any submitted ios that fail to get the blkg ref will be
+ *    punted to the root_blkg.
+ *
+ * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
+ *    This finally frees the blkcg.
+ */
+
 /**
  * blkcg_css_offline - cgroup css_offline callback
  * @css: css of interest
  *
- * This function is called when @css is about to go away and responsible
- * for offlining all blkgs pd and killing all wbs associated with @css.
- * blkgs pd offline should be done while holding both q and blkcg locks.
- * As blkcg lock is nested inside q lock, this function performs reverse
- * double lock dancing.
- *
- * This is the blkcg counterpart of ioc_release_fn().
+ * This function is called when @css is about to go away.  Here the cgwbs are
+ * offlined first and only once writeback associated with the blkcg has
+ * finished do we start step 2 (see above).
  */
 static void blkcg_css_offline(struct cgroup_subsys_state *css)
 {
        struct blkcg *blkcg = css_to_blkcg(css);
-       struct blkcg_gq *blkg;
-
-       spin_lock_irq(&blkcg->lock);
-
-       hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
-               struct request_queue *q = blkg->q;
-
-               if (spin_trylock(q->queue_lock)) {
-                       blkg_pd_offline(blkg);
-                       spin_unlock(q->queue_lock);
-               } else {
-                       spin_unlock_irq(&blkcg->lock);
-                       cpu_relax();
-                       spin_lock_irq(&blkcg->lock);
-               }
-       }
-
-       spin_unlock_irq(&blkcg->lock);
 
+       /* this prevents anyone from attaching or migrating to this blkcg */
        wb_blkcg_offline(blkcg);
+
+       /* put the base cgwb reference allowing step 2 to be triggered */
+       blkcg_cgwb_put(blkcg);
 }
 
 /**
- * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg
+ * blkcg_destroy_blkgs - responsible for shooting down blkgs
  * @blkcg: blkcg of interest
  *
- * This function is called when blkcg css is about to free and responsible for
- * destroying all blkgs associated with @blkcg.
- * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
+ * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
  * is nested inside q lock, this function performs reverse double lock dancing.
+ * Destroying the blkgs releases the reference held on the blkcg's css allowing
+ * blkcg_css_free to eventually be called.
+ *
+ * This is the blkcg counterpart of ioc_release_fn().
  */
-static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
+void blkcg_destroy_blkgs(struct blkcg *blkcg)
 {
        spin_lock_irq(&blkcg->lock);
+
        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
-                                                   struct blkcg_gq,
-                                                   blkcg_node);
+                                               struct blkcg_gq, blkcg_node);
                struct request_queue *q = blkg->q;
 
                if (spin_trylock(q->queue_lock)) {
@@ -1117,6 +1111,7 @@ static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
                        spin_lock_irq(&blkcg->lock);
                }
        }
+
        spin_unlock_irq(&blkcg->lock);
 }
 
@@ -1125,8 +1120,6 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
        struct blkcg *blkcg = css_to_blkcg(css);
        int i;
 
-       blkcg_destroy_all_blkgs(blkcg);
-
        mutex_lock(&blkcg_pol_mutex);
 
        list_del(&blkcg->all_blkcgs_node);
@@ -1189,6 +1182,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
        INIT_HLIST_HEAD(&blkcg->blkg_list);
 #ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&blkcg->cgwb_list);
+       refcount_set(&blkcg->cgwb_refcnt, 1);
 #endif
        list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
 
@@ -1480,11 +1474,8 @@ void blkcg_deactivate_policy(struct request_queue *q,
 
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                if (blkg->pd[pol->plid]) {
-                       if (!blkg->pd[pol->plid]->offline &&
-                           pol->pd_offline_fn) {
+                       if (pol->pd_offline_fn)
                                pol->pd_offline_fn(blkg->pd[pol->plid]);
-                               blkg->pd[pol->plid]->offline = true;
-                       }
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
index dee56c282efb092196cd9be8400b6bc68c7f7f27..4dbc93f43b38231657ec2c97e836b5063a7265cf 100644 (file)
@@ -2163,9 +2163,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
 {
        const int op = bio_op(bio);
 
-       if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
+       if (part->policy && op_is_write(op)) {
                char b[BDEVNAME_SIZE];
 
+               if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+                       return false;
+
                WARN_ONCE(1,
                       "generic_make_request: Trying to write "
                        "to read-only block-device %s (partno %d)\n",
index a3eede00d3020b7edc84915caf6dae52a726df21..01d0620a4e4a5e829c9de5c2dec0ac5e0b2b3ab3 100644 (file)
@@ -2129,8 +2129,9 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
 {
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-       if (bio->bi_css)
-               bio_associate_blkg(bio, tg_to_blkg(tg));
+       /* fallback to root_blkg if we fail to get a blkg ref */
+       if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
+               bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
        bio_issue_init(&bio->bi_issue, bio_sectors(bio));
 #endif
 }
index 9706613eecf9e2320209225b41d4856fecbf3ae1..bf64cfa30febf173640729db80f9a567ce50b4b0 100644 (file)
@@ -879,7 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
 #define LPSS_GPIODEF0_DMA_LLP          BIT(13)
 
 static DEFINE_MUTEX(lpss_iosf_mutex);
-static bool lpss_iosf_d3_entered;
+static bool lpss_iosf_d3_entered = true;
 
 static void lpss_iosf_enter_d3_state(void)
 {
index 292088fcc6245ba2a683f02899750a1a70249701..d2e29a19890d14db1a9e6fd85f207d558c6a3c6e 100644 (file)
 #include <linux/delay.h>
 #ifdef CONFIG_X86
 #include <asm/mpspec.h>
+#include <linux/dmi.h>
 #endif
 #include <linux/acpi_iort.h>
 #include <linux/pci.h>
 #include <acpi/apei.h>
-#include <linux/dmi.h>
 #include <linux/suspend.h>
 
 #include "internal.h"
@@ -82,10 +82,6 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
        },
        {}
 };
-#else
-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
-       {}
-};
 #endif
 
 /* --------------------------------------------------------------------------
@@ -1033,11 +1029,16 @@ void __init acpi_early_init(void)
 
        acpi_permanent_mmap = true;
 
+#ifdef CONFIG_X86
        /*
         * If the machine falls into the DMI check table,
-        * DSDT will be copied to memory
+        * DSDT will be copied to memory.
+        * Note that calling dmi_check_system() here on other architectures
+        * would not be OK because only x86 initializes dmi early enough.
+        * Thankfully only x86 systems need such quirks for now.
         */
        dmi_check_system(dsdt_dmi_table);
+#endif
 
        status = acpi_reallocate_root_table();
        if (ACPI_FAILURE(status)) {
index 172e32840256bef2365a0841950da638900a098e..599e01bcdef22b7064853b30b31ad5f3e6a9b432 100644 (file)
@@ -7394,4 +7394,4 @@ EXPORT_SYMBOL_GPL(ata_cable_unknown);
 EXPORT_SYMBOL_GPL(ata_cable_ignore);
 EXPORT_SYMBOL_GPL(ata_cable_sata);
 EXPORT_SYMBOL_GPL(ata_host_get);
-EXPORT_SYMBOL_GPL(ata_host_put);
\ No newline at end of file
+EXPORT_SYMBOL_GPL(ata_host_put);
index c8a1cb0b61361f63088ecb940c0ca8dc7dee67cf..817320c7c4c1b72cf248b73a184bc8dee6ef28ac 100644 (file)
@@ -416,26 +416,24 @@ static ssize_t show_valid_zones(struct device *dev,
        struct zone *default_zone;
        int nid;
 
-       /*
-        * The block contains more than one zone can not be offlined.
-        * This can happen e.g. for ZONE_DMA and ZONE_DMA32
-        */
-       if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn))
-               return sprintf(buf, "none\n");
-
-       start_pfn = valid_start_pfn;
-       nr_pages = valid_end_pfn - start_pfn;
-
        /*
         * Check the existing zone. Make sure that we do that only on the
         * online nodes otherwise the page_zone is not reliable
         */
        if (mem->state == MEM_ONLINE) {
+               /*
+                * The block contains more than one zone can not be offlined.
+                * This can happen e.g. for ZONE_DMA and ZONE_DMA32
+                */
+               if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
+                                         &valid_start_pfn, &valid_end_pfn))
+                       return sprintf(buf, "none\n");
+               start_pfn = valid_start_pfn;
                strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
                goto out;
        }
 
-       nid = pfn_to_nid(start_pfn);
+       nid = mem->nid;
        default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
        strcat(buf, default_zone->name);
 
index 3863c00372bb9ed9b68fa41f3e692736d779b81c..14a51254c3db7f19c94cdab62e1d9e192c7ae02f 100644 (file)
@@ -1239,6 +1239,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
        case NBD_SET_SOCK:
                return nbd_add_socket(nbd, arg, false);
        case NBD_SET_BLKSIZE:
+               if (!arg || !is_power_of_2(arg) || arg < 512 ||
+                   arg > PAGE_SIZE)
+                       return -EINVAL;
                nbd_size_set(nbd, arg,
                             div_s64(config->bytesize, arg));
                return 0;
index 7915f3b03736eadebf623801802bdb1d7bd42f75..73ed5f3a862dfcde598227671d55c2491d33408b 100644 (file)
@@ -4207,11 +4207,13 @@ static ssize_t rbd_parent_show(struct device *dev,
 
                count += sprintf(&buf[count], "%s"
                            "pool_id %llu\npool_name %s\n"
+                           "pool_ns %s\n"
                            "image_id %s\nimage_name %s\n"
                            "snap_id %llu\nsnap_name %s\n"
                            "overlap %llu\n",
                            !count ? "" : "\n", /* first? */
                            spec->pool_id, spec->pool_name,
+                           spec->pool_ns ?: "",
                            spec->image_id, spec->image_name ?: "(unknown)",
                            spec->snap_id, spec->snap_name,
                            rbd_dev->parent_overlap);
@@ -4584,47 +4586,177 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
                                                &rbd_dev->header.features);
 }
 
+struct parent_image_info {
+       u64             pool_id;
+       const char      *pool_ns;
+       const char      *image_id;
+       u64             snap_id;
+
+       bool            has_overlap;
+       u64             overlap;
+};
+
+/*
+ * The caller is responsible for @pii.
+ */
+static int decode_parent_image_spec(void **p, void *end,
+                                   struct parent_image_info *pii)
+{
+       u8 struct_v;
+       u32 struct_len;
+       int ret;
+
+       ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
+                                 &struct_v, &struct_len);
+       if (ret)
+               return ret;
+
+       ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
+       pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
+       if (IS_ERR(pii->pool_ns)) {
+               ret = PTR_ERR(pii->pool_ns);
+               pii->pool_ns = NULL;
+               return ret;
+       }
+       pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
+       if (IS_ERR(pii->image_id)) {
+               ret = PTR_ERR(pii->image_id);
+               pii->image_id = NULL;
+               return ret;
+       }
+       ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
+static int __get_parent_info(struct rbd_device *rbd_dev,
+                            struct page *req_page,
+                            struct page *reply_page,
+                            struct parent_image_info *pii)
+{
+       struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+       size_t reply_len = PAGE_SIZE;
+       void *p, *end;
+       int ret;
+
+       ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+                            "rbd", "parent_get", CEPH_OSD_FLAG_READ,
+                            req_page, sizeof(u64), reply_page, &reply_len);
+       if (ret)
+               return ret == -EOPNOTSUPP ? 1 : ret;
+
+       p = page_address(reply_page);
+       end = p + reply_len;
+       ret = decode_parent_image_spec(&p, end, pii);
+       if (ret)
+               return ret;
+
+       ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+                            "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
+                            req_page, sizeof(u64), reply_page, &reply_len);
+       if (ret)
+               return ret;
+
+       p = page_address(reply_page);
+       end = p + reply_len;
+       ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
+       if (pii->has_overlap)
+               ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
+/*
+ * The caller is responsible for @pii.
+ */
+static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
+                                   struct page *req_page,
+                                   struct page *reply_page,
+                                   struct parent_image_info *pii)
+{
+       struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+       size_t reply_len = PAGE_SIZE;
+       void *p, *end;
+       int ret;
+
+       ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+                            "rbd", "get_parent", CEPH_OSD_FLAG_READ,
+                            req_page, sizeof(u64), reply_page, &reply_len);
+       if (ret)
+               return ret;
+
+       p = page_address(reply_page);
+       end = p + reply_len;
+       ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
+       pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
+       if (IS_ERR(pii->image_id)) {
+               ret = PTR_ERR(pii->image_id);
+               pii->image_id = NULL;
+               return ret;
+       }
+       ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
+       pii->has_overlap = true;
+       ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
+static int get_parent_info(struct rbd_device *rbd_dev,
+                          struct parent_image_info *pii)
+{
+       struct page *req_page, *reply_page;
+       void *p;
+       int ret;
+
+       req_page = alloc_page(GFP_KERNEL);
+       if (!req_page)
+               return -ENOMEM;
+
+       reply_page = alloc_page(GFP_KERNEL);
+       if (!reply_page) {
+               __free_page(req_page);
+               return -ENOMEM;
+       }
+
+       p = page_address(req_page);
+       ceph_encode_64(&p, rbd_dev->spec->snap_id);
+       ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
+       if (ret > 0)
+               ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
+                                              pii);
+
+       __free_page(req_page);
+       __free_page(reply_page);
+       return ret;
+}
+
 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 {
        struct rbd_spec *parent_spec;
-       size_t size;
-       void *reply_buf = NULL;
-       __le64 snapid;
-       void *p;
-       void *end;
-       u64 pool_id;
-       char *image_id;
-       u64 snap_id;
-       u64 overlap;
+       struct parent_image_info pii = { 0 };
        int ret;
 
        parent_spec = rbd_spec_alloc();
        if (!parent_spec)
                return -ENOMEM;
 
-       size = sizeof (__le64) +                                /* pool_id */
-               sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +        /* image_id */
-               sizeof (__le64) +                               /* snap_id */
-               sizeof (__le64);                                /* overlap */
-       reply_buf = kmalloc(size, GFP_KERNEL);
-       if (!reply_buf) {
-               ret = -ENOMEM;
+       ret = get_parent_info(rbd_dev, &pii);
+       if (ret)
                goto out_err;
-       }
 
-       snapid = cpu_to_le64(rbd_dev->spec->snap_id);
-       ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
-                                 &rbd_dev->header_oloc, "get_parent",
-                                 &snapid, sizeof(snapid), reply_buf, size);
-       dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
-       if (ret < 0)
-               goto out_err;
+       dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+            __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
+            pii.has_overlap, pii.overlap);
 
-       p = reply_buf;
-       end = reply_buf + ret;
-       ret = -ERANGE;
-       ceph_decode_64_safe(&p, end, pool_id, out_err);
-       if (pool_id == CEPH_NOPOOL) {
+       if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
                /*
                 * Either the parent never existed, or we have
                 * record of it but the image got flattened so it no
@@ -4633,6 +4765,10 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
                 * overlap to 0.  The effect of this is that all new
                 * requests will be treated as if the image had no
                 * parent.
+                *
+                * If !pii.has_overlap, the parent image spec is not
+                * applicable.  It's there to avoid duplication in each
+                * snapshot record.
                 */
                if (rbd_dev->parent_overlap) {
                        rbd_dev->parent_overlap = 0;
@@ -4647,51 +4783,36 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        /* The ceph file layout needs to fit pool id in 32 bits */
 
        ret = -EIO;
-       if (pool_id > (u64)U32_MAX) {
+       if (pii.pool_id > (u64)U32_MAX) {
                rbd_warn(NULL, "parent pool id too large (%llu > %u)",
-                       (unsigned long long)pool_id, U32_MAX);
+                       (unsigned long long)pii.pool_id, U32_MAX);
                goto out_err;
        }
 
-       image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
-       if (IS_ERR(image_id)) {
-               ret = PTR_ERR(image_id);
-               goto out_err;
-       }
-       ceph_decode_64_safe(&p, end, snap_id, out_err);
-       ceph_decode_64_safe(&p, end, overlap, out_err);
-
        /*
         * The parent won't change (except when the clone is
         * flattened, already handled that).  So we only need to
         * record the parent spec we have not already done so.
         */
        if (!rbd_dev->parent_spec) {
-               parent_spec->pool_id = pool_id;
-               parent_spec->image_id = image_id;
-               parent_spec->snap_id = snap_id;
-
-               /* TODO: support cloning across namespaces */
-               if (rbd_dev->spec->pool_ns) {
-                       parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns,
-                                                      GFP_KERNEL);
-                       if (!parent_spec->pool_ns) {
-                               ret = -ENOMEM;
-                               goto out_err;
-                       }
+               parent_spec->pool_id = pii.pool_id;
+               if (pii.pool_ns && *pii.pool_ns) {
+                       parent_spec->pool_ns = pii.pool_ns;
+                       pii.pool_ns = NULL;
                }
+               parent_spec->image_id = pii.image_id;
+               pii.image_id = NULL;
+               parent_spec->snap_id = pii.snap_id;
 
                rbd_dev->parent_spec = parent_spec;
                parent_spec = NULL;     /* rbd_dev now owns this */
-       } else {
-               kfree(image_id);
        }
 
        /*
         * We always update the parent overlap.  If it's zero we issue
         * a warning, as we will proceed as if there was no parent.
         */
-       if (!overlap) {
+       if (!pii.overlap) {
                if (parent_spec) {
                        /* refresh, careful to warn just once */
                        if (rbd_dev->parent_overlap)
@@ -4702,14 +4823,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
                        rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
                }
        }
-       rbd_dev->parent_overlap = overlap;
+       rbd_dev->parent_overlap = pii.overlap;
 
 out:
        ret = 0;
 out_err:
-       kfree(reply_buf);
+       kfree(pii.pool_ns);
+       kfree(pii.image_id);
        rbd_spec_put(parent_spec);
-
        return ret;
 }
 
index ce277ee0a28a21a328bee687d6872842a1cda5cc..40728491f37b6a46be7e59399c6530263d2d40c1 100644 (file)
@@ -566,5 +566,5 @@ config RANDOM_TRUST_CPU
        that CPU manufacturer (perhaps with the insistence or mandate
        of a Nation State's intelligence or law enforcement agencies)
        has not installed a hidden back door to compromise the CPU's
-       random number generation facilities.
-
+       random number generation facilities. This can also be configured
+       at boot with "random.trust_cpu=on/off".
index bf5f99fc36f1b654aad2f4cfe196c9b66814be82..c75b6cdf00533ac82ec6fb957bc1669f1f44a34d 100644 (file)
@@ -779,6 +779,13 @@ static struct crng_state **crng_node_pool __read_mostly;
 
 static void invalidate_batched_entropy(void);
 
+static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static int __init parse_trust_cpu(char *arg)
+{
+       return kstrtobool(arg, &trust_cpu);
+}
+early_param("random.trust_cpu", parse_trust_cpu);
+
 static void crng_initialize(struct crng_state *crng)
 {
        int             i;
@@ -799,12 +806,10 @@ static void crng_initialize(struct crng_state *crng)
                }
                crng->state[i] ^= rv;
        }
-#ifdef CONFIG_RANDOM_TRUST_CPU
-       if (arch_init) {
+       if (trust_cpu && arch_init) {
                crng_init = 2;
                pr_notice("random: crng done (trusting CPU's manufacturer)\n");
        }
-#endif
        crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 }
 
index 6fd46083e62958eea61716225cd9f6c7fe11e748..bbe4d72ca105b001e36b1d09d382ee9e3a89ee7c 100644 (file)
@@ -392,7 +392,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 {
        struct file *filp = vmf->vma->vm_file;
        unsigned long fault_size;
-       int rc, id;
+       vm_fault_t rc = VM_FAULT_SIGBUS;
+       int id;
        pfn_t pfn;
        struct dev_dax *dev_dax = filp->private_data;
 
index 721e6c57beae5bfecb0f135c514fedd9ea2bdff0..64342944d9175c54918100c36f0b43a20e6acae3 100644 (file)
@@ -166,7 +166,13 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
                                        le32_to_cpu(attr->sustained_freq_khz);
                dom_info->sustained_perf_level =
                                        le32_to_cpu(attr->sustained_perf_level);
-               dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) /
+               if (!dom_info->sustained_freq_khz ||
+                   !dom_info->sustained_perf_level)
+                       /* CPUFreq converts to kHz, hence default 1000 */
+                       dom_info->mult_factor = 1000;
+               else
+                       dom_info->mult_factor =
+                                       (dom_info->sustained_freq_khz * 1000) /
                                        dom_info->sustained_perf_level;
                memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
        }
index 3530ccd17e04487153cb5d1cff0b9796ca38673a..da9781a2ef4adf230fc6704945fc02ac2bca3823 100644 (file)
@@ -41,6 +41,8 @@ struct adp5588_gpio {
        uint8_t int_en[3];
        uint8_t irq_mask[3];
        uint8_t irq_stat[3];
+       uint8_t int_input_en[3];
+       uint8_t int_lvl_cached[3];
 };
 
 static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
        struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
        int i;
 
-       for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+       for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+               if (dev->int_input_en[i]) {
+                       mutex_lock(&dev->lock);
+                       dev->dir[i] &= ~dev->int_input_en[i];
+                       dev->int_input_en[i] = 0;
+                       adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
+                                          dev->dir[i]);
+                       mutex_unlock(&dev->lock);
+               }
+
+               if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
+                       dev->int_lvl_cached[i] = dev->int_lvl[i];
+                       adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
+                                          dev->int_lvl[i]);
+               }
+
                if (dev->int_en[i] ^ dev->irq_mask[i]) {
                        dev->int_en[i] = dev->irq_mask[i];
                        adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
                                           dev->int_en[i]);
                }
+       }
 
        mutex_unlock(&dev->irq_lock);
 }
@@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
        else
                return -EINVAL;
 
-       adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
-       adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
-                          dev->int_lvl[bank]);
+       dev->int_input_en[bank] |= bit;
 
        return 0;
 }
index 28da700f5f5258f54117dee459904d2140d0d411..044888fd96a1f642617cba9a9dfc98655c5493aa 100644 (file)
@@ -728,6 +728,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
 out_unregister:
        dwapb_gpio_unregister(gpio);
        dwapb_irq_teardown(gpio);
+       clk_disable_unprepare(gpio->clk);
 
        return err;
 }
index c48ed9d89ff5f7b74eb03fb7ee079fa2b18e0dcf..8b9d7e42c600b60d26bad7f26c32ed938be6e7a2 100644 (file)
@@ -25,7 +25,6 @@
 
 struct acpi_gpio_event {
        struct list_head node;
-       struct list_head initial_sync_list;
        acpi_handle handle;
        unsigned int pin;
        unsigned int irq;
@@ -49,10 +48,19 @@ struct acpi_gpio_chip {
        struct mutex conn_lock;
        struct gpio_chip *chip;
        struct list_head events;
+       struct list_head deferred_req_irqs_list_entry;
 };
 
-static LIST_HEAD(acpi_gpio_initial_sync_list);
-static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
+/*
+ * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
+ * (so builtin drivers) we register the ACPI GpioInt event handlers from a
+ * late_initcall_sync handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run.  This list contains gpiochips
+ * for which the acpi_gpiochip_request_interrupts() has been deferred.
+ */
+static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
+static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
+static bool acpi_gpio_deferred_req_irqs_done;
 
 static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
 {
@@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
        return gpiochip_get_desc(chip, pin);
 }
 
-static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
-{
-       mutex_lock(&acpi_gpio_initial_sync_list_lock);
-       list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
-       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
-}
-
-static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
-{
-       mutex_lock(&acpi_gpio_initial_sync_list_lock);
-       if (!list_empty(&event->initial_sync_list))
-               list_del_init(&event->initial_sync_list);
-       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
-}
-
 static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
 {
        struct acpi_gpio_event *event = data;
@@ -186,7 +179,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 
        gpiod_direction_input(desc);
 
-       value = gpiod_get_value(desc);
+       value = gpiod_get_value_cansleep(desc);
 
        ret = gpiochip_lock_as_irq(chip, pin);
        if (ret) {
@@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
        event->irq = irq;
        event->pin = pin;
        event->desc = desc;
-       INIT_LIST_HEAD(&event->initial_sync_list);
 
        ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
                                   "ACPI:Event", event);
@@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
         * may refer to OperationRegions from other (builtin) drivers which
         * may be probed after us.
         */
-       if (handler == acpi_gpio_irq_handler &&
-           (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
-            ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
-               acpi_gpio_add_to_initial_sync_list(event);
+       if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+           ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+               handler(event->irq, event);
 
        return AE_OK;
 
@@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
        struct acpi_gpio_chip *acpi_gpio;
        acpi_handle handle;
        acpi_status status;
+       bool defer;
 
        if (!chip->parent || !chip->to_irq)
                return;
@@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
        if (ACPI_FAILURE(status))
                return;
 
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+       defer = !acpi_gpio_deferred_req_irqs_done;
+       if (defer)
+               list_add(&acpi_gpio->deferred_req_irqs_list_entry,
+                        &acpi_gpio_deferred_req_irqs_list);
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+       if (defer)
+               return;
+
        acpi_walk_resources(handle, "_AEI",
                            acpi_gpiochip_request_interrupt, acpi_gpio);
 }
@@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
        if (ACPI_FAILURE(status))
                return;
 
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+       if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
+               list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
        list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
                struct gpio_desc *desc;
 
-               acpi_gpio_del_from_initial_sync_list(event);
-
                if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
                        disable_irq_wake(event->irq);
 
@@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
 
        acpi_gpio->chip = chip;
        INIT_LIST_HEAD(&acpi_gpio->events);
+       INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
 
        status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
        if (ACPI_FAILURE(status)) {
@@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
        return con_id == NULL;
 }
 
-/* Sync the initial state of handlers after all builtin drivers have probed */
-static int acpi_gpio_initial_sync(void)
+/* Run deferred acpi_gpiochip_request_interrupts() */
+static int acpi_gpio_handle_deferred_request_interrupts(void)
 {
-       struct acpi_gpio_event *event, *ep;
+       struct acpi_gpio_chip *acpi_gpio, *tmp;
+
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+       list_for_each_entry_safe(acpi_gpio, tmp,
+                                &acpi_gpio_deferred_req_irqs_list,
+                                deferred_req_irqs_list_entry) {
+               acpi_handle handle;
 
-       mutex_lock(&acpi_gpio_initial_sync_list_lock);
-       list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
-                                initial_sync_list) {
-               acpi_evaluate_object(event->handle, NULL, NULL, NULL);
-               list_del_init(&event->initial_sync_list);
+               handle = ACPI_HANDLE(acpi_gpio->chip->parent);
+               acpi_walk_resources(handle, "_AEI",
+                                   acpi_gpiochip_request_interrupt, acpi_gpio);
+
+               list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
        }
-       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+
+       acpi_gpio_deferred_req_irqs_done = true;
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
 
        return 0;
 }
 /* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_initial_sync);
+late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
index a4f1157d6aa0893707d3880c9f0be4639c909b03..d4e7a09598faedbecb1ccdee24e6138b70e4e7d8 100644 (file)
@@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
        struct of_phandle_args *gpiospec = data;
 
        return chip->gpiodev->dev.of_node == gpiospec->np &&
+                               chip->of_xlate &&
                                chip->of_xlate(chip, gpiospec, NULL) >= 0;
 }
 
index 6e3f56684f4ec03e7285b9e3715688ce31de311d..51ed99a37803310e2d9b7e84882ae7df97e3b59c 100644 (file)
@@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
                unsigned int tiling_mode = 0;
                unsigned int stride = 0;
 
-               switch (info->drm_format_mod << 10) {
-               case PLANE_CTL_TILED_LINEAR:
+               switch (info->drm_format_mod) {
+               case DRM_FORMAT_MOD_LINEAR:
                        tiling_mode = I915_TILING_NONE;
                        break;
-               case PLANE_CTL_TILED_X:
+               case I915_FORMAT_MOD_X_TILED:
                        tiling_mode = I915_TILING_X;
                        stride = info->stride;
                        break;
-               case PLANE_CTL_TILED_Y:
+               case I915_FORMAT_MOD_Y_TILED:
+               case I915_FORMAT_MOD_Yf_TILED:
                        tiling_mode = I915_TILING_Y;
                        stride = info->stride;
                        break;
                default:
-                       gvt_dbg_core("not supported tiling mode\n");
+                       gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
+                                    info->drm_format_mod);
                }
                obj->tiling_and_stride = tiling_mode | stride;
        } else {
@@ -222,9 +224,26 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                info->height = p.height;
                info->stride = p.stride;
                info->drm_format = p.drm_format;
-               info->drm_format_mod = p.tiled;
+
+               switch (p.tiled) {
+               case PLANE_CTL_TILED_LINEAR:
+                       info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
+                       break;
+               case PLANE_CTL_TILED_X:
+                       info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+                       break;
+               case PLANE_CTL_TILED_Y:
+                       info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+                       break;
+               case PLANE_CTL_TILED_YF:
+                       info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+                       break;
+               default:
+                       gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
+               }
+
                info->size = (((p.stride * p.height * p.bpp) / 8) +
-                               (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+                             (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                if (ret)
index face664be3e8e8bf673e589c7ff9b176f8102a76..481896fb712abf4c178b28af2f224a369d0aefd9 100644 (file)
@@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
        if (IS_SKYLAKE(dev_priv)
                || IS_KABYLAKE(dev_priv)
                || IS_BROXTON(dev_priv)) {
-               plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
-               _PLANE_CTL_TILED_SHIFT;
+               plane->tiled = val & PLANE_CTL_TILED_MASK;
                fmt = skl_format_to_drm(
                        val & PLANE_CTL_FORMAT_MASK,
                        val & PLANE_CTL_ORDER_RGBX,
@@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
                return  -EINVAL;
        }
 
-       plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
+       plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
                (IS_SKYLAKE(dev_priv)
                || IS_KABYLAKE(dev_priv)
                || IS_BROXTON(dev_priv)) ?
index cb055f3c81a29c629f2dcc327ce0da9a1b827ee3..60c155085029cbb4742341a94d4d24a56da3d0c5 100644 (file)
@@ -101,7 +101,7 @@ struct intel_gvt;
 /* color space conversion and gamma correction are not included */
 struct intel_vgpu_primary_plane_format {
        u8      enabled;        /* plane is enabled */
-       u8      tiled;          /* X-tiled */
+       u32     tiled;          /* tiling mode: linear, X-tiled, Y tiled, etc */
        u8      bpp;            /* bits per pixel */
        u32     hw_format;      /* format field in the PRI_CTL register */
        u32     drm_format;     /* format in DRM definition */
index 7a58ca5551977a086ce8b25dbe18aa26497f45bc..72afa518edd91157f6a221f277536c0f96e30899 100644 (file)
@@ -1296,6 +1296,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
+static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       write_vreg(vgpu, offset, p_data, bytes);
+
+       if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
+               vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
+       else
+               vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
+
+       return 0;
+}
+
 static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
        unsigned int offset, void *p_data, unsigned int bytes)
 {
@@ -1525,9 +1538,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
        u32 v = *(u32 *)p_data;
        u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
 
-       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
-       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
-       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+       switch (offset) {
+       case _PHY_CTL_FAMILY_EDP:
+               vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+               break;
+       case _PHY_CTL_FAMILY_DDI:
+               vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+               vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+               break;
+       }
 
        vgpu_vreg(vgpu, offset) = v;
 
@@ -2812,6 +2831,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
                skl_power_well_ctl_write);
 
+       MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
+
        MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
        MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2987,8 +3008,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
                NULL, gen9_trtte_write);
        MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
 
-       MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
-
        MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
 
        MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
@@ -3025,7 +3044,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
        MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
-               NULL, NULL);
+                NULL, NULL);
+       MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+                NULL, NULL);
 
        MMIO_D(_MMIO(0x4ab8), D_KBL);
        MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
index 42e1e6bdcc2cfe64a3446eea8019b9a912141ba0..e872f4847fbe0ce9a1e646bbea84eb0b6b2e1f29 100644 (file)
@@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
         * performace for batch mmio read/write, so we need
         * handle forcewake mannually.
         */
-       intel_runtime_pm_get(dev_priv);
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
        switch_mmio(pre, next, ring_id);
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-       intel_runtime_pm_put(dev_priv);
 }
 
 /**
index 09d7bb72b4ff30e45688add463c1fdaaa9a71ad1..c32e7d5e862914b787d7cf5cbf706cb5c4a41748 100644 (file)
@@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
        return false;
 }
 
+/* We give 2 seconds higher prio for vGPU during start */
+#define GVT_SCHED_VGPU_PRI_TIME  2
+
 struct vgpu_sched_data {
        struct list_head lru_list;
        struct intel_vgpu *vgpu;
        bool active;
-
+       bool pri_sched;
+       ktime_t pri_time;
        ktime_t sched_in_time;
        ktime_t sched_time;
        ktime_t left_ts;
@@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
                if (!vgpu_has_pending_workload(vgpu_data->vgpu))
                        continue;
 
+               if (vgpu_data->pri_sched) {
+                       if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
+                               vgpu = vgpu_data->vgpu;
+                               break;
+                       } else
+                               vgpu_data->pri_sched = false;
+               }
+
                /* Return the vGPU only if it has time slice left */
                if (vgpu_data->left_ts > 0) {
                        vgpu = vgpu_data->vgpu;
@@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct vgpu_sched_data *vgpu_data;
        struct intel_vgpu *vgpu = NULL;
+
        /* no active vgpu or has already had a target */
        if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
                goto out;
@@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
        vgpu = find_busy_vgpu(sched_data);
        if (vgpu) {
                scheduler->next_vgpu = vgpu;
-
-               /* Move the last used vGPU to the tail of lru_list */
                vgpu_data = vgpu->sched_data;
-               list_del_init(&vgpu_data->lru_list);
-               list_add_tail(&vgpu_data->lru_list,
-                               &sched_data->lru_runq_head);
+               if (!vgpu_data->pri_sched) {
+                       /* Move the last used vGPU to the tail of lru_list */
+                       list_del_init(&vgpu_data->lru_list);
+                       list_add_tail(&vgpu_data->lru_list,
+                                     &sched_data->lru_runq_head);
+               }
        } else {
                scheduler->next_vgpu = gvt->idle_vgpu;
        }
@@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 {
        struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
        struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+       ktime_t now;
 
        if (!list_empty(&vgpu_data->lru_list))
                return;
 
-       list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+       now = ktime_get();
+       vgpu_data->pri_time = ktime_add(now,
+                                       ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
+       vgpu_data->pri_sched = true;
+
+       list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
 
        if (!hrtimer_active(&sched_data->timer))
                hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
@@ -426,6 +446,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                &vgpu->gvt->scheduler;
        int ring_id;
        struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
        if (!vgpu_data->active)
                return;
@@ -444,6 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                scheduler->current_vgpu = NULL;
        }
 
+       intel_runtime_pm_get(dev_priv);
        spin_lock_bh(&scheduler->mmio_context_lock);
        for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
                if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -452,5 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                }
        }
        spin_unlock_bh(&scheduler->mmio_context_lock);
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&vgpu->gvt->sched_lock);
 }
index 08ec7446282e7f981f74272e8c29c755abad9e21..9e63cd47b60f30a5b0f7682d2c3038dd81e2b3ce 100644 (file)
@@ -10422,7 +10422,7 @@ enum skl_power_gate {
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
 #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
-                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
 #define  DSC_INITIAL_DEC_DELAY(dec_delay)       ((dec_delay) << 16)
 #define  DSC_INITIAL_XMIT_DELAY(xmit_delay)     ((xmit_delay) << 0)
@@ -10437,7 +10437,7 @@ enum skl_power_gate {
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
 #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
-                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
 #define  DSC_SCALE_DEC_INTINT(scale_dec)       ((scale_dec) << 16)
 #define  DSC_SCALE_INC_INT(scale_inc)          ((scale_inc) << 0)
index 8761513f3532c5c4bfb56151833ab6217a5ab99f..c9af34861d9e3a5bf3c090f4d5cf7afafed83dfa 100644 (file)
@@ -2708,7 +2708,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
        if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
                intel_dp_stop_link_train(intel_dp);
 
-       intel_ddi_enable_pipe_clock(crtc_state);
+       if (!is_mst)
+               intel_ddi_enable_pipe_clock(crtc_state);
 }
 
 static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2810,14 +2811,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
        bool is_mst = intel_crtc_has_type(old_crtc_state,
                                          INTEL_OUTPUT_DP_MST);
 
-       intel_ddi_disable_pipe_clock(old_crtc_state);
-
-       /*
-        * Power down sink before disabling the port, otherwise we end
-        * up getting interrupts from the sink on detecting link loss.
-        */
-       if (!is_mst)
+       if (!is_mst) {
+               intel_ddi_disable_pipe_clock(old_crtc_state);
+               /*
+                * Power down sink before disabling the port, otherwise we end
+                * up getting interrupts from the sink on detecting link loss.
+                */
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+       }
 
        intel_disable_ddi_buf(encoder);
 
index cd0f649b57a5b75dff70265637a3a4b0ead4373b..1193202766a2cb86721cc8aa918c5f692c822c7e 100644 (file)
@@ -4160,18 +4160,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
        return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
 }
 
-/*
- * If display is now connected check links status,
- * there has been known issues of link loss triggering
- * long pulse.
- *
- * Some sinks (eg. ASUS PB287Q) seem to perform some
- * weird HPD ping pong during modesets. So we can apparently
- * end up with HPD going low during a modeset, and then
- * going back up soon after. And once that happens we must
- * retrain the link to get a picture. That's in case no
- * userspace component reacted to intermittent HPD dip.
- */
 int intel_dp_retrain_link(struct intel_encoder *encoder,
                          struct drm_modeset_acquire_ctx *ctx)
 {
@@ -4661,7 +4649,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 }
 
 static int
-intel_dp_long_pulse(struct intel_connector *connector)
+intel_dp_long_pulse(struct intel_connector *connector,
+                   struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
@@ -4720,6 +4709,22 @@ intel_dp_long_pulse(struct intel_connector *connector)
                 */
                status = connector_status_disconnected;
                goto out;
+       } else {
+               /*
+                * If display is now connected check links status,
+                * there has been known issues of link loss triggering
+                * long pulse.
+                *
+                * Some sinks (eg. ASUS PB287Q) seem to perform some
+                * weird HPD ping pong during modesets. So we can apparently
+                * end up with HPD going low during a modeset, and then
+                * going back up soon after. And once that happens we must
+                * retrain the link to get a picture. That's in case no
+                * userspace component reacted to intermittent HPD dip.
+                */
+               struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+               intel_dp_retrain_link(encoder, ctx);
        }
 
        /*
@@ -4781,7 +4786,7 @@ intel_dp_detect(struct drm_connector *connector,
                                return ret;
                }
 
-               status = intel_dp_long_pulse(intel_dp->attached_connector);
+               status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
        }
 
        intel_dp->detect_done = false;
index 7e3e01607643d3e6b4dfc6332284be001966e280..4ecd65375603391ee02f6a11831feecdae397b0b 100644 (file)
@@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
        struct intel_connector *connector =
                to_intel_connector(old_conn_state->connector);
 
+       intel_ddi_disable_pipe_clock(old_crtc_state);
+
        /* this can fail */
        drm_dp_check_act_status(&intel_dp->mst_mgr);
        /* and this can also fail */
@@ -252,6 +254,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
        I915_WRITE(DP_TP_STATUS(port), temp);
 
        ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+
+       intel_ddi_enable_pipe_clock(pipe_config);
 }
 
 static void intel_mst_enable_dp(struct intel_encoder *encoder,
index fb4e4a6bb1f63c2b34749a31ad4b18105f297d5b..be5ba469089531b26b0ba2d0b1866c20f3e5312b 100644 (file)
@@ -164,3 +164,4 @@ module_platform_driver(rpi_hwmon_driver);
 MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
 MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:raspberrypi-hwmon");
index 04b60a349d7ed6edf44e8f75dd524969a30cf853..c91e145ef5a56dbb1a512c23611f06ad7d22aa05 100644 (file)
 
 #define SBREG_BAR              0x10
 #define SBREG_SMBCTRL          0xc6000c
+#define SBREG_SMBCTRL_DNV      0xcf000c
 
 /* Host status bits for SMBPCISTS */
 #define SMBPCISTS_INTS         BIT(3)
@@ -1399,7 +1400,11 @@ static void i801_add_tco(struct i801_priv *priv)
        spin_unlock(&p2sb_spinlock);
 
        res = &tco_res[ICH_RES_MEM_OFF];
-       res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+       if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
+               res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+       else
+               res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+
        res->end = res->start + 3;
        res->flags = IORESOURCE_MEM;
 
index 6d975f5221ca0e37410c9d2e770f7d78bfe270b2..06c4c767af322aa2704d5ae51a59e5c103579fc5 100644 (file)
@@ -538,7 +538,6 @@ static const struct i2c_algorithm lpi2c_imx_algo = {
 
 static const struct of_device_id lpi2c_imx_of_match[] = {
        { .compatible = "fsl,imx7ulp-lpi2c" },
-       { .compatible = "fsl,imx8dv-lpi2c" },
        { },
 };
 MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
index 9918bdd816196281ae6c64d364d34ecaf89a65ef..a403e8579b652b6e2486f746b91b9357c3ab7381 100644 (file)
@@ -401,11 +401,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
                return ret;
 
        for (msg = msgs; msg < emsg; msg++) {
-               /* If next message is read, skip the stop condition */
-               bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-               /* but, force it if I2C_M_STOP is set */
-               if (msg->flags & I2C_M_STOP)
-                       stop = true;
+               /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+               bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
                ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
                if (ret)
index bb181b0882919acef145d27ec236b705b8f2e7f0..454f914ae66dbd49931575122bb7c7dea662b11b 100644 (file)
@@ -248,11 +248,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap,
                return ret;
 
        for (msg = msgs; msg < emsg; msg++) {
-               /* If next message is read, skip the stop condition */
-               bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-               /* but, force it if I2C_M_STOP is set */
-               if (msg->flags & I2C_M_STOP)
-                       stop = true;
+               /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+               bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
                ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
                if (ret)
index 9a71e50d21f1fbae53ebdb2b1831e7f6067a67b9..0c51c0ffdda9d99d03f28c5503ef1cffaa5316c2 100644 (file)
@@ -532,6 +532,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
 {
        u8 rx_watermark;
        struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
+       unsigned long flags;
 
        /* Clear and enable Rx full interrupt. */
        xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
@@ -547,6 +548,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
                rx_watermark = IIC_RX_FIFO_DEPTH;
        xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
 
+       local_irq_save(flags);
        if (!(msg->flags & I2C_M_NOSTART))
                /* write the address */
                xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
@@ -556,6 +558,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
 
        xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
                msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
+       local_irq_restore(flags);
+
        if (i2c->nmsgs == 1)
                /* very last, enable bus not busy as well */
                xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
index 316a57530f6d108ace5345f0e4077e8f20771924..c2df341ff6fafd83df6463669c9ad6a1ceac56c9 100644 (file)
@@ -1439,6 +1439,7 @@ static struct irq_chip its_irq_chip = {
  * The consequence of the above is that allocation is cost is low, but
  * freeing is expensive. We assumes that freeing rarely occurs.
  */
+#define ITS_MAX_LPI_NRBITS     16 /* 64K LPIs */
 
 static DEFINE_MUTEX(lpi_range_lock);
 static LIST_HEAD(lpi_range_list);
@@ -1625,7 +1626,8 @@ static int __init its_alloc_lpi_tables(void)
 {
        phys_addr_t paddr;
 
-       lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
+       lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
+                               ITS_MAX_LPI_NRBITS);
        gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
        if (!gic_rdists->prop_page) {
                pr_err("Failed to allocate PROPBASE\n");
index 94329e03001ec8d0ca15ff4ad0d04e7fdebd0b5d..0b2af6e74fc375ed163824fa9cdaf84d1b9ffd95 100644 (file)
@@ -1276,18 +1276,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
 static int resync_finish(struct mddev *mddev)
 {
        struct md_cluster_info *cinfo = mddev->cluster_info;
+       int ret = 0;
 
        clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
-       dlm_unlock_sync(cinfo->resync_lockres);
 
        /*
         * If resync thread is interrupted so we can't say resync is finished,
         * another node will launch resync thread to continue.
         */
-       if (test_bit(MD_CLOSING, &mddev->flags))
-               return 0;
-       else
-               return resync_info_update(mddev, 0, 0);
+       if (!test_bit(MD_CLOSING, &mddev->flags))
+               ret = resync_info_update(mddev, 0, 0);
+       dlm_unlock_sync(cinfo->resync_lockres);
+       return ret;
 }
 
 static int area_resyncing(struct mddev *mddev, int direction,
index 9818980494914eb72e085173fc015e012afb5dce..d6f7978b4449e92aba522035941ff51137e2ce38 100644 (file)
@@ -4529,11 +4529,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
                allow_barrier(conf);
        }
 
+       raise_barrier(conf, 0);
 read_more:
        /* Now schedule reads for blocks from sector_nr to last */
        r10_bio = raid10_alloc_init_r10buf(conf);
        r10_bio->state = 0;
-       raise_barrier(conf, sectors_done != 0);
+       raise_barrier(conf, 1);
        atomic_set(&r10_bio->remaining, 0);
        r10_bio->mddev = mddev;
        r10_bio->sector = sector_nr;
@@ -4629,6 +4630,8 @@ read_more:
        if (sector_nr <= last)
                goto read_more;
 
+       lower_barrier(conf);
+
        /* Now that we have done the whole section we can
         * update reshape_progress
         */
index a001808a2b77da16bc2ae3c1d4aa32b5f944a939..bfb81140706140a53af24ccf90420e6cc107cef1 100644 (file)
@@ -46,6 +46,11 @@ extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
 extern void ppl_quiesce(struct r5conf *conf, int quiesce);
 extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
 
+static inline bool raid5_has_log(struct r5conf *conf)
+{
+       return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
+}
+
 static inline bool raid5_has_ppl(struct r5conf *conf)
 {
        return test_bit(MD_HAS_PPL, &conf->mddev->flags);
index 4ce0d7502fad84ac9422b6fc00404136340397ac..e4e98f47865def0449979058c6c7e51228b9b42b 100644 (file)
@@ -733,7 +733,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
 {
        struct r5conf *conf = sh->raid_conf;
 
-       if (conf->log || raid5_has_ppl(conf))
+       if (raid5_has_log(conf) || raid5_has_ppl(conf))
                return false;
        return test_bit(STRIPE_BATCH_READY, &sh->state) &&
                !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
@@ -7737,7 +7737,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
        sector_t newsize;
        struct r5conf *conf = mddev->private;
 
-       if (conf->log || raid5_has_ppl(conf))
+       if (raid5_has_log(conf) || raid5_has_ppl(conf))
                return -EINVAL;
        sectors &= ~((sector_t)conf->chunk_sectors - 1);
        newsize = raid5_size(mddev, sectors, mddev->raid_disks);
@@ -7788,7 +7788,7 @@ static int check_reshape(struct mddev *mddev)
 {
        struct r5conf *conf = mddev->private;
 
-       if (conf->log || raid5_has_ppl(conf))
+       if (raid5_has_log(conf) || raid5_has_ppl(conf))
                return -EINVAL;
        if (mddev->delta_disks == 0 &&
            mddev->new_layout == mddev->layout &&
index 31112f622b884f2577d46caec86b2b63370e81de..475e5b3790edb4085055fcd38df2f6d77d98388d 100644 (file)
@@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev)
                        if (ret < 0)
                                goto error;
                }
-       } else {
+       } else if (pdata) {
                for (i = 0; i < pdata->num_sub_devices; i++) {
                        pdata->sub_devices[i].dev.parent = dev;
                        ret = platform_device_register(&pdata->sub_devices[i]);
index 8bb1e38b1681a32f2175c649b756791694f08105..cecbb1d1f587f936b0d2960739c911fc28f05d51 100644 (file)
@@ -5913,12 +5913,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
        return bp->hw_resc.max_cp_rings;
 }
 
-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
+unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
 {
-       bp->hw_resc.max_cp_rings = max;
+       return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
 }
 
-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
 {
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 
@@ -6684,6 +6684,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
                hw_resc->resv_rx_rings = 0;
                hw_resc->resv_hw_ring_grps = 0;
                hw_resc->resv_vnics = 0;
+               bp->tx_nr_rings = 0;
+               bp->rx_nr_rings = 0;
        }
        return rc;
 }
@@ -8629,7 +8631,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 
        *max_tx = hw_resc->max_tx_rings;
        *max_rx = hw_resc->max_rx_rings;
-       *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
+       *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
+                       hw_resc->max_irqs);
        *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
        max_ring_grps = hw_resc->max_hw_ring_grps;
        if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -8769,20 +8772,25 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
        if (bp->tx_nr_rings)
                return 0;
 
+       bnxt_ulp_irq_stop(bp);
+       bnxt_clear_int_mode(bp);
        rc = bnxt_set_dflt_rings(bp, true);
        if (rc) {
                netdev_err(bp->dev, "Not enough rings available.\n");
-               return rc;
+               goto init_dflt_ring_err;
        }
        rc = bnxt_init_int_mode(bp);
        if (rc)
-               return rc;
+               goto init_dflt_ring_err;
+
        bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
        if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
                bp->flags |= BNXT_FLAG_RFS;
                bp->dev->features |= NETIF_F_NTUPLE;
        }
-       return 0;
+init_dflt_ring_err:
+       bnxt_ulp_irq_restart(bp, rc);
+       return rc;
 }
 
 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
index fefa011320e0f461047cd397e363d220b5401357..bde384630a75f9aaa723a6976ba1b891465cd9e2 100644 (file)
@@ -1481,8 +1481,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
+unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp);
 int bnxt_get_avail_msix(struct bnxt *bp, int num);
 int bnxt_reserve_rings(struct bnxt *bp);
 void bnxt_tx_disable(struct bnxt *bp);
index 6d583bcd2a81b2f813128ac08b2f55c19feb8545..fcd085a9853a96e694bcbb3847d554d41f1a014c 100644 (file)
@@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
 
-       vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
+       vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
        vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
@@ -549,7 +549,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
        max_stat_ctxs = hw_resc->max_stat_ctxs;
 
        /* Remaining rings are distributed equally amongs VF's for now */
-       vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
+       vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
+                      bp->cp_nr_rings) / num_vfs;
        vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
@@ -643,7 +644,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
         */
        vfs_supported = *num_vfs;
 
-       avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
+       avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
        avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
        avail_cp = min_t(int, avail_cp, avail_stat);
 
index c37b2842f972ced2fb363513368c94b3e42b6815..beee61292d5e522bae0842e5e76253eeb4e1d1f4 100644 (file)
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
                edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
        }
        bnxt_fill_msix_vecs(bp, ent);
-       bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
        edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
        return avail_msix;
 }
@@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
 {
        struct net_device *dev = edev->net;
        struct bnxt *bp = netdev_priv(dev);
-       int max_cp_rings, msix_requested;
 
        ASSERT_RTNL();
        if (ulp_id != BNXT_ROCE_ULP)
@@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
        if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
                return 0;
 
-       max_cp_rings = bnxt_get_max_func_cp_rings(bp);
-       msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
-       bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
        edev->ulp_tbl[ulp_id].msix_requested = 0;
        edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
        if (netif_running(dev)) {
@@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
        return 0;
 }
 
-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
-{
-       ASSERT_RTNL();
-       if (bnxt_ulp_registered(bp->edev, ulp_id)) {
-               struct bnxt_en_dev *edev = bp->edev;
-               unsigned int msix_req, max;
-
-               msix_req = edev->ulp_tbl[ulp_id].msix_requested;
-               max = bnxt_get_max_func_cp_rings(bp);
-               bnxt_set_max_func_cp_rings(bp, max - msix_req);
-               max = bnxt_get_max_func_stat_ctxs(bp);
-               bnxt_set_max_func_stat_ctxs(bp, max - 1);
-       }
-}
-
 static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
                         struct bnxt_fw_msg *fw_msg)
 {
index df48ac71729f51d897da9a134d0a1f923b13aeaa..d9bea37cd211f5ce85642f7c6a2f489f51f909b5 100644 (file)
@@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
 
 int bnxt_get_ulp_msix_num(struct bnxt *bp);
 int bnxt_get_ulp_msix_base(struct bnxt *bp);
-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
 void bnxt_ulp_stop(struct bnxt *bp);
 void bnxt_ulp_start(struct bnxt *bp);
 void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
index b773bc07edf7cf23154f883a96724bb0407ad303..14b49612aa8639816c1d4b58bdbf5d9ff1995248 100644 (file)
@@ -186,6 +186,9 @@ struct bcmgenet_mib_counters {
 #define UMAC_MAC1                      0x010
 #define UMAC_MAX_FRAME_LEN             0x014
 
+#define UMAC_MODE                      0x44
+#define  MODE_LINK_STATUS              (1 << 5)
+
 #define UMAC_EEE_CTRL                  0x064
 #define  EN_LPI_RX_PAUSE               (1 << 0)
 #define  EN_LPI_TX_PFC                 (1 << 1)
index 5333274a283cbf3d3e1f20c9e9a1905b4d3aa967..4241ae928d4abb4f61d39344e93d08603a2d2c86 100644 (file)
@@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
 static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
                                          struct fixed_phy_status *status)
 {
-       if (dev && dev->phydev && status)
-               status->link = dev->phydev->link;
+       struct bcmgenet_priv *priv;
+       u32 reg;
+
+       if (dev && dev->phydev && status) {
+               priv = netdev_priv(dev);
+               reg = bcmgenet_umac_readl(priv, UMAC_MODE);
+               status->link = !!(reg & MODE_LINK_STATUS);
+       }
 
        return 0;
 }
index c6707ea2d75198c1157c5e675975d9319e150a9a..16e4ef7d71855c7ed10905c3fdcf8d3a255f20c4 100644 (file)
@@ -649,7 +649,7 @@ static int macb_halt_tx(struct macb *bp)
                if (!(status & MACB_BIT(TGO)))
                        return 0;
 
-               usleep_range(10, 250);
+               udelay(250);
        } while (time_before(halt_time, timeout));
 
        return -ETIMEDOUT;
index cad52bd331f7b295853b4d2765ad5b1ebd37b0d9..08a750fb60c49d397c61845130e153fe1e3b0b3e 100644 (file)
@@ -486,6 +486,8 @@ struct hnae_ae_ops {
                        u8 *auto_neg, u16 *speed, u8 *duplex);
        void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
        void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
+       bool (*need_adjust_link)(struct hnae_handle *handle,
+                                int speed, int duplex);
        int (*set_loopback)(struct hnae_handle *handle,
                            enum hnae_loop loop_mode, int en);
        void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
index e6aad30e7e69cd27c904542b29abaaec908bb9c7..b52029e26d15323b98811c5180a3d78ac288bf52 100644 (file)
@@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
                hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
 }
 
+static int hns_ae_wait_flow_down(struct hnae_handle *handle)
+{
+       struct dsaf_device *dsaf_dev;
+       struct hns_ppe_cb *ppe_cb;
+       struct hnae_vf_cb *vf_cb;
+       int ret;
+       int i;
+
+       for (i = 0; i < handle->q_num; i++) {
+               ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
+               if (ret)
+                       return ret;
+       }
+
+       ppe_cb = hns_get_ppe_cb(handle);
+       ret = hns_ppe_wait_tx_fifo_clean(ppe_cb);
+       if (ret)
+               return ret;
+
+       dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+       if (!dsaf_dev)
+               return -EINVAL;
+       ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id);
+       if (ret)
+               return ret;
+
+       vf_cb = hns_ae_get_vf_cb(handle);
+       ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb);
+       if (ret)
+               return ret;
+
+       mdelay(10);
+       return 0;
+}
+
 static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
 {
        int q_num = handle->q_num;
@@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle,
        return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
 }
 
+static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed,
+                                   int duplex)
+{
+       struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+       return hns_mac_need_adjust_link(mac_cb, speed, duplex);
+}
+
 static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
                               int duplex)
 {
        struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
 
-       hns_mac_adjust_link(mac_cb, speed, duplex);
+       switch (mac_cb->dsaf_dev->dsaf_ver) {
+       case AE_VERSION_1:
+               hns_mac_adjust_link(mac_cb, speed, duplex);
+               break;
+
+       case AE_VERSION_2:
+               /* chip need to clear all pkt inside */
+               hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);
+               if (hns_ae_wait_flow_down(handle)) {
+                       hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
+                       break;
+               }
+
+               hns_mac_adjust_link(mac_cb, speed, duplex);
+               hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
+               break;
+
+       default:
+               break;
+       }
+
+       return;
 }
 
 static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
@@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
        .get_status = hns_ae_get_link_status,
        .get_info = hns_ae_get_mac_info,
        .adjust_link = hns_ae_adjust_link,
+       .need_adjust_link = hns_ae_need_adjust_link,
        .set_loopback = hns_ae_config_loopback,
        .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
        .get_pauseparam = hns_ae_get_pauseparam,
index 5488c6e89f211d355ab33e0cda4033b84dcdccb3..09e4061d1fa60a80584b47cea3725374606a429d 100644 (file)
@@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
        *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
 }
 
+static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed,
+                                     int duplex)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       struct hns_mac_cb *mac_cb = drv->mac_cb;
+
+       return (mac_cb->speed != speed) ||
+               (mac_cb->half_duplex == duplex);
+}
+
 static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
                                u32 full_duplex)
 {
@@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en)
                hns_gmac_set_uc_match(mac_drv, en);
 }
 
+int hns_gmac_wait_fifo_clean(void *mac_drv)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       int wait_cnt;
+       u32 val;
+
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG);
+               /* bit5~bit0 is not send complete pkts */
+               if ((val & 0x3f) == 0)
+                       break;
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(drv->dev,
+                       "hns ge %d fifo was not idle.\n", drv->mac_id);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 static void hns_gmac_init(void *mac_drv)
 {
        u32 port;
@@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
        mac_drv->mac_disable = hns_gmac_disable;
        mac_drv->mac_free = hns_gmac_free;
        mac_drv->adjust_link = hns_gmac_adjust_link;
+       mac_drv->need_adjust_link = hns_gmac_need_adjust_link;
        mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
        mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
        mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
@@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
        mac_drv->get_strings = hns_gmac_get_strings;
        mac_drv->update_stats = hns_gmac_update_stats;
        mac_drv->set_promiscuous = hns_gmac_set_promisc;
+       mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean;
 
        return (void *)mac_drv;
 }
index 1c2326bd76e24d301d5689dd163c588d811dc4e8..6ed6f142427e4b68434df612568a7fea2f0433bc 100644 (file)
@@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
        return 0;
 }
 
+/**
+ *hns_mac_is_adjust_link - check is need change mac speed and duplex register
+ *@mac_cb: mac device
+ *@speed: phy device speed
+ *@duplex:phy device duplex
+ *
+ */
+bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
+{
+       struct mac_driver *mac_ctrl_drv;
+
+       mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
+
+       if (mac_ctrl_drv->need_adjust_link)
+               return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv,
+                       (enum mac_speed)speed, duplex);
+       else
+               return true;
+}
+
 void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
 {
        int ret;
@@ -430,6 +450,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
        return 0;
 }
 
+int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb)
+{
+       struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+
+       if (drv->wait_fifo_clean)
+               return drv->wait_fifo_clean(drv);
+
+       return 0;
+}
+
 void hns_mac_reset(struct hns_mac_cb *mac_cb)
 {
        struct mac_driver *drv = hns_mac_get_drv(mac_cb);
@@ -998,6 +1028,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
                return  DSAF_MAX_PORT_NUM;
 }
 
+void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode);
+}
+
+void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode);
+}
+
 /**
  * hns_mac_init - init mac
  * @dsaf_dev: dsa fabric device struct pointer
index bbc0a98e7ca3260c7f8b1b8cb7f7f203860bfdce..fbc75341bef760b82a1d7a10469d5649db91e366 100644 (file)
@@ -356,6 +356,9 @@ struct mac_driver {
        /*adjust mac mode of port,include speed and duplex*/
        int (*adjust_link)(void *mac_drv, enum mac_speed speed,
                           u32 full_duplex);
+       /* need adjust link */
+       bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed,
+                                int duplex);
        /* config autoegotaite mode of port*/
        void (*set_an_mode)(void *mac_drv, u8 enable);
        /* config loopbank mode */
@@ -394,6 +397,7 @@ struct mac_driver {
        void (*get_info)(void *mac_drv, struct mac_info *mac_info);
 
        void (*update_stats)(void *mac_drv);
+       int (*wait_fifo_clean)(void *mac_drv);
 
        enum mac_mode mac_mode;
        u8 mac_id;
@@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
 
 int hns_mac_init(struct dsaf_device *dsaf_dev);
 void mac_adjust_link(struct net_device *net_dev);
+bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
 void hns_mac_get_link_status(struct hns_mac_cb *mac_cb,        u32 *link_status);
 int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
 int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
@@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
 int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
                       const unsigned char *addr);
 int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn);
+void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
+void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
+int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb);
 
 #endif /* _HNS_DSAF_MAC_H */
index ca50c2553a9cb16c92e2d1dbfeaac7e03808fc71..e557a4ef5996c6772804ca746830af633a4adca9 100644 (file)
@@ -2727,6 +2727,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
        soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
 }
 
+int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
+{
+       u32 val, val_tmp;
+       int wait_cnt;
+
+       if (port >= DSAF_SERVICE_NW_NUM)
+               return 0;
+
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG +
+                       (port + DSAF_XGE_NUM) * 0x40);
+               val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG +
+                       (port + DSAF_XGE_NUM) * 0x40);
+               if (val == val_tmp)
+                       break;
+
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n",
+                       val, val_tmp);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /**
  * dsaf_probe - probo dsaf dev
  * @pdev: dasf platform device
index 4507e8222683c112c05eeca4633e65990b789b8f..0e1cd99831a6083faa790aa80be1f6c635b15a50 100644 (file)
@@ -44,6 +44,8 @@ struct hns_mac_cb;
 #define DSAF_ROCE_CREDIT_CHN   8
 #define DSAF_ROCE_CHAN_MODE    3
 
+#define HNS_MAX_WAIT_CNT 10000
+
 enum dsaf_roce_port_mode {
        DSAF_ROCE_6PORT_MODE,
        DSAF_ROCE_4PORT_MODE,
@@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr(
 
 int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
                             u8 mac_id, u8 port_num);
+int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
 
 #endif /* __HNS_DSAF_MAIN_H__ */
index d160d8c9e45ba60b3543643c13748272baada910..0942e4916d9d0d1b0b78958481ea099e66e59639 100644 (file)
@@ -275,6 +275,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
        dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk);
 }
 
+int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb)
+{
+       int wait_cnt;
+       u32 val;
+
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU;
+               if (!val)
+                       break;
+
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n",
+                       val);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /**
  * ppe_init_hw - init ppe
  * @ppe_cb: ppe device
index 9d8e643e8aa6ff518ab68d4af4c42d292a37ae47..f670e63a5a018cd5b48b4a62093c104905aa4463 100644 (file)
@@ -100,6 +100,7 @@ struct ppe_common_cb {
 
 };
 
+int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb);
 int hns_ppe_init(struct dsaf_device *dsaf_dev);
 
 void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
index 9d76e2e54f9df576b65702f720a79ab2a612dbb3..5d64519b9b1dc3cfd6e2c403126aa4ffa5832dfc 100644 (file)
@@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
                        "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
 }
 
+int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
+{
+       u32 head, tail;
+       int wait_cnt;
+
+       tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
+               if (tail == head)
+                       break;
+
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /**
  *hns_rcb_reset_ring_hw - ring reset
  *@q: ring struct pointer
index 602816498c8dd0c4aecd20ae9f5df196d7944821..2319b772a271e519d6a69a0976713a19c60f4a8a 100644 (file)
@@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
 void hns_rcb_init_hw(struct ring_pair_cb *ring);
 void hns_rcb_reset_ring_hw(struct hnae_queue *q);
 void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
+int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
 u32 hns_rcb_get_rx_coalesced_frames(
        struct rcb_common_cb *rcb_common, u32 port_idx);
 u32 hns_rcb_get_tx_coalesced_frames(
index 886cbbf25761aadc04c18573e9536ecd22e8095b..74d935d82cbc6050a287a07532024675ce75254e 100644 (file)
 #define RCB_RING_INTMSK_TX_OVERTIME_REG                0x000C4
 #define RCB_RING_INTSTS_TX_OVERTIME_REG                0x000C8
 
+#define GMAC_FIFO_STATE_REG                    0x0000UL
 #define GMAC_DUPLEX_TYPE_REG                   0x0008UL
 #define GMAC_FD_FC_TYPE_REG                    0x000CUL
 #define GMAC_TX_WATER_LINE_REG                 0x0010UL
index 02a0ba20fad55f43c0e3cd8531bc38f60f790827..f56855e63c961333f20f842a3558a920d201ccc9 100644 (file)
@@ -1112,11 +1112,26 @@ static void hns_nic_adjust_link(struct net_device *ndev)
        struct hnae_handle *h = priv->ae_handle;
        int state = 1;
 
+       /* If there is no phy, do not need adjust link */
        if (ndev->phydev) {
-               h->dev->ops->adjust_link(h, ndev->phydev->speed,
-                                        ndev->phydev->duplex);
-               state = ndev->phydev->link;
+               /* When phy link down, do nothing */
+               if (ndev->phydev->link == 0)
+                       return;
+
+               if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
+                                                 ndev->phydev->duplex)) {
+                       /* because Hi161X chip don't support to change gmac
+                        * speed and duplex with traffic. Delay 200ms to
+                        * make sure there is no more data in chip FIFO.
+                        */
+                       netif_carrier_off(ndev);
+                       msleep(200);
+                       h->dev->ops->adjust_link(h, ndev->phydev->speed,
+                                                ndev->phydev->duplex);
+                       netif_carrier_on(ndev);
+               }
        }
+
        state = state && h->dev->ops->get_status(h);
 
        if (state != priv->link) {
index 08f3c4743f747c7cbe020503d330821fe5427b68..774beda040a16a93a80db08e9383a2fe69a85eba 100644 (file)
@@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev,
        }
 
        if (h->dev->ops->adjust_link) {
+               netif_carrier_off(net_dev);
                h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
+               netif_carrier_on(net_dev);
                return 0;
        }
 
index 354c0982847b8f1cc478e598f8bd068225aa1241..3726646863095dc145e8df1ccc5d082f9fd802bd 100644 (file)
@@ -494,9 +494,6 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
        case 16384:
                ret |= EMAC_MR1_RFS_16K;
                break;
-       case 8192:
-               ret |= EMAC4_MR1_RFS_8K;
-               break;
        case 4096:
                ret |= EMAC_MR1_RFS_4K;
                break;
@@ -537,6 +534,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
        case 16384:
                ret |= EMAC4_MR1_RFS_16K;
                break;
+       case 8192:
+               ret |= EMAC4_MR1_RFS_8K;
+               break;
        case 4096:
                ret |= EMAC4_MR1_RFS_4K;
                break;
index dafdd4ade705b346349ce4671910424fa7b28d15..4f0daf67b18df2dcf11d7a406ebc1982e0fee466 100644 (file)
@@ -1823,11 +1823,17 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                        adapter->map_id = 1;
                        release_rx_pools(adapter);
                        release_tx_pools(adapter);
-                       init_rx_pools(netdev);
-                       init_tx_pools(netdev);
+                       rc = init_rx_pools(netdev);
+                       if (rc)
+                               return rc;
+                       rc = init_tx_pools(netdev);
+                       if (rc)
+                               return rc;
 
                        release_napi(adapter);
-                       init_napi(adapter);
+                       rc = init_napi(adapter);
+                       if (rc)
+                               return rc;
                } else {
                        rc = reset_tx_pools(adapter);
                        if (rc)
index 32d785b616e1e270f2adb47978ce3b3f172f02ac..28500417843ed3db0b7b0d53e705f7384b3341b1 100644 (file)
@@ -4803,6 +4803,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        dev->min_mtu = ETH_MIN_MTU;
        /* 9704 == 9728 - 20 and rounding to 8 */
        dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
+       dev->dev.of_node = port_node;
 
        /* Phylink isn't used w/ ACPI as of now */
        if (port_node) {
index 86478a6b99c5068e13688f2556e954ee3b3f9486..c8c315eb512804033fd88723c217504007010453 100644 (file)
@@ -139,14 +139,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      struct mlx5_wq_ctrl *wq_ctrl)
 {
        u32 sq_strides_offset;
+       u32 rq_pg_remainder;
        int err;
 
        mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
                      MLX5_GET(qpc, qpc, log_rq_size),
                      &wq->rq.fbc);
 
-       sq_strides_offset =
-               ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+       rq_pg_remainder   = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
+       sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
 
        mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
                             MLX5_GET(qpc, qpc, log_sq_size),
index a8b9fbab5f73391eaf20f10afb2c70f44ad44d29..253bdaef150557a7e20ded3de921021a051a9ca7 100644 (file)
@@ -229,29 +229,16 @@ done:
        spin_unlock_bh(&nn->reconfig_lock);
 }
 
-/**
- * nfp_net_reconfig() - Reconfigure the firmware
- * @nn:      NFP Net device to reconfigure
- * @update:  The value for the update field in the BAR config
- *
- * Write the update word to the BAR and ping the reconfig queue.  The
- * poll until the firmware has acknowledged the update by zeroing the
- * update word.
- *
- * Return: Negative errno on error, 0 on success
- */
-int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
 {
        bool cancelled_timer = false;
        u32 pre_posted_requests;
-       int ret;
 
        spin_lock_bh(&nn->reconfig_lock);
 
        nn->reconfig_sync_present = true;
 
        if (nn->reconfig_timer_active) {
-               del_timer(&nn->reconfig_timer);
                nn->reconfig_timer_active = false;
                cancelled_timer = true;
        }
@@ -260,14 +247,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
 
        spin_unlock_bh(&nn->reconfig_lock);
 
-       if (cancelled_timer)
+       if (cancelled_timer) {
+               del_timer_sync(&nn->reconfig_timer);
                nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
+       }
 
        /* Run the posted reconfigs which were issued before we started */
        if (pre_posted_requests) {
                nfp_net_reconfig_start(nn, pre_posted_requests);
                nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
        }
+}
+
+static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
+{
+       nfp_net_reconfig_sync_enter(nn);
+
+       spin_lock_bh(&nn->reconfig_lock);
+       nn->reconfig_sync_present = false;
+       spin_unlock_bh(&nn->reconfig_lock);
+}
+
+/**
+ * nfp_net_reconfig() - Reconfigure the firmware
+ * @nn:      NFP Net device to reconfigure
+ * @update:  The value for the update field in the BAR config
+ *
+ * Write the update word to the BAR and ping the reconfig queue.  The
+ * poll until the firmware has acknowledged the update by zeroing the
+ * update word.
+ *
+ * Return: Negative errno on error, 0 on success
+ */
+int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+{
+       int ret;
+
+       nfp_net_reconfig_sync_enter(nn);
 
        nfp_net_reconfig_start(nn, update);
        ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
@@ -3633,6 +3649,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
  */
 void nfp_net_free(struct nfp_net *nn)
 {
+       WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
        if (nn->dp.netdev)
                free_netdev(nn->dp.netdev);
        else
@@ -3920,4 +3937,5 @@ void nfp_net_clean(struct nfp_net *nn)
                return;
 
        unregister_netdev(nn->dp.netdev);
+       nfp_net_reconfig_wait_posted(nn);
 }
index 0efa977c422dd5a32a8a241a5c32b6fc2cd71664..b08d51bf7a20482cece8df7c95e6e861cfbffe4f 100644 (file)
@@ -218,6 +218,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8161), 0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8167), 0, 0, RTL_CFG_0 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8168), 0, 0, RTL_CFG_1 },
+       { PCI_DEVICE(PCI_VENDOR_ID_NCUBE,       0x8168), 0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8169), 0, 0, RTL_CFG_0 },
        { PCI_VENDOR_ID_DLINK,                  0x4300,
                PCI_VENDOR_ID_DLINK, 0x4b10,             0, 0, RTL_CFG_1 },
@@ -4522,7 +4523,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
        rtl_hw_reset(tp);
 }
 
-static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
+static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
 {
        /* Set DMA burst size and Interframe Gap Time */
        RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
@@ -4633,12 +4634,14 @@ static void rtl_hw_start(struct  rtl8169_private *tp)
 
        rtl_set_rx_max_size(tp);
        rtl_set_rx_tx_desc_registers(tp);
-       rtl_set_rx_tx_config_registers(tp);
+       rtl_set_tx_config_registers(tp);
        RTL_W8(tp, Cfg9346, Cfg9346_Lock);
 
        /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
        RTL_R8(tp, IntrMask);
        RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
+       rtl_init_rxcfg(tp);
+
        rtl_set_rx_mode(tp->dev);
        /* no early-rx interrupts */
        RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
index ad4433d592377a9ee2b65753cf61f02735f90518..f27a0dc8c56331db3f7063c251333b04716f0365 100644 (file)
@@ -798,6 +798,41 @@ static struct sh_eth_cpu_data r8a77980_data = {
        .magic          = 1,
        .cexcr          = 1,
 };
+
+/* R7S9210 */
+static struct sh_eth_cpu_data r7s9210_data = {
+       .soft_reset     = sh_eth_soft_reset,
+
+       .set_duplex     = sh_eth_set_duplex,
+       .set_rate       = sh_eth_set_rate_rcar,
+
+       .register_type  = SH_ETH_REG_FAST_SH4,
+
+       .edtrr_trns     = EDTRR_TRNS_ETHER,
+       .ecsr_value     = ECSR_ICD,
+       .ecsipr_value   = ECSIPR_ICDIP,
+       .eesipr_value   = EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP |
+                         EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP |
+                         EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP |
+                         EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP |
+                         EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
+                         EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP |
+                         EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP,
+
+       .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
+       .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
+
+       .fdr_value      = 0x0000070f,
+
+       .apr            = 1,
+       .mpr            = 1,
+       .tpauser        = 1,
+       .hw_swap        = 1,
+       .rpadir         = 1,
+       .no_ade         = 1,
+       .xdfar_rw       = 1,
+};
 #endif /* CONFIG_OF */
 
 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -3121,6 +3156,7 @@ static const struct of_device_id sh_eth_match_table[] = {
        { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
        { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
        { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
+       { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
        { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
        { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
        { }
index bf4acebb6bcddd8807041a75c000251bbc28f19d..324049eebb9b140a1ca9a2c65de1eaf82048d263 100644 (file)
@@ -110,7 +110,7 @@ config DWMAC_ROCKCHIP
 
 config DWMAC_SOCFPGA
        tristate "SOCFPGA dwmac support"
-       default ARCH_SOCFPGA
+       default (ARCH_SOCFPGA || ARCH_STRATIX10)
        depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
        select MFD_SYSCON
        help
index 76649adf8fb0639e66094cb29f83ed58f124bf2c..c0a855b7ab3b4304a0ba734117ee63d903701df8 100644 (file)
@@ -112,7 +112,6 @@ struct stmmac_priv {
        u32 tx_count_frames;
        u32 tx_coal_frames;
        u32 tx_coal_timer;
-       bool tx_timer_armed;
 
        int tx_coalesce;
        int hwts_tx_en;
index ff1ffb46198a7614bb282e992c8e0a3f58052893..9f458bb16f2a6edb6ab8cca33c00176b1cce1d67 100644 (file)
@@ -3147,16 +3147,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         * element in case of no SG.
         */
        priv->tx_count_frames += nfrags + 1;
-       if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
-           !priv->tx_timer_armed) {
+       if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
                mod_timer(&priv->txtimer,
                          STMMAC_COAL_TIMER(priv->tx_coal_timer));
-               priv->tx_timer_armed = true;
        } else {
                priv->tx_count_frames = 0;
                stmmac_set_tx_ic(priv, desc);
                priv->xstats.tx_set_ic_bit++;
-               priv->tx_timer_armed = false;
        }
 
        skb_tx_timestamp(skb);
index 0c1adad7415da7d9b858925d0ec5715e9ca7dfec..396e1cd1066796b815cedf4ab172215aaf238f33 100644 (file)
@@ -170,10 +170,13 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
        struct device_node *node;
        struct cpsw_phy_sel_priv *priv;
 
-       node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+       node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0);
        if (!node) {
-               dev_err(dev, "Phy mode driver DT not found\n");
-               return;
+               node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+               if (!node) {
+                       dev_err(dev, "Phy mode driver DT not found\n");
+                       return;
+               }
        }
 
        dev = bus_find_device(&platform_bus_type, NULL, node, match);
index 1121a1ec407cd0951e938572fd27c4fa93ca3c85..70921bbe0e28b448256d18cd8897eba9ae731c31 100644 (file)
@@ -2206,6 +2206,16 @@ static int netvsc_probe(struct hv_device *dev,
 
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
+       /* We must get rtnl lock before scheduling nvdev->subchan_work,
+        * otherwise netvsc_subchan_work() can get rtnl lock first and wait
+        * all subchannels to show up, but that may not happen because
+        * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
+        * -> ... -> device_add() -> ... -> __device_attach() can't get
+        * the device lock, so all the subchannels can't be processed --
+        * finally netvsc_subchan_work() hangs for ever.
+        */
+       rtnl_lock();
+
        if (nvdev->num_chn > 1)
                schedule_work(&nvdev->subchan_work);
 
@@ -2224,7 +2234,6 @@ static int netvsc_probe(struct hv_device *dev,
        else
                net->max_mtu = ETH_DATA_LEN;
 
-       rtnl_lock();
        ret = register_netdevice(net);
        if (ret != 0) {
                pr_err("Unable to register netdev.\n");
index 4637d980310e1503fc314004d18906ee2c1ac50f..52fffb98fde9ac3fd05c7f6fd8e5dc123ecae341 100644 (file)
@@ -398,7 +398,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
        switch (type) {
        case hwmon_temp:
                switch (attr) {
-               case hwmon_temp_input:
                case hwmon_temp_min_alarm:
                case hwmon_temp_max_alarm:
                case hwmon_temp_lcrit_alarm:
@@ -407,13 +406,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_temp_max:
                case hwmon_temp_lcrit:
                case hwmon_temp_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_temp_input:
                        return 0444;
                default:
                        return 0;
                }
        case hwmon_in:
                switch (attr) {
-               case hwmon_in_input:
                case hwmon_in_min_alarm:
                case hwmon_in_max_alarm:
                case hwmon_in_lcrit_alarm:
@@ -422,13 +424,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_in_max:
                case hwmon_in_lcrit:
                case hwmon_in_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_in_input:
                        return 0444;
                default:
                        return 0;
                }
        case hwmon_curr:
                switch (attr) {
-               case hwmon_curr_input:
                case hwmon_curr_min_alarm:
                case hwmon_curr_max_alarm:
                case hwmon_curr_lcrit_alarm:
@@ -437,6 +442,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_curr_max:
                case hwmon_curr_lcrit:
                case hwmon_curr_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_curr_input:
                        return 0444;
                default:
                        return 0;
@@ -452,7 +461,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                    channel == 1)
                        return 0;
                switch (attr) {
-               case hwmon_power_input:
                case hwmon_power_min_alarm:
                case hwmon_power_max_alarm:
                case hwmon_power_lcrit_alarm:
@@ -461,6 +469,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_power_max:
                case hwmon_power_lcrit:
                case hwmon_power_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_power_input:
                        return 0444;
                default:
                        return 0;
index b4c3a957c102d2463fde9d4987dcdf3764e2a694..73969dbeb5c5ff390709dea24cbd61da794d9a3a 100644 (file)
@@ -985,15 +985,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
                             iwl_ext_nvm_channels : iwl_nvm_channels;
        struct ieee80211_regdomain *regd, *copy_rd;
-       int size_of_regd, regd_to_copy, wmms_to_copy;
-       int size_of_wmms = 0;
+       int size_of_regd, regd_to_copy;
        struct ieee80211_reg_rule *rule;
-       struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm;
        struct regdb_ptrs *regdb_ptrs;
        enum nl80211_band band;
        int center_freq, prev_center_freq = 0;
-       int valid_rules = 0, n_wmms = 0;
-       int i;
+       int valid_rules = 0;
        bool new_rule;
        int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
                         IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
@@ -1012,11 +1009,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                sizeof(struct ieee80211_regdomain) +
                num_of_ch * sizeof(struct ieee80211_reg_rule);
 
-       if (geo_info & GEO_WMM_ETSI_5GHZ_INFO)
-               size_of_wmms =
-                       num_of_ch * sizeof(struct ieee80211_wmm_rule);
-
-       regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
+       regd = kzalloc(size_of_regd, GFP_KERNEL);
        if (!regd)
                return ERR_PTR(-ENOMEM);
 
@@ -1030,8 +1023,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        regd->alpha2[0] = fw_mcc >> 8;
        regd->alpha2[1] = fw_mcc & 0xff;
 
-       wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
-
        for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
                ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
                band = (ch_idx < NUM_2GHZ_CHANNELS) ?
@@ -1085,26 +1076,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                    band == NL80211_BAND_2GHZ)
                        continue;
 
-               if (!reg_query_regdb_wmm(regd->alpha2, center_freq,
-                                        &regdb_ptrs[n_wmms].token, wmm_rule)) {
-                       /* Add only new rules */
-                       for (i = 0; i < n_wmms; i++) {
-                               if (regdb_ptrs[i].token ==
-                                   regdb_ptrs[n_wmms].token) {
-                                       rule->wmm_rule = regdb_ptrs[i].rule;
-                                       break;
-                               }
-                       }
-                       if (i == n_wmms) {
-                               rule->wmm_rule = wmm_rule;
-                               regdb_ptrs[n_wmms++].rule = wmm_rule;
-                               wmm_rule++;
-                       }
-               }
+               reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
        }
 
        regd->n_reg_rules = valid_rules;
-       regd->n_wmm_rules = n_wmms;
 
        /*
         * Narrow down regdom for unused regulatory rules to prevent hole
@@ -1113,28 +1088,13 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        regd_to_copy = sizeof(struct ieee80211_regdomain) +
                valid_rules * sizeof(struct ieee80211_reg_rule);
 
-       wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms;
-
-       copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL);
+       copy_rd = kzalloc(regd_to_copy, GFP_KERNEL);
        if (!copy_rd) {
                copy_rd = ERR_PTR(-ENOMEM);
                goto out;
        }
 
        memcpy(copy_rd, regd, regd_to_copy);
-       memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd,
-              wmms_to_copy);
-
-       d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy);
-       s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
-
-       for (i = 0; i < regd->n_reg_rules; i++) {
-               if (!regd->reg_rules[i].wmm_rule)
-                       continue;
-
-               copy_rd->reg_rules[i].wmm_rule = d_wmm +
-                       (regd->reg_rules[i].wmm_rule - s_wmm);
-       }
 
 out:
        kfree(regdb_ptrs);
index 998dfac0fcff359d3727fb5df313e257484378b6..1068757ec42e4784942e69c00161ae4e1ee16548 100644 (file)
@@ -34,6 +34,7 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <linux/rhashtable.h>
+#include <linux/nospec.h>
 #include "mac80211_hwsim.h"
 
 #define WARN_QUEUE 100
@@ -2820,9 +2821,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                                IEEE80211_VHT_CAP_SHORT_GI_80 |
                                IEEE80211_VHT_CAP_SHORT_GI_160 |
                                IEEE80211_VHT_CAP_TXSTBC |
-                               IEEE80211_VHT_CAP_RXSTBC_1 |
-                               IEEE80211_VHT_CAP_RXSTBC_2 |
-                               IEEE80211_VHT_CAP_RXSTBC_3 |
                                IEEE80211_VHT_CAP_RXSTBC_4 |
                                IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
                        sband->vht_cap.vht_mcs.rx_mcs_map =
@@ -3317,6 +3315,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
        if (info->attrs[HWSIM_ATTR_CHANNELS])
                param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
 
+       if (param.channels < 1) {
+               GENL_SET_ERR_MSG(info, "must have at least one channel");
+               return -EINVAL;
+       }
+
        if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
                GENL_SET_ERR_MSG(info, "too many channels specified");
                return -EINVAL;
@@ -3350,6 +3353,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
                        kfree(hwname);
                        return -EINVAL;
                }
+
+               idx = array_index_nospec(idx,
+                                        ARRAY_SIZE(hwsim_world_regdom_custom));
                param.regd = hwsim_world_regdom_custom[idx];
        }
 
index 7ba90c290a428186322cc0128ae4a7afbae87748..6c59673933e90817dea1023f3e200c486f82fdde 100644 (file)
@@ -241,6 +241,10 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
        if (!dev)
                goto err_clear_flag;
 
+       /* AMBA devices only support a single DMA mask */
+       dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+
        /* setup generic device info */
        dev->dev.of_node = of_node_get(node);
        dev->dev.fwnode = &node->fwnode;
index 8fc851a9e1162a5695c1a344445b3fad7685f4dc..7c097006c54db679c40ebd51ca5efcc8d8dc297d 100644 (file)
@@ -52,12 +52,12 @@ config SCSI_MQ_DEFAULT
        default y
        depends on SCSI
        ---help---
-         This option enables the new blk-mq based I/O path for SCSI
-         devices by default.  With the option the scsi_mod.use_blk_mq
-         module/boot option defaults to Y, without it to N, but it can
-         still be overridden either way.
+         This option enables the blk-mq based I/O path for SCSI devices by
+         default.  With this option the scsi_mod.use_blk_mq module/boot
+         option defaults to Y, without it to N, but it can still be
+         overridden either way.
 
-         If unsure say N.
+         If unsure say Y.
 
 config SCSI_PROC_FS
        bool "legacy /proc/scsi/ support"
index 29bf1e60f5428ab4542cf0bd838342bd8d405f8b..39eb415987fc9fd901ba9ee157dedfcf76910829 100644 (file)
@@ -1346,7 +1346,7 @@ struct fib {
 struct aac_hba_map_info {
        __le32  rmw_nexus;              /* nexus for native HBA devices */
        u8              devtype;        /* device type */
-       u8              reset_state;    /* 0 - no reset, 1..x - */
+       s8              reset_state;    /* 0 - no reset, 1..x - */
                                        /* after xth TM LUN reset */
        u16             qd_limit;
        u32             scan_counter;
index 23d07e9f87d0d1e0357547c4f168ab966a0a809e..e5192388647580903b45f885c7995a5b8bbe4bbb 100644 (file)
@@ -1601,6 +1601,46 @@ fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
        return caps32;
 }
 
+/**
+ *     fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
+ *     @caps32: a 32-bit Port Capabilities value
+ *
+ *     Returns the equivalent 16-bit Port Capabilities value.  Note that
+ *     not all 32-bit Port Capabilities can be represented in the 16-bit
+ *     Port Capabilities and some fields/values may not make it.
+ */
+fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
+{
+       fw_port_cap16_t caps16 = 0;
+
+       #define CAP32_TO_CAP16(__cap) \
+               do { \
+                       if (caps32 & FW_PORT_CAP32_##__cap) \
+                               caps16 |= FW_PORT_CAP_##__cap; \
+               } while (0)
+
+       CAP32_TO_CAP16(SPEED_100M);
+       CAP32_TO_CAP16(SPEED_1G);
+       CAP32_TO_CAP16(SPEED_10G);
+       CAP32_TO_CAP16(SPEED_25G);
+       CAP32_TO_CAP16(SPEED_40G);
+       CAP32_TO_CAP16(SPEED_100G);
+       CAP32_TO_CAP16(FC_RX);
+       CAP32_TO_CAP16(FC_TX);
+       CAP32_TO_CAP16(802_3_PAUSE);
+       CAP32_TO_CAP16(802_3_ASM_DIR);
+       CAP32_TO_CAP16(ANEG);
+       CAP32_TO_CAP16(FORCE_PAUSE);
+       CAP32_TO_CAP16(MDIAUTO);
+       CAP32_TO_CAP16(MDISTRAIGHT);
+       CAP32_TO_CAP16(FEC_RS);
+       CAP32_TO_CAP16(FEC_BASER_RS);
+
+       #undef CAP32_TO_CAP16
+
+       return caps16;
+}
+
 /**
  *      lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
  *      @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
@@ -1759,7 +1799,7 @@ csio_enable_ports(struct csio_hw *hw)
                        val = 1;
 
                        csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO,
-                                      hw->pfn, 0, 1, &param, &val, false,
+                                      hw->pfn, 0, 1, &param, &val, true,
                                       NULL);
 
                        if (csio_mb_issue(hw, mbp)) {
@@ -1769,16 +1809,9 @@ csio_enable_ports(struct csio_hw *hw)
                                return -EINVAL;
                        }
 
-                       csio_mb_process_read_params_rsp(hw, mbp, &retval, 1,
-                                                       &val);
-                       if (retval != FW_SUCCESS) {
-                               csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n",
-                                        portid, retval);
-                               mempool_free(mbp, hw->mb_mempool);
-                               return -EINVAL;
-                       }
-
-                       fw_caps = val;
+                       csio_mb_process_read_params_rsp(hw, mbp, &retval,
+                                                       0, NULL);
+                       fw_caps = retval ? FW_CAPS16 : FW_CAPS32;
                }
 
                /* Read PORT information */
@@ -2364,8 +2397,8 @@ bye:
 }
 
 /*
- * Returns -EINVAL if attempts to flash the firmware failed
- * else returns 0,
+ * Returns -EINVAL if attempts to flash the firmware failed,
+ * -ENOMEM if memory allocation failed else returns 0,
  * if flashing was not attempted because the card had the
  * latest firmware ECANCELED is returned
  */
@@ -2393,6 +2426,13 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
                return -EINVAL;
        }
 
+       /* allocate memory to read the header of the firmware on the
+        * card
+        */
+       card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
+       if (!card_fw)
+               return -ENOMEM;
+
        if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
                fw_bin_file = FW_FNAME_T5;
        else
@@ -2406,11 +2446,6 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
                fw_size = fw->size;
        }
 
-       /* allocate memory to read the header of the firmware on the
-        * card
-        */
-       card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
-
        /* upgrade FW logic */
        ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
                         hw->fw_state, reset);
index 9e73ef771eb739a8158bedc3d1d4d86b512e0749..e351af6e7c81c1d5fca398425dca662a49493981 100644 (file)
@@ -639,6 +639,7 @@ int csio_handle_intr_status(struct csio_hw *, unsigned int,
 
 fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps);
 fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16);
+fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32);
 fw_port_cap32_t lstatus_to_fwcap(u32 lstatus);
 
 int csio_hw_start(struct csio_hw *);
index c026417269c3c9f191ff948f1a0a3912d87052d8..6f13673d6aa054e4b2e71ec240215b3ad35dc6ee 100644 (file)
@@ -368,7 +368,7 @@ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
                        FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
        if (fw_caps == FW_CAPS16)
-               cmdp->u.l1cfg.rcap = cpu_to_be32(fc);
+               cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc));
        else
                cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc);
 }
@@ -395,8 +395,8 @@ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
                        *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap));
                        *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap));
                } else {
-                       *pcaps = ntohs(rsp->u.info32.pcaps32);
-                       *acaps = ntohs(rsp->u.info32.acaps32);
+                       *pcaps = be32_to_cpu(rsp->u.info32.pcaps32);
+                       *acaps = be32_to_cpu(rsp->u.info32.acaps32);
                }
        }
 }
index f02dcc875a09a6c6ece63e89e8256c512e11f01e..ea4b0bb0c1cd4e6b12748256741afae339311591 100644 (file)
@@ -563,35 +563,13 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(scsi_host_get);
 
-struct scsi_host_mq_in_flight {
-       int cnt;
-};
-
-static void scsi_host_check_in_flight(struct request *rq, void *data,
-               bool reserved)
-{
-       struct scsi_host_mq_in_flight *in_flight = data;
-
-       if (blk_mq_request_started(rq))
-               in_flight->cnt++;
-}
-
 /**
  * scsi_host_busy - Return the host busy counter
  * @shost:     Pointer to Scsi_Host to inc.
  **/
 int scsi_host_busy(struct Scsi_Host *shost)
 {
-       struct scsi_host_mq_in_flight in_flight = {
-               .cnt = 0,
-       };
-
-       if (!shost->use_blk_mq)
-               return atomic_read(&shost->host_busy);
-
-       blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight,
-                       &in_flight);
-       return in_flight.cnt;
+       return atomic_read(&shost->host_busy);
 }
 EXPORT_SYMBOL(scsi_host_busy);
 
index 58bb70b886d70d714ee6b448843aa113c2b313c0..c120929d4ffe52f3f24664226b7120b9057a2a1d 100644 (file)
@@ -976,7 +976,7 @@ static struct scsi_host_template hpsa_driver_template = {
 #endif
        .sdev_attrs = hpsa_sdev_attrs,
        .shost_attrs = hpsa_shost_attrs,
-       .max_sectors = 1024,
+       .max_sectors = 2048,
        .no_write_same = 1,
 };
 
index e0d0da5f43d6114e548974062d8a3118fdf384c3..43732e8d13473f84b88945072d7156d7a2982279 100644 (file)
@@ -672,7 +672,7 @@ struct lpfc_hba {
 #define LS_NPIV_FAB_SUPPORTED 0x2      /* Fabric supports NPIV */
 #define LS_IGNORE_ERATT       0x4      /* intr handler should ignore ERATT */
 #define LS_MDS_LINK_DOWN      0x8      /* MDS Diagnostics Link Down */
-#define LS_MDS_LOOPBACK      0x16      /* MDS Diagnostics Link Up (Loopback) */
+#define LS_MDS_LOOPBACK      0x10      /* MDS Diagnostics Link Up (Loopback) */
 
        uint32_t hba_flag;      /* hba generic flags */
 #define HBA_ERATT_HANDLED      0x1 /* This flag is set when eratt handled */
index 5a25553415f8bbb922883e9bbc8af18daebdda52..057a60abe664d269af64a2e85a4cea2faf740cfc 100644 (file)
@@ -5122,16 +5122,16 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
 
 /*
 # lpfc_fdmi_on: Controls FDMI support.
-#       0       No FDMI support (default)
-#       1       Traditional FDMI support
+#       0       No FDMI support
+#       1       Traditional FDMI support (default)
 # Traditional FDMI support means the driver will assume FDMI-2 support;
 # however, if that fails, it will fallback to FDMI-1.
 # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
 # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of
 # lpfc_fdmi_on.
-# Value range [0,1]. Default value is 0.
+# Value range [0,1]. Default value is 1.
 */
-LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support");
+LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
 
 /*
 # Specifies the maximum number of ELS cmds we can have outstanding (for
index 0adfb3bce0fd677ddae9f9d806472ccf2077ce25..eb97d2dd36516d0a3a5c0a9db1f255aaf5a9c56f 100644 (file)
@@ -345,8 +345,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
        unsigned long flags;
 
        rcu_read_lock();
-       if (!shost->use_blk_mq)
-               atomic_dec(&shost->host_busy);
+       atomic_dec(&shost->host_busy);
        if (unlikely(scsi_host_in_recovery(shost))) {
                spin_lock_irqsave(shost->host_lock, flags);
                if (shost->host_failed || shost->host_eh_scheduled)
@@ -445,12 +444,7 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
 
 static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
 {
-       /*
-        * blk-mq can handle host queue busy efficiently via host-wide driver
-        * tag allocation
-        */
-
-       if (!shost->use_blk_mq && shost->can_queue > 0 &&
+       if (shost->can_queue > 0 &&
            atomic_read(&shost->host_busy) >= shost->can_queue)
                return true;
        if (atomic_read(&shost->host_blocked) > 0)
@@ -1606,10 +1600,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
        if (scsi_host_in_recovery(shost))
                return 0;
 
-       if (!shost->use_blk_mq)
-               busy = atomic_inc_return(&shost->host_busy) - 1;
-       else
-               busy = 0;
+       busy = atomic_inc_return(&shost->host_busy) - 1;
        if (atomic_read(&shost->host_blocked) > 0) {
                if (busy)
                        goto starved;
@@ -1625,7 +1616,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
                                     "unblocking host at zero depth\n"));
        }
 
-       if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue)
+       if (shost->can_queue > 0 && busy >= shost->can_queue)
                goto starved;
        if (shost->host_self_blocked)
                goto starved;
@@ -1711,9 +1702,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
         * with the locks as normal issue path does.
         */
        atomic_inc(&sdev->device_busy);
-
-       if (!shost->use_blk_mq)
-               atomic_inc(&shost->host_busy);
+       atomic_inc(&shost->host_busy);
        if (starget->can_queue > 0)
                atomic_inc(&starget->target_busy);
 
index 768cce0ccb807518f32b3d72d875915e9391fdfd..76a262674c8dc7c4563fba8f6b570b6155d2d182 100644 (file)
@@ -207,8 +207,8 @@ cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
        ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
        sgl->offset = sg_offset;
        if (!ret) {
-               pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
-                       __func__, 0, xferlen, sgcnt);
+               pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
+                        __func__, 0, xferlen, sgcnt);
                goto rel_ppods;
        }
 
@@ -250,8 +250,8 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
        ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
        if (ret < 0) {
-               pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
-                       csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
+               pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
+                        csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
 
                ttinfo->sgl = NULL;
                ttinfo->nents = 0;
index 0c3285c8db95b4ec6457fdfe759e1997c9c69a7f..476dcbb79713d20d12023dfb265a0ef62e48e6fc 100644 (file)
@@ -98,13 +98,13 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
                goto inval;
 
        args = strchr(name, ' ');
-       if (!args)
-               goto inval;
-       do {
-               *args++ = 0;
-       } while(*args == ' ');
-       if (!*args)
-               goto inval;
+       if (args) {
+               do {
+                       *args++ = 0;
+               } while(*args == ' ');
+               if (!*args)
+                       goto inval;
+       }
 
        /* determine command to perform */
        _debug("cmd=%s name=%s args=%s", buf, name, args);
@@ -120,7 +120,6 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
 
                if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
                        afs_put_cell(net, cell);
-               printk("kAFS: Added new cell '%s'\n", name);
        } else {
                goto inval;
        }
index 53af9f5253f4fc5556e568b9b78397bba61e00cc..2cddfe7806a412e4ecf26a61887fe4474ab380de 100644 (file)
@@ -1280,6 +1280,7 @@ struct btrfs_root {
        int send_in_progress;
        struct btrfs_subvolume_writers *subv_writers;
        atomic_t will_be_snapshotted;
+       atomic_t snapshot_force_cow;
 
        /* For qgroup metadata reserved space */
        spinlock_t qgroup_meta_rsv_lock;
@@ -3390,9 +3391,9 @@ do {                                                                      \
 #define btrfs_debug(fs_info, fmt, args...) \
        btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_in_rcu(fs_info, fmt, args...) \
-       btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
+       btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
-       btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
+       btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_rl(fs_info, fmt, args...) \
        btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
 #endif
@@ -3404,6 +3405,13 @@ do {                                                     \
        rcu_read_unlock();                              \
 } while (0)
 
+#define btrfs_no_printk_in_rcu(fs_info, fmt, args...)  \
+do {                                                   \
+       rcu_read_lock();                                \
+       btrfs_no_printk(fs_info, fmt, ##args);          \
+       rcu_read_unlock();                              \
+} while (0)
+
 #define btrfs_printk_ratelimited(fs_info, fmt, args...)                \
 do {                                                           \
        static DEFINE_RATELIMIT_STATE(_rs,                      \
index 5124c15705ce777ef88950529343310c389686df..05dc3c17cb62aa38dc7a18adc886475ac22fd80b 100644 (file)
@@ -1187,6 +1187,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
        atomic_set(&root->log_batch, 0);
        refcount_set(&root->refs, 1);
        atomic_set(&root->will_be_snapshotted, 0);
+       atomic_set(&root->snapshot_force_cow, 0);
        root->log_transid = 0;
        root->log_transid_committed = -1;
        root->last_log_commit = 0;
index de6f75f5547bdb159cf0764e5d4bfaf19a2c1085..2d9074295d7f0bb327010feeccffb1630c455477 100644 (file)
@@ -5800,7 +5800,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
  * root: the root of the parent directory
  * rsv: block reservation
  * items: the number of items that we need do reservation
- * qgroup_reserved: used to return the reserved size in qgroup
+ * use_global_rsv: allow fallback to the global block reservation
  *
  * This function is used to reserve the space for snapshot/subvolume
  * creation and deletion. Those operations are different with the
@@ -5810,10 +5810,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
  * the space reservation mechanism in start_transaction().
  */
 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
-                                    struct btrfs_block_rsv *rsv,
-                                    int items,
+                                    struct btrfs_block_rsv *rsv, int items,
                                     bool use_global_rsv)
 {
+       u64 qgroup_num_bytes = 0;
        u64 num_bytes;
        int ret;
        struct btrfs_fs_info *fs_info = root->fs_info;
@@ -5821,12 +5821,11 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
 
        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
                /* One for parent inode, two for dir entries */
-               num_bytes = 3 * fs_info->nodesize;
-               ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
+               qgroup_num_bytes = 3 * fs_info->nodesize;
+               ret = btrfs_qgroup_reserve_meta_prealloc(root,
+                               qgroup_num_bytes, true);
                if (ret)
                        return ret;
-       } else {
-               num_bytes = 0;
        }
 
        num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
@@ -5838,8 +5837,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
        if (ret == -ENOSPC && use_global_rsv)
                ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
 
-       if (ret && num_bytes)
-               btrfs_qgroup_free_meta_prealloc(root, num_bytes);
+       if (ret && qgroup_num_bytes)
+               btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
 
        return ret;
 }
index 9357a19d2bff2c76de1b29d2e221333b6f76da7e..3ea5339603cff14f8dc103f6561091e83dc0b81a 100644 (file)
@@ -1271,7 +1271,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
        u64 disk_num_bytes;
        u64 ram_bytes;
        int extent_type;
-       int ret, err;
+       int ret;
        int type;
        int nocow;
        int check_prev = 1;
@@ -1403,11 +1403,8 @@ next_slot:
                         * if there are pending snapshots for this root,
                         * we fall into common COW way.
                         */
-                       if (!nolock) {
-                               err = btrfs_start_write_no_snapshotting(root);
-                               if (!err)
-                                       goto out_check;
-                       }
+                       if (!nolock && atomic_read(&root->snapshot_force_cow))
+                               goto out_check;
                        /*
                         * force cow if csum exists in the range.
                         * this ensure that csum for a given extent are
@@ -1416,9 +1413,6 @@ next_slot:
                        ret = csum_exist_in_range(fs_info, disk_bytenr,
                                                  num_bytes);
                        if (ret) {
-                               if (!nolock)
-                                       btrfs_end_write_no_snapshotting(root);
-
                                /*
                                 * ret could be -EIO if the above fails to read
                                 * metadata.
@@ -1431,11 +1425,8 @@ next_slot:
                                WARN_ON_ONCE(nolock);
                                goto out_check;
                        }
-                       if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) {
-                               if (!nolock)
-                                       btrfs_end_write_no_snapshotting(root);
+                       if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
                                goto out_check;
-                       }
                        nocow = 1;
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = found_key.offset +
@@ -1448,8 +1439,6 @@ next_slot:
 out_check:
                if (extent_end <= start) {
                        path->slots[0]++;
-                       if (!nolock && nocow)
-                               btrfs_end_write_no_snapshotting(root);
                        if (nocow)
                                btrfs_dec_nocow_writers(fs_info, disk_bytenr);
                        goto next_slot;
@@ -1471,8 +1460,6 @@ out_check:
                                             end, page_started, nr_written, 1,
                                             NULL);
                        if (ret) {
-                               if (!nolock && nocow)
-                                       btrfs_end_write_no_snapshotting(root);
                                if (nocow)
                                        btrfs_dec_nocow_writers(fs_info,
                                                                disk_bytenr);
@@ -1492,8 +1479,6 @@ out_check:
                                          ram_bytes, BTRFS_COMPRESS_NONE,
                                          BTRFS_ORDERED_PREALLOC);
                        if (IS_ERR(em)) {
-                               if (!nolock && nocow)
-                                       btrfs_end_write_no_snapshotting(root);
                                if (nocow)
                                        btrfs_dec_nocow_writers(fs_info,
                                                                disk_bytenr);
@@ -1532,8 +1517,6 @@ out_check:
                                             EXTENT_CLEAR_DATA_RESV,
                                             PAGE_UNLOCK | PAGE_SET_PRIVATE2);
 
-               if (!nolock && nocow)
-                       btrfs_end_write_no_snapshotting(root);
                cur_offset = extent_end;
 
                /*
@@ -6639,6 +6622,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                drop_inode = 1;
        } else {
                struct dentry *parent = dentry->d_parent;
+               int ret;
+
                err = btrfs_update_inode(trans, root, inode);
                if (err)
                        goto fail;
@@ -6652,7 +6637,12 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                                goto fail;
                }
                d_instantiate(dentry, inode);
-               btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
+               ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
+                                        true, NULL);
+               if (ret == BTRFS_NEED_TRANS_COMMIT) {
+                       err = btrfs_commit_transaction(trans);
+                       trans = NULL;
+               }
        }
 
 fail:
@@ -9388,14 +9378,21 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        u64 new_idx = 0;
        u64 root_objectid;
        int ret;
-       int ret2;
        bool root_log_pinned = false;
        bool dest_log_pinned = false;
+       struct btrfs_log_ctx ctx_root;
+       struct btrfs_log_ctx ctx_dest;
+       bool sync_log_root = false;
+       bool sync_log_dest = false;
+       bool commit_transaction = false;
 
        /* we only allow rename subvolume link between subvolumes */
        if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
                return -EXDEV;
 
+       btrfs_init_log_ctx(&ctx_root, old_inode);
+       btrfs_init_log_ctx(&ctx_dest, new_inode);
+
        /* close the race window with snapshot create/destroy ioctl */
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                down_read(&fs_info->subvol_sem);
@@ -9542,15 +9539,29 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 
        if (root_log_pinned) {
                parent = new_dentry->d_parent;
-               btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
-                               parent);
+               ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
+                                        BTRFS_I(old_dir), parent,
+                                        false, &ctx_root);
+               if (ret == BTRFS_NEED_LOG_SYNC)
+                       sync_log_root = true;
+               else if (ret == BTRFS_NEED_TRANS_COMMIT)
+                       commit_transaction = true;
+               ret = 0;
                btrfs_end_log_trans(root);
                root_log_pinned = false;
        }
        if (dest_log_pinned) {
-               parent = old_dentry->d_parent;
-               btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir),
-                               parent);
+               if (!commit_transaction) {
+                       parent = old_dentry->d_parent;
+                       ret = btrfs_log_new_name(trans, BTRFS_I(new_inode),
+                                                BTRFS_I(new_dir), parent,
+                                                false, &ctx_dest);
+                       if (ret == BTRFS_NEED_LOG_SYNC)
+                               sync_log_dest = true;
+                       else if (ret == BTRFS_NEED_TRANS_COMMIT)
+                               commit_transaction = true;
+                       ret = 0;
+               }
                btrfs_end_log_trans(dest);
                dest_log_pinned = false;
        }
@@ -9583,8 +9594,26 @@ out_fail:
                        dest_log_pinned = false;
                }
        }
-       ret2 = btrfs_end_transaction(trans);
-       ret = ret ? ret : ret2;
+       if (!ret && sync_log_root && !commit_transaction) {
+               ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root,
+                                    &ctx_root);
+               if (ret)
+                       commit_transaction = true;
+       }
+       if (!ret && sync_log_dest && !commit_transaction) {
+               ret = btrfs_sync_log(trans, BTRFS_I(new_inode)->root,
+                                    &ctx_dest);
+               if (ret)
+                       commit_transaction = true;
+       }
+       if (commit_transaction) {
+               ret = btrfs_commit_transaction(trans);
+       } else {
+               int ret2;
+
+               ret2 = btrfs_end_transaction(trans);
+               ret = ret ? ret : ret2;
+       }
 out_notrans:
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);
@@ -9661,6 +9690,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        int ret;
        u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
        bool log_pinned = false;
+       struct btrfs_log_ctx ctx;
+       bool sync_log = false;
+       bool commit_transaction = false;
 
        if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
                return -EPERM;
@@ -9818,8 +9850,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (log_pinned) {
                struct dentry *parent = new_dentry->d_parent;
 
-               btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
-                               parent);
+               btrfs_init_log_ctx(&ctx, old_inode);
+               ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
+                                        BTRFS_I(old_dir), parent,
+                                        false, &ctx);
+               if (ret == BTRFS_NEED_LOG_SYNC)
+                       sync_log = true;
+               else if (ret == BTRFS_NEED_TRANS_COMMIT)
+                       commit_transaction = true;
+               ret = 0;
                btrfs_end_log_trans(root);
                log_pinned = false;
        }
@@ -9856,7 +9895,19 @@ out_fail:
                btrfs_end_log_trans(root);
                log_pinned = false;
        }
-       btrfs_end_transaction(trans);
+       if (!ret && sync_log) {
+               ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
+               if (ret)
+                       commit_transaction = true;
+       }
+       if (commit_transaction) {
+               ret = btrfs_commit_transaction(trans);
+       } else {
+               int ret2;
+
+               ret2 = btrfs_end_transaction(trans);
+               ret = ret ? ret : ret2;
+       }
 out_notrans:
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);
index 63600dc2ac4cb104d0feb1df1d5b8ccc650d1091..d60b6caf09e857ef7c39e8ce337171d04cca1d72 100644 (file)
@@ -747,6 +747,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
        struct btrfs_pending_snapshot *pending_snapshot;
        struct btrfs_trans_handle *trans;
        int ret;
+       bool snapshot_force_cow = false;
 
        if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
                return -EINVAL;
@@ -763,6 +764,11 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
                goto free_pending;
        }
 
+       /*
+        * Force new buffered writes to reserve space even when NOCOW is
+        * possible. This is to avoid later writeback (running dealloc) to
+        * fallback to COW mode and unexpectedly fail with ENOSPC.
+        */
        atomic_inc(&root->will_be_snapshotted);
        smp_mb__after_atomic();
        /* wait for no snapshot writes */
@@ -773,6 +779,14 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
        if (ret)
                goto dec_and_free;
 
+       /*
+        * All previous writes have started writeback in NOCOW mode, so now
+        * we force future writes to fallback to COW mode during snapshot
+        * creation.
+        */
+       atomic_inc(&root->snapshot_force_cow);
+       snapshot_force_cow = true;
+
        btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
 
        btrfs_init_block_rsv(&pending_snapshot->block_rsv,
@@ -837,6 +851,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 fail:
        btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
 dec_and_free:
+       if (snapshot_force_cow)
+               atomic_dec(&root->snapshot_force_cow);
        if (atomic_dec_and_test(&root->will_be_snapshotted))
                wake_up_var(&root->will_be_snapshotted);
 free_pending:
@@ -3453,6 +3469,25 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
 
                same_lock_start = min_t(u64, loff, dst_loff);
                same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
+       } else {
+               /*
+                * If the source and destination inodes are different, the
+                * source's range end offset matches the source's i_size, that
+                * i_size is not a multiple of the sector size, and the
+                * destination range does not go past the destination's i_size,
+                * we must round down the length to the nearest sector size
+                * multiple. If we don't do this adjustment we end replacing
+                * with zeroes the bytes in the range that starts at the
+                * deduplication range's end offset and ends at the next sector
+                * size multiple.
+                */
+               if (loff + olen == i_size_read(src) &&
+                   dst_loff + len < i_size_read(dst)) {
+                       const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
+
+                       len = round_down(i_size_read(src), sz) - loff;
+                       olen = len;
+               }
        }
 
 again:
index 4353bb69bb8672267286562c1bdd629fecca4e9b..d4917c0cddf57f11b6fbb600d73e629fcbd14093 100644 (file)
@@ -1019,10 +1019,9 @@ out_add_root:
        spin_unlock(&fs_info->qgroup_lock);
 
        ret = btrfs_commit_transaction(trans);
-       if (ret) {
-               trans = NULL;
+       trans = NULL;
+       if (ret)
                goto out_free_path;
-       }
 
        ret = qgroup_rescan_init(fs_info, 0, 1);
        if (!ret) {
index 1650dc44a5e37e483efebcf1a9e84117b0a1ed6a..3c2ae0e4f25a8de78040f6ca0ca94d070f158dd2 100644 (file)
@@ -6025,14 +6025,25 @@ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
  * Call this after adding a new name for a file and it will properly
  * update the log to reflect the new name.
  *
- * It will return zero if all goes well, and it will return 1 if a
- * full transaction commit is required.
+ * @ctx can not be NULL when @sync_log is false, and should be NULL when it's
+ * true (because it's not used).
+ *
+ * Return value depends on whether @sync_log is true or false.
+ * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
+ *            committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
+ *            otherwise.
+ * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to
+ *             to sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log,
+ *             or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
+ *             committed (without attempting to sync the log).
  */
 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
                        struct btrfs_inode *inode, struct btrfs_inode *old_dir,
-                       struct dentry *parent)
+                       struct dentry *parent,
+                       bool sync_log, struct btrfs_log_ctx *ctx)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
+       int ret;
 
        /*
         * this will force the logging code to walk the dentry chain
@@ -6047,9 +6058,34 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
         */
        if (inode->logged_trans <= fs_info->last_trans_committed &&
            (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
-               return 0;
+               return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
+                       BTRFS_DONT_NEED_LOG_SYNC;
+
+       if (sync_log) {
+               struct btrfs_log_ctx ctx2;
+
+               btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
+               ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
+                                            LOG_INODE_EXISTS, &ctx2);
+               if (ret == BTRFS_NO_LOG_SYNC)
+                       return BTRFS_DONT_NEED_TRANS_COMMIT;
+               else if (ret)
+                       return BTRFS_NEED_TRANS_COMMIT;
+
+               ret = btrfs_sync_log(trans, inode->root, &ctx2);
+               if (ret)
+                       return BTRFS_NEED_TRANS_COMMIT;
+               return BTRFS_DONT_NEED_TRANS_COMMIT;
+       }
+
+       ASSERT(ctx);
+       ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
+                                    LOG_INODE_EXISTS, ctx);
+       if (ret == BTRFS_NO_LOG_SYNC)
+               return BTRFS_DONT_NEED_LOG_SYNC;
+       else if (ret)
+               return BTRFS_NEED_TRANS_COMMIT;
 
-       return btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
-                                     LOG_INODE_EXISTS, NULL);
+       return BTRFS_NEED_LOG_SYNC;
 }
 
index 122e68b89a5ade4d64dcaf7ce887980c17fc9d5e..7ab9bb88a63935664a3d0dc556987f8f08f1561e 100644 (file)
@@ -71,8 +71,16 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
                             int for_rename);
 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
                                   struct btrfs_inode *dir);
+/* Return values for btrfs_log_new_name() */
+enum {
+       BTRFS_DONT_NEED_TRANS_COMMIT,
+       BTRFS_NEED_TRANS_COMMIT,
+       BTRFS_DONT_NEED_LOG_SYNC,
+       BTRFS_NEED_LOG_SYNC,
+};
 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
                        struct btrfs_inode *inode, struct btrfs_inode *old_dir,
-                       struct dentry *parent);
+                       struct dentry *parent,
+                       bool sync_log, struct btrfs_log_ctx *ctx);
 
 #endif
index da86706123ffa4d85ce19148be2157edb9d7d3a4..f4405e430da6e003306e1a4d471731e1a13c9230 100644 (file)
@@ -4491,7 +4491,12 @@ again:
 
        /* Now btrfs_update_device() will change the on-disk size. */
        ret = btrfs_update_device(trans, device);
-       btrfs_end_transaction(trans);
+       if (ret < 0) {
+               btrfs_abort_transaction(trans, ret);
+               btrfs_end_transaction(trans);
+       } else {
+               ret = btrfs_commit_transaction(trans);
+       }
 done:
        btrfs_free_path(path);
        if (ret) {
index 43ca3b763875d43c83ae43e9bd6b4b475493ac46..eab1359d05532afb6960c7acfb5c4fee2891eac9 100644 (file)
@@ -602,6 +602,8 @@ static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
 
 /*
  * create a new fs client
+ *
+ * Success or not, this function consumes @fsopt and @opt.
  */
 static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
                                        struct ceph_options *opt)
@@ -609,17 +611,20 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
        struct ceph_fs_client *fsc;
        int page_count;
        size_t size;
-       int err = -ENOMEM;
+       int err;
 
        fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
-       if (!fsc)
-               return ERR_PTR(-ENOMEM);
+       if (!fsc) {
+               err = -ENOMEM;
+               goto fail;
+       }
 
        fsc->client = ceph_create_client(opt, fsc);
        if (IS_ERR(fsc->client)) {
                err = PTR_ERR(fsc->client);
                goto fail;
        }
+       opt = NULL; /* fsc->client now owns this */
 
        fsc->client->extra_mon_dispatch = extra_mon_dispatch;
        fsc->client->osdc.abort_on_full = true;
@@ -677,6 +682,9 @@ fail_client:
        ceph_destroy_client(fsc->client);
 fail:
        kfree(fsc);
+       if (opt)
+               ceph_destroy_options(opt);
+       destroy_mount_options(fsopt);
        return ERR_PTR(err);
 }
 
@@ -1042,8 +1050,6 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
        fsc = create_fs_client(fsopt, opt);
        if (IS_ERR(fsc)) {
                res = ERR_CAST(fsc);
-               destroy_mount_options(fsopt);
-               ceph_destroy_options(opt);
                goto out_final;
        }
 
index b380e0871372df09635079008c970f6c8b6933c0..a2b2355e7f019c3bac02da2a6ffc3c9786c39f57 100644 (file)
@@ -105,9 +105,6 @@ convert_sfm_char(const __u16 src_char, char *target)
        case SFM_LESSTHAN:
                *target = '<';
                break;
-       case SFM_SLASH:
-               *target = '\\';
-               break;
        case SFM_SPACE:
                *target = ' ';
                break;
index c832a8a1970aabf11001237998df6f4c58208796..7aa08dba4719cde8c88c18bcb5ee4a4ddae733f1 100644 (file)
@@ -2547,7 +2547,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
        if (tcon == NULL)
                return -ENOMEM;
 
-       snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName);
+       snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname);
 
        /* cannot fail */
        nls_codepage = load_nls_default();
index d32eaa4b243767a9c65f0fc19906fdc00e0e4ece..6e8765f445086d2208a567825b5fb6022e564900 100644 (file)
@@ -467,6 +467,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = GENERIC_READ;
        oparms.create_options = CREATE_NOT_DIR;
+       if (backup_cred(cifs_sb))
+               oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
        oparms.disposition = FILE_OPEN;
        oparms.path = path;
        oparms.fid = &fid;
index db0453660ff6c97d9d7ab66991d9b192fadcaed7..6a9c47541c53d0983a068703106ddacdbc834e04 100644 (file)
@@ -248,16 +248,20 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
                 * MacOS server pads after SMB2.1 write response with 3 bytes
                 * of junk. Other servers match RFC1001 len to actual
                 * SMB2/SMB3 frame length (header + smb2 response specific data)
-                * Some windows servers do too when compounding is used.
-                * Log the server error (once), but allow it and continue
+                * Some windows servers also pad up to 8 bytes when compounding.
+                * If pad is longer than eight bytes, log the server behavior
+                * (once), since may indicate a problem but allow it and continue
                 * since the frame is parseable.
                 */
                if (clc_len < len) {
-                       printk_once(KERN_WARNING
-                               "SMB2 server sent bad RFC1001 len %d not %d\n",
-                               len, clc_len);
+                       pr_warn_once(
+                            "srv rsp padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
+                            len, clc_len, command, mid);
                        return 0;
                }
+               pr_warn_once(
+                       "srv rsp too short, len %d not %d. cmd:%d mid:%llu\n",
+                       len, clc_len, command, mid);
 
                return 1;
        }
index 247a98e6c856eb79bd0f8110741efe2ce699099b..d954ce36b4734c06ca63e2fdb0343f6109d2ec57 100644 (file)
@@ -630,7 +630,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -779,7 +782,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_EA;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -858,7 +864,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_WRITE_EA;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -1453,7 +1462,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = fid;
        oparms.reconnect = false;
 
@@ -1857,7 +1869,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -3639,7 +3654,7 @@ struct smb_version_values smb21_values = {
 struct smb_version_values smb3any_values = {
        .version_string = SMB3ANY_VERSION_STRING,
        .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3660,7 +3675,7 @@ struct smb_version_values smb3any_values = {
 struct smb_version_values smbdefault_values = {
        .version_string = SMBDEFAULT_VERSION_STRING,
        .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3681,7 +3696,7 @@ struct smb_version_values smbdefault_values = {
 struct smb_version_values smb30_values = {
        .version_string = SMB30_VERSION_STRING,
        .protocol_id = SMB30_PROT_ID,
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3702,7 +3717,7 @@ struct smb_version_values smb30_values = {
 struct smb_version_values smb302_values = {
        .version_string = SMB302_VERSION_STRING,
        .protocol_id = SMB302_PROT_ID,
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3723,7 +3738,7 @@ struct smb_version_values smb302_values = {
 struct smb_version_values smb311_values = {
        .version_string = SMB311_VERSION_STRING,
        .protocol_id = SMB311_PROT_ID,
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
index 5740aa809be663547de90bf9e71ca863b74555b5..c08acfc77abcf6f5190a0ce65d110a1b2f897875 100644 (file)
@@ -2178,6 +2178,9 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
        if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
            *oplock == SMB2_OPLOCK_LEVEL_NONE)
                req->RequestedOplockLevel = *oplock;
+       else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
+                 (oparms->create_options & CREATE_NOT_FILE))
+               req->RequestedOplockLevel = *oplock; /* no srv lease support */
        else {
                rc = add_lease_context(server, iov, &n_iov,
                                       oparms->fid->lease_key, oplock);
index 03b8ba933eb2a3dcc781eaf9e57594da5a40f48b..235b959fc2b3a706866fd9e96447764641d94536 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * alloc.c - NILFS dat/inode allocator
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Originally written by Koji Sato.
  * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
  */
index 05149e606a78a8dfd8f4a701863eccddb0e4979e..0303c3968cee06d37c25fa18c9a958aa69dea27a 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * alloc.h - persistent object (dat entry/disk inode) allocator/deallocator
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Originally written by Koji Sato.
  * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
  */
index 01fb1831ca250b43172c7b046a2447b590feda0e..fb5a9a8a13cf7d734c0c13f38df097786c5ba5a1 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * bmap.c - NILFS block mapping.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 2b6ffbe5997a2f8bc77e80621999d4f1d651e02f..2c63858e81c9ce089820e9803a7a98f6121c524b 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * bmap.h - NILFS block mapping.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index dec98cab729dd90fdb491e5ac770c8bf99867aec..ebb24a314f43129fce712b87ee5d44e9e38e2979 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * btnode.c - NILFS B-tree node cache
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Originally written by Seiji Kihara.
  * Fully revised by Ryusuke Konishi for stabilization and simplification.
  *
index 4e8aaa1aeb65db70bc1f7fcc9faae7a81f96d09c..0f88dbc9bcb3ef4536ff13a28321d432070a8413 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * btnode.h - NILFS B-tree node cache
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Seiji Kihara.
  * Revised by Ryusuke Konishi.
  */
index 16a7a67a11c9e2098dde76c6eecee10fb1838b47..23e043eca237bd2e760882f5783b28a51558ae64 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * btree.c - NILFS B-tree.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 2184e47fa4bf6ff297feea6caa7fce3fc761d31c..d1421b646ce46bbc4afd851e7b01175f5b8d53be 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * btree.h - NILFS B-tree.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index a15a1601e931dab6139a9f8dcee0428f06e1f485..8d41311b5db4b45b5a1536a10c90015e9e60e01e 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * cpfile.c - NILFS checkpoint file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 6eca972f9673cc676e9469b848567727f1fb61b2..6336222df24a8fee683afca437db55a9c29b3e4c 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * cpfile.h - NILFS checkpoint file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index dffedb2f88179a321b13ea3901c9fe7efd03ea22..6f4066636be9a3eba91d78852246edabdffcff0d 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * dat.c - NILFS disk address translation.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 57dc6cf466d02d3d082fb85125b37338ebfad94c..b17ee34580ae69dc4898785552cca5c83fe56535 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * dat.h - NILFS disk address translation.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 582831ab3eb95dee645907bf4be2402f62087748..81394e22d0a09a1ee88ac45850e5d63e98301001 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * dir.c - NILFS directory entry operations
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Modified for NILFS by Amagai Yoshiji.
  */
 /*
index 96e3ed0d9652b67de00717f0e9a9042b3106620d..533e24ea3a88d208e8e39bf1a14a53852accb907 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * direct.c - NILFS direct block pointer.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index cfe85e848bba1e9e07aea1ea93dad3b3d520b342..ec9a23c77994e76954ef0921c7607c550dc91215 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * direct.h - NILFS direct block pointer.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 7da0fac71dc26a73d33877d9498c8007b17d11e4..64bc81363c6cc0437dd4c757615af395c85b0475 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * file.c - NILFS regular file handling primitives including fsync().
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Amagai Yoshiji and Ryusuke Konishi.
  */
 
index 853a831dcde0890481d45fdfadf89ff8a35b7671..aa3c328ee189c4409c1c4fe497677b2278c12553 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * gcinode.c - dummy inodes to buffer blocks for garbage collection
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi.
  * Revised by Ryusuke Konishi.
  *
index b8fa45c20c63fdbded97c3474ac68438529cfca6..4140d232cadc04dbf65aa6a76b4660f9d30c3537 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * ifile.c - NILFS inode file
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Amagai Yoshiji.
  * Revised by Ryusuke Konishi.
  *
index 188b94fe0ec5fe3f313160b0c8568e269b582253..a1e1e5711a054fd2fc9472234c34dbd88163adb0 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * ifile.h - NILFS inode file
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Amagai Yoshiji.
  * Revised by Ryusuke Konishi.
  *
index 6a612d832e7de41f1273805fd8db2ad0f0433221..671085512e0fde9e8be3274629db21e8c0561b96 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * inode.c - NILFS inode operations.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 1d2c3d7711feb94dea6ce3a0a1540a41a1bef1d2..9b96d79eea6c81247380142fb9db0ef1ad0c51cb 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * ioctl.c - NILFS ioctl operations.
  *
  * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index c6bc1033e7d2ccf3795bde38b18a5787e2644d1d..700870a92bc4a1a2499e2ffef381613f8bad6e1a 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * mdt.c - meta data file for NILFS
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 
index 3f67f3932097b408fd6d7161a90e62170992a71a..e77aea4bb921c37c212558b11137022e796a5bae 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * mdt.h - NILFS meta data file prototype and definitions
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 
index dd52d3f82e8d673bd43dd99dfdfff6658b959357..9fe6d4ab74f01ef3d7055d4bb1ace9a73e441ad7 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * namei.c - NILFS pathname lookup operations.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi.
  */
 /*
index 33f8c8fc96e8e772bbe163da521e7ca2e7fa65a0..a2f247b6a209ec250cb7da2c1cbede10c14cf03f 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * nilfs.h - NILFS local header file.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato and Ryusuke Konishi.
  */
 
index 4cb850a6f1c2c64794449e922c6e40f69aa585b6..329a056b73b178958e71308c35b945a0c39de834 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * page.c - buffer/page management specific to NILFS
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi and Seiji Kihara.
  */
 
index f3687c958fa848c20a3d17cab71b5a0da77de5d1..62b9bb469e92f3e58bd2378a8da979acb8364466 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * page.h - buffer/page management specific to NILFS
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi and Seiji Kihara.
  */
 
index 5139efed1888294a425499ceba313f3747567b56..140b663e91c7fbf826702e9a27fce936ac80333f 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * recovery.c - NILFS recovery logic
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 
index 68cb9e4740b4e818836a56dcdc24a05d6e3e5f51..20c479b5e41b8c7a9bbd09d309cf33aa51e3a90d 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * segbuf.c - NILFS segment buffer
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 10e16935fff655c914c34055d526eccc172b409b..9bea1bd59041e0e0b69a379398f6877face96b3b 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * segbuf.h - NILFS Segment buffer prototypes and definitions
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 0953635e7d48e7915b35cd63c44ff1b6bfcc8e68..445eef41bfaf00a948b5c29f3041703bba369dfa 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * segment.c - NILFS segment constructor.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 04634e3e3d583af75d1dd77329582c9f8bd309fc..f5cf5308f3fcad1be6f0863c560ad30e04059c59 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * segment.h - NILFS Segment constructor prototypes and definitions
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index c7fa139d50e8287e481586fe177df2c03ef9e6de..bf3f8f05c89b3dbb10849111e7a0815b2a3786e5 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * sufile.c - NILFS segment usage file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  * Revised by Ryusuke Konishi.
  */
index 673a891350f49599e30a5666afd85973a5419522..c4e2c7a7add1d5d1d5f89948507810c468c0b403 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * sufile.h - NILFS segment usage file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 1b9067cf451125941c2140c94aba622b875c0dde..26290aa1023f31950017cdba5c31fb589934e701 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * super.c - NILFS module and super block management.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 /*
index 4b25837e77245902dee4a3674206adf1262c83b9..e60be7bb55b0b870e99d7844f6e3495d7d17e365 100644 (file)
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * sysfs.c - sysfs support implementation.
  *
  * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation.
  * Copyright (C) 2014 HGST, Inc., a Western Digital Company.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com>
  */
 
index 648cedf9c06ec75c3e783d0d5e6f5b91ae1891d9..d001eb862daece4420aacc6fa450a8a35d64d7f9 100644 (file)
@@ -1,19 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * sysfs.h - sysfs support declarations.
  *
  * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation.
  * Copyright (C) 2014 HGST, Inc., a Western Digital Company.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com>
  */
 
index 1a85317e83f0f751332118daf559fc9ba79fb560..484785cdf96e220525a57222190ac3fe0bca7f71 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * the_nilfs.c - the_nilfs shared structure.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 36da1779f9766f1e32bc8f8c2de6a7d93413403f..380a543c5b19bd424782e8393ecc285af648396e 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * the_nilfs.h - the_nilfs shared structure.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index f174397b63a046f32ca24a7be30080c4ff5fe009..ababdbfab537ef259b20079cd74d7f036e293fc7 100644 (file)
@@ -351,16 +351,9 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
 
        iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
 
-       if ((mask & FS_MODIFY) ||
-           (test_mask & to_tell->i_fsnotify_mask)) {
-               iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
-                       fsnotify_first_mark(&to_tell->i_fsnotify_marks);
-       }
-
-       if (mnt && ((mask & FS_MODIFY) ||
-                   (test_mask & mnt->mnt_fsnotify_mask))) {
-               iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
-                       fsnotify_first_mark(&to_tell->i_fsnotify_marks);
+       iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
+               fsnotify_first_mark(&to_tell->i_fsnotify_marks);
+       if (mnt) {
                iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] =
                        fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
        }
index 34aec30e06c734a47bc9806990eb816994ad2a98..6d766a19f2bbb2b62facc79ff3871aa81be68534 100644 (file)
@@ -56,6 +56,7 @@ struct blkcg {
        struct list_head                all_blkcgs_node;
 #ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head                cgwb_list;
+       refcount_t                      cgwb_refcnt;
 #endif
 };
 
@@ -89,7 +90,6 @@ struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq                 *blkg;
        int                             plid;
-       bool                            offline;
 };
 
 /*
@@ -387,6 +387,49 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
        return cpd ? cpd->blkcg : NULL;
 }
 
+extern void blkcg_destroy_blkgs(struct blkcg *blkcg);
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+/**
+ * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
+ * @blkcg: blkcg of interest
+ *
+ * This is used to track the number of active wb's related to a blkcg.
+ */
+static inline void blkcg_cgwb_get(struct blkcg *blkcg)
+{
+       refcount_inc(&blkcg->cgwb_refcnt);
+}
+
+/**
+ * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
+ * @blkcg: blkcg of interest
+ *
+ * This is used to track the number of active wb's related to a blkcg.
+ * When this count goes to zero, all active wb has finished so the
+ * blkcg can continue destruction by calling blkcg_destroy_blkgs().
+ * This work may occur in cgwb_release_workfn() on the cgwb_release
+ * workqueue.
+ */
+static inline void blkcg_cgwb_put(struct blkcg *blkcg)
+{
+       if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
+               blkcg_destroy_blkgs(blkcg);
+}
+
+#else
+
+static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }
+
+static inline void blkcg_cgwb_put(struct blkcg *blkcg)
+{
+       /* wb isn't being accounted, so trigger destruction right away */
+       blkcg_destroy_blkgs(blkcg);
+}
+
+#endif
+
 /**
  * blkg_path - format cgroup path of blkg
  * @blkg: blkg of interest
index 99d366cb0e9f5327081cea07b34c798df6aec35f..d157983b84cf9258fa43b639accc8ee3904a080c 100644 (file)
 
 #define PCI_VENDOR_ID_OCZ              0x1b85
 
+#define PCI_VENDOR_ID_NCUBE            0x10ff
+
 #endif /* _LINUX_PCI_IDS_H */
index 5d738804e3d6241dae4961c93324efbdd4f5aaea..a5a3cfc3c2fa754fd8f21fc721094268a4aa256b 100644 (file)
@@ -258,8 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
 extern int persistent_clock_is_local;
 
 extern void read_persistent_clock64(struct timespec64 *ts);
-void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
-                                          struct timespec64 *boot_offset);
+void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
+                                         struct timespec64 *boot_offset);
 extern int update_persistent_clock64(struct timespec64 now);
 
 /*
index 7f2e16e76ac476b4fb07223af23d15490c14b4d4..041f7e56a2894f3f800fc470d8a5bccaffc58b88 100644 (file)
@@ -158,8 +158,10 @@ extern void syscall_unregfunc(void);
                 * For rcuidle callers, use srcu since sched-rcu        \
                 * doesn't work from the idle path.                     \
                 */                                                     \
-               if (rcuidle)                                            \
+               if (rcuidle) {                                          \
                        idx = srcu_read_lock_notrace(&tracepoint_srcu); \
+                       rcu_irq_enter_irqson();                         \
+               }                                                       \
                                                                        \
                it_func_ptr = rcu_dereference_raw((tp)->funcs);         \
                                                                        \
@@ -171,8 +173,10 @@ extern void syscall_unregfunc(void);
                        } while ((++it_func_ptr)->func);                \
                }                                                       \
                                                                        \
-               if (rcuidle)                                            \
+               if (rcuidle) {                                          \
+                       rcu_irq_exit_irqson();                          \
                        srcu_read_unlock_notrace(&tracepoint_srcu, idx);\
+               }                                                       \
                                                                        \
                preempt_enable_notrace();                               \
        } while (0)
index 9a850973e09a739aaa72d5be951436640bc9c131..8ebabc9873d1593b46161697b53c8a8d14242000 100644 (file)
@@ -4865,8 +4865,8 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
  *
  * Return: 0 on success. -ENODATA.
  */
-int reg_query_regdb_wmm(char *alpha2, int freq, u32 *ptr,
-                       struct ieee80211_wmm_rule *rule);
+int reg_query_regdb_wmm(char *alpha2, int freq,
+                       struct ieee80211_reg_rule *rule);
 
 /*
  * callbacks for asynchronous cfg80211 methods, notification
index 60f8cc86a44701c151c1f74ed6d7e5fb061c0fc2..3469750df0f44542bce0a97aa23360cbfe7079fc 100644 (file)
@@ -217,15 +217,15 @@ struct ieee80211_wmm_rule {
 struct ieee80211_reg_rule {
        struct ieee80211_freq_range freq_range;
        struct ieee80211_power_rule power_rule;
-       struct ieee80211_wmm_rule *wmm_rule;
+       struct ieee80211_wmm_rule wmm_rule;
        u32 flags;
        u32 dfs_cac_ms;
+       bool has_wmm;
 };
 
 struct ieee80211_regdomain {
        struct rcu_head rcu_head;
        u32 n_reg_rules;
-       u32 n_wmm_rules;
        char alpha2[3];
        enum nl80211_dfs_regions dfs_region;
        struct ieee80211_reg_rule reg_rules[];
index 7b8c9e19bad1c2bf72c21dcf6b358559d5e4b4ea..910cc4334b21557f98298082b16e2359a6414e01 100644 (file)
@@ -65,7 +65,7 @@
 
 /* keyctl structures */
 struct keyctl_dh_params {
-       __s32 private;
+       __s32 dh_private;
        __s32 prime;
        __s32 base;
 };
index dc520e1a4123f7a60d4996b5d0df7d237199cdba..8b73cb603c5f32c78ff4ac78df4e78453a449cd9 100644 (file)
@@ -37,6 +37,7 @@
 
 #include <linux/types.h>
 #include <linux/socket.h>              /* For __kernel_sockaddr_storage. */
+#include <linux/in6.h>                 /* For struct in6_addr. */
 
 #define RDS_IB_ABI_VERSION             0x301
 
index b1e22c40c4b68a0b39d0a8a567baea1f3b5403ed..84c3de89696a15c1a23e7226dea456a6232e2043 100644 (file)
@@ -176,7 +176,7 @@ struct vhost_memory {
 #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
 
 #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
-#define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
 
 /* VHOST_NET specific defines */
 
index b0eb3757ab895d07970bec60b55f9bd895f7bd8a..4cd402e4cfeb603e2417a3796c3d9b22f3022f89 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -199,6 +199,7 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
        }
 
        ipc_unlock_object(ipcp);
+       ipcp = ERR_PTR(-EIDRM);
 err:
        rcu_read_unlock();
        /*
index cf5195c7c33172879158f56323ff81c6d3bf7c4f..488ef9663c01f3b4d2cc44b9ca88863ced2860e7 100644 (file)
@@ -236,7 +236,7 @@ static int bpf_tcp_init(struct sock *sk)
 }
 
 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
-static int free_start_sg(struct sock *sk, struct sk_msg_buff *md);
+static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge);
 
 static void bpf_tcp_release(struct sock *sk)
 {
@@ -248,7 +248,7 @@ static void bpf_tcp_release(struct sock *sk)
                goto out;
 
        if (psock->cork) {
-               free_start_sg(psock->sock, psock->cork);
+               free_start_sg(psock->sock, psock->cork, true);
                kfree(psock->cork);
                psock->cork = NULL;
        }
@@ -330,14 +330,14 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
        close_fun = psock->save_close;
 
        if (psock->cork) {
-               free_start_sg(psock->sock, psock->cork);
+               free_start_sg(psock->sock, psock->cork, true);
                kfree(psock->cork);
                psock->cork = NULL;
        }
 
        list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
                list_del(&md->list);
-               free_start_sg(psock->sock, md);
+               free_start_sg(psock->sock, md, true);
                kfree(md);
        }
 
@@ -369,7 +369,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
                        /* If another thread deleted this object skip deletion.
                         * The refcnt on psock may or may not be zero.
                         */
-                       if (l) {
+                       if (l && l == link) {
                                hlist_del_rcu(&link->hash_node);
                                smap_release_sock(psock, link->sk);
                                free_htab_elem(htab, link);
@@ -570,14 +570,16 @@ static void free_bytes_sg(struct sock *sk, int bytes,
        md->sg_start = i;
 }
 
-static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
+static int free_sg(struct sock *sk, int start,
+                  struct sk_msg_buff *md, bool charge)
 {
        struct scatterlist *sg = md->sg_data;
        int i = start, free = 0;
 
        while (sg[i].length) {
                free += sg[i].length;
-               sk_mem_uncharge(sk, sg[i].length);
+               if (charge)
+                       sk_mem_uncharge(sk, sg[i].length);
                if (!md->skb)
                        put_page(sg_page(&sg[i]));
                sg[i].length = 0;
@@ -594,9 +596,9 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
        return free;
 }
 
-static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
+static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge)
 {
-       int free = free_sg(sk, md->sg_start, md);
+       int free = free_sg(sk, md->sg_start, md, charge);
 
        md->sg_start = md->sg_end;
        return free;
@@ -604,7 +606,7 @@ static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
 
 static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
 {
-       return free_sg(sk, md->sg_curr, md);
+       return free_sg(sk, md->sg_curr, md, true);
 }
 
 static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
@@ -718,7 +720,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
                list_add_tail(&r->list, &psock->ingress);
                sk->sk_data_ready(sk);
        } else {
-               free_start_sg(sk, r);
+               free_start_sg(sk, r, true);
                kfree(r);
        }
 
@@ -752,14 +754,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
                release_sock(sk);
        }
        smap_release_sock(psock, sk);
-       if (unlikely(err))
-               goto out;
-       return 0;
+       return err;
 out_rcu:
        rcu_read_unlock();
-out:
-       free_bytes_sg(NULL, send, md, false);
-       return err;
+       return 0;
 }
 
 static inline void bpf_md_init(struct smap_psock *psock)
@@ -822,7 +820,7 @@ more_data:
        case __SK_PASS:
                err = bpf_tcp_push(sk, send, m, flags, true);
                if (unlikely(err)) {
-                       *copied -= free_start_sg(sk, m);
+                       *copied -= free_start_sg(sk, m, true);
                        break;
                }
 
@@ -845,16 +843,17 @@ more_data:
                lock_sock(sk);
 
                if (unlikely(err < 0)) {
-                       free_start_sg(sk, m);
+                       int free = free_start_sg(sk, m, false);
+
                        psock->sg_size = 0;
                        if (!cork)
-                               *copied -= send;
+                               *copied -= free;
                } else {
                        psock->sg_size -= send;
                }
 
                if (cork) {
-                       free_start_sg(sk, m);
+                       free_start_sg(sk, m, true);
                        psock->sg_size = 0;
                        kfree(m);
                        m = NULL;
@@ -912,6 +911,8 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 
        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
+       if (!skb_queue_empty(&sk->sk_receive_queue))
+               return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
 
        rcu_read_lock();
        psock = smap_psock_sk(sk);
@@ -922,9 +923,6 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                goto out;
        rcu_read_unlock();
 
-       if (!skb_queue_empty(&sk->sk_receive_queue))
-               return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
-
        lock_sock(sk);
 bytes_ready:
        while (copied != len) {
@@ -1122,7 +1120,7 @@ wait_for_memory:
                err = sk_stream_wait_memory(sk, &timeo);
                if (err) {
                        if (m && m != psock->cork)
-                               free_start_sg(sk, m);
+                               free_start_sg(sk, m, true);
                        goto out_err;
                }
        }
@@ -1464,10 +1462,16 @@ static void smap_destroy_psock(struct rcu_head *rcu)
        schedule_work(&psock->gc_work);
 }
 
+static bool psock_is_smap_sk(struct sock *sk)
+{
+       return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops;
+}
+
 static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
 {
        if (refcount_dec_and_test(&psock->refcnt)) {
-               tcp_cleanup_ulp(sock);
+               if (psock_is_smap_sk(sock))
+                       tcp_cleanup_ulp(sock);
                write_lock_bh(&sock->sk_callback_lock);
                smap_stop_sock(psock, sock);
                write_unlock_bh(&sock->sk_callback_lock);
@@ -1581,13 +1585,13 @@ static void smap_gc_work(struct work_struct *w)
                bpf_prog_put(psock->bpf_tx_msg);
 
        if (psock->cork) {
-               free_start_sg(psock->sock, psock->cork);
+               free_start_sg(psock->sock, psock->cork, true);
                kfree(psock->cork);
        }
 
        list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
                list_del(&md->list);
-               free_start_sg(psock->sock, md);
+               free_start_sg(psock->sock, md, true);
                kfree(md);
        }
 
@@ -1894,6 +1898,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
         * doesn't update user data.
         */
        if (psock) {
+               if (!psock_is_smap_sk(sock)) {
+                       err = -EBUSY;
+                       goto out_progs;
+               }
                if (READ_ONCE(psock->bpf_parse) && parse) {
                        err = -EBUSY;
                        goto out_progs;
index aa7fe85ad62e86de2060d2b89e525b93506cbf0d..0097acec1c717dee6948e50b57ab4c4bb0c15ac7 100644 (file)
@@ -607,15 +607,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
        bool bringup = st->bringup;
        enum cpuhp_state state;
 
+       if (WARN_ON_ONCE(!st->should_run))
+               return;
+
        /*
         * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
         * that if we see ->should_run we also see the rest of the state.
         */
        smp_mb();
 
-       if (WARN_ON_ONCE(!st->should_run))
-               return;
-
        cpuhp_lock_acquire(bringup);
 
        if (st->single) {
@@ -916,7 +916,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
                if (ret) {
                        st->target = prev_state;
-                       undo_cpu_down(cpu, st);
+                       if (st->state < prev_state)
+                               undo_cpu_down(cpu, st);
                        break;
                }
        }
@@ -969,7 +970,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, target);
-       if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+       if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
                cpuhp_reset_state(st, prev_state);
                __cpuhp_kick_ap(st);
        }
index 1c35b7b945d034d320fb942109bb218c9c1eff8c..de87b0282e7420feaf4282e7ba835074b61512a7 100644 (file)
@@ -168,7 +168,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 int dma_direct_supported(struct device *dev, u64 mask)
 {
 #ifdef CONFIG_ZONE_DMA
-       if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+       if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
                return 0;
 #else
        /*
@@ -177,7 +177,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
         * memory, or by providing a ZONE_DMA32.  If neither is the case, the
         * architecture needs to use an IOMMU instead of the direct mapping.
         */
-       if (mask < DMA_BIT_MASK(32))
+       if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
                return 0;
 #endif
        /*
index d896e9ca38b0cccc5de00a0564ecd22179fa2c06..f0b58479534f07dfd55d57a117b4055d80c829b5 100644 (file)
@@ -550,8 +550,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                        goto out;
        }
        /* a new mm has just been created */
-       arch_dup_mmap(oldmm, mm);
-       retval = 0;
+       retval = arch_dup_mmap(oldmm, mm);
 out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm(oldmm);
index a0a74c533e4b755ea73b876b51e3839c272c19cf..0913b4d385de307049eead641cf0bba8ea9e7695 100644 (file)
@@ -306,12 +306,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
        return printk_safe_log_store(s, fmt, args);
 }
 
-void printk_nmi_enter(void)
+void notrace printk_nmi_enter(void)
 {
        this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
 }
 
-void printk_nmi_exit(void)
+void notrace printk_nmi_exit(void)
 {
        this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
 }
index f74fb00d806444739f9d8ee1611a98c694325f95..0e6e97a01942d956e6135025003371537ca28ab0 100644 (file)
@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
        spin_unlock_irqrestore(&watchdog_lock, *flags);
 }
 
+static int clocksource_watchdog_kthread(void *data);
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+       /*
+        * We cannot directly run clocksource_watchdog_kthread() here, because
+        * clocksource_select() calls timekeeping_notify() which uses
+        * stop_machine(). One cannot use stop_machine() from a workqueue() due
+        * lock inversions wrt CPU hotplug.
+        *
+        * Also, we only ever run this work once or twice during the lifetime
+        * of the kernel, so there is no point in creating a more permanent
+        * kthread for this.
+        *
+        * If kthread_run fails the next watchdog scan over the
+        * watchdog_list will find the unstable clock again.
+        */
+       kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
 static void __clocksource_unstable(struct clocksource *cs)
 {
        cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
        cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
        /*
-        * If the clocksource is registered clocksource_watchdog_work() will
+        * If the clocksource is registered clocksource_watchdog_kthread() will
         * re-rate and re-select.
         */
        if (list_empty(&cs->list)) {
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs)
        if (cs->mark_unstable)
                cs->mark_unstable(cs);
 
-       /* kick clocksource_watchdog_work() */
+       /* kick clocksource_watchdog_kthread() */
        if (finished_booting)
                schedule_work(&watchdog_work);
 }
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs)
  * @cs:                clocksource to be marked unstable
  *
  * This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a work.
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
        }
 }
 
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
-static int __clocksource_watchdog_work(void)
+static int __clocksource_watchdog_kthread(void)
 {
        struct clocksource *cs, *tmp;
        unsigned long flags;
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void)
        return select;
 }
 
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
 {
        mutex_lock(&clocksource_mutex);
-       if (__clocksource_watchdog_work())
+       if (__clocksource_watchdog_kthread())
                clocksource_select();
        mutex_unlock(&clocksource_mutex);
+       return 0;
 }
 
 static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_work(void) { return 0; }
+static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void)
        /*
         * Run the watchdog first to eliminate unstable clock sources
         */
-       __clocksource_watchdog_work();
+       __clocksource_watchdog_kthread();
        clocksource_select();
        mutex_unlock(&clocksource_mutex);
        return 0;
index 613316724c6af63ea9f93fccffb6b6010cdde82e..4966c4fbe7f735a1eae8d7e5de7e96022cb02862 100644 (file)
@@ -1277,13 +1277,13 @@ config WARN_ALL_UNSEEDED_RANDOM
          time.  This is really bad from a security perspective, and
          so architecture maintainers really need to do what they can
          to get the CRNG seeded sooner after the system is booted.
-         However, since users can not do anything actionble to
+         However, since users cannot do anything actionable to
          address this, by default the kernel will issue only a single
          warning for the first use of unseeded randomness.
 
          Say Y here if you want to receive warnings for all uses of
          unseeded randomness.  This will be of use primarily for
-         those developers interersted in improving the security of
+         those developers interested in improving the security of
          Linux kernels running on their architecture (or
          subarchitecture).
 
index f5981e9d6ae2e73761879023584ff378ebfcb1b8..8a8bb8796c6c43cb711c0f618c488df188853623 100644 (file)
@@ -491,6 +491,7 @@ static void cgwb_release_workfn(struct work_struct *work)
 {
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
                                                release_work);
+       struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);
 
        mutex_lock(&wb->bdi->cgwb_release_mutex);
        wb_shutdown(wb);
@@ -499,6 +500,9 @@ static void cgwb_release_workfn(struct work_struct *work)
        css_put(wb->blkcg_css);
        mutex_unlock(&wb->bdi->cgwb_release_mutex);
 
+       /* triggers blkg destruction if cgwb_refcnt becomes zero */
+       blkcg_cgwb_put(blkcg);
+
        fprop_local_destroy_percpu(&wb->memcg_completions);
        percpu_ref_exit(&wb->refcnt);
        wb_exit(wb);
@@ -597,6 +601,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
                        list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
                        list_add(&wb->memcg_node, memcg_cgwb_list);
                        list_add(&wb->blkcg_node, blkcg_cgwb_list);
+                       blkcg_cgwb_get(blkcg);
                        css_get(memcg_css);
                        css_get(blkcg_css);
                }
index c3bc7e9c9a2acc550ea8aeb68720a8c5a611c610..533f9b00147d267644bcbf98da717329fb07c38f 100644 (file)
@@ -821,11 +821,11 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
         * but we need to be consistent with PTEs and architectures that
         * can't support a 'special' bit.
         */
-       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
+                       !pfn_t_devmap(pfn));
        BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
                                                (VM_PFNMAP|VM_MIXEDMAP));
        BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
-       BUG_ON(!pfn_t_devmap(pfn));
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return VM_FAULT_SIGBUS;
index 9a085d525bbce1bd4aabb4599236d2874c964850..17dd883198aeab0dccebe762a6b432dad2f14c9d 100644 (file)
@@ -2097,6 +2097,11 @@ static int __init kmemleak_late_init(void)
 
        kmemleak_initialized = 1;
 
+       dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
+                                    &kmemleak_fops);
+       if (!dentry)
+               pr_warn("Failed to create the debugfs kmemleak file\n");
+
        if (kmemleak_error) {
                /*
                 * Some error occurred and kmemleak was disabled. There is a
@@ -2108,10 +2113,6 @@ static int __init kmemleak_late_init(void)
                return -ENOMEM;
        }
 
-       dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
-                                    &kmemleak_fops);
-       if (!dentry)
-               pr_warn("Failed to create the debugfs kmemleak file\n");
        mutex_lock(&scan_mutex);
        start_scan_thread();
        mutex_unlock(&scan_mutex);
index 4ead5a4817de3ffbf1477cfe77b8f4f4802281c8..e79cb59552d9b0076a2c9cbfec3af9ebfa5c06e2 100644 (file)
@@ -1701,8 +1701,6 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int
        if (mem_cgroup_out_of_memory(memcg, mask, order))
                return OOM_SUCCESS;
 
-       WARN(1,"Memory cgroup charge failed because of no reclaimable memory! "
-               "This looks like a misconfiguration or a kernel bug.");
        return OOM_FAILED;
 }
 
index 9eea6e809a4e99fcf11125d47f28d7493af8b0ab..38d94b703e9d4932279b5657a2c889dfaaf1b749 100644 (file)
@@ -1333,7 +1333,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
                        if (__PageMovable(page))
                                return pfn;
                        if (PageHuge(page)) {
-                               if (page_huge_active(page))
+                               if (hugepage_migration_supported(page_hstate(page)) &&
+                                   page_huge_active(page))
                                        return pfn;
                                else
                                        pfn = round_up(pfn + 1,
index b5b25e4dcbbb707e5d7ad0ae588a88ca2dc49e88..f10aa5360616ea3247b6e508ed28ace237d70ad9 100644 (file)
@@ -522,6 +522,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
 
                        tlb_gather_mmu(&tlb, mm, start, end);
                        if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) {
+                               tlb_finish_mmu(&tlb, start, end);
                                ret = false;
                                continue;
                        }
@@ -1103,10 +1104,17 @@ bool out_of_memory(struct oom_control *oc)
        }
 
        select_bad_process(oc);
-       /* Found nothing?!?! Either we hang forever, or we panic. */
-       if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
+       /* Found nothing?!?! */
+       if (!oc->chosen) {
                dump_header(oc, NULL);
-               panic("Out of memory and no killable processes...\n");
+               pr_warn("Out of memory and no killable processes...\n");
+               /*
+                * If we got here due to an actual allocation at the
+                * system level, we cannot survive this and will enter
+                * an endless loop in the allocator. Bail out now.
+                */
+               if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
+                       panic("System is deadlocked on memory\n");
        }
        if (oc->chosen && oc->chosen != (void *)-1UL)
                oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
index 05e983f42316a1c5446536fb16cdf3f0ceb3cdd0..89d2a2ab3fe68c3ae46104074c519c7500dd86cb 100644 (file)
@@ -7708,6 +7708,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                 * handle each tail page individually in migration.
                 */
                if (PageHuge(page)) {
+
+                       if (!hugepage_migration_supported(page_hstate(page)))
+                               goto unmovable;
+
                        iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
                        continue;
                }
index d2890a4073321c65a5f85f94c505c31bc3b7edb5..9e3ebd2ef65f8925b0a1f894567c0e3d98665b34 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -435,11 +435,14 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(kvmalloc_node);
 
 /**
- * kvfree - free memory allocated with kvmalloc
- * @addr: pointer returned by kvmalloc
+ * kvfree() - Free memory.
+ * @addr: Pointer to allocated memory.
  *
- * If the memory is allocated from vmalloc area it is freed with vfree().
- * Otherwise kfree() is used.
+ * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
+ * It is slightly more efficient to use kfree() or vfree() if you are certain
+ * that you know which one to use.
+ *
+ * Context: Any context except NMI.
  */
 void kvfree(const void *addr)
 {
index c25eb36f13204e0df620e6d9eb76783fe67ad243..aecdeba052d3f0ff3d4f0a33ec36891f9738052c 100644 (file)
@@ -2282,14 +2282,21 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
        .arg2_type      = ARG_ANYTHING,
 };
 
+#define sk_msg_iter_var(var)                   \
+       do {                                    \
+               var++;                          \
+               if (var == MAX_SKB_FRAGS)       \
+                       var = 0;                \
+       } while (0)
+
 BPF_CALL_4(bpf_msg_pull_data,
           struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
 {
-       unsigned int len = 0, offset = 0, copy = 0;
+       unsigned int len = 0, offset = 0, copy = 0, poffset = 0;
+       int bytes = end - start, bytes_sg_total;
        struct scatterlist *sg = msg->sg_data;
        int first_sg, last_sg, i, shift;
        unsigned char *p, *to, *from;
-       int bytes = end - start;
        struct page *page;
 
        if (unlikely(flags || end <= start))
@@ -2299,21 +2306,22 @@ BPF_CALL_4(bpf_msg_pull_data,
        i = msg->sg_start;
        do {
                len = sg[i].length;
-               offset += len;
                if (start < offset + len)
                        break;
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
+               offset += len;
+               sk_msg_iter_var(i);
        } while (i != msg->sg_end);
 
        if (unlikely(start >= offset + len))
                return -EINVAL;
 
-       if (!msg->sg_copy[i] && bytes <= len)
-               goto out;
-
        first_sg = i;
+       /* The start may point into the sg element so we need to also
+        * account for the headroom.
+        */
+       bytes_sg_total = start - offset + bytes;
+       if (!msg->sg_copy[i] && bytes_sg_total <= len)
+               goto out;
 
        /* At this point we need to linearize multiple scatterlist
         * elements or a single shared page. Either way we need to
@@ -2327,37 +2335,32 @@ BPF_CALL_4(bpf_msg_pull_data,
         */
        do {
                copy += sg[i].length;
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
-               if (bytes < copy)
+               sk_msg_iter_var(i);
+               if (bytes_sg_total <= copy)
                        break;
        } while (i != msg->sg_end);
        last_sg = i;
 
-       if (unlikely(copy < end - start))
+       if (unlikely(bytes_sg_total > copy))
                return -EINVAL;
 
        page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
        if (unlikely(!page))
                return -ENOMEM;
        p = page_address(page);
-       offset = 0;
 
        i = first_sg;
        do {
                from = sg_virt(&sg[i]);
                len = sg[i].length;
-               to = p + offset;
+               to = p + poffset;
 
                memcpy(to, from, len);
-               offset += len;
+               poffset += len;
                sg[i].length = 0;
                put_page(sg_page(&sg[i]));
 
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
+               sk_msg_iter_var(i);
        } while (i != last_sg);
 
        sg[first_sg].length = copy;
@@ -2367,11 +2370,15 @@ BPF_CALL_4(bpf_msg_pull_data,
         * had a single entry though we can just replace it and
         * be done. Otherwise walk the ring and shift the entries.
         */
-       shift = last_sg - first_sg - 1;
+       WARN_ON_ONCE(last_sg == first_sg);
+       shift = last_sg > first_sg ?
+               last_sg - first_sg - 1 :
+               MAX_SKB_FRAGS - first_sg + last_sg - 1;
        if (!shift)
                goto out;
 
-       i = first_sg + 1;
+       i = first_sg;
+       sk_msg_iter_var(i);
        do {
                int move_from;
 
@@ -2388,15 +2395,13 @@ BPF_CALL_4(bpf_msg_pull_data,
                sg[move_from].page_link = 0;
                sg[move_from].offset = 0;
 
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
+               sk_msg_iter_var(i);
        } while (1);
        msg->sg_end -= shift;
        if (msg->sg_end < 0)
                msg->sg_end += MAX_SKB_FRAGS;
 out:
-       msg->data = sg_virt(&sg[i]) + start - offset;
+       msg->data = sg_virt(&sg[first_sg]) + start - offset;
        msg->data_end = msg->data + bytes;
 
        return 0;
@@ -7281,7 +7286,7 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct sk_reuseport_md, ip_protocol):
-               BUILD_BUG_ON(hweight_long(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
+               BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
                SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
                                                    BPF_W, 0);
                *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
index 24431e578310cb1602c05ea26a24069d49309c74..60c928894a7868e5f1bcec39f5204eba9e0fe82f 100644 (file)
@@ -324,6 +324,10 @@ void rtnl_unregister_all(int protocol)
 
        rtnl_lock();
        tab = rtnl_msg_handlers[protocol];
+       if (!tab) {
+               rtnl_unlock();
+               return;
+       }
        RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
        for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
                link = tab[msgindex];
index e63c554e0623e54971ed90af69ff0d0c35ecebad..9f3209ff7ffde754230720ca9117111fdbd7c3bc 100644 (file)
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
 #include <linux/of_net.h>
-#include <linux/of_gpio.h>
 #include <linux/netdevice.h>
 #include <linux/sysfs.h>
 #include <linux/phy_fixed.h>
 #include <linux/ptp_classify.h>
-#include <linux/gpio/consumer.h>
 #include <linux/etherdevice.h>
 
 #include "dsa_priv.h"
index cf75f8944b05eb302c11cd8e2e0e9f762ab7e35c..4da39446da2d89b529973eb33902577a0e6cbb54 100644 (file)
@@ -820,10 +820,9 @@ static void igmp_timer_expire(struct timer_list *t)
        spin_lock(&im->lock);
        im->tm_running = 0;
 
-       if (im->unsolicit_count) {
-               im->unsolicit_count--;
+       if (im->unsolicit_count && --im->unsolicit_count)
                igmp_start_timer(im, unsolicited_report_interval(in_dev));
-       }
+
        im->reporter = 1;
        spin_unlock(&im->lock);
 
@@ -1308,6 +1307,8 @@ static void igmp_group_added(struct ip_mc_list *im)
 
        if (in_dev->dead)
                return;
+
+       im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
        if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
                spin_lock_bh(&im->lock);
                igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
@@ -1391,9 +1392,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
                              unsigned int mode)
 {
        struct ip_mc_list *im;
-#ifdef CONFIG_IP_MULTICAST
-       struct net *net = dev_net(in_dev->dev);
-#endif
 
        ASSERT_RTNL();
 
@@ -1420,7 +1418,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
        spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
        timer_setup(&im->timer, igmp_timer_expire, 0);
-       im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
 #endif
 
        im->next_rcu = in_dev->mc_list;
index 51a5d06085ac44777c622a58476d7700d7bc0a97..ae714aecc31c030cdce19fc5b2e92eb6ca214a16 100644 (file)
@@ -1508,11 +1508,14 @@ nla_put_failure:
 
 static void erspan_setup(struct net_device *dev)
 {
+       struct ip_tunnel *t = netdev_priv(dev);
+
        ether_setup(dev);
        dev->netdev_ops = &erspan_netdev_ops;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        ip_tunnel_setup(dev, erspan_net_id);
+       t->erspan_ver = 1;
 }
 
 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
index 75ef332a7caf44de619acf030977eba01565c70a..12affb7864d981a6494059232c4965aaee756803 100644 (file)
@@ -184,8 +184,9 @@ kill:
                                inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
                        }
+               } else {
+                       inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                }
-               inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
index 673bba31eb1807eb04ee0d0c072333000bde4305..9a4261e502727005b3ea5e26795afd747533ee9e 100644 (file)
@@ -938,14 +938,14 @@ static int __init inet6_init(void)
 
        err = proto_register(&pingv6_prot, 1);
        if (err)
-               goto out_unregister_ping_proto;
+               goto out_unregister_raw_proto;
 
        /* We MUST register RAW sockets before we create the ICMP6,
         * IGMP6, or NDISC control sockets.
         */
        err = rawv6_init();
        if (err)
-               goto out_unregister_raw_proto;
+               goto out_unregister_ping_proto;
 
        /* Register the family here so that the init calls below will
         * be able to create sockets. (?? is this dangerous ??)
@@ -1113,11 +1113,11 @@ netfilter_fail:
 igmp_fail:
        ndisc_cleanup();
 ndisc_fail:
-       ip6_mr_cleanup();
+       icmpv6_cleanup();
 icmp_fail:
-       unregister_pernet_subsys(&inet6_net_ops);
+       ip6_mr_cleanup();
 ipmr_fail:
-       icmpv6_cleanup();
+       unregister_pernet_subsys(&inet6_net_ops);
 register_pernet_fail:
        sock_unregister(PF_INET6);
        rtnl_unregister_all(PF_INET6);
index c861a6d4671d3f0f82f68752d16a8192ac649f8a..5516f55e214bd85ff7a07cf8c24648db327902c2 100644 (file)
@@ -989,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
                                        fib6_clean_expires(iter);
                                else
                                        fib6_set_expires(iter, rt->expires);
-                               fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
+
+                               if (rt->fib6_pmtu)
+                                       fib6_metric_set(iter, RTAX_MTU,
+                                                       rt->fib6_pmtu);
                                return -EEXIST;
                        }
                        /* If we have the same destination and the same metric,
index 18a3794b0f52e1dd7b8bf5179bcdcfdbd882f158..e493b041d4ac9900d44972a22ebdc898def323fe 100644 (file)
@@ -1778,6 +1778,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
        if (data[IFLA_GRE_COLLECT_METADATA])
                parms->collect_md = true;
 
+       parms->erspan_ver = 1;
        if (data[IFLA_GRE_ERSPAN_VER])
                parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
 
index 5df2a58d945cc2d37fc1a30c7b850d928dcd2ccf..419960b0ba16370ec9c47bee16aad15d9819f349 100644 (file)
@@ -1188,7 +1188,15 @@ route_lookup:
                init_tel_txopt(&opt, encap_limit);
                ipv6_push_frag_opts(skb, &opt.ops, &proto);
        }
-       hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
+
+       if (hop_limit == 0) {
+               if (skb->protocol == htons(ETH_P_IP))
+                       hop_limit = ip_hdr(skb)->ttl;
+               else if (skb->protocol == htons(ETH_P_IPV6))
+                       hop_limit = ipv6_hdr(skb)->hop_limit;
+               else
+                       hop_limit = ip6_dst_hoplimit(dst);
+       }
 
        /* Calculate max headroom for all the headers and adjust
         * needed_headroom if necessary.
index 5095367c7204927f489c2833c736fbf5292c8d09..eeaf7455d51e52f12b62ffd0c1c82e2ee09e4fc1 100644 (file)
@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        }
 
        mtu = dst_mtu(dst);
-       if (!skb->ignore_df && skb->len > mtu) {
+       if (skb->len > mtu) {
                skb_dst_update_pmtu(skb, mtu);
 
                if (skb->protocol == htons(ETH_P_IPV6)) {
index c4ea13e8360b9a399c811563165383e720c1cbb4..18e00ce1719a3d16f5099ea2271d89916929074f 100644 (file)
@@ -996,7 +996,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
        rt->rt6i_src = ort->fib6_src;
 #endif
        rt->rt6i_prefsrc = ort->fib6_prefsrc;
-       rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
 }
 
 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
index 6449a1c2283bf236728f36dc627de7e91c886cda..f0f5fedb8caacd164d6575ebcca9e27b19cfd516 100644 (file)
@@ -947,8 +947,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
        if (len < IEEE80211_DEAUTH_FRAME_LEN)
                return;
 
-       ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, reason);
+       ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
        sta_info_destroy_addr(sdata, mgmt->sa);
 }
 
@@ -966,9 +966,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
        auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
        auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
 
-       ibss_dbg(sdata,
-                "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+       ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
+                mgmt->bssid, auth_transaction);
 
        if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
                return;
@@ -1175,10 +1175,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                rx_timestamp = drv_get_tsf(local, sdata);
        }
 
-       ibss_dbg(sdata,
-                "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+       ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
                 mgmt->sa, mgmt->bssid,
-                (unsigned long long)rx_timestamp,
+                (unsigned long long)rx_timestamp);
+       ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
                 (unsigned long long)beacon_timestamp,
                 (unsigned long long)(rx_timestamp - beacon_timestamp),
                 jiffies);
@@ -1537,9 +1537,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 
        tx_last_beacon = drv_tx_last_beacon(local);
 
-       ibss_dbg(sdata,
-                "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
+       ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
+                mgmt->bssid, tx_last_beacon);
 
        if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
                return;
index 4fb2709cb52796c752f052a746bd5c420d6caf08..513627896204938485ef01f0f2d76606d4418af4 100644 (file)
@@ -256,8 +256,27 @@ static void ieee80211_restart_work(struct work_struct *work)
 
        flush_work(&local->radar_detected_work);
        rtnl_lock();
-       list_for_each_entry(sdata, &local->interfaces, list)
+       list_for_each_entry(sdata, &local->interfaces, list) {
+               /*
+                * XXX: there may be more work for other vif types and even
+                * for station mode: a good thing would be to run most of
+                * the iface type's dependent _stop (ieee80211_mg_stop,
+                * ieee80211_ibss_stop) etc...
+                * For now, fix only the specific bug that was seen: race
+                * between csa_connection_drop_work and us.
+                */
+               if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+                       /*
+                        * This worker is scheduled from the iface worker that
+                        * runs on mac80211's workqueue, so we can't be
+                        * scheduling this worker after the cancel right here.
+                        * The exception is ieee80211_chswitch_done.
+                        * Then we can have a race...
+                        */
+                       cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
+               }
                flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+       }
        ieee80211_scan_cancel(local);
 
        /* make sure any new ROC will consider local->in_reconfig */
@@ -471,10 +490,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
                cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
                            IEEE80211_VHT_CAP_SHORT_GI_80 |
                            IEEE80211_VHT_CAP_SHORT_GI_160 |
-                           IEEE80211_VHT_CAP_RXSTBC_1 |
-                           IEEE80211_VHT_CAP_RXSTBC_2 |
-                           IEEE80211_VHT_CAP_RXSTBC_3 |
-                           IEEE80211_VHT_CAP_RXSTBC_4 |
+                           IEEE80211_VHT_CAP_RXSTBC_MASK |
                            IEEE80211_VHT_CAP_TXSTBC |
                            IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
                            IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
@@ -1208,6 +1224,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 #if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&local->ifa6_notifier);
 #endif
+       ieee80211_txq_teardown_flows(local);
 
        rtnl_lock();
 
@@ -1236,7 +1253,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
        skb_queue_purge(&local->skb_queue);
        skb_queue_purge(&local->skb_queue_unreliable);
        skb_queue_purge(&local->skb_queue_tdls_chsw);
-       ieee80211_txq_teardown_flows(local);
 
        destroy_workqueue(local->workqueue);
        wiphy_unregister(local->hw.wiphy);
index 35ad3983ae4b6b26fb13f6718b692c9b9b0d6e98..daf9db3c8f24f389df84d95ae973c969d65622f1 100644 (file)
@@ -572,6 +572,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
                forward = false;
                reply = true;
                target_metric = 0;
+
+               if (SN_GT(target_sn, ifmsh->sn))
+                       ifmsh->sn = target_sn;
+
                if (time_after(jiffies, ifmsh->last_sn_update +
                                        net_traversal_jiffies(sdata)) ||
                    time_before(jiffies, ifmsh->last_sn_update)) {
index 7fb9957359a3c1be557e577ba5b76cc4c1177105..3dbecae4be73cb1aae05752807b717a329b80980 100644 (file)
@@ -1073,6 +1073,10 @@ static void ieee80211_chswitch_work(struct work_struct *work)
         */
 
        if (sdata->reserved_chanctx) {
+               struct ieee80211_supported_band *sband = NULL;
+               struct sta_info *mgd_sta = NULL;
+               enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
+
                /*
                 * with multi-vif csa driver may call ieee80211_csa_finish()
                 * many times while waiting for other interfaces to use their
@@ -1081,6 +1085,48 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                if (sdata->reserved_ready)
                        goto out;
 
+               if (sdata->vif.bss_conf.chandef.width !=
+                   sdata->csa_chandef.width) {
+                       /*
+                        * For managed interface, we need to also update the AP
+                        * station bandwidth and align the rate scale algorithm
+                        * on the bandwidth change. Here we only consider the
+                        * bandwidth of the new channel definition (as channel
+                        * switch flow does not have the full HT/VHT/HE
+                        * information), assuming that if additional changes are
+                        * required they would be done as part of the processing
+                        * of the next beacon from the AP.
+                        */
+                       switch (sdata->csa_chandef.width) {
+                       case NL80211_CHAN_WIDTH_20_NOHT:
+                       case NL80211_CHAN_WIDTH_20:
+                       default:
+                               bw = IEEE80211_STA_RX_BW_20;
+                               break;
+                       case NL80211_CHAN_WIDTH_40:
+                               bw = IEEE80211_STA_RX_BW_40;
+                               break;
+                       case NL80211_CHAN_WIDTH_80:
+                               bw = IEEE80211_STA_RX_BW_80;
+                               break;
+                       case NL80211_CHAN_WIDTH_80P80:
+                       case NL80211_CHAN_WIDTH_160:
+                               bw = IEEE80211_STA_RX_BW_160;
+                               break;
+                       }
+
+                       mgd_sta = sta_info_get(sdata, ifmgd->bssid);
+                       sband =
+                               local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
+               }
+
+               if (sdata->vif.bss_conf.chandef.width >
+                   sdata->csa_chandef.width) {
+                       mgd_sta->sta.bandwidth = bw;
+                       rate_control_rate_update(local, sband, mgd_sta,
+                                                IEEE80211_RC_BW_CHANGED);
+               }
+
                ret = ieee80211_vif_use_reserved_context(sdata);
                if (ret) {
                        sdata_info(sdata,
@@ -1091,6 +1137,13 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                        goto out;
                }
 
+               if (sdata->vif.bss_conf.chandef.width <
+                   sdata->csa_chandef.width) {
+                       mgd_sta->sta.bandwidth = bw;
+                       rate_control_rate_update(local, sband, mgd_sta,
+                                                IEEE80211_RC_BW_CHANGED);
+               }
+
                goto out;
        }
 
@@ -1312,6 +1365,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                                         cbss->beacon_interval));
        return;
  drop_connection:
+       /*
+        * This is just so that the disconnect flow will know that
+        * we were trying to switch channel and failed. In case the
+        * mode is 1 (we are not allowed to Tx), we will know not to
+        * send a deauthentication frame. Those two fields will be
+        * reset when the disconnection worker runs.
+        */
+       sdata->vif.csa_active = true;
+       sdata->csa_block_tx = csa_ie.mode;
+
        ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
        mutex_unlock(&local->chanctx_mtx);
        mutex_unlock(&local->mtx);
@@ -2522,6 +2585,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+       bool tx;
 
        sdata_lock(sdata);
        if (!ifmgd->associated) {
@@ -2529,6 +2593,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
                return;
        }
 
+       tx = !sdata->csa_block_tx;
+
        /* AP is probably out of range (or not reachable for another reason) so
         * remove the bss struct for that AP.
         */
@@ -2536,7 +2602,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
 
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                               WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
-                              true, frame_buf);
+                              tx, frame_buf);
        mutex_lock(&local->mtx);
        sdata->vif.csa_active = false;
        ifmgd->csa_waiting_bcn = false;
@@ -2547,7 +2613,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
        }
        mutex_unlock(&local->mtx);
 
-       ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+       ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
                                    WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
 
        sdata_unlock(sdata);
index 64742f2765c4846c36d3f9304314059023215d0a..96611d5dfadb0ce37effd4adcce80454bb69f285 100644 (file)
@@ -1728,6 +1728,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
         */
        if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
            !ieee80211_has_morefrags(hdr->frame_control) &&
+           !is_multicast_ether_addr(hdr->addr1) &&
            (ieee80211_is_mgmt(hdr->frame_control) ||
             ieee80211_is_data(hdr->frame_control)) &&
            !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
index cd332e3e1134bed3efb89838ac245b0402e7a604..f353d9db54bc1f049e14b20713af88bde1da3c62 100644 (file)
@@ -3078,27 +3078,18 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta)
 }
 
 static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
-                                       struct sk_buff *skb, int headroom,
-                                       int *subframe_len)
+                                       struct sk_buff *skb, int headroom)
 {
-       int amsdu_len = *subframe_len + sizeof(struct ethhdr);
-       int padding = (4 - amsdu_len) & 3;
-
-       if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) {
+       if (skb_headroom(skb) < headroom) {
                I802_DEBUG_INC(local->tx_expand_skb_head);
 
-               if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) {
+               if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
                        wiphy_debug(local->hw.wiphy,
                                    "failed to reallocate TX buffer\n");
                        return false;
                }
        }
 
-       if (padding) {
-               *subframe_len += padding;
-               skb_put_zero(skb, padding);
-       }
-
        return true;
 }
 
@@ -3122,8 +3113,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
        if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
                return true;
 
-       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr),
-                                        &subframe_len))
+       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
                return false;
 
        data = skb_push(skb, sizeof(*amsdu_hdr));
@@ -3189,7 +3179,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        void *data;
        bool ret = false;
        unsigned int orig_len;
-       int n = 1, nfrags;
+       int n = 2, nfrags, pad = 0;
+       u16 hdrlen;
 
        if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
                return false;
@@ -3222,9 +3213,6 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (skb->len + head->len > max_amsdu_len)
                goto out;
 
-       if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
-               goto out;
-
        nfrags = 1 + skb_shinfo(skb)->nr_frags;
        nfrags += 1 + skb_shinfo(head)->nr_frags;
        frag_tail = &skb_shinfo(head)->frag_list;
@@ -3240,10 +3228,24 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (max_frags && nfrags > max_frags)
                goto out;
 
-       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2,
-                                        &subframe_len))
+       if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
                goto out;
 
+       /*
+        * Pad out the previous subframe to a multiple of 4 by adding the
+        * padding to the next one, that's being added. Note that head->len
+        * is the length of the full A-MSDU, but that works since each time
+        * we add a new subframe we pad out the previous one to a multiple
+        * of 4 and thus it no longer matters in the next round.
+        */
+       hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
+       if ((head->len - hdrlen) & 3)
+               pad = 4 - ((head->len - hdrlen) & 3);
+
+       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
+                                                    2 + pad))
+               goto out_recalc;
+
        ret = true;
        data = skb_push(skb, ETH_ALEN + 2);
        memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);
@@ -3253,15 +3255,19 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        memcpy(data, &len, 2);
        memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
 
+       memset(skb_push(skb, pad), 0, pad);
+
        head->len += skb->len;
        head->data_len += skb->len;
        *frag_tail = skb;
 
-       flow->backlog += head->len - orig_len;
-       tin->backlog_bytes += head->len - orig_len;
-
-       fq_recalc_backlog(fq, tin, flow);
+out_recalc:
+       if (head->len != orig_len) {
+               flow->backlog += head->len - orig_len;
+               tin->backlog_bytes += head->len - orig_len;
 
+               fq_recalc_backlog(fq, tin, flow);
+       }
 out:
        spin_unlock_bh(&fq->lock);
 
index 88efda7c9f8a78737538a355b1b499104ab55aea..716cd6442d86c85b6507ba1aaa8e28e56f58e65b 100644 (file)
@@ -1135,7 +1135,7 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_chanctx_conf *chanctx_conf;
        const struct ieee80211_reg_rule *rrule;
-       struct ieee80211_wmm_ac *wmm_ac;
+       const struct ieee80211_wmm_ac *wmm_ac;
        u16 center_freq = 0;
 
        if (sdata->vif.type != NL80211_IFTYPE_AP &&
@@ -1154,20 +1154,19 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
 
        rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq));
 
-       if (IS_ERR_OR_NULL(rrule) || !rrule->wmm_rule) {
+       if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) {
                rcu_read_unlock();
                return;
        }
 
        if (sdata->vif.type == NL80211_IFTYPE_AP)
-               wmm_ac = &rrule->wmm_rule->ap[ac];
+               wmm_ac = &rrule->wmm_rule.ap[ac];
        else
-               wmm_ac = &rrule->wmm_rule->client[ac];
+               wmm_ac = &rrule->wmm_rule.client[ac];
        qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min);
        qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max);
        qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn);
-       qparam->txop = !qparam->txop ? wmm_ac->cot / 32 :
-               min_t(u16, qparam->txop, wmm_ac->cot / 32);
+       qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32);
        rcu_read_unlock();
 }
 
index 5610061e7f2e00b935ce44dd9cf82d10eb77a7bf..75c92a87e7b2481141161c8945f5e7eef8e0abf8 100644 (file)
@@ -4137,36 +4137,52 @@ static const struct vm_operations_struct packet_mmap_ops = {
        .close  =       packet_mm_close,
 };
 
-static void free_pg_vec(struct pgv *pg_vec, unsigned int len)
+static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
+                       unsigned int len)
 {
        int i;
 
        for (i = 0; i < len; i++) {
                if (likely(pg_vec[i].buffer)) {
-                       kvfree(pg_vec[i].buffer);
+                       if (is_vmalloc_addr(pg_vec[i].buffer))
+                               vfree(pg_vec[i].buffer);
+                       else
+                               free_pages((unsigned long)pg_vec[i].buffer,
+                                          order);
                        pg_vec[i].buffer = NULL;
                }
        }
        kfree(pg_vec);
 }
 
-static char *alloc_one_pg_vec_page(unsigned long size)
+static char *alloc_one_pg_vec_page(unsigned long order)
 {
        char *buffer;
+       gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
+                         __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
 
-       buffer = kvzalloc(size, GFP_KERNEL);
+       buffer = (char *) __get_free_pages(gfp_flags, order);
        if (buffer)
                return buffer;
 
-       buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+       /* __get_free_pages failed, fall back to vmalloc */
+       buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
+       if (buffer)
+               return buffer;
 
-       return buffer;
+       /* vmalloc failed, lets dig into swap here */
+       gfp_flags &= ~__GFP_NORETRY;
+       buffer = (char *) __get_free_pages(gfp_flags, order);
+       if (buffer)
+               return buffer;
+
+       /* complete and utter failure */
+       return NULL;
 }
 
-static struct pgv *alloc_pg_vec(struct tpacket_req *req)
+static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
 {
        unsigned int block_nr = req->tp_block_nr;
-       unsigned long size = req->tp_block_size;
        struct pgv *pg_vec;
        int i;
 
@@ -4175,7 +4191,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req)
                goto out;
 
        for (i = 0; i < block_nr; i++) {
-               pg_vec[i].buffer = alloc_one_pg_vec_page(size);
+               pg_vec[i].buffer = alloc_one_pg_vec_page(order);
                if (unlikely(!pg_vec[i].buffer))
                        goto out_free_pgvec;
        }
@@ -4184,7 +4200,7 @@ out:
        return pg_vec;
 
 out_free_pgvec:
-       free_pg_vec(pg_vec, block_nr);
+       free_pg_vec(pg_vec, order, block_nr);
        pg_vec = NULL;
        goto out;
 }
@@ -4194,9 +4210,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 {
        struct pgv *pg_vec = NULL;
        struct packet_sock *po = pkt_sk(sk);
+       int was_running, order = 0;
        struct packet_ring_buffer *rb;
        struct sk_buff_head *rb_queue;
-       int was_running;
        __be16 num;
        int err = -EINVAL;
        /* Added to avoid minimal code churn */
@@ -4258,7 +4274,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                        goto out;
 
                err = -ENOMEM;
-               pg_vec = alloc_pg_vec(req);
+               order = get_order(req->tp_block_size);
+               pg_vec = alloc_pg_vec(req, order);
                if (unlikely(!pg_vec))
                        goto out;
                switch (po->tp_version) {
@@ -4312,6 +4329,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                rb->frame_size = req->tp_frame_size;
                spin_unlock_bh(&rb_queue->lock);
 
+               swap(rb->pg_vec_order, order);
                swap(rb->pg_vec_len, req->tp_block_nr);
 
                rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
@@ -4337,7 +4355,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        }
 
        if (pg_vec)
-               free_pg_vec(pg_vec, req->tp_block_nr);
+               free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:
        return err;
 }
index 8f50036f62f05c76632dea82491d6a60dba39f0f..3bb7c5fb3bff2fd5d91c3d973d006d0cdde29a0b 100644 (file)
@@ -64,6 +64,7 @@ struct packet_ring_buffer {
        unsigned int            frame_size;
        unsigned int            frame_max;
 
+       unsigned int            pg_vec_order;
        unsigned int            pg_vec_pages;
        unsigned int            pg_vec_len;
 
index 01b3bd6a3708dece9e1e064c58e21c9d61ad8138..b9092111bc459d1b309c84a713005e7df66b02cf 100644 (file)
@@ -1,6 +1,6 @@
 
 config RDS
-       tristate "The RDS Protocol"
+       tristate "The Reliable Datagram Sockets Protocol"
        depends on INET
        ---help---
          The RDS (Reliable Datagram Sockets) protocol provides reliable,
index c1d97640c0be7bf6307806e2e6b112111756b7a1..eba75c1ba359446ca7e39e5aaa88287129531055 100644 (file)
@@ -341,15 +341,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
 
        if (rds_conn_state(conn) == RDS_CONN_UP) {
                struct rds_ib_device *rds_ibdev;
-               struct rdma_dev_addr *dev_addr;
 
                ic = conn->c_transport_data;
-               dev_addr = &ic->i_cm_id->route.addr.dev_addr;
-               rdma_addr_get_sgid(dev_addr,
-                                  (union ib_gid *)&iinfo6->src_gid);
-               rdma_addr_get_dgid(dev_addr,
-                                  (union ib_gid *)&iinfo6->dst_gid);
-
+               rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
+                              (union ib_gid *)&iinfo6->dst_gid);
                rds_ibdev = ic->rds_ibdev;
                iinfo6->max_send_wr = ic->i_send_ring.w_nr;
                iinfo6->max_recv_wr = ic->i_recv_ring.w_nr;
index 00192a996be0eab95be229f801ada687cca0f71a..0f846585225431c6ae903bd8308884729c99bb3c 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mod_devicetable.h>
 #include <linux/rfkill.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
index db83dac1e7f488cd2fd0d013bce374c5d4123f3c..e12f8ef7baa438997da79c86eedec3efde1babcd 100644 (file)
@@ -662,6 +662,13 @@ int tcf_action_destroy(struct tc_action *actions[], int bind)
        return ret;
 }
 
+static int tcf_action_destroy_1(struct tc_action *a, int bind)
+{
+       struct tc_action *actions[] = { a, NULL };
+
+       return tcf_action_destroy(actions, bind);
+}
+
 static int tcf_action_put(struct tc_action *p)
 {
        return __tcf_action_put(p, false);
@@ -881,17 +888,16 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
                err = tcf_action_goto_chain_init(a, tp);
                if (err) {
-                       struct tc_action *actions[] = { a, NULL };
-
-                       tcf_action_destroy(actions, bind);
+                       tcf_action_destroy_1(a, bind);
                        NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
                        return ERR_PTR(err);
                }
        }
 
        if (!tcf_action_valid(a->tcfa_action)) {
-               NL_SET_ERR_MSG(extack, "invalid action value, using TC_ACT_UNSPEC instead");
-               a->tcfa_action = TC_ACT_UNSPEC;
+               tcf_action_destroy_1(a, bind);
+               NL_SET_ERR_MSG(extack, "Invalid control action value");
+               return ERR_PTR(-EINVAL);
        }
 
        return a;
@@ -1173,6 +1179,7 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[])
                struct tcf_idrinfo *idrinfo = a->idrinfo;
                u32 act_index = a->tcfa_index;
 
+               actions[i] = NULL;
                if (tcf_action_put(a)) {
                        /* last reference, action was deleted concurrently */
                        module_put(ops->owner);
@@ -1184,7 +1191,6 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[])
                        if (ret < 0)
                                return ret;
                }
-               actions[i] = NULL;
        }
        return 0;
 }
index 196430aefe87a355a9a237ef7184fa2a4830a3a3..06a3d48018782e5d35981fdcfc3208a5f11ad276 100644 (file)
@@ -326,6 +326,20 @@ static int __add_metainfo(const struct tcf_meta_ops *ops,
        return ret;
 }
 
+static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
+                                   struct tcf_ife_info *ife, u32 metaid,
+                                   bool exists)
+{
+       int ret;
+
+       if (!try_module_get(ops->owner))
+               return -ENOENT;
+       ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
+       if (ret)
+               module_put(ops->owner);
+       return ret;
+}
+
 static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
                        int len, bool exists)
 {
@@ -349,7 +363,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
 
        read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
-               rc = __add_metainfo(o, ife, o->metaid, NULL, 0, true, exists);
+               rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
                if (rc == 0)
                        installed += 1;
        }
@@ -400,7 +414,6 @@ static void _tcf_ife_cleanup(struct tc_action *a)
        struct tcf_meta_info *e, *n;
 
        list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
-               module_put(e->ops->owner);
                list_del(&e->metalist);
                if (e->metaval) {
                        if (e->ops->release)
@@ -408,6 +421,7 @@ static void _tcf_ife_cleanup(struct tc_action *a)
                        else
                                kfree(e->metaval);
                }
+               module_put(e->ops->owner);
                kfree(e);
        }
 }
index 10703407001972a8cccc297a8c0d85e72edfed1c..ad99a99f11f6de4ad452b501aa8b23a01f156790 100644 (file)
@@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
 {
        struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX);
 
+       if (!keys_start)
+               goto nla_failure;
        for (; n > 0; n--) {
                struct nlattr *key_start;
 
                key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX);
+               if (!key_start)
+                       goto nla_failure;
 
                if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) ||
-                   nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) {
-                       nlmsg_trim(skb, keys_start);
-                       return -EINVAL;
-               }
+                   nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd))
+                       goto nla_failure;
 
                nla_nest_end(skb, key_start);
 
@@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
        nla_nest_end(skb, keys_start);
 
        return 0;
+nla_failure:
+       nla_nest_cancel(skb, keys_start);
+       return -EINVAL;
 }
 
 static int tcf_pedit_init(struct net *net, struct nlattr *nla,
@@ -418,7 +423,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
        opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind;
 
        if (p->tcfp_keys_ex) {
-               tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys);
+               if (tcf_pedit_key_ex_dump(skb,
+                                         p->tcfp_keys_ex,
+                                         p->tcfp_nkeys))
+                       goto nla_put_failure;
 
                if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
                        goto nla_put_failure;
index 31bd1439cf6059fbcb3e99525e4a964154ef7907..1a67af8a6e8ce93bc9821804567d738b4d4547e5 100644 (file)
@@ -1252,7 +1252,7 @@ replay:
        }
        chain = tcf_chain_get(block, chain_index, true);
        if (!chain) {
-               NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
+               NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
                err = -ENOMEM;
                goto errout;
        }
@@ -1399,7 +1399,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
                        goto errout;
                }
                NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
-               err = -EINVAL;
+               err = -ENOENT;
                goto errout;
        }
 
index ef5c9a82d4e896b506271cb229b07ee570c917f3..a644292f9fafd6cff9219c7fe6b64bfcc7bc898b 100644 (file)
@@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = {
 struct sctp_ht_iter {
        struct seq_net_private p;
        struct rhashtable_iter hti;
-       int start_fail;
 };
 
 static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
@@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
 
        sctp_transport_walk_start(&iter->hti);
 
-       iter->start_fail = 0;
        return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
 }
 
@@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
 {
        struct sctp_ht_iter *iter = seq->private;
 
-       if (iter->start_fail)
-               return;
        sctp_transport_walk_stop(&iter->hti);
 }
 
@@ -264,8 +260,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
        }
 
        transport = (struct sctp_transport *)v;
-       if (!sctp_transport_hold(transport))
-               return 0;
        assoc = transport->asoc;
        epb = &assoc->base;
        sk = epb->sk;
@@ -322,8 +316,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
        }
 
        transport = (struct sctp_transport *)v;
-       if (!sctp_transport_hold(transport))
-               return 0;
        assoc = transport->asoc;
 
        list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
index e96b15a66abaa481b64db1ebd34f75d59d724c3d..f73e9d38d5ba734d7ee3347e4015fd30d355bbfa 100644 (file)
@@ -2658,20 +2658,23 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
        }
 
        if (params->spp_flags & SPP_IPV6_FLOWLABEL) {
-               if (trans && trans->ipaddr.sa.sa_family == AF_INET6) {
-                       trans->flowlabel = params->spp_ipv6_flowlabel &
-                                          SCTP_FLOWLABEL_VAL_MASK;
-                       trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
-               } else if (asoc) {
-                       list_for_each_entry(trans,
-                                           &asoc->peer.transport_addr_list,
-                                           transports) {
-                               if (trans->ipaddr.sa.sa_family != AF_INET6)
-                                       continue;
+               if (trans) {
+                       if (trans->ipaddr.sa.sa_family == AF_INET6) {
                                trans->flowlabel = params->spp_ipv6_flowlabel &
                                                   SCTP_FLOWLABEL_VAL_MASK;
                                trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
                        }
+               } else if (asoc) {
+                       struct sctp_transport *t;
+
+                       list_for_each_entry(t, &asoc->peer.transport_addr_list,
+                                           transports) {
+                               if (t->ipaddr.sa.sa_family != AF_INET6)
+                                       continue;
+                               t->flowlabel = params->spp_ipv6_flowlabel &
+                                              SCTP_FLOWLABEL_VAL_MASK;
+                               t->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+                       }
                        asoc->flowlabel = params->spp_ipv6_flowlabel &
                                          SCTP_FLOWLABEL_VAL_MASK;
                        asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
@@ -2687,12 +2690,13 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                        trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
                        trans->dscp |= SCTP_DSCP_SET_MASK;
                } else if (asoc) {
-                       list_for_each_entry(trans,
-                                           &asoc->peer.transport_addr_list,
+                       struct sctp_transport *t;
+
+                       list_for_each_entry(t, &asoc->peer.transport_addr_list,
                                            transports) {
-                               trans->dscp = params->spp_dscp &
-                                             SCTP_DSCP_VAL_MASK;
-                               trans->dscp |= SCTP_DSCP_SET_MASK;
+                               t->dscp = params->spp_dscp &
+                                         SCTP_DSCP_VAL_MASK;
+                               t->dscp |= SCTP_DSCP_SET_MASK;
                        }
                        asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
                        asoc->dscp |= SCTP_DSCP_SET_MASK;
@@ -5005,9 +5009,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
                        break;
                }
 
+               if (!sctp_transport_hold(t))
+                       continue;
+
                if (net_eq(sock_net(t->asoc->base.sk), net) &&
                    t->asoc->peer.primary_path == t)
                        break;
+
+               sctp_transport_put(t);
        }
 
        return t;
@@ -5017,13 +5026,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net,
                                              struct rhashtable_iter *iter,
                                              int pos)
 {
-       void *obj = SEQ_START_TOKEN;
+       struct sctp_transport *t;
 
-       while (pos && (obj = sctp_transport_get_next(net, iter)) &&
-              !IS_ERR(obj))
-               pos--;
+       if (!pos)
+               return SEQ_START_TOKEN;
 
-       return obj;
+       while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
+               if (!--pos)
+                       break;
+               sctp_transport_put(t);
+       }
+
+       return t;
 }
 
 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
@@ -5082,8 +5096,6 @@ again:
 
        tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
        for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
-               if (!sctp_transport_hold(tsp))
-                       continue;
                ret = cb(tsp, p);
                if (ret)
                        break;
index 9ee6cfea56dd015851302f1702f9e068803d01dd..d8026543bf4ce53634cb2d6fc114e57cdb74b21f 100644 (file)
@@ -51,12 +51,12 @@ const char tipc_bclink_name[] = "broadcast-link";
  * struct tipc_bc_base - base structure for keeping broadcast send state
  * @link: broadcast send link structure
  * @inputq: data input queue; will only carry SOCK_WAKEUP messages
- * @dest: array keeping number of reachable destinations per bearer
+ * @dests: array keeping number of reachable destinations per bearer
  * @primary_bearer: a bearer having links to all broadcast destinations, if any
  * @bcast_support: indicates if primary bearer, if any, supports broadcast
  * @rcast_support: indicates if all peer nodes support replicast
  * @rc_ratio: dest count as percentage of cluster size where send method changes
- * @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast
+ * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
  */
 struct tipc_bc_base {
        struct tipc_link *link;
index aaabb0b776dda6041defb902804e08388e39f9c5..73137f4aeb68f95677e34a3b2d5e823565024483 100644 (file)
@@ -84,7 +84,9 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
 
        if (h->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = tipc_dump_start,
                        .dump = tipc_diag_dump,
+                       .done = tipc_dump_done,
                };
                netlink_dump_start(net->diag_nlsk, skb, h, &c);
                return 0;
index 88f027b502f6f5ed955b03bd645321ea8e79eec3..66d5b2c5987ad6a2e05ca45f236ade58459ac0bf 100644 (file)
@@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
 struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
 {
-       u64 value = (u64)node << 32 | port;
        struct tipc_dest *dst;
 
        list_for_each_entry(dst, l, list) {
-               if (dst->value != value)
-                       continue;
-               return dst;
+               if (dst->node == node && dst->port == port)
+                       return dst;
        }
        return NULL;
 }
 
 bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
 {
-       u64 value = (u64)node << 32 | port;
        struct tipc_dest *dst;
 
        if (tipc_dest_find(l, node, port))
@@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
        dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
        if (unlikely(!dst))
                return false;
-       dst->value = value;
+       dst->node = node;
+       dst->port = port;
        list_add(&dst->list, l);
        return true;
 }
index 0febba41da86ed64df54f447e7eb9b63cbea30cf..892bd750b85fa48639b15af9bbeb6d437a7660a7 100644 (file)
@@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net);
 
 struct tipc_dest {
        struct list_head list;
-       union {
-               struct {
-                       u32 port;
-                       u32 node;
-               };
-               u64 value;
-       };
+       u32 port;
+       u32 node;
 };
 
 struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port);
index 6ff2254088f647d4f7410c3335ccdae2e68ec522..99ee419210baf4374d438b138ab922034950ba7e 100644 (file)
@@ -167,7 +167,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
        },
        {
                .cmd    = TIPC_NL_SOCK_GET,
+               .start = tipc_dump_start,
                .dumpit = tipc_nl_sk_dump,
+               .done   = tipc_dump_done,
                .policy = tipc_nl_policy,
        },
        {
index c1e93c9515bca5abaae647f18402a8bc25bfa2b4..ab7a2a7178f7167b2cac5c0f382a8bbda42bf4d5 100644 (file)
@@ -2672,6 +2672,8 @@ void tipc_sk_reinit(struct net *net)
 
                rhashtable_walk_stop(&iter);
        } while (tsk == ERR_PTR(-EAGAIN));
+
+       rhashtable_walk_exit(&iter);
 }
 
 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
@@ -3227,45 +3229,69 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
                                       struct netlink_callback *cb,
                                       struct tipc_sock *tsk))
 {
-       struct net *net = sock_net(skb->sk);
-       struct tipc_net *tn = tipc_net(net);
-       const struct bucket_table *tbl;
-       u32 prev_portid = cb->args[1];
-       u32 tbl_id = cb->args[0];
-       struct rhash_head *pos;
+       struct rhashtable_iter *iter = (void *)cb->args[0];
        struct tipc_sock *tsk;
        int err;
 
-       rcu_read_lock();
-       tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
-       for (; tbl_id < tbl->size; tbl_id++) {
-               rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
-                       spin_lock_bh(&tsk->sk.sk_lock.slock);
-                       if (prev_portid && prev_portid != tsk->portid) {
-                               spin_unlock_bh(&tsk->sk.sk_lock.slock);
+       rhashtable_walk_start(iter);
+       while ((tsk = rhashtable_walk_next(iter)) != NULL) {
+               if (IS_ERR(tsk)) {
+                       err = PTR_ERR(tsk);
+                       if (err == -EAGAIN) {
+                               err = 0;
                                continue;
                        }
+                       break;
+               }
 
-                       err = skb_handler(skb, cb, tsk);
-                       if (err) {
-                               prev_portid = tsk->portid;
-                               spin_unlock_bh(&tsk->sk.sk_lock.slock);
-                               goto out;
-                       }
-
-                       prev_portid = 0;
-                       spin_unlock_bh(&tsk->sk.sk_lock.slock);
+               sock_hold(&tsk->sk);
+               rhashtable_walk_stop(iter);
+               lock_sock(&tsk->sk);
+               err = skb_handler(skb, cb, tsk);
+               if (err) {
+                       release_sock(&tsk->sk);
+                       sock_put(&tsk->sk);
+                       goto out;
                }
+               release_sock(&tsk->sk);
+               rhashtable_walk_start(iter);
+               sock_put(&tsk->sk);
        }
+       rhashtable_walk_stop(iter);
 out:
-       rcu_read_unlock();
-       cb->args[0] = tbl_id;
-       cb->args[1] = prev_portid;
-
        return skb->len;
 }
 EXPORT_SYMBOL(tipc_nl_sk_walk);
 
+int tipc_dump_start(struct netlink_callback *cb)
+{
+       struct rhashtable_iter *iter = (void *)cb->args[0];
+       struct net *net = sock_net(cb->skb->sk);
+       struct tipc_net *tn = tipc_net(net);
+
+       if (!iter) {
+               iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+               if (!iter)
+                       return -ENOMEM;
+
+               cb->args[0] = (long)iter;
+       }
+
+       rhashtable_walk_enter(&tn->sk_rht, iter);
+       return 0;
+}
+EXPORT_SYMBOL(tipc_dump_start);
+
+int tipc_dump_done(struct netlink_callback *cb)
+{
+       struct rhashtable_iter *hti = (void *)cb->args[0];
+
+       rhashtable_walk_exit(hti);
+       kfree(hti);
+       return 0;
+}
+EXPORT_SYMBOL(tipc_dump_done);
+
 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
                           struct tipc_sock *tsk, u32 sk_filter_state,
                           u64 (*tipc_diag_gen_cookie)(struct sock *sk))
index aff9b2ae5a1f448d64d72927686d28146dfd5d7d..d43032e26532c406bea693b923fb25572a8ad567 100644 (file)
@@ -68,4 +68,6 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
                    int (*skb_handler)(struct sk_buff *skb,
                                       struct netlink_callback *cb,
                                       struct tipc_sock *tsk));
+int tipc_dump_start(struct netlink_callback *cb);
+int tipc_dump_done(struct netlink_callback *cb);
 #endif
index c8e34ef22c30223d57ea8254d239331402040742..2627b5d812e9040b11396f34f8bc31fb5da3956a 100644 (file)
@@ -313,8 +313,8 @@ static void tipc_conn_send_work(struct work_struct *work)
        conn_put(con);
 }
 
-/* tipc_conn_queue_evt() - interrupt level call from a subscription instance
- * The queued work is launched into tipc_send_work()->tipc_send_to_sock()
+/* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance
+ * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock()
  */
 void tipc_topsrv_queue_evt(struct net *net, int conid,
                           u32 event, struct tipc_event *evt)
index 5fb9b7dd98318b6e9d4842474361104855b42983..4b8ec659e797ff743267773e315c6220b90993d0 100644 (file)
@@ -669,13 +669,13 @@ static int nl80211_msg_put_wmm_rules(struct sk_buff *msg,
                        goto nla_put_failure;
 
                if (nla_put_u16(msg, NL80211_WMMR_CW_MIN,
-                               rule->wmm_rule->client[j].cw_min) ||
+                               rule->wmm_rule.client[j].cw_min) ||
                    nla_put_u16(msg, NL80211_WMMR_CW_MAX,
-                               rule->wmm_rule->client[j].cw_max) ||
+                               rule->wmm_rule.client[j].cw_max) ||
                    nla_put_u8(msg, NL80211_WMMR_AIFSN,
-                              rule->wmm_rule->client[j].aifsn) ||
-                   nla_put_u8(msg, NL80211_WMMR_TXOP,
-                              rule->wmm_rule->client[j].cot))
+                              rule->wmm_rule.client[j].aifsn) ||
+                   nla_put_u16(msg, NL80211_WMMR_TXOP,
+                               rule->wmm_rule.client[j].cot))
                        goto nla_put_failure;
 
                nla_nest_end(msg, nl_wmm_rule);
@@ -766,9 +766,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
 
        if (large) {
                const struct ieee80211_reg_rule *rule =
-                       freq_reg_info(wiphy, chan->center_freq);
+                       freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq));
 
-               if (!IS_ERR(rule) && rule->wmm_rule) {
+               if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) {
                        if (nl80211_msg_put_wmm_rules(msg, rule))
                                goto nla_put_failure;
                }
@@ -12205,6 +12205,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
                return -EOPNOTSUPP;
 
        if (!info->attrs[NL80211_ATTR_MDID] ||
+           !info->attrs[NL80211_ATTR_IE] ||
            !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
                return -EINVAL;
 
index 4fc66a117b7d74f86a1589a7a02b88f02f203b7d..2f702adf2912105947560d07d79ff86119333bbf 100644 (file)
@@ -425,36 +425,23 @@ static const struct ieee80211_regdomain *
 reg_copy_regd(const struct ieee80211_regdomain *src_regd)
 {
        struct ieee80211_regdomain *regd;
-       int size_of_regd, size_of_wmms;
+       int size_of_regd;
        unsigned int i;
-       struct ieee80211_wmm_rule *d_wmm, *s_wmm;
 
        size_of_regd =
                sizeof(struct ieee80211_regdomain) +
                src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule);
-       size_of_wmms = src_regd->n_wmm_rules *
-               sizeof(struct ieee80211_wmm_rule);
 
-       regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
+       regd = kzalloc(size_of_regd, GFP_KERNEL);
        if (!regd)
                return ERR_PTR(-ENOMEM);
 
        memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
 
-       d_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
-       s_wmm = (struct ieee80211_wmm_rule *)((u8 *)src_regd + size_of_regd);
-       memcpy(d_wmm, s_wmm, size_of_wmms);
-
-       for (i = 0; i < src_regd->n_reg_rules; i++) {
+       for (i = 0; i < src_regd->n_reg_rules; i++)
                memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
                       sizeof(struct ieee80211_reg_rule));
-               if (!src_regd->reg_rules[i].wmm_rule)
-                       continue;
 
-               regd->reg_rules[i].wmm_rule = d_wmm +
-                       (src_regd->reg_rules[i].wmm_rule - s_wmm) /
-                       sizeof(struct ieee80211_wmm_rule);
-       }
        return regd;
 }
 
@@ -860,9 +847,10 @@ static bool valid_regdb(const u8 *data, unsigned int size)
        return true;
 }
 
-static void set_wmm_rule(struct ieee80211_wmm_rule *rule,
+static void set_wmm_rule(struct ieee80211_reg_rule *rrule,
                         struct fwdb_wmm_rule *wmm)
 {
+       struct ieee80211_wmm_rule *rule = &rrule->wmm_rule;
        unsigned int i;
 
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
@@ -876,11 +864,13 @@ static void set_wmm_rule(struct ieee80211_wmm_rule *rule,
                rule->ap[i].aifsn = wmm->ap[i].aifsn;
                rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
        }
+
+       rrule->has_wmm = true;
 }
 
 static int __regdb_query_wmm(const struct fwdb_header *db,
                             const struct fwdb_country *country, int freq,
-                            u32 *dbptr, struct ieee80211_wmm_rule *rule)
+                            struct ieee80211_reg_rule *rule)
 {
        unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
        struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
@@ -901,8 +891,6 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
                        wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2;
                        wmm = (void *)((u8 *)db + wmm_ptr);
                        set_wmm_rule(rule, wmm);
-                       if (dbptr)
-                               *dbptr = wmm_ptr;
                        return 0;
                }
        }
@@ -910,8 +898,7 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
        return -ENODATA;
 }
 
-int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
-                       struct ieee80211_wmm_rule *rule)
+int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule)
 {
        const struct fwdb_header *hdr = regdb;
        const struct fwdb_country *country;
@@ -925,8 +912,7 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
        country = &hdr->country[0];
        while (country->coll_ptr) {
                if (alpha2_equal(alpha2, country->alpha2))
-                       return __regdb_query_wmm(regdb, country, freq, dbptr,
-                                                rule);
+                       return __regdb_query_wmm(regdb, country, freq, rule);
 
                country++;
        }
@@ -935,32 +921,13 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
 }
 EXPORT_SYMBOL(reg_query_regdb_wmm);
 
-struct wmm_ptrs {
-       struct ieee80211_wmm_rule *rule;
-       u32 ptr;
-};
-
-static struct ieee80211_wmm_rule *find_wmm_ptr(struct wmm_ptrs *wmm_ptrs,
-                                              u32 wmm_ptr, int n_wmms)
-{
-       int i;
-
-       for (i = 0; i < n_wmms; i++) {
-               if (wmm_ptrs[i].ptr == wmm_ptr)
-                       return wmm_ptrs[i].rule;
-       }
-       return NULL;
-}
-
 static int regdb_query_country(const struct fwdb_header *db,
                               const struct fwdb_country *country)
 {
        unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
        struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
        struct ieee80211_regdomain *regdom;
-       struct ieee80211_regdomain *tmp_rd;
-       unsigned int size_of_regd, i, n_wmms = 0;
-       struct wmm_ptrs *wmm_ptrs;
+       unsigned int size_of_regd, i;
 
        size_of_regd = sizeof(struct ieee80211_regdomain) +
                coll->n_rules * sizeof(struct ieee80211_reg_rule);
@@ -969,12 +936,6 @@ static int regdb_query_country(const struct fwdb_header *db,
        if (!regdom)
                return -ENOMEM;
 
-       wmm_ptrs = kcalloc(coll->n_rules, sizeof(*wmm_ptrs), GFP_KERNEL);
-       if (!wmm_ptrs) {
-               kfree(regdom);
-               return -ENOMEM;
-       }
-
        regdom->n_reg_rules = coll->n_rules;
        regdom->alpha2[0] = country->alpha2[0];
        regdom->alpha2[1] = country->alpha2[1];
@@ -1013,37 +974,11 @@ static int regdb_query_country(const struct fwdb_header *db,
                                1000 * be16_to_cpu(rule->cac_timeout);
                if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) {
                        u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
-                       struct ieee80211_wmm_rule *wmm_pos =
-                               find_wmm_ptr(wmm_ptrs, wmm_ptr, n_wmms);
-                       struct fwdb_wmm_rule *wmm;
-                       struct ieee80211_wmm_rule *wmm_rule;
-
-                       if (wmm_pos) {
-                               rrule->wmm_rule = wmm_pos;
-                               continue;
-                       }
-                       wmm = (void *)((u8 *)db + wmm_ptr);
-                       tmp_rd = krealloc(regdom, size_of_regd + (n_wmms + 1) *
-                                         sizeof(struct ieee80211_wmm_rule),
-                                         GFP_KERNEL);
-
-                       if (!tmp_rd) {
-                               kfree(regdom);
-                               kfree(wmm_ptrs);
-                               return -ENOMEM;
-                       }
-                       regdom = tmp_rd;
-
-                       wmm_rule = (struct ieee80211_wmm_rule *)
-                               ((u8 *)regdom + size_of_regd + n_wmms *
-                               sizeof(struct ieee80211_wmm_rule));
+                       struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr);
 
-                       set_wmm_rule(wmm_rule, wmm);
-                       wmm_ptrs[n_wmms].ptr = wmm_ptr;
-                       wmm_ptrs[n_wmms++].rule = wmm_rule;
+                       set_wmm_rule(rrule, wmm);
                }
        }
-       kfree(wmm_ptrs);
 
        return reg_schedule_apply(regdom);
 }
index e0825a019e9fb255adc2f4f749b08e241b2c2dde..959ed3acd2407c8c4beb4ce91b625315bf926109 100644 (file)
@@ -1456,7 +1456,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
                                          u8 *op_class)
 {
        u8 vht_opclass;
-       u16 freq = chandef->center_freq1;
+       u32 freq = chandef->center_freq1;
 
        if (freq >= 2412 && freq <= 2472) {
                if (chandef->width > NL80211_CHAN_WIDTH_40)
index 5219280bf7ff351a742db19e6a35748f77efbeb8..161b0224d6ae9ca927269664eaded3bcce41e91f 100755 (executable)
@@ -380,6 +380,7 @@ our $Attribute      = qr{
                        __noclone|
                        __deprecated|
                        __read_mostly|
+                       __ro_after_init|
                        __kprobes|
                        $InitAttribute|
                        ____cacheline_aligned|
@@ -3311,7 +3312,7 @@ sub process {
                        # known declaration macros
                      $sline =~ /^\+\s+$declaration_macros/ ||
                        # start of struct or union or enum
-                     $sline =~ /^\+\s+(?:union|struct|enum|typedef)\b/ ||
+                     $sline =~ /^\+\s+(?:static\s+)?(?:const\s+)?(?:union|struct|enum|typedef)\b/ ||
                        # start or end of block or continuation of declaration
                      $sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ ||
                        # bitfield continuation
index 999d585eaa7358dd813b8b0d5a204fc1ea48ca3c..e083bcae343f3e71290e433b7fd50861fd819ca4 100755 (executable)
@@ -11,13 +11,14 @@ DEPMOD=$1
 KERNELRELEASE=$2
 
 if ! test -r System.map ; then
+       echo "Warning: modules_install: missing 'System.map' file. Skipping depmod." >&2
        exit 0
 fi
 
 if [ -z $(command -v $DEPMOD) ]; then
-       echo "'make modules_install' requires $DEPMOD. Please install it." >&2
+       echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
        echo "This is probably in the kmod package." >&2
-       exit 1
+       exit 0
 fi
 
 # older versions of depmod require the version string to start with three
index 4a7bd2192073b20ca6497fa6becb18e0c15480da..67ed9f6ccdf8f09d8cb193d864950ed4e14ed9f7 100644 (file)
@@ -221,7 +221,6 @@ $(obj)/zconf.tab.o: $(obj)/zconf.lex.c
 
 # check if necessary packages are available, and configure build flags
 define filechk_conf_cfg
-       $(CONFIG_SHELL) $(srctree)/scripts/kconfig/check-pkgconfig.sh; \
        $(CONFIG_SHELL) $<
 endef
 
diff --git a/scripts/kconfig/check-pkgconfig.sh b/scripts/kconfig/check-pkgconfig.sh
deleted file mode 100644 (file)
index 7a1c40b..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Check for pkg-config presence
-
-if [ -z $(command -v pkg-config) ]; then
-       echo "'make *config' requires 'pkg-config'. Please install it." 1>&2
-       exit 1
-fi
index 533b3d8f8f08dbab20fda2b3488d342f47ede5a2..480ecd8b9f415693c0747478b2021b5e972a42bb 100755 (executable)
@@ -3,6 +3,13 @@
 
 PKG="gtk+-2.0 gmodule-2.0 libglade-2.0"
 
+if [ -z "$(command -v pkg-config)" ]; then
+       echo >&2 "*"
+       echo >&2 "* 'make gconfig' requires 'pkg-config'. Please install it."
+       echo >&2 "*"
+       exit 1
+fi
+
 if ! pkg-config --exists $PKG; then
        echo >&2 "*"
        echo >&2 "* Unable to find the GTK+ installation. Please make sure that"
index e6f9facd00772f3779c6072d88f174c7302aa6dc..c812872d7f9d78697d40debbe636674419e646a4 100755 (executable)
@@ -4,20 +4,23 @@
 PKG="ncursesw"
 PKG2="ncurses"
 
-if pkg-config --exists $PKG; then
-       echo cflags=\"$(pkg-config --cflags $PKG)\"
-       echo libs=\"$(pkg-config --libs $PKG)\"
-       exit 0
-fi
+if [ -n "$(command -v pkg-config)" ]; then
+       if pkg-config --exists $PKG; then
+               echo cflags=\"$(pkg-config --cflags $PKG)\"
+               echo libs=\"$(pkg-config --libs $PKG)\"
+               exit 0
+       fi
 
-if pkg-config --exists $PKG2; then
-       echo cflags=\"$(pkg-config --cflags $PKG2)\"
-       echo libs=\"$(pkg-config --libs $PKG2)\"
-       exit 0
+       if pkg-config --exists $PKG2; then
+               echo cflags=\"$(pkg-config --cflags $PKG2)\"
+               echo libs=\"$(pkg-config --libs $PKG2)\"
+               exit 0
+       fi
 fi
 
-# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses
-# by pkg-config.
+# Check the default paths in case pkg-config is not installed.
+# (Even if it is installed, some distributions such as openSUSE cannot
+# find ncurses by pkg-config.)
 if [ -f /usr/include/ncursesw/ncurses.h ]; then
        echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\"
        echo libs=\"-lncursesw\"
index 83b5836615fb04a9cf389a2c978b75d4f5aa4f50..143c05fec1614bafe2f5f70dec85da3849e8225e 100644 (file)
@@ -490,7 +490,6 @@ static void build_conf(struct menu *menu)
                        switch (prop->type) {
                        case P_MENU:
                                child_count++;
-                               prompt = prompt;
                                if (single_menu_mode) {
                                        item_make("%s%*c%s",
                                                  menu->data ? "-->" : "++>",
index 42f5ac73548e4ba002506f4d38db920f8368e3c7..001559ef0a60b539c8615c23d4bcc94be5eaf59f 100644 (file)
@@ -4,20 +4,23 @@
 PKG="ncursesw menuw panelw"
 PKG2="ncurses menu panel"
 
-if pkg-config --exists $PKG; then
-       echo cflags=\"$(pkg-config --cflags $PKG)\"
-       echo libs=\"$(pkg-config --libs $PKG)\"
-       exit 0
-fi
+if [ -n "$(command -v pkg-config)" ]; then
+       if pkg-config --exists $PKG; then
+               echo cflags=\"$(pkg-config --cflags $PKG)\"
+               echo libs=\"$(pkg-config --libs $PKG)\"
+               exit 0
+       fi
 
-if pkg-config --exists $PKG2; then
-       echo cflags=\"$(pkg-config --cflags $PKG2)\"
-       echo libs=\"$(pkg-config --libs $PKG2)\"
-       exit 0
+       if pkg-config --exists $PKG2; then
+               echo cflags=\"$(pkg-config --cflags $PKG2)\"
+               echo libs=\"$(pkg-config --libs $PKG2)\"
+               exit 0
+       fi
 fi
 
-# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses
-# by pkg-config.
+# Check the default paths in case pkg-config is not installed.
+# (Even if it is installed, some distributions such as openSUSE cannot
+# find ncurses by pkg-config.)
 if [ -f /usr/include/ncursesw/ncurses.h ]; then
        echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\"
        echo libs=\"-lncursesw -lmenuw -lpanelw\"
index 0862e15625366058f421d8b4346eb7e795d5a1e5..02ccc0ae103187a4a1f9c56bac3eca08823087b8 100755 (executable)
@@ -4,6 +4,13 @@
 PKG="Qt5Core Qt5Gui Qt5Widgets"
 PKG2="QtCore QtGui"
 
+if [ -z "$(command -v pkg-config)" ]; then
+       echo >&2 "*"
+       echo >&2 "* 'make xconfig' requires 'pkg-config'. Please install it."
+       echo >&2 "*"
+       exit 1
+fi
+
 if pkg-config --exists $PKG; then
        echo cflags=\"-std=c++11 -fPIC $(pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)\"
        echo libs=\"$(pkg-config --libs $PKG)\"
index fe06e77c15eb7072d1cb9c09d9481048984e03d7..f599031260d5178d10dd83554ac20d957c8b82af 100755 (executable)
@@ -389,6 +389,9 @@ if ($arch eq "x86_64") {
     $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$";
     $type = ".quad";
     $alignment = 2;
+} elsif ($arch eq "nds32") {
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_NDS32_HI20_RELA\\s+_mcount\$";
+    $alignment = 2;
 } else {
     die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
 }
index 71f39410691b6be14774102000507c834c8ad493..79f7dd57d571e749dc3e36ed95a473cd599e0056 100755 (executable)
@@ -74,7 +74,7 @@ scm_version()
                fi
 
                # Check for uncommitted changes
-               if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
+               if git status -uno --porcelain | grep -qv '^.. scripts/package'; then
                        printf '%s' -dirty
                fi
 
index f2f22d00db18893a917006794c5a09e341aae2b0..4ccec1bcf6f54f261542a546458cab77c6af9e52 100644 (file)
@@ -79,7 +79,6 @@ int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
        struct aa_label *label = aa_secid_to_label(secid);
        int len;
 
-       AA_BUG(!secdata);
        AA_BUG(!seclen);
 
        if (!label)
index 711e89d8c4153b6123678772e83efe1092f4ae1f..3b602a1e27fa224e3a7628436ac6f93309a54c4a 100644 (file)
@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
        }
        dh_inputs.g_size = dlen;
 
-       dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
+       dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key);
        if (dlen < 0) {
                ret = dlen;
                goto out2;
index 69517e18ef07fc241d18335e7f209a2b1ded96d1..08d5662039e38123e349bc5352c2b674280048f3 100644 (file)
@@ -129,7 +129,7 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
                runtime->avail = 0;
        else
                runtime->avail = runtime->buffer_size;
-       runtime->buffer = kvmalloc(runtime->buffer_size, GFP_KERNEL);
+       runtime->buffer = kvzalloc(runtime->buffer_size, GFP_KERNEL);
        if (!runtime->buffer) {
                kfree(runtime);
                return -ENOMEM;
@@ -655,7 +655,7 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
        if (params->avail_min < 1 || params->avail_min > params->buffer_size)
                return -EINVAL;
        if (params->buffer_size != runtime->buffer_size) {
-               newbuf = kvmalloc(params->buffer_size, GFP_KERNEL);
+               newbuf = kvzalloc(params->buffer_size, GFP_KERNEL);
                if (!newbuf)
                        return -ENOMEM;
                spin_lock_irq(&runtime->lock);
index 1bd27576db98d50cc12382c209c3938fe9fe5e14..a835558ddbc9b560a5e456a1594693350263c3d9 100644 (file)
@@ -146,7 +146,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_decouple);
  */
 void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *stream)
 {
-       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_RUN);
+       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL,
+                        AZX_PPLCCTL_RUN, AZX_PPLCCTL_RUN);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_link_stream_start);
 
@@ -171,7 +172,8 @@ void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *stream)
 
        snd_hdac_ext_link_stream_clear(stream);
 
-       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_STRST);
+       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL,
+                        AZX_PPLCCTL_STRST, AZX_PPLCCTL_STRST);
        udelay(3);
        timeout = 50;
        do {
@@ -242,7 +244,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_link_set_stream_id);
 void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
                                 int stream)
 {
-       snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, 0, (1 << stream));
+       snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, (1 << stream), 0);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_link_clear_stream_id);
 
@@ -415,7 +417,6 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus,
                                 bool enable, int index)
 {
        u32 mask = 0;
-       u32 register_mask = 0;
 
        if (!bus->spbcap) {
                dev_err(bus->dev, "Address of SPB capability is NULL\n");
@@ -424,12 +425,8 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus,
 
        mask |= (1 << index);
 
-       register_mask = readl(bus->spbcap + AZX_REG_SPB_SPBFCCTL);
-
-       mask |= register_mask;
-
        if (enable)
-               snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, 0, mask);
+               snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, mask);
        else
                snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, 0);
 }
@@ -503,7 +500,6 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
                                bool enable, int index)
 {
        u32 mask = 0;
-       u32 register_mask = 0;
 
        if (!bus->drsmcap) {
                dev_err(bus->dev, "Address of DRSM capability is NULL\n");
@@ -512,12 +508,8 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
 
        mask |= (1 << index);
 
-       register_mask = readl(bus->drsmcap + AZX_REG_SPB_SPBFCCTL);
-
-       mask |= register_mask;
-
        if (enable)
-               snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, 0, mask);
+               snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, mask);
        else
                snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, 0);
 }
index 0a50855370348e37a52a2788463bc0a5ead4a5d1..26d348b47867d6d595aa8701a3cd779bf01d6204 100644 (file)
@@ -3935,7 +3935,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)
 
        list_for_each_codec(codec, bus) {
                /* FIXME: maybe a better way needed for forced reset */
-               cancel_delayed_work_sync(&codec->jackpoll_work);
+               if (current_work() != &codec->jackpoll_work.work)
+                       cancel_delayed_work_sync(&codec->jackpoll_work);
 #ifdef CONFIG_PM
                if (hda_codec_is_power_on(codec)) {
                        hda_call_codec_suspend(codec);
index b2ec20e562bd5a3b0adf77b5f40be365bf255efa..b455930a3eaf7fd9095e116cfd9536e621a5cd1f 100644 (file)
@@ -68,6 +68,7 @@ static const char * const map_type_name[] = {
        [BPF_MAP_TYPE_DEVMAP]           = "devmap",
        [BPF_MAP_TYPE_SOCKMAP]          = "sockmap",
        [BPF_MAP_TYPE_CPUMAP]           = "cpumap",
+       [BPF_MAP_TYPE_XSKMAP]           = "xskmap",
        [BPF_MAP_TYPE_SOCKHASH]         = "sockhash",
        [BPF_MAP_TYPE_CGROUP_STORAGE]   = "cgroup_storage",
 };
index 56c4b3f8a01beaa414e2526020dcfcfbfc142b9b..439b8a27488d371fe323f879ba225650df23a359 100755 (executable)
@@ -759,12 +759,18 @@ class DebugfsProvider(Provider):
             if len(vms) == 0:
                 self.do_read = False
 
-            self.paths = filter(lambda x: "{}-".format(pid) in x, vms)
+            self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms))
 
         else:
             self.paths = []
             self.do_read = True
-        self.reset()
+
+    def _verify_paths(self):
+        """Remove invalid paths"""
+        for path in self.paths:
+            if not os.path.exists(os.path.join(PATH_DEBUGFS_KVM, path)):
+                self.paths.remove(path)
+                continue
 
     def read(self, reset=0, by_guest=0):
         """Returns a dict with format:'file name / field -> current value'.
@@ -780,6 +786,7 @@ class DebugfsProvider(Provider):
         # If no debugfs filtering support is available, then don't read.
         if not self.do_read:
             return results
+        self._verify_paths()
 
         paths = self.paths
         if self._pid == 0:
@@ -1096,15 +1103,16 @@ class Tui(object):
             pid = self.stats.pid_filter
         self.screen.erase()
         gname = self.get_gname_from_pid(pid)
+        self._gname = gname
         if gname:
             gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...'
                                    if len(gname) > MAX_GUEST_NAME_LEN
                                    else gname))
         if pid > 0:
-            self.screen.addstr(0, 0, 'kvm statistics - pid {0} {1}'
-                               .format(pid, gname), curses.A_BOLD)
+            self._headline = 'kvm statistics - pid {0} {1}'.format(pid, gname)
         else:
-            self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD)
+            self._headline = 'kvm statistics - summary'
+        self.screen.addstr(0, 0, self._headline, curses.A_BOLD)
         if self.stats.fields_filter:
             regex = self.stats.fields_filter
             if len(regex) > MAX_REGEX_LEN:
@@ -1162,6 +1170,19 @@ class Tui(object):
 
             return sorted_items
 
+        if not self._is_running_guest(self.stats.pid_filter):
+            if self._gname:
+                try: # ...to identify the guest by name in case it's back
+                    pids = self.get_pid_from_gname(self._gname)
+                    if len(pids) == 1:
+                        self._refresh_header(pids[0])
+                        self._update_pid(pids[0])
+                        return
+                except:
+                    pass
+            self._display_guest_dead()
+            # leave final data on screen
+            return
         row = 3
         self.screen.move(row, 0)
         self.screen.clrtobot()
@@ -1184,6 +1205,7 @@ class Tui(object):
         # print events
         tavg = 0
         tcur = 0
+        guest_removed = False
         for key, values in get_sorted_events(self, stats):
             if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0):
                 break
@@ -1191,7 +1213,10 @@ class Tui(object):
                 key = self.get_gname_from_pid(key)
                 if not key:
                     continue
-            cur = int(round(values.delta / sleeptime)) if values.delta else ''
+            cur = int(round(values.delta / sleeptime)) if values.delta else 0
+            if cur < 0:
+                guest_removed = True
+                continue
             if key[0] != ' ':
                 if values.delta:
                     tcur += values.delta
@@ -1204,13 +1229,21 @@ class Tui(object):
                                values.value * 100 / float(ltotal), cur))
             row += 1
         if row == 3:
-            self.screen.addstr(4, 1, 'No matching events reported yet')
+            if guest_removed:
+                self.screen.addstr(4, 1, 'Guest removed, updating...')
+            else:
+                self.screen.addstr(4, 1, 'No matching events reported yet')
         if row > 4:
             tavg = int(round(tcur / sleeptime)) if tcur > 0 else ''
             self.screen.addstr(row, 1, '%-40s %10d        %8s' %
                                ('Total', total, tavg), curses.A_BOLD)
         self.screen.refresh()
 
+    def _display_guest_dead(self):
+        marker = '   Guest is DEAD   '
+        y = min(len(self._headline), 80 - len(marker))
+        self.screen.addstr(0, y, marker, curses.A_BLINK | curses.A_STANDOUT)
+
     def _show_msg(self, text):
         """Display message centered text and exit on key press"""
         hint = 'Press any key to continue'
@@ -1219,10 +1252,10 @@ class Tui(object):
         (x, term_width) = self.screen.getmaxyx()
         row = 2
         for line in text:
-            start = (term_width - len(line)) / 2
+            start = (term_width - len(line)) // 2
             self.screen.addstr(row, start, line)
             row += 1
-        self.screen.addstr(row + 1, (term_width - len(hint)) / 2, hint,
+        self.screen.addstr(row + 1, (term_width - len(hint)) // 2, hint,
                            curses.A_STANDOUT)
         self.screen.getkey()
 
@@ -1319,6 +1352,12 @@ class Tui(object):
                 msg = '"' + str(val) + '": Invalid value'
         self._refresh_header()
 
+    def _is_running_guest(self, pid):
+        """Check if pid is still a running process."""
+        if not pid:
+            return True
+        return os.path.isdir(os.path.join('/proc/', str(pid)))
+
     def _show_vm_selection_by_guest(self):
         """Draws guest selection mask.
 
@@ -1346,7 +1385,7 @@ class Tui(object):
             if not guest or guest == '0':
                 break
             if guest.isdigit():
-                if not os.path.isdir(os.path.join('/proc/', guest)):
+                if not self._is_running_guest(guest):
                     msg = '"' + guest + '": Not a running process'
                     continue
                 pid = int(guest)
index f8cc38afffa2e6a35dd316a40e2493356c16063c..32a194e3e07a5a60d4c00dfdcfd25ad31ef38b6e 100755 (executable)
@@ -46,6 +46,9 @@
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 
+# Some systems don't have a ping6 binary anymore
+which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
+
 tests="
        pmtu_vti6_exception             vti6: PMTU exceptions
        pmtu_vti4_exception             vti4: PMTU exceptions
@@ -274,7 +277,7 @@ test_pmtu_vti6_exception() {
        mtu "${ns_b}" veth_b 4000
        mtu "${ns_a}" vti6_a 5000
        mtu "${ns_b}" vti6_b 5000
-       ${ns_a} ping6 -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
+       ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
 
        # Check that exception was created
        if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then
@@ -334,7 +337,7 @@ test_pmtu_vti4_link_add_mtu() {
        fail=0
 
        min=68
-       max=$((65528 - 20))
+       max=$((65535 - 20))
        # Check invalid values first
        for v in $((min - 1)) $((max + 1)); do
                ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null
index f03763d816172e9a825ca896f0f3733e2109d503..30f9b54bd66689094d2556c354373cfc186a1932 100644 (file)
             "$TC actions flush action police"
         ]
     },
+    {
+        "id": "6aaf",
+        "name": "Add police actions with conform-exceed control pass/pipe [with numeric values]",
+        "category": [
+            "actions",
+            "police"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action police",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 0/3 index 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action police index 1",
+        "matchPattern": "action order [0-9]*:  police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action pass/pipe",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action police"
+        ]
+    },
+    {
+        "id": "29b1",
+        "name": "Add police actions with conform-exceed control <invalid>/drop",
+        "category": [
+            "actions",
+            "police"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action police",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 10/drop index 1",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions ls action police",
+        "matchPattern": "action order [0-9]*:  police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action ",
+        "matchCount": "0",
+        "teardown": [
+            "$TC actions flush action police"
+        ]
+    },
     {
         "id": "c26f",
         "name": "Add police action with invalid peakrate value",
index 30cb0a0713fff9c63a158129cc5e59c115cb4e88..37908a83ddc27eac33d40f76ea869edf8bdd957d 100644 (file)
@@ -159,12 +159,6 @@ static const char * const page_flag_names[] = {
 };
 
 
-static const char * const debugfs_known_mountpoints[] = {
-       "/sys/kernel/debug",
-       "/debug",
-       0,
-};
-
 /*
  * data structures
  */
index f82c2eaa859d11fa755e8809dd9fce52af95fdec..334b16db0ebbe99b82855c80e641986c4f93edc8 100644 (file)
@@ -30,8 +30,8 @@ struct slabinfo {
        int alias;
        int refs;
        int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
-       int hwcache_align, object_size, objs_per_slab;
-       int sanity_checks, slab_size, store_user, trace;
+       unsigned int hwcache_align, object_size, objs_per_slab;
+       unsigned int sanity_checks, slab_size, store_user, trace;
        int order, poison, reclaim_account, red_zone;
        unsigned long partial, objects, slabs, objects_partial, objects_total;
        unsigned long alloc_fastpath, alloc_slowpath;
index 91aaf73b00df8a385a28c2a221505b6ba82062b1..ed162a6c57c597d89a7f08600b84a4425feb0b13 100644 (file)
@@ -1817,18 +1817,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat
        return 0;
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-       unsigned long end = hva + PAGE_SIZE;
-
-       if (!kvm->arch.pgd)
-               return 0;
-
-       trace_kvm_unmap_hva(hva);
-       handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
-       return 0;
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end)
 {
@@ -1860,13 +1848,20 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
        unsigned long end = hva + PAGE_SIZE;
+       kvm_pfn_t pfn = pte_pfn(pte);
        pte_t stage2_pte;
 
        if (!kvm->arch.pgd)
                return;
 
        trace_kvm_set_spte_hva(hva);
-       stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
+
+       /*
+        * We've moved a page around, probably through CoW, so let's treat it
+        * just like a translation fault and clean the cache to the PoC.
+        */
+       clean_dcache_guest_page(pfn, PAGE_SIZE);
+       stage2_pte = pfn_pte(pfn, PAGE_S2);
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
 }
 
index e53b596f483b99f0a7f9d897938b30f47a079316..57b3edebbb4043e7cbeef0f834ccc2a1848caf78 100644 (file)
@@ -134,21 +134,6 @@ TRACE_EVENT(kvm_mmio_emulate,
                  __entry->vcpu_pc, __entry->instr, __entry->cpsr)
 );
 
-TRACE_EVENT(kvm_unmap_hva,
-       TP_PROTO(unsigned long hva),
-       TP_ARGS(hva),
-
-       TP_STRUCT__entry(
-               __field(        unsigned long,  hva             )
-       ),
-
-       TP_fast_assign(
-               __entry->hva            = hva;
-       ),
-
-       TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
-);
-
 TRACE_EVENT(kvm_unmap_hva_range,
        TP_PROTO(unsigned long start, unsigned long end),
        TP_ARGS(start, end),