Merge tag 'mmc-v4.14-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 20 Oct 2017 10:52:05 +0000 (06:52 -0400)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 20 Oct 2017 10:52:05 +0000 (06:52 -0400)
Pull MMC fix from Ulf Hansson:
 "sdhci-pci: Fix default d3_retune for Intel host controllers"

* tag 'mmc-v4.14-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc:
  mmc: sdhci-pci: Fix default d3_retune for Intel host controllers

335 files changed:
Documentation/ABI/testing/sysfs-kernel-mm-swap
Documentation/core-api/kernel-api.rst
Documentation/networking/bonding.txt
Documentation/process/index.rst
Documentation/process/kernel-enforcement-statement.rst [new file with mode: 0644]
MAINTAINERS
Makefile
arch/Kconfig
arch/mips/include/asm/cmpxchg.h
arch/mips/loongson32/common/platform.c
arch/mips/math-emu/cp1emu.c
arch/mips/net/ebpf_jit.c
arch/mips/tools/generic-board-config.sh
arch/parisc/kernel/parisc_ksyms.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/time.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/trace/ftrace_64_mprofile.S
arch/powerpc/lib/sstep.c
arch/powerpc/mm/numa.c
arch/powerpc/perf/imc-pmu.c
arch/s390/configs/zfcpdump_defconfig
arch/s390/kernel/smp.c
arch/sparc/Kconfig
arch/x86/entry/entry_32.S
arch/x86/events/intel/uncore.c
arch/x86/hyperv/hv_init.c
arch/x86/hyperv/mmu.c
arch/x86/include/asm/alternative-asm.h
arch/x86/include/asm/alternative.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/tlbflush.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/mcheck/mce-internal.h
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/kprobes/common.h
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/reboot.c
arch/x86/kernel/unwind_frame.c
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/vmx.c
arch/x86/mm/Makefile
arch/x86/mm/tlb.c
arch/x86/xen/enlighten.c
block/bio.c
crypto/asymmetric_keys/asymmetric_type.c
crypto/asymmetric_keys/pkcs7_parser.c
crypto/shash.c
crypto/skcipher.c
crypto/xts.c
drivers/acpi/property.c
drivers/android/binder.c
drivers/base/node.c
drivers/base/property.c
drivers/block/nbd.c
drivers/block/skd_main.c
drivers/crypto/axis/artpec6_crypto.c
drivers/crypto/stm32/stm32-hash.c
drivers/dma-buf/sync_file.c
drivers/dma/altera-msgdma.c
drivers/dma/edma.c
drivers/dma/ti-dma-crossbar.c
drivers/gpio/Kconfig
drivers/gpio/gpio-omap.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/i915/gvt/sched_policy.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_rd.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-pre.c
drivers/gpu/ipu-v3/ipu-prg.c
drivers/hid/Kconfig
drivers/hid/hid-core.c
drivers/hid/hid-elecom.c
drivers/hid/hid-ids.h
drivers/hid/usbhid/hid-core.c
drivers/hv/channel.c
drivers/hv/channel_mgmt.c
drivers/hv/vmbus_drv.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-piix4.c
drivers/iommu/amd_iommu.c
drivers/iommu/exynos-iommu.c
drivers/media/cec/cec-adap.c
drivers/media/dvb-core/dvb_frontend.c
drivers/media/dvb-frontends/dib3000mc.c
drivers/media/dvb-frontends/dvb-pll.c
drivers/media/platform/Kconfig
drivers/media/platform/qcom/camss-8x16/camss-vfe.c
drivers/media/platform/qcom/venus/helpers.c
drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
drivers/media/platform/s5p-cec/s5p_cec.c
drivers/media/platform/s5p-cec/s5p_cec.h
drivers/media/tuners/mt2060.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/pci-me.c
drivers/misc/mei/pci-txe.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ppp/ppp_generic.c
drivers/net/usb/cdc_ether.c
drivers/of/base.c
drivers/of/of_reserved_mem.c
drivers/of/property.c
drivers/pci/host/pci-aardvark.c
drivers/pci/host/pci-tegra.c
drivers/pinctrl/Kconfig
drivers/pinctrl/bcm/pinctrl-bcm2835.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/ras/cec.c
drivers/remoteproc/Kconfig
drivers/remoteproc/imx_rproc.c
drivers/rpmsg/qcom_glink_native.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/libiscsi.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_fc.c
drivers/staging/media/imx/imx-media-dev.c
drivers/tty/tty_ldisc.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/configfs.h
drivers/usb/gadget/function/f_rndis.c
drivers/usb/gadget/function/u_rndis.h
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/misc/usbtest.c
drivers/usb/phy/phy-tegra-usb.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/serial/console.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
fs/9p/vfs_addr.c
fs/binfmt_misc.c
fs/block_dev.c
fs/crypto/keyinfo.c
fs/direct-io.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/keystore.c
fs/exec.c
fs/f2fs/f2fs.h
fs/f2fs/segment.c
fs/f2fs/super.c
fs/fscache/object-list.c
fs/iomap.c
fs/mpage.c
fs/nfs/client.c
fs/nfs/filelayout/filelayout.c
fs/nfs/nfs4idmap.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
fs/nfsd/nfs4proc.c
fs/quota/dquot.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_ialloc.c
fs/xfs/libxfs/xfs_log_format.h
fs/xfs/xfs_acl.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_attr_inactive.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_bmap_util.h
fs/xfs/xfs_file.c
fs/xfs/xfs_fsmap.c
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_log.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_ondisk.h
include/linux/bpf.h
include/linux/buffer_head.h
include/linux/hyperv.h
include/linux/kernel.h
include/linux/key.h
include/linux/mm_types.h
include/linux/netfilter_bridge/ebtables.h
include/linux/of.h
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/sched/mm.h
include/linux/sched/topology.h
include/linux/srcu.h
include/linux/thread_info.h
include/sound/control.h
include/sound/seq_virmidi.h
include/uapi/linux/membarrier.h
include/uapi/linux/netfilter/xt_bpf.h
kernel/bpf/inode.c
kernel/bpf/verifier.c
kernel/events/core.c
kernel/exit.c
kernel/fork.c
kernel/irq/chip.c
kernel/irq/cpuhotplug.c
kernel/irq/manage.c
kernel/livepatch/core.c
kernel/locking/lockdep.c
kernel/rcu/srcutree.c
kernel/rcu/sync.c
kernel/rcu/tree.c
kernel/sched/fair.c
kernel/sched/features.h
kernel/sched/membarrier.c
kernel/seccomp.c
lib/Kconfig.debug
lib/digsig.c
lib/locking-selftest.c
mm/cma.c
mm/madvise.c
mm/mempolicy.c
mm/migrate.c
mm/page_vma_mapped.c
mm/swap_state.c
mm/vmalloc.c
net/bridge/netfilter/ebtable_broute.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/bridge/netfilter/ebtables.c
net/dns_resolver/dns_key.c
net/ipv4/gre_offload.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/route.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/addrconf.c
net/ipv6/ip6_offload.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/route.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipmark.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netnet.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_hash_netportnet.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_tables_api.c
net/netfilter/x_tables.c
net/netfilter/xt_bpf.c
net/netfilter/xt_socket.c
net/netlink/af_netlink.c
net/sunrpc/xprtsock.c
net/tipc/bcast.c
net/tipc/msg.c
net/wireless/nl80211.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
samples/trace_events/trace-events-sample.c
scripts/faddr2line
scripts/kallsyms.c
security/commoncap.c
security/keys/Kconfig
security/keys/big_key.c
security/keys/encrypted-keys/encrypted.c
security/keys/gc.c
security/keys/key.c
security/keys/keyctl.c
security/keys/keyring.c
security/keys/permission.c
security/keys/proc.c
security/keys/process_keys.c
security/keys/request_key.c
security/keys/request_key_auth.c
security/keys/trusted.c
security/keys/user_defined.c
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_lock.c
sound/core/seq/seq_lock.h
sound/core/seq/seq_ports.c
sound/core/seq/seq_virmidi.c
sound/core/vmaster.c
sound/hda/hdac_controller.c
sound/pci/hda/hda_codec.c
sound/usb/caiaq/device.c
sound/usb/line6/driver.c
sound/usb/line6/podhd.c
sound/usb/mixer.c
sound/usb/mixer.h
sound/usb/quirks.c
tools/include/uapi/linux/bpf.h
tools/perf/builtin-script.c
tools/perf/util/callchain.c
tools/perf/util/parse-events.c
tools/perf/util/pmu.c
tools/perf/util/pmu.h
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/mqueue/Makefile
tools/testing/selftests/networking/timestamping/rxtimestamp.c
tools/testing/selftests/vm/userfaultfd.c
tools/testing/selftests/x86/Makefile

diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-swap b/Documentation/ABI/testing/sysfs-kernel-mm-swap
index 587db52084c7c21a6db250dffd9b794859d7466a..94672016c26810799328ee6a56bd7f66bf7ef73e 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-swap
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-swap
@@ -14,13 +14,3 @@ Description: Enable/disable VMA based swap readahead.
                still used for tmpfs etc. other users.  If set to
                false, the global swap readahead algorithm will be
                used for all swappable pages.
-
-What:          /sys/kernel/mm/swap/vma_ra_max_order
-Date:          August 2017
-Contact:       Linux memory management mailing list <linux-mm@kvack.org>
-Description:   The max readahead size in order for VMA based swap readahead
-
-               VMA based swap readahead algorithm will readahead at
-               most 1 << max_order pages for each readahead.  The
-               real readahead size for each readahead will be scaled
-               according to the estimation algorithm.
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 8282099e0cbf446bbebaaf63868ca04610999408..5da10184d9084a77c15e42b56f76a4571193344a 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -352,44 +352,30 @@ Read-Copy Update (RCU)
 ----------------------
 
 .. kernel-doc:: include/linux/rcupdate.h
-   :external:
 
 .. kernel-doc:: include/linux/rcupdate_wait.h
-   :external:
 
 .. kernel-doc:: include/linux/rcutree.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/tree.c
-   :external:
 
 .. kernel-doc:: kernel/rcu/tree_plugin.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/tree_exp.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/update.c
-   :external:
 
 .. kernel-doc:: include/linux/srcu.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/srcutree.c
-   :external:
 
 .. kernel-doc:: include/linux/rculist_bl.h
-   :external:
 
 .. kernel-doc:: include/linux/rculist.h
-   :external:
 
 .. kernel-doc:: include/linux/rculist_nulls.h
-   :external:
 
 .. kernel-doc:: include/linux/rcu_sync.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/sync.c
-   :external:
 
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 57f52cdce32e42c9d170e8e475a02e8a08e1f454..9ba04c0bab8db6e1a74947770a028ebca43e1651 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -2387,7 +2387,7 @@ broadcast: Like active-backup, there is not much advantage to this
        and packet type ID), so in a "gatewayed" configuration, all
        outgoing traffic will generally use the same device.  Incoming
        traffic may also end up on a single device, but that is
-       dependent upon the balancing policy of the peer's 8023.ad
+       dependent upon the balancing policy of the peer's 802.3ad
        implementation.  In a "local" configuration, traffic will be
        distributed across the devices in the bond.
 
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 82fc399fcd33d1628289ec5ccfeb3193a368c2fc..61e43cc3ed171e2371b6372609533dc16fc9b057 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -25,6 +25,7 @@ Below are the essential guides that every developer should read.
    submitting-patches
    coding-style
    email-clients
+   kernel-enforcement-statement
 
 Other guides to the community that are of interest to most developers are: 
 
diff --git a/Documentation/process/kernel-enforcement-statement.rst b/Documentation/process/kernel-enforcement-statement.rst
new file mode 100644
index 0000000..1e23d42
--- /dev/null
+++ b/Documentation/process/kernel-enforcement-statement.rst
@@ -0,0 +1,147 @@
+Linux Kernel Enforcement Statement
+----------------------------------
+
+As developers of the Linux kernel, we have a keen interest in how our software
+is used and how the license for our software is enforced.  Compliance with the
+reciprocal sharing obligations of GPL-2.0 is critical to the long-term
+sustainability of our software and community.
+
+Although there is a right to enforce the separate copyright interests in the
+contributions made to our community, we share an interest in ensuring that
+individual enforcement actions are conducted in a manner that benefits our
+community and do not have an unintended negative impact on the health and
+growth of our software ecosystem.  In order to deter unhelpful enforcement
+actions, we agree that it is in the best interests of our development
+community to undertake the following commitment to users of the Linux kernel
+on behalf of ourselves and any successors to our copyright interests:
+
+    Notwithstanding the termination provisions of the GPL-2.0, we agree that
+    it is in the best interests of our development community to adopt the
+    following provisions of GPL-3.0 as additional permissions under our
+    license with respect to any non-defensive assertion of rights under the
+    license.
+
+       However, if you cease all violation of this License, then your license
+       from a particular copyright holder is reinstated (a) provisionally,
+       unless and until the copyright holder explicitly and finally
+       terminates your license, and (b) permanently, if the copyright holder
+       fails to notify you of the violation by some reasonable means prior to
+       60 days after the cessation.
+
+       Moreover, your license from a particular copyright holder is
+       reinstated permanently if the copyright holder notifies you of the
+       violation by some reasonable means, this is the first time you have
+       received notice of violation of this License (for any work) from that
+       copyright holder, and you cure the violation prior to 30 days after
+       your receipt of the notice.
+
+Our intent in providing these assurances is to encourage more use of the
+software.  We want companies and individuals to use, modify and distribute
+this software.  We want to work with users in an open and transparent way to
+eliminate any uncertainty about our expectations regarding compliance or
+enforcement that might limit adoption of our software.  We view legal action
+as a last resort, to be initiated only when other community efforts have
+failed to resolve the problem.
+
+Finally, once a non-compliance issue is resolved, we hope the user will feel
+welcome to join us in our efforts on this project.  Working together, we will
+be stronger.
+
+Except where noted below, we speak only for ourselves, and not for any company
+we might work for today, have in the past, or will in the future.
+
+  - Bjorn Andersson (Linaro)
+  - Andrea Arcangeli (Red Hat)
+  - Neil Armstrong
+  - Jens Axboe
+  - Pablo Neira Ayuso
+  - Khalid Aziz
+  - Ralf Baechle
+  - Felipe Balbi
+  - Arnd Bergmann
+  - Ard Biesheuvel
+  - Paolo Bonzini (Red Hat)
+  - Christian Borntraeger
+  - Mark Brown (Linaro)
+  - Paul Burton
+  - Javier Martinez Canillas
+  - Rob Clark
+  - Jonathan Corbet
+  - Vivien Didelot (Savoir-faire Linux)
+  - Hans de Goede (Red Hat)
+  - Mel Gorman (SUSE)
+  - Sven Eckelmann
+  - Alex Elder (Linaro)
+  - Fabio Estevam
+  - Larry Finger
+  - Bhumika Goyal
+  - Andy Gross
+  - Juergen Gross
+  - Shawn Guo
+  - Ulf Hansson
+  - Tejun Heo
+  - Rob Herring
+  - Masami Hiramatsu
+  - Michal Hocko
+  - Simon Horman
+  - Johan Hovold (Hovold Consulting AB)
+  - Christophe JAILLET
+  - Olof Johansson
+  - Lee Jones (Linaro)
+  - Heiner Kallweit
+  - Srinivas Kandagatla
+  - Jan Kara
+  - Shuah Khan (Samsung)
+  - David Kershner
+  - Jaegeuk Kim
+  - Namhyung Kim
+  - Colin Ian King
+  - Jeff Kirsher
+  - Greg Kroah-Hartman (Linux Foundation)
+  - Christian König
+  - Vinod Koul
+  - Krzysztof Kozlowski
+  - Viresh Kumar
+  - Aneesh Kumar K.V
+  - Julia Lawall
+  - Doug Ledford (Red Hat)
+  - Chuck Lever (Oracle)
+  - Daniel Lezcano
+  - Shaohua Li
+  - Xin Long (Red Hat)
+  - Tony Luck
+  - Mike Marshall
+  - Chris Mason
+  - Paul E. McKenney
+  - David S. Miller
+  - Ingo Molnar
+  - Kuninori Morimoto
+  - Borislav Petkov
+  - Jiri Pirko
+  - Josh Poimboeuf
+  - Sebastian Reichel (Collabora)
+  - Guenter Roeck
+  - Joerg Roedel
+  - Leon Romanovsky
+  - Steven Rostedt (VMware)
+  - Ivan Safonov
+  - Ivan Safonov
+  - Anna Schumaker
+  - Jes Sorensen
+  - K.Y. Srinivasan
+  - Heiko Stuebner
+  - Jiri Kosina (SUSE)
+  - Dmitry Torokhov
+  - Linus Torvalds
+  - Thierry Reding
+  - Rik van Riel
+  - Geert Uytterhoeven (Glider bvba)
+  - Daniel Vetter
+  - Linus Walleij
+  - Richard Weinberger
+  - Dan Williams
+  - Rafael J. Wysocki
+  - Arvind Yadav
+  - Masahiro Yamada
+  - Wei Yongjun
+  - Lv Zheng
diff --git a/MAINTAINERS b/MAINTAINERS
index 2d3d750b19c0cddf8af39786faed78709f7bbba1..a74227ad082ee84db1f0a0a693d5fe62a007d36a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5346,9 +5346,7 @@ M:        "J. Bruce Fields" <bfields@fieldses.org>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
 F:     include/linux/fcntl.h
-F:     include/linux/fs.h
 F:     include/uapi/linux/fcntl.h
-F:     include/uapi/linux/fs.h
 F:     fs/fcntl.c
 F:     fs/locks.c
 
@@ -5357,6 +5355,8 @@ M:        Alexander Viro <viro@zeniv.linux.org.uk>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
 F:     fs/*
+F:     include/linux/fs.h
+F:     include/uapi/linux/fs.h
 
 FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M:     Riku Voipio <riku.voipio@iki.fi>
@@ -7571,7 +7571,7 @@ F:        arch/mips/include/asm/kvm*
 F:     arch/mips/kvm/
 
 KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc)
-M:     Alexander Graf <agraf@suse.com>
+M:     Paul Mackerras <paulus@ozlabs.org>
 L:     kvm-ppc@vger.kernel.org
 W:     http://www.linux-kvm.org/
 T:     git git://github.com/agraf/linux-2.6.git
diff --git a/Makefile b/Makefile
index 2835863bdd5a44d75e4b48820caedcfb5ee31ef2..46bfb0ed22570cf7f1736eeb7ffd10e500c923e4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -933,7 +933,11 @@ ifdef CONFIG_STACK_VALIDATION
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
-    $(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+    ifdef CONFIG_ORC_UNWINDER
+      $(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+    else
+      $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+    endif
     SKIP_STACK_VALIDATION := 1
     export SKIP_STACK_VALIDATION
   endif
diff --git a/arch/Kconfig b/arch/Kconfig
index 1aafb4efbb51dfea8c5639f2bd68379c930825d4..d789a89cb32c450fa436bac9e4b99171e3c9cc5c 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -937,9 +937,6 @@ config STRICT_MODULE_RWX
          and non-text memory will be made non-executable. This provides
          protection against certain security exploits (e.g. writing to text)
 
-config ARCH_WANT_RELAX_ORDER
-       bool
-
 config ARCH_HAS_REFCOUNT
        bool
        help
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 903f3bf48419cb917dfc15af9013674e45c23017..7e25c5cc353a8223c29ed546772ee453b994c529 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -155,14 +155,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                return __cmpxchg_small(ptr, old, new, size);
 
        case 4:
-               return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new);
+               return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr,
+                                    (u32)old, new);
 
        case 8:
                /* lld/scd are only available for MIPS64 */
                if (!IS_ENABLED(CONFIG_64BIT))
                        return __cmpxchg_called_with_bad_pointer();
 
-               return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new);
+               return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr,
+                                    (u64)old, new);
 
        default:
                return __cmpxchg_called_with_bad_pointer();
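
The (u32)/(u64) casts above matter because __cmpxchg() receives "old" as an unsigned long: on a 64-bit MIPS kernel, ll on a 32-bit word sign-extends into the 64-bit register, so a zero-extended 32-bit "old" with bit 31 set could never compare equal without truncation. A minimal userspace sketch of the mismatch on a 64-bit host (illustrative only, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t word = 0x80000000u;            /* value currently in memory */
            unsigned long old = 0x80000000ul;       /* caller's expected value */

            /* Model of what a sign-extending 32-bit load leaves in a 64-bit register: */
            int64_t loaded = (int32_t)word;         /* 0xffffffff80000000 */

            printf("compare without cast: %d\n", (uint64_t)loaded == old);          /* 0 */
            printf("compare with cast:    %d\n", (uint32_t)loaded == (uint32_t)old); /* 1 */
            return 0;
    }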
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index 100f23dfa4384495dc4868ee0b921f6963188729..ac584c5823d08666c40c38144ad33289cc9302ae 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -183,18 +183,20 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
 }
 
 static struct plat_stmmacenet_data ls1x_eth0_pdata = {
-       .bus_id         = 0,
-       .phy_addr       = -1,
+       .bus_id                 = 0,
+       .phy_addr               = -1,
 #if defined(CONFIG_LOONGSON1_LS1B)
-       .interface      = PHY_INTERFACE_MODE_MII,
+       .interface              = PHY_INTERFACE_MODE_MII,
 #elif defined(CONFIG_LOONGSON1_LS1C)
-       .interface      = PHY_INTERFACE_MODE_RMII,
+       .interface              = PHY_INTERFACE_MODE_RMII,
 #endif
-       .mdio_bus_data  = &ls1x_mdio_bus_data,
-       .dma_cfg        = &ls1x_eth_dma_cfg,
-       .has_gmac       = 1,
-       .tx_coe         = 1,
-       .init           = ls1x_eth_mux_init,
+       .mdio_bus_data          = &ls1x_mdio_bus_data,
+       .dma_cfg                = &ls1x_eth_dma_cfg,
+       .has_gmac               = 1,
+       .tx_coe                 = 1,
+       .rx_queues_to_use       = 1,
+       .tx_queues_to_use       = 1,
+       .init                   = ls1x_eth_mux_init,
 };
 
 static struct resource ls1x_eth0_resources[] = {
@@ -222,14 +224,16 @@ struct platform_device ls1x_eth0_pdev = {
 
 #ifdef CONFIG_LOONGSON1_LS1B
 static struct plat_stmmacenet_data ls1x_eth1_pdata = {
-       .bus_id         = 1,
-       .phy_addr       = -1,
-       .interface      = PHY_INTERFACE_MODE_MII,
-       .mdio_bus_data  = &ls1x_mdio_bus_data,
-       .dma_cfg        = &ls1x_eth_dma_cfg,
-       .has_gmac       = 1,
-       .tx_coe         = 1,
-       .init           = ls1x_eth_mux_init,
+       .bus_id                 = 1,
+       .phy_addr               = -1,
+       .interface              = PHY_INTERFACE_MODE_MII,
+       .mdio_bus_data          = &ls1x_mdio_bus_data,
+       .dma_cfg                = &ls1x_eth_dma_cfg,
+       .has_gmac               = 1,
+       .tx_coe                 = 1,
+       .rx_queues_to_use       = 1,
+       .tx_queues_to_use       = 1,
+       .init                   = ls1x_eth_mux_init,
 };
 
 static struct resource ls1x_eth1_resources[] = {
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 192542dbd9724788838a74cb5ed4b8404b0d83fc..16d9ef5a78c57086c5c5db3f518b5bb4c7b1f0b9 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -2558,7 +2558,6 @@ dcopuop:
                                        break;
                                default:
                                        /* Reserved R6 ops */
-                                       pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
                                        return SIGILL;
                                }
                        }
@@ -2719,7 +2718,6 @@ dcopuop:
                                        break;
                                default:
                                        /* Reserved R6 ops */
-                                       pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
                                        return SIGILL;
                                }
                        }
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 7646891c4e9b18077d59ac28bfe2ffe5b82d0d37..01b7a87ea67866a19c86019ab95654185d74e84d 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -667,7 +667,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 {
        int src, dst, r, td, ts, mem_off, b_off;
        bool need_swap, did_move, cmp_eq;
-       unsigned int target;
+       unsigned int target = 0;
        u64 t64;
        s64 t64s;
        int bpf_op = BPF_OP(insn->code);
diff --git a/arch/mips/tools/generic-board-config.sh b/arch/mips/tools/generic-board-config.sh
index 5c4f936870391195da1d8de70d789650fc11acb5..654d652d7fa13adeac15390629d155597ee15296 100755
--- a/arch/mips/tools/generic-board-config.sh
+++ b/arch/mips/tools/generic-board-config.sh
@@ -30,8 +30,6 @@ cfg="$4"
 boards_origin="$5"
 shift 5
 
-cd "${srctree}"
-
 # Only print Skipping... lines if the user explicitly specified BOARDS=. In the
 # general case it only serves to obscure the useful output about what actually
 # was included.
@@ -48,7 +46,7 @@ environment*)
 esac
 
 for board in $@; do
-       board_cfg="arch/mips/configs/generic/board-${board}.config"
+       board_cfg="${srctree}/arch/mips/configs/generic/board-${board}.config"
        if [ ! -f "${board_cfg}" ]; then
                echo "WARNING: Board config '${board_cfg}' not found"
                continue
@@ -84,7 +82,7 @@ for board in $@; do
        done || continue
 
        # Merge this board config fragment into our final config file
-       ./scripts/kconfig/merge_config.sh \
+       ${srctree}/scripts/kconfig/merge_config.sh \
                -m -O ${objtree} ${cfg} ${board_cfg} \
                | grep -Ev '^(#|Using)'
 done
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index c6d6272a934f03823b655cf07b38e7bbc01ca12e..7baa2265d43927fd7e5a24269e627486c60c6b35 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -35,12 +35,12 @@ EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(__xchg8);
 EXPORT_SYMBOL(__xchg32);
 EXPORT_SYMBOL(__cmpxchg_u32);
+EXPORT_SYMBOL(__cmpxchg_u64);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(__atomic_hash);
 #endif
 #ifdef CONFIG_64BIT
 EXPORT_SYMBOL(__xchg64);
-EXPORT_SYMBOL(__cmpxchg_u64);
 #endif
 
 #include <linux/uaccess.h>
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 23de307c3052aa9ecac21fd6c294657fb53de447..41e60a9c7db23b8384b18bf8ddd45f188ef4a0c3 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -742,7 +742,7 @@ lws_compare_and_swap_2:
 10:    ldd     0(%r25), %r25
 11:    ldd     0(%r24), %r24
 #else
-       /* Load new value into r22/r23 - high/low */
+       /* Load old value into r22/r23 - high/low */
 10:    ldw     0(%r25), %r22
 11:    ldw     4(%r25), %r23
        /* Load new value into fr4 for atomic store later */
@@ -834,11 +834,11 @@ cas2_action:
        copy    %r0, %r28
 #else
        /* Compare first word */
-19:    ldw,ma  0(%r26), %r29
+19:    ldw     0(%r26), %r29
        sub,=   %r29, %r22, %r0
        b,n     cas2_end
        /* Compare second word */
-20:    ldw,ma  4(%r26), %r29
+20:    ldw     4(%r26), %r29
        sub,=   %r29, %r23, %r0
        b,n     cas2_end
        /* Perform the store */
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 2d956aa0a38abbc3829757bab4749dd6a0037490..8c0105a49839cf018a80108f76dadccb5e793ee1 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -253,7 +253,10 @@ static int __init init_cr16_clocksource(void)
                cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
 
                for_each_online_cpu(cpu) {
-                       if (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)
+                       if (cpu == 0)
+                               continue;
+                       if ((cpu0_loc != 0) &&
+                           (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
                                continue;
 
                        clocksource_cr16.name = "cr16_unstable";
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 48da0f5d2f7fe0a4745bce864795df16d612a89e..b82586c535604158986bc1d3c1abb481ff736dd6 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -734,7 +734,29 @@ EXC_REAL(program_check, 0x700, 0x100)
 EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
 TRAMP_KVM(PACA_EXGEN, 0x700)
 EXC_COMMON_BEGIN(program_check_common)
-       EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+       /*
+        * It's possible to receive a TM Bad Thing type program check with
+        * userspace register values (in particular r1), but with SRR1 reporting
+        * that we came from the kernel. Normally that would confuse the bad
+        * stack logic, and we would report a bad kernel stack pointer. Instead
+        * we switch to the emergency stack if we're taking a TM Bad Thing from
+        * the kernel.
+        */
+       li      r10,MSR_PR              /* Build a mask of MSR_PR ..    */
+       oris    r10,r10,0x200000@h      /* .. and SRR1_PROGTM           */
+       and     r10,r10,r12             /* Mask SRR1 with that.         */
+       srdi    r10,r10,8               /* Shift it so we can compare   */
+       cmpldi  r10,(0x200000 >> 8)     /* .. with an immediate.        */
+       bne 1f                          /* If != go to normal path.     */
+
+       /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack  */
+       andi.   r10,r12,MSR_PR;         /* Set CR0 correctly for label  */
+                                       /* 3 in EXCEPTION_PROLOG_COMMON */
+       mr      r10,r1                  /* Save r1                      */
+       ld      r1,PACAEMERGSP(r13)     /* Use emergency stack          */
+       subi    r1,r1,INT_FRAME_SIZE    /* alloc stack frame            */
+       b 3f                            /* Jump into the macro !!       */
+1:     EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
        bl      save_nvgprs
        RECONCILE_IRQ_STATE(r10, r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c83c115858c1909218c5897a23f15f0bbd7b10ed..b2c002993d78d340db6ef882d5eeec6692577475 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -452,9 +452,20 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
        if (MSR_TM_RESV(msr))
                return -EINVAL;
 
-       /* pull in MSR TM from user context */
+       /* pull in MSR TS bits from user context */
        regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
 
+       /*
+        * Ensure that TM is enabled in regs->msr before we leave the signal
+        * handler. It could be the case that (a) user disabled the TM bit
+        * through the manipulation of the MSR bits in uc_mcontext or (b) the
+        * TM bit was disabled because a sufficient number of context switches
+        * happened whilst in the signal handler and load_tm overflowed,
+        * disabling the TM bit. In either case we can end up with an illegal
+        * TM state leading to a TM Bad Thing when we return to userspace.
+        */
+       regs->msr |= MSR_TM;
+
        /* pull in MSR LE from user context */
        regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
 
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index c98e90b4ea7b1f15a2dd7157300376e370774cf2..b4e2b7165f79b0d45da686ea8fe77da4dca109ca 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -181,34 +181,25 @@ _GLOBAL(ftrace_stub)
         *  - we have no stack frame and can not allocate one
         *  - LR points back to the original caller (in A)
         *  - CTR holds the new NIP in C
-        *  - r0 & r12 are free
-        *
-        * r0 can't be used as the base register for a DS-form load or store, so
-        * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
+        *  - r0, r11 & r12 are free
         */
 livepatch_handler:
        CURRENT_THREAD_INFO(r12, r1)
 
-       /* Save stack pointer into r0 */
-       mr      r0, r1
-
        /* Allocate 3 x 8 bytes */
-       ld      r1, TI_livepatch_sp(r12)
-       addi    r1, r1, 24
-       std     r1, TI_livepatch_sp(r12)
+       ld      r11, TI_livepatch_sp(r12)
+       addi    r11, r11, 24
+       std     r11, TI_livepatch_sp(r12)
 
        /* Save toc & real LR on livepatch stack */
-       std     r2,  -24(r1)
+       std     r2,  -24(r11)
        mflr    r12
-       std     r12, -16(r1)
+       std     r12, -16(r11)
 
        /* Store stack end marker */
        lis     r12, STACK_END_MAGIC@h
        ori     r12, r12, STACK_END_MAGIC@l
-       std     r12, -8(r1)
-
-       /* Restore real stack pointer */
-       mr      r1, r0
+       std     r12, -8(r11)
 
        /* Put ctr in r12 for global entry and branch there */
        mfctr   r12
@@ -216,36 +207,30 @@ livepatch_handler:
 
        /*
         * Now we are returning from the patched function to the original
-        * caller A. We are free to use r0 and r12, and we can use r2 until we
+        * caller A. We are free to use r11, r12 and we can use r2 until we
         * restore it.
         */
 
        CURRENT_THREAD_INFO(r12, r1)
 
-       /* Save stack pointer into r0 */
-       mr      r0, r1
-
-       ld      r1, TI_livepatch_sp(r12)
+       ld      r11, TI_livepatch_sp(r12)
 
        /* Check stack marker hasn't been trashed */
        lis     r2,  STACK_END_MAGIC@h
        ori     r2,  r2, STACK_END_MAGIC@l
-       ld      r12, -8(r1)
+       ld      r12, -8(r11)
 1:     tdne    r12, r2
        EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
 
        /* Restore LR & toc from livepatch stack */
-       ld      r12, -16(r1)
+       ld      r12, -16(r11)
        mtlr    r12
-       ld      r2,  -24(r1)
+       ld      r2,  -24(r11)
 
        /* Pop livepatch stack frame */
-       CURRENT_THREAD_INFO(r12, r0)
-       subi    r1, r1, 24
-       std     r1, TI_livepatch_sp(r12)
-
-       /* Restore real stack pointer */
-       mr      r1, r0
+       CURRENT_THREAD_INFO(r12, r1)
+       subi    r11, r11, 24
+       std     r11, TI_livepatch_sp(r12)
 
        /* Return to original caller of live patched function */
        blr
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 5e8418c28bd884bd854818755b61b06fcff776e6..f208f560aecd086b579ace960a78f25123974abd 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1684,11 +1684,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
  * Logical instructions
  */
                case 26:        /* cntlzw */
-                       op->val = __builtin_clz((unsigned int) regs->gpr[rd]);
+                       val = (unsigned int) regs->gpr[rd];
+                       op->val = ( val ? __builtin_clz(val) : 32 );
                        goto logical_done;
 #ifdef __powerpc64__
                case 58:        /* cntlzd */
-                       op->val = __builtin_clzl(regs->gpr[rd]);
+                       val = regs->gpr[rd];
+                       op->val = ( val ? __builtin_clzl(val) : 64 );
                        goto logical_done;
 #endif
                case 28:        /* and */
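
The val ? ... : width guards added above are needed because GCC's __builtin_clz() and __builtin_clzl() are undefined for a zero argument, while the PowerPC cntlzw/cntlzd instructions being emulated are architected to return the register width for zero. A minimal sketch of the guarded helpers:

    /*
     * __builtin_clz(0)/__builtin_clzl(0) are undefined behaviour in C;
     * cntlzw/cntlzd return 32/64 for a zero input.
     */
    static inline unsigned int emulated_cntlzw(unsigned int val)
    {
            return val ? __builtin_clz(val) : 32;
    }

    static inline unsigned int emulated_cntlzd(unsigned long val)
    {
            return val ? __builtin_clzl(val) : 64;
    }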
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b95c584ce19d33b658a9a546ecb3de64ff93d23a..a51df9ef529d9356cb54c75721510e50cfa69b8d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1438,7 +1438,6 @@ out:
 
 int arch_update_cpu_topology(void)
 {
-       lockdep_assert_cpus_held();
        return numa_update_cpu_topology(true);
 }
 
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 9ccac86f346385eaa624930f3050cdae62b6b09d..88126245881b3f8e500ab099bbfa4ebba8bf03d9 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -399,6 +399,20 @@ static void nest_imc_counters_release(struct perf_event *event)
 
        /* Take the mutex lock for this node and then decrement the reference count */
        mutex_lock(&ref->lock);
+       if (ref->refc == 0) {
+               /*
+                * The scenario where this is true is, when perf session is
+                * started, followed by offlining of all cpus in a given node.
+                *
+                * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
+                * function set the ref->count to zero, if the cpu which is
+                * about to offline is the last cpu in a given node and make
+                * an OPAL call to disable the engine in that node.
+                *
+                */
+               mutex_unlock(&ref->lock);
+               return;
+       }
        ref->refc--;
        if (ref->refc == 0) {
                rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
@@ -523,8 +537,8 @@ static int core_imc_mem_init(int cpu, int size)
 
        /* We need only vbase for core counters */
        mem_info->vbase = page_address(alloc_pages_node(phys_id,
-                                         GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-                                         get_order(size)));
+                                         GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+                                         __GFP_NOWARN, get_order(size)));
        if (!mem_info->vbase)
                return -ENOMEM;
 
@@ -646,6 +660,20 @@ static void core_imc_counters_release(struct perf_event *event)
                return;
 
        mutex_lock(&ref->lock);
+       if (ref->refc == 0) {
+               /*
+                * The scenario where this is true is, when perf session is
+                * started, followed by offlining of all cpus in a given core.
+                *
+                * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
+                * function set the ref->count to zero, if the cpu which is
+                * about to offline is the last cpu in a given core and make
+                * an OPAL call to disable the engine in that core.
+                *
+                */
+               mutex_unlock(&ref->lock);
+               return;
+       }
        ref->refc--;
        if (ref->refc == 0) {
                rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
@@ -763,8 +791,8 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
                 * free the memory in cpu offline path.
                 */
                local_mem = page_address(alloc_pages_node(phys_id,
-                                 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-                                 get_order(size)));
+                                 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+                                 __GFP_NOWARN, get_order(size)));
                if (!local_mem)
                        return -ENOMEM;
 
@@ -1148,7 +1176,8 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
        }
 
        /* Only free the attr_groups which are dynamically allocated  */
-       kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
+       if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
+               kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
        kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
        kfree(pmu_ptr);
        return;
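
The early returns added above follow a common guarded-refcount shape: if the CPU-hotplug offline path has already dropped the count to zero and stopped the engine, a later event release must not decrement past zero or issue a second stop call. A kernel-style sketch of that pattern (struct imc_ref and stop_engine() are illustrative names, not the driver's real API):

    #include <linux/mutex.h>

    /* Illustrative only: the real driver uses imc_pmu_ref and an OPAL call. */
    struct imc_ref {
            struct mutex lock;
            int refc;
    };

    static void stop_engine(struct imc_ref *ref);   /* stands in for opal_imc_counters_stop() */

    static void counters_release(struct imc_ref *ref)
    {
            mutex_lock(&ref->lock);
            if (ref->refc == 0) {
                    /* CPU-offline path already stopped the engine */
                    mutex_unlock(&ref->lock);
                    return;
            }
            if (--ref->refc == 0)
                    stop_engine(ref);
            mutex_unlock(&ref->lock);
    }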
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index afa46a7406eaeddbbf70bbaf384f49ffa0535874..04e042edbab760f13a2da86a0cab071346bf2ab5 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -27,6 +27,7 @@ CONFIG_NET=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_RAM=y
 # CONFIG_BLK_DEV_XPRAM is not set
 # CONFIG_DCSSBLK is not set
 # CONFIG_DASD is not set
@@ -59,6 +60,7 @@ CONFIG_CONFIGFS_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1cee6753d47a5cba115730cb73cef4324e8d3ed1..495ff6959dec76a904ee2f6ccc30f8ef7809960d 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -293,7 +293,10 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
        lc->lpp = LPP_MAGIC;
        lc->current_pid = tsk->pid;
        lc->user_timer = tsk->thread.user_timer;
+       lc->guest_timer = tsk->thread.guest_timer;
        lc->system_timer = tsk->thread.system_timer;
+       lc->hardirq_timer = tsk->thread.hardirq_timer;
+       lc->softirq_timer = tsk->thread.softirq_timer;
        lc->steal_timer = 0;
 }
 
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 0be3828752e5bc9ddf33ba74c9e5b9efd03691e4..4e83f950713e9f9837da01406586613a9c07cb3f 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -44,7 +44,6 @@ config SPARC
        select ARCH_HAS_SG_CHAIN
        select CPU_NO_EFFICIENT_FFS
        select LOCKDEP_SMALL if LOCKDEP
-       select ARCH_WANT_RELAX_ORDER
 
 config SPARC32
        def_bool !64BIT
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 8a13d468635a8f56f772a6879d3e907a1945dd6a..50e0d2bc45288cd8d741af14c23b2387be5eedfe 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
 /*
  * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
  * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
- * is just setting the LSB, which makes it an invalid stack address and is also
+ * is just clearing the MSB, which makes it an invalid stack address and is also
  * a signal to the unwinder that it's a pt_regs pointer in disguise.
  *
  * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
 .macro ENCODE_FRAME_POINTER
 #ifdef CONFIG_FRAME_POINTER
        mov %esp, %ebp
-       orl $0x1, %ebp
+       andl $0x7fffffff, %ebp
 #endif
 .endm
 
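On 32-bit, clearing the MSB works as the encoding because kernel stack addresses live above PAGE_OFFSET and so have bit 31 set under the default 3G/1G split; the cleared-MSB value cannot be a valid stack address, which is the unwinder's cue. An illustrative C model of the encode/decode pair, assuming that address-space layout (the real decoder appears in the unwind_frame.c diff below):

    /* Illustrative model of the 32-bit encoding; pt_regs left opaque. */
    struct pt_regs;

    static inline unsigned long encode_frame_pointer32(struct pt_regs *regs)
    {
            return (unsigned long)regs & 0x7fffffff;        /* clear the MSB */
    }

    static inline struct pt_regs *decode_frame_pointer32(unsigned long bp)
    {
            if (bp & 0x80000000)
                    return NULL;            /* MSB set: an ordinary frame pointer */
            return (struct pt_regs *)(bp | 0x80000000);     /* pt_regs in disguise */
    }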
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 1c5390f1cf0992787afa8d34bb361f622b00dcb2..d45e06346f14d8636f1b4348a84a6e503012c686 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -822,7 +822,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
                pmus[i].type    = type;
                pmus[i].boxes   = kzalloc(size, GFP_KERNEL);
                if (!pmus[i].boxes)
-                       return -ENOMEM;
+                       goto err;
        }
 
        type->pmus = pmus;
@@ -836,7 +836,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
                attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
                                        sizeof(*attr_group), GFP_KERNEL);
                if (!attr_group)
-                       return -ENOMEM;
+                       goto err;
 
                attrs = (struct attribute **)(attr_group + 1);
                attr_group->name = "events";
@@ -849,7 +849,15 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
        }
 
        type->pmu_group = &uncore_pmu_attr_group;
+
        return 0;
+
+err:
+       for (i = 0; i < type->num_boxes; i++)
+               kfree(pmus[i].boxes);
+       kfree(pmus);
+
+       return -ENOMEM;
 }
 
 static int __init
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 1a8eb550c40f13d8f4f4f5d049e09d323a2bb185..a5db63f728a2f985bde0f1b98f87be4537913cdc 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -85,6 +85,8 @@ EXPORT_SYMBOL_GPL(hyperv_cs);
 u32 *hv_vp_index;
 EXPORT_SYMBOL_GPL(hv_vp_index);
 
+u32 hv_max_vp_index;
+
 static int hv_cpu_init(unsigned int cpu)
 {
        u64 msr_vp_index;
@@ -93,6 +95,9 @@ static int hv_cpu_init(unsigned int cpu)
 
        hv_vp_index[smp_processor_id()] = msr_vp_index;
 
+       if (msr_vp_index > hv_max_vp_index)
+               hv_max_vp_index = msr_vp_index;
+
        return 0;
 }
 
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 39e7f6e50919117087fe663e013cd77d12b16b7b..9cc9e1c1e2dbcf6047c9ebd37f280c66b9e7e29a 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -36,9 +36,9 @@ struct hv_flush_pcpu_ex {
 /* Each gva in gva_list encodes up to 4096 pages to flush */
 #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
 
-static struct hv_flush_pcpu __percpu *pcpu_flush;
+static struct hv_flush_pcpu __percpu **pcpu_flush;
 
-static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex;
+static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;
 
 /*
  * Fills in gva_list starting from offset. Returns the number of items added.
@@ -76,6 +76,18 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
 {
        int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
 
+       /* valid_bank_mask can represent up to 64 banks */
+       if (hv_max_vp_index / 64 >= 64)
+               return 0;
+
+       /*
+        * Clear all banks up to the maximum possible bank as hv_flush_pcpu_ex
+        * structs are not cleared between calls, we risk flushing unneeded
+        * vCPUs otherwise.
+        */
+       for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
+               flush->hv_vp_set.bank_contents[vcpu_bank] = 0;
+
        /*
         * Some banks may end up being empty but this is acceptable.
         */
@@ -83,11 +95,6 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
                vcpu = hv_cpu_number_to_vp_number(cpu);
                vcpu_bank = vcpu / 64;
                vcpu_offset = vcpu % 64;
-
-               /* valid_bank_mask can represent up to 64 banks */
-               if (vcpu_bank >= 64)
-                       return 0;
-
                __set_bit(vcpu_offset, (unsigned long *)
                          &flush->hv_vp_set.bank_contents[vcpu_bank]);
                if (vcpu_bank >= nr_bank)
@@ -102,6 +109,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
                                    const struct flush_tlb_info *info)
 {
        int cpu, vcpu, gva_n, max_gvas;
+       struct hv_flush_pcpu **flush_pcpu;
        struct hv_flush_pcpu *flush;
        u64 status = U64_MAX;
        unsigned long flags;
@@ -116,7 +124,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 
        local_irq_save(flags);
 
-       flush = this_cpu_ptr(pcpu_flush);
+       flush_pcpu = this_cpu_ptr(pcpu_flush);
+
+       if (unlikely(!*flush_pcpu))
+               *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+       flush = *flush_pcpu;
+
+       if (unlikely(!flush)) {
+               local_irq_restore(flags);
+               goto do_native;
+       }
 
        if (info->mm) {
                flush->address_space = virt_to_phys(info->mm->pgd);
@@ -173,6 +191,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
                                       const struct flush_tlb_info *info)
 {
        int nr_bank = 0, max_gvas, gva_n;
+       struct hv_flush_pcpu_ex **flush_pcpu;
        struct hv_flush_pcpu_ex *flush;
        u64 status = U64_MAX;
        unsigned long flags;
@@ -187,7 +206,17 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 
        local_irq_save(flags);
 
-       flush = this_cpu_ptr(pcpu_flush_ex);
+       flush_pcpu = this_cpu_ptr(pcpu_flush_ex);
+
+       if (unlikely(!*flush_pcpu))
+               *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+       flush = *flush_pcpu;
+
+       if (unlikely(!flush)) {
+               local_irq_restore(flags);
+               goto do_native;
+       }
 
        if (info->mm) {
                flush->address_space = virt_to_phys(info->mm->pgd);
@@ -222,18 +251,18 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
                flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
                status = hv_do_rep_hypercall(
                        HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
-                       0, nr_bank + 2, flush, NULL);
+                       0, nr_bank, flush, NULL);
        } else if (info->end &&
                   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
                status = hv_do_rep_hypercall(
                        HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
-                       0, nr_bank + 2, flush, NULL);
+                       0, nr_bank, flush, NULL);
        } else {
                gva_n = fill_gva_list(flush->gva_list, nr_bank,
                                      info->start, info->end);
                status = hv_do_rep_hypercall(
                        HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
-                       gva_n, nr_bank + 2, flush, NULL);
+                       gva_n, nr_bank, flush, NULL);
        }
 
        local_irq_restore(flags);
@@ -266,7 +295,7 @@ void hyper_alloc_mmu(void)
                return;
 
        if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
-               pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+               pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
        else
-               pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+               pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
 }
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index e7636bac7372d41d4b8077f4d7df7d81de32ee32..6c98821fef5ed9f0b953141a905b99893663652e 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
 #define new_len2               145f-144f
 
 /*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
  * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ *
+ * The additional "-" is needed because gas uses a "true" value of -1.
  */
 #define alt_max_short(a, b)    ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
 
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index c096624137ae831fa4d0f9cebf528453598fdefb..ccbe24e697c46cee2890d00fda413b9f31b41af6 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -103,12 +103,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
        alt_end_marker ":\n"
 
 /*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
  * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
  *
- * The additional "-" is needed because gas works with s32s.
+ * The additional "-" is needed because gas uses a "true" value of -1.
  */
-#define alt_max_short(a, b)    "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))"
+#define alt_max_short(a, b)    "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
 
 /*
  * Pad the second replacement alternative with additional NOPs if it is
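
Both headers rely on the classic branchless max from the bithacks page: XOR-swapping in b when the mask -(a < b) is all-ones. In C a relational yields 0 or 1, so a single negation builds the mask; gas evaluates a true relational as -1, which is why the kernel macro carries the extra inner negation. A userspace demonstration of the C-semantics version:

    #include <assert.h>

    static unsigned int max_branchless(unsigned int a, unsigned int b)
    {
            /* mask is all-ones exactly when a < b, so the XOR swaps in b */
            unsigned int mask = -(unsigned int)(a < b);

            return a ^ ((a ^ b) & mask);
    }

    int main(void)
    {
            assert(max_branchless(3, 7) == 7);
            assert(max_branchless(7, 3) == 7);
            assert(max_branchless(5, 5) == 5);
            return 0;
    }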
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 181264989db572a8533c00bbe6392c880c0875e5..8edac1de2e356dde61029939adbaa535aa2fa793 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -187,7 +187,6 @@ struct mca_msr_regs {
 
 extern struct mce_vendor_flags mce_flags;
 
-extern struct mca_config mca_cfg;
 extern struct mca_msr_regs msr_ops;
 
 enum mce_notifier_prios {
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index c120b5db178aa415e1bbb97957dd7d73687575d3..3c856a15b98e8edda98cc5ba7e40fc67f49be230 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -126,13 +126,7 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
        DEBUG_LOCKS_WARN_ON(preemptible());
 }
 
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-       int cpu = smp_processor_id();
-
-       if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
-               cpumask_clear_cpu(cpu, mm_cpumask(mm));
-}
+void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 
 static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 738503e1f80c14f1b5720275dd57161509927ef8..530f448fddafff3297f0e97d697309f1a2cf15bf 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -289,6 +289,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
  * to this information.
  */
 extern u32 *hv_vp_index;
+extern u32 hv_max_vp_index;
 
 /**
  * hv_cpu_number_to_vp_number() - Map CPU to VP.
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 4893abf7f74f9e264f5aaaec5a58211b6d324b3c..d362161d3291f13d3c211a91e10ce85416050c12 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -82,6 +82,13 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
 #endif
 
+/*
+ * If tlb_use_lazy_mode is true, then we try to avoid switching CR3 to point
+ * to init_mm when we switch to a kernel thread (e.g. the idle thread).  If
+ * it's false, then we immediately switch CR3 when entering a kernel thread.
+ */
+DECLARE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+
 /*
  * 6 because 6 should be plenty and struct tlb_state will fit in
  * two cache lines.
@@ -104,6 +111,23 @@ struct tlb_state {
        u16 loaded_mm_asid;
        u16 next_asid;
 
+       /*
+        * We can be in one of several states:
+        *
+        *  - Actively using an mm.  Our CPU's bit will be set in
+        *    mm_cpumask(loaded_mm) and is_lazy == false;
+        *
+        *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
+        *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
+        *
+        *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
+        *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
+        *    We're heuristically guessing that the CR3 load we
+        *    skipped more than makes up for the overhead added by
+        *    lazy mode.
+        */
+       bool is_lazy;
+
        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index d705c769f77d52ce55e4f7d5d32b9853ceb40394..ff891772c9f86492d7ca2721b66619b2be61ed9d 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -573,11 +573,21 @@ static u32 bdx_deadline_rev(void)
        return ~0U;
 }
 
+static u32 skx_deadline_rev(void)
+{
+       switch (boot_cpu_data.x86_mask) {
+       case 0x03: return 0x01000136;
+       case 0x04: return 0x02000014;
+       }
+
+       return ~0U;
+}
+
 static const struct x86_cpu_id deadline_match[] = {
        DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X,        hsx_deadline_rev),
        DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X,      0x0b000020),
        DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev),
-       DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_X,        0x02000014),
+       DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X,        skx_deadline_rev),
 
        DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE,     0x22),
        DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT,      0x20),
@@ -600,7 +610,8 @@ static void apic_check_deadline_errata(void)
        const struct x86_cpu_id *m;
        u32 rev;
 
-       if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+       if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
+           boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return;
 
        m = x86_match_cpu(deadline_match);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 098530a93bb7cc4e451687ae29a0b86447105861..debb974fd17d0badaa7d1bbf3e0c671a2909b583 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -1,3 +1,6 @@
+#ifndef __X86_MCE_INTERNAL_H__
+#define __X86_MCE_INTERNAL_H__
+
 #include <linux/device.h>
 #include <asm/mce.h>
 
@@ -108,3 +111,7 @@ static inline void mce_work_trigger(void)   { }
 static inline void mce_register_injector_chain(struct notifier_block *nb)      { }
 static inline void mce_unregister_injector_chain(struct notifier_block *nb)    { }
 #endif
+
+extern struct mca_config mca_cfg;
+
+#endif /* __X86_MCE_INTERNAL_H__ */
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 40e28ed77fbf017aee3ddb5c8dd128ff8ac2b826..486f640b02efd1313911c494588bb7875e7e5759 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -28,6 +28,8 @@
 #include <asm/msr.h>
 #include <asm/trace/irq_vectors.h>
 
+#include "mce-internal.h"
+
 #define NR_BLOCKS         5
 #define THRESHOLD_MAX     0xFFF
 #define INT_TYPE_APIC     0x00020000
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 86e8f0b2537b3eaedd2da204d67c94947a1b16f1..c4fa4a85d4cb6f4e0cb4b34cd20467879167463d 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -122,9 +122,6 @@ static bool __init check_loader_disabled_bsp(void)
        bool *res = &dis_ucode_ldr;
 #endif
 
-       if (!have_cpuid_p())
-               return *res;
-
        /*
         * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
         * completely accurate as xen pv guests don't see that CPUID bit set but
@@ -166,24 +163,36 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
 void __init load_ucode_bsp(void)
 {
        unsigned int cpuid_1_eax;
+       bool intel = true;
 
-       if (check_loader_disabled_bsp())
+       if (!have_cpuid_p())
                return;
 
        cpuid_1_eax = native_cpuid_eax(1);
 
        switch (x86_cpuid_vendor()) {
        case X86_VENDOR_INTEL:
-               if (x86_family(cpuid_1_eax) >= 6)
-                       load_ucode_intel_bsp();
+               if (x86_family(cpuid_1_eax) < 6)
+                       return;
                break;
+
        case X86_VENDOR_AMD:
-               if (x86_family(cpuid_1_eax) >= 0x10)
-                       load_ucode_amd_bsp(cpuid_1_eax);
+               if (x86_family(cpuid_1_eax) < 0x10)
+                       return;
+               intel = false;
                break;
+
        default:
-               break;
+               return;
        }
+
+       if (check_loader_disabled_bsp())
+               return;
+
+       if (intel)
+               load_ucode_intel_bsp();
+       else
+               load_ucode_amd_bsp(cpuid_1_eax);
 }
 
 static bool check_loader_disabled_ap(void)
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index db2182d63ed0c40ae9057ea100314d0a7c7de8fb..3fc0f9a794cbdecb3b8749766af6255ec156bd71 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -3,6 +3,15 @@
 
 /* Kprobes and Optprobes common header */
 
+#include <asm/asm.h>
+
+#ifdef CONFIG_FRAME_POINTER
+# define SAVE_RBP_STRING "     push %" _ASM_BP "\n" \
+                        "      mov  %" _ASM_SP ", %" _ASM_BP "\n"
+#else
+# define SAVE_RBP_STRING "     push %" _ASM_BP "\n"
+#endif
+
 #ifdef CONFIG_X86_64
 #define SAVE_REGS_STRING                       \
        /* Skip cs, ip, orig_ax. */             \
@@ -17,7 +26,7 @@
        "       pushq %r10\n"                   \
        "       pushq %r11\n"                   \
        "       pushq %rbx\n"                   \
-       "       pushq %rbp\n"                   \
+       SAVE_RBP_STRING                         \
        "       pushq %r12\n"                   \
        "       pushq %r13\n"                   \
        "       pushq %r14\n"                   \
@@ -48,7 +57,7 @@
        "       pushl %es\n"                    \
        "       pushl %ds\n"                    \
        "       pushl %eax\n"                   \
-       "       pushl %ebp\n"                   \
+       SAVE_RBP_STRING                         \
        "       pushl %edi\n"                   \
        "       pushl %esi\n"                   \
        "       pushl %edx\n"                   \
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f0153714ddac6b2305645ef5c0e35fd3a6c9fb2c..0742491cbb734d29e1be790d890aeb5271ea6eb4 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1080,8 +1080,6 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
         * raw stack chunk with redzones:
         */
        __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
-       regs->flags &= ~X86_EFLAGS_IF;
-       trace_hardirqs_off();
        regs->ip = (unsigned long)(jp->entry);
 
        /*
index 54180fa6f66fa8fe04c7528f054d642db522b617..add33f600531d7ca01c41d79be31383a49464f5e 100644 (file)
@@ -105,6 +105,10 @@ void __noreturn machine_real_restart(unsigned int type)
        load_cr3(initial_page_table);
 #else
        write_cr3(real_mode_header->trampoline_pgd);
+
+       /* Exiting long mode will fail if CR4.PCIDE is set. */
+       if (static_cpu_has(X86_FEATURE_PCID))
+               cr4_clear_bits(X86_CR4_PCIDE);
 #endif
 
        /* Jump to the identity-mapped low memory code */
index d145a0b1f529877b67e16992ca4c3830869f9950..3dc26f95d46e8a1ea439dba8ae144bae0dc9444d 100644 (file)
@@ -44,7 +44,8 @@ static void unwind_dump(struct unwind_state *state)
                        state->stack_info.type, state->stack_info.next_sp,
                        state->stack_mask, state->graph_idx);
 
-       for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+       for (sp = PTR_ALIGN(state->orig_sp, sizeof(long)); sp;
+            sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
                if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
                        break;
 
@@ -174,6 +175,7 @@ static bool is_last_task_frame(struct unwind_state *state)
  * This determines if the frame pointer actually contains an encoded pointer to
  * pt_regs on the stack.  See ENCODE_FRAME_POINTER.
  */
+#ifdef CONFIG_X86_64
 static struct pt_regs *decode_frame_pointer(unsigned long *bp)
 {
        unsigned long regs = (unsigned long)bp;
@@ -183,6 +185,23 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp)
 
        return (struct pt_regs *)(regs & ~0x1);
 }
+#else
+static struct pt_regs *decode_frame_pointer(unsigned long *bp)
+{
+       unsigned long regs = (unsigned long)bp;
+
+       if (regs & 0x80000000)
+               return NULL;
+
+       return (struct pt_regs *)(regs | 0x80000000);
+}
+#endif
+
+#ifdef CONFIG_X86_32
+#define KERNEL_REGS_SIZE (sizeof(struct pt_regs) - 2*sizeof(long))
+#else
+#define KERNEL_REGS_SIZE (sizeof(struct pt_regs))
+#endif
 
 static bool update_stack_state(struct unwind_state *state,
                               unsigned long *next_bp)
@@ -202,7 +221,7 @@ static bool update_stack_state(struct unwind_state *state,
        regs = decode_frame_pointer(next_bp);
        if (regs) {
                frame = (unsigned long *)regs;
-               len = regs_size(regs);
+               len = KERNEL_REGS_SIZE;
                state->got_irq = true;
        } else {
                frame = next_bp;
@@ -226,6 +245,14 @@ static bool update_stack_state(struct unwind_state *state,
            frame < prev_frame_end)
                return false;
 
+       /*
+        * On 32-bit with user mode regs, make sure the last two regs are safe
+        * to access:
+        */
+       if (IS_ENABLED(CONFIG_X86_32) && regs && user_mode(regs) &&
+           !on_stack(info, frame, len + 2*sizeof(long)))
+               return false;
+
        /* Move state to the next frame: */
        if (regs) {
                state->regs = regs;
@@ -328,6 +355,13 @@ bad_address:
            state->regs->sp < (unsigned long)task_pt_regs(state->task))
                goto the_end;
 
+       /*
+        * There are some known frame pointer issues on 32-bit.  Disable
+        * unwinder warnings on 32-bit until it gets objtool support.
+        */
+       if (IS_ENABLED(CONFIG_X86_32))
+               goto the_end;
+
        if (state->regs) {
                printk_deferred_once(KERN_WARNING
                        "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
index 106d4a029a8a933ad3865a11a5e2c1a11810e714..7a69cf053711197df9a0f2ec284ef5a436c42514 100644 (file)
@@ -3973,13 +3973,6 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 static inline bool is_last_gpte(struct kvm_mmu *mmu,
                                unsigned level, unsigned gpte)
 {
-       /*
-        * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
-        * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
-        * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
-        */
-       gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
-
        /*
         * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
         * If it is clear, there are no large pages at this level, so clear
@@ -3987,6 +3980,13 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
         */
        gpte &= level - mmu->last_nonleaf_level;
 
+       /*
+        * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
+        * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
+        * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
+        */
+       gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
+
        return gpte & PT_PAGE_SIZE_MASK;
 }
 
@@ -4555,6 +4555,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 
        update_permission_bitmask(vcpu, context, true);
        update_pkru_bitmask(vcpu, context, true);
+       update_last_nonleaf_level(vcpu, context);
        reset_rsvds_bits_mask_ept(vcpu, context, execonly);
        reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }
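
A worked bit-level example (values chosen for illustration, not taken from the patch) of why the reordering in is_last_gpte() above matters; PT_PAGE_SIZE_MASK is bit 7 and gpte is unsigned, so the arithmetic wraps:

	/*
	 * Assume level == 1 (PT_PAGE_TABLE_LEVEL), a stale
	 * mmu->last_nonleaf_level == 1, and gpte bit 7 initially clear.
	 *
	 * Old order:
	 *   gpte |= 1 - 1 - 1;   // -1: every bit set, including bit 7
	 *   gpte &= 1 - 1;       //  0: bit 7 cleared -> walk continues (wrong)
	 *
	 * New order:
	 *   gpte &= 1 - 1;       //  0
	 *   gpte |= 1 - 1 - 1;   // -1: bit 7 set -> walk terminates (right)
	 *
	 * A level-1 gpte therefore always terminates the walk, whatever
	 * last_nonleaf_level happens to hold.
	 */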
index 86b68dc5a6493152b9032f1fda970b31ca8d755a..f18d1f8d332b8a2b0512c1f6d8d85fe4ca303ef3 100644 (file)
@@ -334,10 +334,11 @@ retry_walk:
                --walker->level;
 
                index = PT_INDEX(addr, walker->level);
-
                table_gfn = gpte_to_gfn(pte);
                offset    = index * sizeof(pt_element_t);
                pte_gpa   = gfn_to_gpa(table_gfn) + offset;
+
+               BUG_ON(walker->level < 1);
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;
 
index a2b804e10c95d71ac19aebc83c6f89316cea658d..95a01609d7eea13633a6d53383a6e2859463c3e5 100644 (file)
@@ -11297,7 +11297,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 
        /* Same as above - no reason to call set_cr4_guest_host_mask().  */
        vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
-       kvm_set_cr4(vcpu, vmcs12->host_cr4);
+       vmx_set_cr4(vcpu, vmcs12->host_cr4);
 
        nested_ept_uninit_mmu_context(vcpu);
 
index 72bf8c01c6e3a58254cc915aded88eea8146f41a..e1f095884386d40f4a8dcc6a8500be6d67fd6dbd 100644 (file)
@@ -1,5 +1,12 @@
-# Kernel does not boot with instrumentation of tlb.c.
-KCOV_INSTRUMENT_tlb.o  := n
+# Kernel does not boot with instrumentation of tlb.c and mem_encrypt.c
+KCOV_INSTRUMENT_tlb.o          := n
+KCOV_INSTRUMENT_mem_encrypt.o  := n
+
+KASAN_SANITIZE_mem_encrypt.o   := n
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_mem_encrypt.o    = -pg
+endif
 
 obj-y  :=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
            pat.o pgtable.o physaddr.o setup_nx.o tlb.o
index 49d9778376d774c3d6f17eb47118176efb99b718..658bf00905651624945a4097c3098b94e6645908 100644 (file)
@@ -30,6 +30,8 @@
 
 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
 
+DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+
 static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
                            u16 *new_asid, bool *need_flush)
 {
@@ -80,7 +82,7 @@ void leave_mm(int cpu)
                return;
 
        /* Warn if we're not lazy. */
-       WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm)));
+       WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
 
        switch_mm(NULL, &init_mm, NULL);
 }
@@ -142,45 +144,24 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                __flush_tlb_all();
        }
 #endif
+       this_cpu_write(cpu_tlbstate.is_lazy, false);
 
        if (real_prev == next) {
                VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
                          next->context.ctx_id);
 
-               if (cpumask_test_cpu(cpu, mm_cpumask(next))) {
-                       /*
-                        * There's nothing to do: we weren't lazy, and we
-                        * aren't changing our mm.  We don't need to flush
-                        * anything, nor do we need to update CR3, CR4, or
-                        * LDTR.
-                        */
-                       return;
-               }
-
-               /* Resume remote flushes and then read tlb_gen. */
-               cpumask_set_cpu(cpu, mm_cpumask(next));
-               next_tlb_gen = atomic64_read(&next->context.tlb_gen);
-
-               if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) <
-                   next_tlb_gen) {
-                       /*
-                        * Ideally, we'd have a flush_tlb() variant that
-                        * takes the known CR3 value as input.  This would
-                        * be faster on Xen PV and on hypothetical CPUs
-                        * on which INVPCID is fast.
-                        */
-                       this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
-                                      next_tlb_gen);
-                       write_cr3(build_cr3(next, prev_asid));
-                       trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
-                                       TLB_FLUSH_ALL);
-               }
-
                /*
-                * We just exited lazy mode, which means that CR4 and/or LDTR
-                * may be stale.  (Changes to the required CR4 and LDTR states
-                * are not reflected in tlb_gen.)
+                * We don't currently support having a real mm loaded without
+                * our cpu set in mm_cpumask().  We have all the bookkeeping
+                * in place to figure out whether we would need to flush
+                * if our cpu were cleared in mm_cpumask(), but we don't
+                * currently use it.
                 */
+               if (WARN_ON_ONCE(real_prev != &init_mm &&
+                                !cpumask_test_cpu(cpu, mm_cpumask(next))))
+                       cpumask_set_cpu(cpu, mm_cpumask(next));
+
+               return;
        } else {
                u16 new_asid;
                bool need_flush;
@@ -199,10 +180,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                }
 
                /* Stop remote flushes for the previous mm */
-               if (cpumask_test_cpu(cpu, mm_cpumask(real_prev)))
-                       cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
-
-               VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
+               VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
+                               real_prev != &init_mm);
+               cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
 
                /*
                 * Start remote flushes and then read tlb_gen.
@@ -232,6 +212,37 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
        switch_ldt(real_prev, next);
 }
 
+/*
+ * enter_lazy_tlb() is a hint from the scheduler that we are entering a
+ * kernel thread or other context without an mm.  Acceptable implementations
+ * include doing nothing whatsoever, switching to init_mm, or various clever
+ * lazy tricks to try to minimize TLB flushes.
+ *
+ * The scheduler reserves the right to call enter_lazy_tlb() several times
+ * in a row.  It will notify us that we're going back to a real mm by
+ * calling switch_mm_irqs_off().
+ */
+void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+       if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
+               return;
+
+       if (static_branch_unlikely(&tlb_use_lazy_mode)) {
+               /*
+                * There's a significant optimization that may be possible
+                * here.  We have accurate enough TLB flush tracking that we
+                * don't need to maintain coherence of TLB per se when we're
+                * lazy.  We do, however, need to maintain coherence of
+                * paging-structure caches.  We could, in principle, leave our
+                * old mm loaded and only switch to init_mm when
+                * tlb_remove_page() happens.
+                */
+               this_cpu_write(cpu_tlbstate.is_lazy, true);
+       } else {
+               switch_mm(NULL, &init_mm, NULL);
+       }
+}
+
 /*
  * Call this when reinitializing a CPU.  It fixes the following potential
  * problems:
@@ -303,16 +314,20 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
        /* This code cannot presently handle being reentered. */
        VM_WARN_ON(!irqs_disabled());
 
+       if (unlikely(loaded_mm == &init_mm))
+               return;
+
        VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
                   loaded_mm->context.ctx_id);
 
-       if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))) {
+       if (this_cpu_read(cpu_tlbstate.is_lazy)) {
                /*
-                * We're in lazy mode -- don't flush.  We can get here on
-                * remote flushes due to races and on local flushes if a
-                * kernel thread coincidentally flushes the mm it's lazily
-                * still using.
+                * We're in lazy mode.  We need to at least flush our
+                * paging-structure cache to avoid speculatively reading
+                * garbage into our TLB.  Since switching to init_mm is barely
+                * slower than a minimal flush, just switch to init_mm.
                 */
+               switch_mm_irqs_off(NULL, &init_mm, NULL);
                return;
        }
 
@@ -611,3 +626,57 @@ static int __init create_tlb_single_page_flush_ceiling(void)
        return 0;
 }
 late_initcall(create_tlb_single_page_flush_ceiling);
+
+static ssize_t tlblazy_read_file(struct file *file, char __user *user_buf,
+                                size_t count, loff_t *ppos)
+{
+       char buf[2];
+
+       buf[0] = static_branch_likely(&tlb_use_lazy_mode) ? '1' : '0';
+       buf[1] = '\n';
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t tlblazy_write_file(struct file *file,
+                const char __user *user_buf, size_t count, loff_t *ppos)
+{
+       bool val;
+
+       if (kstrtobool_from_user(user_buf, count, &val))
+               return -EINVAL;
+
+       if (val)
+               static_branch_enable(&tlb_use_lazy_mode);
+       else
+               static_branch_disable(&tlb_use_lazy_mode);
+
+       return count;
+}
+
+static const struct file_operations fops_tlblazy = {
+       .read = tlblazy_read_file,
+       .write = tlblazy_write_file,
+       .llseek = default_llseek,
+};
+
+static int __init init_tlb_use_lazy_mode(void)
+{
+       if (boot_cpu_has(X86_FEATURE_PCID)) {
+               /*
+                * Heuristic: with PCID on, switching to and from
+                * init_mm is reasonably fast, but remote flush IPIs are
+                * as expensive as ever, so turn off lazy TLB mode.
+                *
+                * We can't do this in setup_pcid() because static keys
+                * haven't been initialized yet, and it would blow up
+                * badly.
+                */
+               static_branch_disable(&tlb_use_lazy_mode);
+       }
+
+       debugfs_create_file("tlb_use_lazy_mode", S_IRUSR | S_IWUSR,
+                           arch_debugfs_dir, NULL, &fops_tlblazy);
+       return 0;
+}
+late_initcall(init_tlb_use_lazy_mode);
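
As a usage note, a minimal user-space sketch for flipping the new knob, assuming debugfs is mounted at /sys/kernel/debug and that arch_debugfs_dir is the "x86" directory there:

	/* build: cc -o tlblazy tlblazy.c */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Path assumed from debugfs_create_file() + arch_debugfs_dir. */
		const char *path = "/sys/kernel/debug/x86/tlb_use_lazy_mode";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* kstrtobool_from_user() accepts "0"/"1" (also "n"/"y"). */
		if (write(fd, "0", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}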
index 0e7ef69e853151207c985d5306927c09d335acdc..d669e9d890017770456abe458f1161eb2509c09e 100644 (file)
@@ -93,11 +93,11 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
        int rc;
 
        rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
-                                      "x86/xen/hvm_guest:prepare",
+                                      "x86/xen/guest:prepare",
                                       cpu_up_prepare_cb, cpu_dead_cb);
        if (rc >= 0) {
                rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-                                              "x86/xen/hvm_guest:online",
+                                              "x86/xen/guest:online",
                                               xen_cpu_up_online, NULL);
                if (rc < 0)
                        cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
index b38e962fa83e774c5c9121bbbe277124d13dd793..101c2a9b548150cd3f7bb5b28a460ef82c9e4a75 100644 (file)
@@ -1239,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
         */
        bmd->is_our_pages = map_data ? 0 : 1;
        memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
-       iov_iter_init(&bmd->iter, iter->type, bmd->iov,
-                       iter->nr_segs, iter->count);
+       bmd->iter = *iter;
+       bmd->iter.iov = bmd->iov;
 
        ret = -ENOMEM;
        bio = bio_kmalloc(gfp_mask, nr_pages);
@@ -1331,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
        int ret, offset;
        struct iov_iter i;
        struct iovec iov;
+       struct bio_vec *bvec;
 
        iov_for_each(iov, i, *iter) {
                unsigned long uaddr = (unsigned long) iov.iov_base;
@@ -1375,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
                ret = get_user_pages_fast(uaddr, local_nr_pages,
                                (iter->type & WRITE) != WRITE,
                                &pages[cur_page]);
-               if (ret < local_nr_pages) {
+               if (unlikely(ret < local_nr_pages)) {
+                       for (j = cur_page; j < page_limit; j++) {
+                               if (!pages[j])
+                                       break;
+                               put_page(pages[j]);
+                       }
                        ret = -EFAULT;
                        goto out_unmap;
                }
@@ -1383,6 +1389,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
                offset = offset_in_page(uaddr);
                for (j = cur_page; j < page_limit; j++) {
                        unsigned int bytes = PAGE_SIZE - offset;
+                       unsigned short prev_bi_vcnt = bio->bi_vcnt;
 
                        if (len <= 0)
                                break;
@@ -1397,6 +1404,13 @@ struct bio *bio_map_user_iov(struct request_queue *q,
                                            bytes)
                                break;
 
+                       /*
+                        * If the vector was merged with the previous one,
+                        * bi_vcnt did not grow and no new bvec holds this
+                        * page, so drop the extra page reference.
+                        */
+                       if (bio->bi_vcnt == prev_bi_vcnt)
+                               put_page(pages[j]);
+
                        len -= bytes;
                        offset = 0;
                }
@@ -1423,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
        return bio;
 
  out_unmap:
-       for (j = 0; j < nr_pages; j++) {
-               if (!pages[j])
-                       break;
-               put_page(pages[j]);
+       bio_for_each_segment_all(bvec, bio, j) {
+               put_page(bvec->bv_page);
        }
  out:
        kfree(pages);
index e4b0ed386bc82f339829fea2dcedd42fe2f0aff5..39aecad286fe482ff3f44fe08b286c2edbf3553b 100644 (file)
@@ -57,6 +57,8 @@ struct key *find_asymmetric_key(struct key *keyring,
        char *req, *p;
        int len;
 
+       BUG_ON(!id_0 && !id_1);
+
        if (id_0) {
                lookup = id_0->data;
                len = id_0->len;
@@ -105,7 +107,7 @@ struct key *find_asymmetric_key(struct key *keyring,
        if (id_0 && id_1) {
                const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
 
-               if (!kids->id[0]) {
+               if (!kids->id[1]) {
                        pr_debug("First ID matches, but second is missing\n");
                        goto reject;
                }
index af4cd864911752478ba5f3c2732273f9624d434f..d140d8bb2c96140c408b1e3450f288e562372743 100644 (file)
@@ -88,6 +88,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg)
        bool want = false;
 
        sinfo = msg->signed_infos;
+       if (!sinfo)
+               goto inconsistent;
+
        if (sinfo->authattrs) {
                want = true;
                msg->have_authattrs = true;
index 5e31c8d776dfc8a144f25e70122ee77d335e196a..325a14da58278f01b8c1ffd92bdd8990db2860c4 100644 (file)
@@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
        int err;
 
        absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-       buffer = kmalloc(absize, GFP_KERNEL);
+       buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;
 
@@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req)
 
 int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 {
-       struct scatterlist *sg = req->src;
-       unsigned int offset = sg->offset;
        unsigned int nbytes = req->nbytes;
+       struct scatterlist *sg;
+       unsigned int offset;
        int err;
 
-       if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
+       if (nbytes &&
+           (sg = req->src, offset = sg->offset,
+            nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
                void *data;
 
                data = kmap_atomic(sg_page(sg));
index 4faa0fd53b0c120d39022ad726dbbe2c74f787bd..d5692e35fab1f069376f7c54358ff5e5f0cb352e 100644 (file)
@@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
 
 static int skcipher_walk_first(struct skcipher_walk *walk)
 {
-       walk->nbytes = 0;
-
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;
 
-       if (unlikely(!walk->total))
-               return 0;
-
        walk->buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
                int err = skcipher_copy_iv(walk);
@@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 
+       walk->total = req->cryptlen;
+       walk->nbytes = 0;
+
+       if (unlikely(!walk->total))
+               return 0;
+
        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);
 
-       walk->total = req->cryptlen;
        walk->iv = req->iv;
        walk->oiv = req->iv;
 
@@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int err;
 
+       walk->nbytes = 0;
+
+       if (unlikely(!walk->total))
+               return 0;
+
        walk->flags &= ~SKCIPHER_WALK_PHYS;
 
        scatterwalk_start(&walk->in, req->src);
index d86c11a8c882c37ee7a7c2c18e41d54aaec25af8..e31828ed00466cc08e8ee2e73bacb2d99c1e0a34 100644 (file)
@@ -554,8 +554,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
                ctx->name[len - 1] = 0;
 
                if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-                            "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME)
-                       return -ENAMETOOLONG;
+                            "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
+                       err = -ENAMETOOLONG;
+                       goto err_drop_spawn;
+               }
        } else
                goto err_drop_spawn;
 
index 3fb8ff5134613fb044b68ab8f9e8b3c7a234f1b5..e26ea209b63ef1b8f89a6112de5c983db3f4feee 100644 (file)
@@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
  *     }
  * }
  *
- * Calling this function with index %2 return %-ENOENT and with index %3
- * returns the last entry. If the property does not contain any more values
- * %-ENODATA is returned. The NULL entry must be single integer and
- * preferably contain value %0.
+ * Calling this function with index %2 or index %3 returns %-ENOENT. If the
+ * property does not contain any more values, %-ENOENT is returned. The NULL
+ * entry must be a single integer and preferably contain the value %0.
  *
  * Return: %0 on success, negative error code on failure.
  */
@@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 
        data = acpi_device_data_of_node(fwnode);
        if (!data)
-               return -EINVAL;
+               return -ENOENT;
 
        ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
        if (ret)
-               return ret;
+               return ret == -EINVAL ? -ENOENT : -EINVAL;
 
        /*
         * The simplest case is when the value is a single reference.  Just
@@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 
                ret = acpi_bus_get_device(obj->reference.handle, &device);
                if (ret)
-                       return ret;
+                       return ret == -ENODEV ? -EINVAL : ret;
 
                args->adev = device;
                args->nargs = 0;
@@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
         * The index argument is then used to determine which reference
         * the caller wants (along with the arguments).
         */
-       if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count)
-               return -EPROTO;
+       if (obj->type != ACPI_TYPE_PACKAGE)
+               return -EINVAL;
+       if (index >= obj->package.count)
+               return -ENOENT;
 
        element = obj->package.elements;
        end = element + obj->package.count;
@@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
                        ret = acpi_bus_get_device(element->reference.handle,
                                                  &device);
                        if (ret)
-                               return -ENODEV;
+                               return -EINVAL;
 
                        nargs = 0;
                        element++;
@@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
                                else if (type == ACPI_TYPE_LOCAL_REFERENCE)
                                        break;
                                else
-                                       return -EPROTO;
+                                       return -EINVAL;
                        }
 
                        if (nargs > MAX_ACPI_REFERENCE_ARGS)
-                               return -EPROTO;
+                               return -EINVAL;
 
                        if (idx == index) {
                                args->adev = device;
@@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
                                return -ENOENT;
                        element++;
                } else {
-                       return -EPROTO;
+                       return -EINVAL;
                }
 
                idx++;
        }
 
-       return -ENODATA;
+       return -ENOENT;
 }
 EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
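
A hedged caller-side sketch of the normalized error contract above; the property name and the use_reference() helper are hypothetical, and acpi_node_get_property_reference() is the wrapper that supplies MAX_ACPI_REFERENCE_ARGS:

	/* Iterate every reference in a package property. */
	struct acpi_reference_args args;
	size_t index;
	int ret;

	for (index = 0; ; index++) {
		ret = acpi_node_get_property_reference(fwnode, "demo-refs",
						       index, &args);
		if (ret == -ENOENT)
			break;		/* walked past the last entry */
		if (ret)
			return ret;	/* -EINVAL: malformed property */

		use_reference(args.adev, args.nargs, args.args);
	}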
 
index ab34239a76ee53fbb3dc62b0f37b30eaf3a9031e..0621a95b8597a986801a70ed874d203be68e8c51 100644 (file)
@@ -2582,6 +2582,48 @@ static bool binder_proc_transaction(struct binder_transaction *t,
        return true;
 }
 
+/**
+ * binder_get_node_refs_for_txn() - Get required refs on node for txn
+ * @node:         struct binder_node for which to get refs
+ * @procp:        returns @node->proc if valid
+ * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
+ *
+ * User-space normally keeps the node alive when creating a transaction
+ * since it has a reference to the target. The local strong ref keeps it
+ * alive if the sending process dies before the target process processes
+ * the transaction. If the source process is malicious or has a reference
+ * counting bug, relying on the local strong ref can fail.
+ *
+ * Since user-space can cause the local strong ref to go away, we also take
+ * a tmpref on the node to ensure it survives while we are constructing
+ * the transaction. We also need a tmpref on the proc while we are
+ * constructing the transaction, so we take that here as well.
+ *
+ * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
+ * Also sets @procp if valid. If @node->proc is NULL, indicating that the
+ * target proc has died, @error is set to BR_DEAD_REPLY.
+ */
+static struct binder_node *binder_get_node_refs_for_txn(
+               struct binder_node *node,
+               struct binder_proc **procp,
+               uint32_t *error)
+{
+       struct binder_node *target_node = NULL;
+
+       binder_node_inner_lock(node);
+       if (node->proc) {
+               target_node = node;
+               binder_inc_node_nilocked(node, 1, 0, NULL);
+               binder_inc_node_tmpref_ilocked(node);
+               node->proc->tmp_ref++;
+               *procp = node->proc;
+       } else
+               *error = BR_DEAD_REPLY;
+       binder_node_inner_unlock(node);
+
+       return target_node;
+}
+
 static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply,
@@ -2685,43 +2727,35 @@ static void binder_transaction(struct binder_proc *proc,
                        ref = binder_get_ref_olocked(proc, tr->target.handle,
                                                     true);
                        if (ref) {
-                               binder_inc_node(ref->node, 1, 0, NULL);
-                               target_node = ref->node;
-                       }
-                       binder_proc_unlock(proc);
-                       if (target_node == NULL) {
+                               target_node = binder_get_node_refs_for_txn(
+                                               ref->node, &target_proc,
+                                               &return_error);
+                       } else {
                                binder_user_error("%d:%d got transaction to invalid handle\n",
-                                       proc->pid, thread->pid);
+                                                 proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
-                               return_error_param = -EINVAL;
-                               return_error_line = __LINE__;
-                               goto err_invalid_target_handle;
                        }
+                       binder_proc_unlock(proc);
                } else {
                        mutex_lock(&context->context_mgr_node_lock);
                        target_node = context->binder_context_mgr_node;
-                       if (target_node == NULL) {
+                       if (target_node)
+                               target_node = binder_get_node_refs_for_txn(
+                                               target_node, &target_proc,
+                                               &return_error);
+                       else
                                return_error = BR_DEAD_REPLY;
-                               mutex_unlock(&context->context_mgr_node_lock);
-                               return_error_line = __LINE__;
-                               goto err_no_context_mgr_node;
-                       }
-                       binder_inc_node(target_node, 1, 0, NULL);
                        mutex_unlock(&context->context_mgr_node_lock);
                }
-               e->to_node = target_node->debug_id;
-               binder_node_lock(target_node);
-               target_proc = target_node->proc;
-               if (target_proc == NULL) {
-                       binder_node_unlock(target_node);
-                       return_error = BR_DEAD_REPLY;
+               if (!target_node) {
+                       /*
+                        * return_error is set above
+                        */
+                       return_error_param = -EINVAL;
                        return_error_line = __LINE__;
                        goto err_dead_binder;
                }
-               binder_inner_proc_lock(target_proc);
-               target_proc->tmp_ref++;
-               binder_inner_proc_unlock(target_proc);
-               binder_node_unlock(target_node);
+               e->to_node = target_node->debug_id;
                if (security_binder_transaction(proc->tsk,
                                                target_proc->tsk) < 0) {
                        return_error = BR_FAILED_REPLY;
@@ -3071,6 +3105,8 @@ static void binder_transaction(struct binder_proc *proc,
        if (target_thread)
                binder_thread_dec_tmpref(target_thread);
        binder_proc_dec_tmpref(target_proc);
+       if (target_node)
+               binder_dec_node_tmpref(target_node);
        /*
         * write barrier to synchronize with initialization
         * of log entry
@@ -3090,6 +3126,8 @@ err_bad_parent:
 err_copy_data_failed:
        trace_binder_transaction_failed_buffer_release(t->buffer);
        binder_transaction_buffer_release(target_proc, t->buffer, offp);
+       if (target_node)
+               binder_dec_node_tmpref(target_node);
        target_node = NULL;
        t->buffer->transaction = NULL;
        binder_alloc_free_buf(&target_proc->alloc, t->buffer);
@@ -3104,13 +3142,14 @@ err_bad_call_stack:
 err_empty_call_stack:
 err_dead_binder:
 err_invalid_target_handle:
-err_no_context_mgr_node:
        if (target_thread)
                binder_thread_dec_tmpref(target_thread);
        if (target_proc)
                binder_proc_dec_tmpref(target_proc);
-       if (target_node)
+       if (target_node) {
                binder_dec_node(target_node, 1, 0);
+               binder_dec_node_tmpref(target_node);
+       }
 
        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
                     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
index 3855902f2c5b369dc538759950f5b4c951c484d6..aae2402f3791dbc12174cce7ae0552a3c59e5b02 100644 (file)
@@ -27,13 +27,21 @@ static struct bus_type node_subsys = {
 
 static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
 {
+       ssize_t n;
+       cpumask_var_t mask;
        struct node *node_dev = to_node(dev);
-       const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
 
        /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
        BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
 
-       return cpumap_print_to_pagebuf(list, buf, mask);
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+               return 0;
+
+       cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
+       n = cpumap_print_to_pagebuf(list, buf, mask);
+       free_cpumask_var(mask);
+
+       return n;
 }
 
 static inline ssize_t node_read_cpumask(struct device *dev,
index d0b65bbe7e1513f140f198aa839fd03ef925e5e6..7ed99c1b2a8b99cab9b5bc6bd002933615e9d00f 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/phy.h>
 
 struct property_set {
+       struct device *dev;
        struct fwnode_handle fwnode;
        const struct property_entry *properties;
 };
@@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
  * Caller is responsible to call fwnode_handle_put() on the returned
  * args->fwnode pointer.
  *
+ * Returns: %0 on success
+ *         %-ENOENT when the index is out of bounds, the index has an empty
+ *                  reference or the property was not found
+ *         %-EINVAL on parse error
  */
 int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
                                       const char *prop, const char *nargs_prop,
@@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
 void device_remove_properties(struct device *dev)
 {
        struct fwnode_handle *fwnode;
+       struct property_set *pset;
 
        fwnode = dev_fwnode(dev);
        if (!fwnode)
@@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev)
         * the pset. If there is no real firmware node (ACPI/DT) primary
         * will hold the pset.
         */
-       if (is_pset_node(fwnode)) {
+       pset = to_pset_node(fwnode);
+       if (pset) {
                set_primary_fwnode(dev, NULL);
-               pset_free_set(to_pset_node(fwnode));
        } else {
-               fwnode = fwnode->secondary;
-               if (!IS_ERR(fwnode) && is_pset_node(fwnode)) {
+               pset = to_pset_node(fwnode->secondary);
+               if (pset && dev == pset->dev)
                        set_secondary_fwnode(dev, NULL);
-                       pset_free_set(to_pset_node(fwnode));
-               }
        }
+       if (pset && dev == pset->dev)
+               pset_free_set(pset);
 }
 EXPORT_SYMBOL_GPL(device_remove_properties);
 
@@ -938,6 +944,7 @@ int device_add_properties(struct device *dev,
 
        p->fwnode.ops = &pset_fwnode_ops;
        set_secondary_fwnode(dev, &p->fwnode);
+       p->dev = dev;
        return 0;
 }
 EXPORT_SYMBOL_GPL(device_add_properties);
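
For illustration, a minimal sketch (all "demo" names are made up) of the add/remove pairing that the new pset->dev ownership check protects:

	static const struct property_entry demo_properties[] = {
		PROPERTY_ENTRY_U32("demo,max-speed", 100),
		PROPERTY_ENTRY_STRING("demo,label", "demo0"),
		{ }	/* sentinel */
	};

	static int demo_probe(struct device *dev)
	{
		/* Installs a property_set as the secondary fwnode; the set
		 * now also records dev as its owner. */
		return device_add_properties(dev, demo_properties);
	}

	static void demo_remove(struct device *dev)
	{
		/* Frees the set only when this device still owns it. */
		device_remove_properties(dev);
	}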
index 883dfebd3014b506a0861aed0640dc313fd8b0cc..baebbdfd74d54f4969fac84e06c8b1741b831c0c 100644 (file)
@@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
        struct nbd_config *config = nbd->config;
        config->blksize = blocksize;
        config->bytesize = blocksize * nr_blocks;
-       nbd_size_update(nbd);
 }
 
 static void nbd_complete_rq(struct request *req)
@@ -1094,6 +1093,7 @@ static int nbd_start_device(struct nbd_device *nbd)
                args->index = i;
                queue_work(recv_workqueue, &args->work);
        }
+       nbd_size_update(nbd);
        return error;
 }
 
index 7cedb4295e9d325343e296b8b299cb407b7b2a55..64d0fc17c1742ab74aa232da503d08e344b594b2 100644 (file)
@@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
                return NULL;
        *dma_handle = dma_map_single(dev, buf, s->size, dir);
        if (dma_mapping_error(dev, *dma_handle)) {
-               kfree(buf);
+               kmem_cache_free(s, buf);
                buf = NULL;
        }
        return buf;
index d9fbbf01062bc84f17001808ea6060c8fa7d7110..0f9754e077191e07f3ad985ad2897822f99a932c 100644 (file)
@@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx {
 /* The crypto framework makes it hard to avoid this global. */
 static struct device *artpec6_crypto_dev;
 
-static struct dentry *dbgfs_root;
-
 #ifdef CONFIG_FAULT_INJECTION
 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
@@ -2984,6 +2982,8 @@ struct dbgfs_u32 {
        char *desc;
 };
 
+static struct dentry *dbgfs_root;
+
 static void artpec6_crypto_init_debugfs(void)
 {
        dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
index b585ce54a8028ccfabd61c8a0e4a768f17328345..4835dd4a9e5075e3c652ea78072c515fc5537fd1 100644 (file)
@@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
 {
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
        struct scatterlist sg[1], *tsg;
-       int err = 0, len = 0, reg, ncp;
+       int err = 0, len = 0, reg, ncp = 0;
        unsigned int i;
-       const u32 *buffer = (const u32 *)rctx->buffer;
+       u32 *buffer = (void *)rctx->buffer;
 
        rctx->sg = hdev->req->src;
        rctx->total = hdev->req->nbytes;
@@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
                reg |= HASH_CR_DMAA;
                stm32_hash_write(hdev, HASH_CR, reg);
 
-               for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++)
-                       stm32_hash_write(hdev, HASH_DIN, buffer[i]);
-
-               stm32_hash_set_nblw(hdev, ncp);
+               if (ncp) {
+                       memset(buffer + ncp, 0,
+                              DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
+                       writesl(hdev->io_base + HASH_DIN, buffer,
+                               DIV_ROUND_UP(ncp, sizeof(u32)));
+               }
+               stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32)));
                reg = stm32_hash_read(hdev, HASH_STR);
                reg |= HASH_STR_DCAL;
                stm32_hash_write(hdev, HASH_STR, reg);
index 66fb40d0ebdbbec521499cd58cf2e1d55c195878..03830634e141e70782c96ca42945f30babc0283a 100644 (file)
@@ -383,7 +383,7 @@ err_put_fd:
        return err;
 }
 
-static void sync_fill_fence_info(struct dma_fence *fence,
+static int sync_fill_fence_info(struct dma_fence *fence,
                                 struct sync_fence_info *info)
 {
        strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
@@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence,
                test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
                ktime_to_ns(fence->timestamp) :
                ktime_set(0, 0);
+
+       return info->status;
 }
 
 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
@@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
         * sync_fence_info and return the actual number of fences on
         * info->num_fences.
         */
-       if (!info.num_fences)
+       if (!info.num_fences) {
+               info.status = dma_fence_is_signaled(sync_file->fence);
                goto no_fences;
+       } else {
+               info.status = 1;
+       }
 
        if (info.num_fences < num_fences)
                return -EINVAL;
@@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
        if (!fence_info)
                return -ENOMEM;
 
-       for (i = 0; i < num_fences; i++)
-               sync_fill_fence_info(fences[i], &fence_info[i]);
+       for (i = 0; i < num_fences; i++) {
+               int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+               info.status = info.status <= 0 ? info.status : status;
+       }
 
        if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
                         size)) {
@@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 
 no_fences:
        sync_file_get_name(sync_file, info.name, sizeof(info.name));
-       info.status = dma_fence_is_signaled(sync_file->fence);
        info.num_fences = num_fences;
 
        if (copy_to_user((void __user *)arg, &info, sizeof(info)))
index 32905d5606ac8b90585148e26ace36c2b7352808..339186f25a2ae529c447bd8359a9c55e0049d2ea 100644 (file)
@@ -212,11 +212,12 @@ struct msgdma_device {
 static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
 {
        struct msgdma_sw_desc *desc;
+       unsigned long flags;
 
-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);
        desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
        list_del(&desc->node);
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);
 
        INIT_LIST_HEAD(&desc->tx_list);
 
@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
        struct msgdma_device *mdev = to_mdev(tx->chan);
        struct msgdma_sw_desc *new;
        dma_cookie_t cookie;
+       unsigned long flags;
 
        new = tx_to_desc(tx);
-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);
        cookie = dma_cookie_assign(tx);
 
        list_add_tail(&new->node, &mdev->pending_list);
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);
 
        return cookie;
 }
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
        struct msgdma_extended_desc *desc;
        size_t copy;
        u32 desc_cnt;
+       unsigned long irqflags;
 
        desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
 
-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
-               spin_unlock_bh(&mdev->lock);
+               spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
        mdev->desc_free_cnt -= desc_cnt;
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, irqflags);
 
        do {
                /* Allocate and populate the descriptor */
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
        u32 desc_cnt = 0, i;
        struct scatterlist *sg;
        u32 stride;
+       unsigned long irqflags;
 
        for_each_sg(sgl, sg, sg_len, i)
                desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
 
-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
-               spin_unlock_bh(&mdev->lock);
+               spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
        mdev->desc_free_cnt -= desc_cnt;
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, irqflags);
 
        avail = sg_dma_len(sgl);
 
@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
 static void msgdma_issue_pending(struct dma_chan *chan)
 {
        struct msgdma_device *mdev = to_mdev(chan);
+       unsigned long flags;
 
-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);
        msgdma_start_transfer(mdev);
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);
 }
 
 /**
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
 static void msgdma_free_chan_resources(struct dma_chan *dchan)
 {
        struct msgdma_device *mdev = to_mdev(dchan);
+       unsigned long flags;
 
-       spin_lock_bh(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);
        msgdma_free_descriptors(mdev);
-       spin_unlock_bh(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);
        kfree(mdev->sw_desq);
 }
 
@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
        u32 count;
        u32 __maybe_unused size;
        u32 __maybe_unused status;
+       unsigned long flags;
 
-       spin_lock(&mdev->lock);
+       spin_lock_irqsave(&mdev->lock, flags);
 
        /* Read number of responses that are available */
        count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data)
                 * bits. So we need to just drop these values.
                 */
                size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
-               status = ioread32(mdev->resp - MSGDMA_RESP_STATUS);
+               status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
 
                msgdma_complete_descriptor(mdev);
                msgdma_chan_desc_cleanup(mdev);
        }
 
-       spin_unlock(&mdev->lock);
+       spin_unlock_irqrestore(&mdev->lock, flags);
 }
 
 /**
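
As a design note, a small sketch of the locking shape this conversion adopts: the _bh variants only disable softirqs, so once the lock can be contended from hard-IRQ context the irqsave/irqrestore form is required (names below are illustrative):

	static void demo_locked_op(spinlock_t *lock)
	{
		unsigned long flags;

		/* Disables local IRQs and records their prior state. */
		spin_lock_irqsave(lock, flags);
		/* ... critical section safe against IRQ-context reentry ... */
		spin_unlock_irqrestore(lock, flags);
	}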
index 3879f80a4815cb27ba4329e29510752e34331ba4..a7ea20e7b8e94cd9527d73785578abd599e5029d 100644 (file)
@@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
        struct edma_desc *edesc;
        struct device *dev = chan->device->dev;
        struct edma_chan *echan = to_edma_chan(chan);
-       unsigned int width, pset_len;
+       unsigned int width, pset_len, array_size;
 
        if (unlikely(!echan || !len))
                return NULL;
 
+       /* Align the array size (acnt block) with the transfer properties */
+       switch (__ffs((src | dest | len))) {
+       case 0:
+               array_size = SZ_32K - 1;
+               break;
+       case 1:
+               array_size = SZ_32K - 2;
+               break;
+       default:
+               array_size = SZ_32K - 4;
+               break;
+       }
+
        if (len < SZ_64K) {
                /*
                 * Transfer size less than 64K can be handled with one paRAM
@@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
                 * When the full_length is a multiple of 32767 one slot can be
                 * used to complete the transfer.
                 */
-               width = SZ_32K - 1;
+               width = array_size;
                pset_len = rounddown(len, width);
                /* One slot is enough for lengths multiple of (SZ_32K -1) */
                if (unlikely(pset_len == len))
@@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
                }
                dest += pset_len;
                src += pset_len;
-               pset_len = width = len % (SZ_32K - 1);
+               pset_len = width = len % array_size;
 
                ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
                                       width, pset_len, DMA_MEM_TO_MEM);
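
A worked illustration (values derived from the switch statement above) of how the __ffs()-based choice keeps the array size aligned with the transfer:

	/*
	 * __ffs(src | dest | len) is the least significant set bit of the
	 * combined addresses and length, i.e. their common power-of-two
	 * alignment:
	 *
	 *   __ffs == 0 (something is odd) -> array_size = SZ_32K - 1 = 32767
	 *   __ffs == 1 (2-byte aligned)   -> array_size = SZ_32K - 2 = 32766
	 *   __ffs >= 2 (4-byte aligned)   -> array_size = SZ_32K - 4 = 32764
	 *
	 * Each value is the largest count below SZ_32K that is still a
	 * multiple of the common alignment.
	 */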
index 2f65a8fde21d4f5be2e2a623676047e4674da27f..f1d04b70ee672af4c99dcd21298d03c65898be96 100644 (file)
@@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
        mutex_lock(&xbar->mutex);
        map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
                                            xbar->dma_requests);
-       mutex_unlock(&xbar->mutex);
        if (map->xbar_out == xbar->dma_requests) {
+               mutex_unlock(&xbar->mutex);
                dev_err(&pdev->dev, "Run out of free DMA requests\n");
                kfree(map);
                return ERR_PTR(-ENOMEM);
        }
        set_bit(map->xbar_out, xbar->dma_inuse);
+       mutex_unlock(&xbar->mutex);
 
        map->xbar_in = (u16)dma_spec->args[0];
 
index 3388d54ba11468a0ab013f1160b1ea2a258ca354..3f80f167ed56d917405aaad1dd5e09598c7fc9e6 100644 (file)
@@ -453,7 +453,8 @@ config GPIO_TS4800
 config GPIO_THUNDERX
        tristate "Cavium ThunderX/OCTEON-TX GPIO"
        depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
-       depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY
+       depends on PCI_MSI
+       select IRQ_DOMAIN_HIERARCHY
        select IRQ_FASTEOI_HIERARCHY_HANDLERS
        help
          Say yes here to support the on-chip GPIO lines on the ThunderX
index dbf869fb63ced2cb2884be234c5275f2893237b2..3233b72b682809e197ed2528f2c9bc634400d878 100644 (file)
@@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
        if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
                irq_set_handler_locked(d, handle_level_irq);
        else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-               irq_set_handler_locked(d, handle_edge_irq);
+               /*
+                * Edge IRQs are already cleared/acked in the irq_handler
+                * and do not need to be masked; as a result, the
+                * handle_edge_irq() logic is excessive here and may cause
+                * loss of interrupts. So just use handle_simple_irq().
+                */
+               irq_set_handler_locked(d, handle_simple_irq);
 
        return 0;
 
@@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
 static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
 {
        void __iomem *isr_reg = NULL;
-       u32 isr;
+       u32 enabled, isr, level_mask;
        unsigned int bit;
        struct gpio_bank *bank = gpiobank;
        unsigned long wa_lock_flags;
@@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
        pm_runtime_get_sync(bank->chip.parent);
 
        while (1) {
-               u32 isr_saved, level_mask = 0;
-               u32 enabled;
-
                raw_spin_lock_irqsave(&bank->lock, lock_flags);
 
                enabled = omap_get_gpio_irqbank_mask(bank);
-               isr_saved = isr = readl_relaxed(isr_reg) & enabled;
+               isr = readl_relaxed(isr_reg) & enabled;
 
                if (bank->level_mask)
                        level_mask = bank->level_mask & enabled;
+               else
+                       level_mask = 0;
 
                /* clear edge sensitive interrupts before handler(s) are
                called so that we don't miss any interrupt that occurs while
                executing them */
-               omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
-               omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
-               omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
+               if (isr & ~level_mask)
+                       omap_clear_gpio_irqbank(bank, isr & ~level_mask);
 
                raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
 
@@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 
 /*---------------------------------------------------------------------*/
 
-static void __init omap_gpio_show_rev(struct gpio_bank *bank)
+static void omap_gpio_show_rev(struct gpio_bank *bank)
 {
        static bool called;
        u32 rev;
index 4d2113530735185e5dcdc87553b2764df8121242..eb4528c87c0b3977420a2108c7feaaf9b2a95869 100644 (file)
@@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 
        if (pin <= 255) {
                char ev_name[5];
-               sprintf(ev_name, "_%c%02X",
+               sprintf(ev_name, "_%c%02hhX",
                        agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
                        pin);
                if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
index 7ef6c28a34d991a2bba6f20224284072330c22d1..bc746131987ffda438988f7b11bbb413b4c335c3 100644 (file)
@@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
        placement.busy_placement = &placements;
        placements.fpfn = 0;
        placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
-       placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+       placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
 
        r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
        if (unlikely(r))
index 97c94f9683fa047392ba62f128586e0b7e4492bc..38cea6fb25a8b9221d64b43da04c4268a2c986b8 100644 (file)
@@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
 {
        struct amd_sched_rq *rq = entity->rq;
-       int r;
 
        if (!amd_sched_entity_is_initialized(sched, entity))
                return;
+
        /**
         * The client will not queue more IBs during this fini, consume existing
-        * queued IBs or discard them on SIGKILL
+        * queued IBs
        */
-       if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
-               r = -ERESTARTSYS;
-       else
-               r = wait_event_killable(sched->job_scheduled,
-                                       amd_sched_entity_is_idle(entity));
-       amd_sched_rq_remove_entity(rq, entity);
-       if (r) {
-               struct amd_sched_job *job;
+       wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
 
-               /* Park the kernel for a moment to make sure it isn't processing
-                * our enity.
-                */
-               kthread_park(sched->thread);
-               kthread_unpark(sched->thread);
-               while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
-                       sched->ops->free_job(job);
-
-       }
+       amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
 }
 
index 4e53aae9a1fb19fc6710202188a53119c566aef7..0028591f3f959ced1ad520ee280fb481d7a52898 100644 (file)
@@ -2960,6 +2960,7 @@ out:
                drm_modeset_backoff(&ctx);
        }
 
+       drm_atomic_state_put(state);
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
 
index e651a58c18cf2d5b743439776dd62d50b64f8a7a..82b72425a42f7977c993134a2142434d8689227f 100644 (file)
@@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = {
 static int exynos_drm_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
-       struct exynos_drm_private *private = drm_dev->dev_private;
+       struct exynos_drm_private *private;
 
        if (pm_runtime_suspended(dev) || !drm_dev)
                return 0;
 
+       private = drm_dev->dev_private;
+
        drm_kms_helper_poll_disable(drm_dev);
        exynos_drm_fbdev_suspend(drm_dev);
        private->suspend_state = drm_atomic_helper_suspend(drm_dev);
@@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev)
 static int exynos_drm_resume(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
-       struct exynos_drm_private *private = drm_dev->dev_private;
+       struct exynos_drm_private *private;
 
        if (pm_runtime_suspended(dev) || !drm_dev)
                return 0;
 
+       private = drm_dev->dev_private;
        drm_atomic_helper_resume(drm_dev, private->suspend_state);
        exynos_drm_fbdev_resume(drm_dev);
        drm_kms_helper_poll_enable(drm_dev);
@@ -427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev)
 
        kfree(drm->dev_private);
        drm->dev_private = NULL;
+       dev_set_drvdata(dev, NULL);
 
        drm_dev_unref(drm);
 }
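All three exynos hunks enforce the same discipline: dev_get_drvdata() can legitimately return NULL here (and the unbind hunk now clears it explicitly), so the dev_private dereference has to move after the NULL check. In outline:

    struct drm_device *drm_dev = dev_get_drvdata(dev);

    if (!drm_dev)                      /* NULL before bind or after unbind */
            return 0;

    private = drm_dev->dev_private;    /* safe only once validated */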
index 436377da41baced8e81352587cc3e035d0dc1ba3..03532dfc0cd51b8342e50da61524024dafc8ac34 100644 (file)
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
-       struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
-       int ring_id;
-
        kfree(vgpu->sched_data);
        vgpu->sched_data = NULL;
-
-       spin_lock_bh(&scheduler->mmio_context_lock);
-       for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
-               if (scheduler->engine_owner[ring_id] == vgpu) {
-                       intel_gvt_switch_mmio(vgpu, NULL, ring_id);
-                       scheduler->engine_owner[ring_id] = NULL;
-               }
-       }
-       spin_unlock_bh(&scheduler->mmio_context_lock);
 }
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 {
        struct intel_gvt_workload_scheduler *scheduler =
                &vgpu->gvt->scheduler;
+       int ring_id;
 
        gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                scheduler->need_reschedule = true;
                scheduler->current_vgpu = NULL;
        }
+
+       spin_lock_bh(&scheduler->mmio_context_lock);
+       for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+               if (scheduler->engine_owner[ring_id] == vgpu) {
+                       intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+                       scheduler->engine_owner[ring_id] = NULL;
+               }
+       }
+       spin_unlock_bh(&scheduler->mmio_context_lock);
 }
index 19404c96eeb10670da75ce5426c96c10f6a5d7e1..32e857dc507cf9b1a9247f3bab497616d82e60a9 100644 (file)
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
        if (READ_ONCE(obj->mm.pages))
                return -ENODEV;
 
+       if (obj->mm.madv != I915_MADV_WILLNEED)
+               return -EFAULT;
+
        /* Before the pages are instantiated the object is treated as being
         * in the CPU domain. The pages will be clflushed as required before
         * use, and we can freely write into the pages directly. If userspace
@@ -3013,10 +3016,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct drm_i915_gem_request *request)
 {
+       unsigned long flags;
+
        GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
        dma_fence_set_error(&request->fence, -EIO);
-       i915_gem_request_submit(request);
+
+       spin_lock_irqsave(&request->engine->timeline->lock, flags);
+       __i915_gem_request_submit(request);
        intel_engine_init_global_seqno(request->engine, request->global_seqno);
+       spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
 }
 
 static void engine_set_wedged(struct intel_engine_cs *engine)
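The move from i915_gem_request_submit() to __i915_gem_request_submit() follows the usual kernel naming convention: the double-underscore variant assumes the caller already holds the relevant lock. That lets the submit and the global-seqno update sit in one critical section on the engine timeline. Schematically (generic names, sketch only):

    spin_lock_irqsave(&tl->lock, flags);
    __submit(rq);                   /* unlocked variant; caller holds tl->lock */
    advance_seqno(rq->engine, rq->global_seqno);
    spin_unlock_irqrestore(&tl->lock, flags);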
index 4df039ef2ce316509ecc6faa04e707d135acf507..e161d383b526757a79097eadb9e65260392befe1 100644 (file)
 #include "intel_drv.h"
 #include "i915_trace.h"
 
-static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
 
-       for_each_engine(engine, dev_priv, id) {
-               struct intel_timeline *tl;
+       if (i915->gt.active_requests)
+              return false;
 
-               tl = &ggtt->base.timeline.engine[engine->id];
-               if (i915_gem_active_isset(&tl->last_request))
-                       return false;
-       }
+       for_each_engine(engine, i915, id) {
+              if (engine->last_retired_context != i915->kernel_context)
+                      return false;
+       }
 
-       return true;
+       return true;
 }
 
 static int ggtt_flush(struct drm_i915_private *i915)
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
                                    min_size, alignment, cache_level,
                                    start, end, mode);
 
-       /* Retire before we search the active list. Although we have
+       /*
+        * Retire before we search the active list. Although we have
         * reasonable accuracy in our retirement lists, we may have
         * a stray pin (preventing eviction) that can only be resolved by
         * retiring.
@@ -182,7 +182,8 @@ search_again:
                BUG_ON(ret);
        }
 
-       /* Can we unpin some objects such as idle hw contents,
+       /*
+        * Can we unpin some objects such as idle hw contents,
         * or pending flips? But since only the GGTT has global entries
         * such as scanouts, ringbuffers and contexts, we can skip the
         * purge when inspecting per-process local address spaces.
@@ -190,19 +191,33 @@ search_again:
        if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
                return -ENOSPC;
 
-       if (ggtt_is_idle(dev_priv)) {
-               /* If we still have pending pageflip completions, drop
-                * back to userspace to give our workqueues time to
-                * acquire our locks and unpin the old scanouts.
-                */
-               return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
-       }
+       /*
+        * Not everything in the GGTT is tracked via VMA using
+        * i915_vma_move_to_active(), otherwise we could evict as required
+        * with minimal stalling. Instead we are forced to idle the GPU and
+        * explicitly retire outstanding requests which will then remove
+        * the pinning for active objects such as contexts and rings,
+        * enabling us to evict them on the next iteration.
+        *
+        * To ensure that all user contexts are evictable, we perform
+        * a switch to the perma-pinned kernel context. This also gives
+        * us a termination condition: when the last retired context is
+        * the kernel's, there is no more we can evict.
+        */
+       if (!ggtt_is_idle(dev_priv)) {
+               ret = ggtt_flush(dev_priv);
+               if (ret)
+                       return ret;
 
-       ret = ggtt_flush(dev_priv);
-       if (ret)
-               return ret;
+               goto search_again;
+       }
 
-       goto search_again;
+       /*
+        * If we still have pending pageflip completions, drop
+        * back to userspace to give our workqueues time to
+        * acquire our locks and unpin the old scanouts.
+        */
+       return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
 
 found:
        /* drm_mm doesn't allow any other operations while
index ed7cd9ee2c2af89737b2cc4618317c4d2fc9bedd..c9bcc6c450126e7cf638ba1c872a55938660add8 100644 (file)
@@ -6998,6 +6998,7 @@ enum {
  */
 #define  L3_GENERAL_PRIO_CREDITS(x)            (((x) >> 1) << 19)
 #define  L3_HIGH_PRIO_CREDITS(x)               (((x) >> 1) << 14)
+#define  L3_PRIO_CREDITS_MASK                  ((0x1f << 19) | (0x1f << 14))
 
 #define GEN7_L3CNTLREG1                                _MMIO(0xB01C)
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL                   0x3C47FF8C
index 00c6aee0a9a1902978516cd08514fa9330b424c8..5d4cd3d00564ce7a95bd02a7a5f56fbcb191337d 100644 (file)
@@ -1240,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
 {
        enum port port;
 
-       if (!HAS_DDI(dev_priv))
+       if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                return;
 
        if (!dev_priv->vbt.child_dev_num)
index ff9ecd211abbb07fd8bae3fb6d55a8c33b23770d..b8315bca852b56061ff4cf598245f7902722c09f 100644 (file)
@@ -74,7 +74,7 @@
 #define I9XX_CSC_COEFF_1_0             \
        ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
 
-static bool crtc_state_is_legacy(struct drm_crtc_state *state)
+static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
 {
        return !state->degamma_lut &&
                !state->ctm &&
@@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
        }
 
        mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
-       if (!crtc_state_is_legacy(state)) {
+       if (!crtc_state_is_legacy_gamma(state)) {
                mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
                        (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
        }
@@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
        struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
        enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
 
-       if (crtc_state_is_legacy(state)) {
+       if (crtc_state_is_legacy_gamma(state)) {
                haswell_load_luts(state);
                return;
        }
@@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state)
 
        glk_load_degamma_lut(state);
 
-       if (crtc_state_is_legacy(state)) {
+       if (crtc_state_is_legacy_gamma(state)) {
                haswell_load_luts(state);
                return;
        }
@@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
        uint32_t i, lut_size;
        uint32_t word0, word1;
 
-       if (crtc_state_is_legacy(state)) {
+       if (crtc_state_is_legacy_gamma(state)) {
                /* Turn off degamma/gamma on CGM block. */
                I915_WRITE(CGM_PIPE_MODE(pipe),
                           (state->ctm ? CGM_PIPE_MODE_CSC : 0));
@@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc,
                return 0;
 
        /*
-        * We also allow no degamma lut and a gamma lut at the legacy
+        * We also allow no degamma lut/ctm and a gamma lut at the legacy
         * size (256 entries).
         */
-       if (!crtc_state->degamma_lut &&
-           crtc_state->gamma_lut &&
-           crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
+       if (crtc_state_is_legacy_gamma(crtc_state))
                return 0;
 
        return -EINVAL;
index 476681d5940c7d381c2d48f2c4a1202054e79845..5e5fe03b638cbf2ee17206ccd4c6ee985134645e 100644 (file)
@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
                            int *n_entries)
 {
        if (IS_BROADWELL(dev_priv)) {
-               *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
-               return hsw_ddi_translations_fdi;
+               *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
+               return bdw_ddi_translations_fdi;
        } else if (IS_HASWELL(dev_priv)) {
                *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
                return hsw_ddi_translations_fdi;
@@ -2102,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
                 * register writes.
                 */
                val = I915_READ(DPCLKA_CFGCR0);
-               val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) |
-                        DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port));
+               val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
                I915_WRITE(DPCLKA_CFGCR0, val);
        } else if (IS_GEN9_BC(dev_priv)) {
                /* DDI -> PLL mapping  */
index 64f7b51ed97c18ee036c6315bff13324e975f62e..5c7828c52d12562e8e95872112aef34d367bffc6 100644 (file)
@@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+       enum transcoder cpu_transcoder;
        struct drm_display_mode *mode;
        struct intel_crtc_state *pipe_config;
-       int htot = I915_READ(HTOTAL(cpu_transcoder));
-       int hsync = I915_READ(HSYNC(cpu_transcoder));
-       int vtot = I915_READ(VTOTAL(cpu_transcoder));
-       int vsync = I915_READ(VSYNC(cpu_transcoder));
+       u32 htot, hsync, vtot, vsync;
        enum pipe pipe = intel_crtc->pipe;
 
        mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        i9xx_crtc_clock_get(intel_crtc, pipe_config);
 
        mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+       cpu_transcoder = pipe_config->cpu_transcoder;
+       htot = I915_READ(HTOTAL(cpu_transcoder));
+       hsync = I915_READ(HSYNC(cpu_transcoder));
+       vtot = I915_READ(VTOTAL(cpu_transcoder));
+       vsync = I915_READ(VSYNC(cpu_transcoder));
+
        mode->hdisplay = (htot & 0xffff) + 1;
        mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
        mode->hsync_start = (hsync & 0xffff) + 1;
index 64134947c0aae4e478767d3347f5ac73d8d87aa5..203198659ab2dbfe788531dd60878efbd61c303e 100644 (file)
@@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
 
-       intel_dp->panel_power_off_time = ktime_get_boottime();
        wait_panel_off(intel_dp);
+       intel_dp->panel_power_off_time = ktime_get_boottime();
 
        /* We got a reference when we enabled the VDD. */
        intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
@@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
         * seems sufficient to avoid this problem.
         */
        if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
-               vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10);
+               vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
                DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
                              vbt.t11_t12);
        }
index a2a3d93d67bd252a3c9d137bedb66be26816bd23..df808a94c51194a886d8664ff8b8118ad05870ef 100644 (file)
@@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 
        /* 3. Configure DPLL_CFGCR0 */
        /* Avoid touching CFGCR1 if HDMI mode is not enabled */
-       if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) {
+       if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
                val = pll->state.hw_state.cfgcr1;
                I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
                /* 4. Read back to ensure writes completed */
index 9ab5969413722a5999a4266629ea2ba0fc2305f0..3c2d9cf22ed5a537253a14c2fe85ee200ce7b24c 100644 (file)
@@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
        }
 
        /* WaProgramL3SqcReg1DefaultForPerf:bxt */
-       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
-               I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
-                                          L3_HIGH_PRIO_CREDITS(2));
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+               u32 val = I915_READ(GEN8_L3SQCREG1);
+               val &= ~L3_PRIO_CREDITS_MASK;
+               val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+               I915_WRITE(GEN8_L3SQCREG1, val);
+       }
 
        /* WaToEnableHwFixForPushConstHWBug:bxt */
        if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
index ed662937ec3c85685b7fed049f381c6d1cb5654c..0a09f8ff6aff6710ea3580329d83646a67593b8c 100644 (file)
@@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
                                   int high_prio_credits)
 {
        u32 misccpctl;
+       u32 val;
 
        /* WaTempDisableDOPClkGating:bdw */
        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 
-       I915_WRITE(GEN8_L3SQCREG1,
-                  L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
-                  L3_HIGH_PRIO_CREDITS(high_prio_credits));
+       val = I915_READ(GEN8_L3SQCREG1);
+       val &= ~L3_PRIO_CREDITS_MASK;
+       val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
+       val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
+       I915_WRITE(GEN8_L3SQCREG1, val);
 
        /*
         * Wait at least 100 clocks before re-enabling clock gating.
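Both L3SQCREG1 hunks replace a blind register write with the same read-modify-write shape, so that only the two credit fields change and whatever else the register holds survives. Self-contained sketch using the mask added to i915_reg.h above:

    #define L3_GENERAL_PRIO_CREDITS(x)  (((x) >> 1) << 19)
    #define L3_HIGH_PRIO_CREDITS(x)     (((x) >> 1) << 14)
    #define L3_PRIO_CREDITS_MASK        ((0x1f << 19) | (0x1f << 14))

    static unsigned int l3sqc_update(unsigned int reg, int general, int high)
    {
            reg &= ~L3_PRIO_CREDITS_MASK;   /* clear both 5-bit credit fields */
            reg |= L3_GENERAL_PRIO_CREDITS(general) | L3_HIGH_PRIO_CREDITS(high);
            return reg;                     /* all other bits preserved */
    }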
index b3a087cb0860d99f429719077ae795cf734a0b9e..49577eba8e7efc30e68e2b8ea8a88cda3232dae7 100644 (file)
@@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
 {
        enum i915_power_well_id id = power_well->id;
        bool wait_fuses = power_well->hsw.has_fuses;
-       enum skl_power_gate pg;
+       enum skl_power_gate uninitialized_var(pg);
        u32 val;
 
        if (wait_fuses) {
index dbb31a0144194662a85f20ae0b77117ce071b901..deaf869374ea7016bbaa33aa45fb013aa00ec6ad 100644 (file)
@@ -248,7 +248,7 @@ disable_clks:
        clk_disable_unprepare(ahb_clk);
 disable_gdsc:
        regulator_disable(gdsc_reg);
-       pm_runtime_put_autosuspend(dev);
+       pm_runtime_put_sync(dev);
 put_clk:
        clk_put(ahb_clk);
 put_gdsc:
index c2bdad88447eb59de05e1476d7607cacfb968376..824067d2d4277d36699b1f15c6d58f74a97fe23f 100644 (file)
@@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
                                  .caps = MDP_LM_CAP_WB },
                             },
                .nb_stages = 5,
+               .max_width = 2048,
+               .max_height = 0xFFFF,
        },
        .dspp = {
                .count = 3,
index 6fcb58ab718cd9c986155f50eb55cccf07c22c32..44097767700124df63f8a325969ea44b494256d7 100644 (file)
@@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 
        spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
 
-       pm_runtime_put_autosuspend(&pdev->dev);
-
 set_cursor:
        ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
        if (ret) {
index f15821a0d90089d1045e810e4b28af552db9eec0..ea5bb0e1632c69e45e746d8abfb820ba9d540e0b 100644 (file)
@@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
        struct dma_fence *fence;
        int i, ret;
 
-       if (!exclusive) {
-               /* NOTE: _reserve_shared() must happen before _add_shared_fence(),
-                * which makes this a slightly strange place to call it.  OTOH this
-                * is a convenient can-fail point to hook it in.  (And similar to
-                * how etnaviv and nouveau handle this.)
-                */
-               ret = reservation_object_reserve_shared(msm_obj->resv);
-               if (ret)
-                       return ret;
-       }
-
        fobj = reservation_object_get_list(msm_obj->resv);
        if (!fobj || (fobj->shared_count == 0)) {
                fence = reservation_object_get_excl(msm_obj->resv);
@@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
        }
 
        vaddr = msm_gem_get_vaddr(obj);
-       if (!vaddr) {
+       if (IS_ERR(vaddr)) {
                msm_gem_put_iova(obj, aspace);
                drm_gem_object_unreference(obj);
-               return ERR_PTR(-ENOMEM);
+               return ERR_CAST(vaddr);
        }
 
        if (bo)
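The vaddr check relies on the <linux/err.h> convention: msm_gem_get_vaddr() signals failure with an ERR_PTR-encoded pointer rather than NULL, so the old !vaddr test let errors through. The idiom in isolation (mapping_helper is hypothetical):

    void *vaddr = mapping_helper(obj);  /* ERR_PTR(-ENOMEM) etc. on failure */

    if (IS_ERR(vaddr))
            return ERR_CAST(vaddr);     /* forward the encoded errno, retyped */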
index 5d0a75d4b249c439ff9ff072f89c87c5e95bd776..93535cac0676355d935e1cf032d4601ba5000ac3 100644 (file)
@@ -221,7 +221,7 @@ fail:
        return ret;
 }
 
-static int submit_fence_sync(struct msm_gem_submit *submit)
+static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 {
        int i, ret = 0;
 
@@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit)
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
                bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
 
+               if (!write) {
+                       /* NOTE: _reserve_shared() must happen before
+                        * _add_shared_fence(), which makes this a slightly
+                        * strange place to call it.  OTOH this is a
+                        * convenient can-fail point to hook it in.
+                        */
+                       ret = reservation_object_reserve_shared(msm_obj->resv);
+                       if (ret)
+                               return ret;
+               }
+
+               if (no_implicit)
+                       continue;
+
                ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
                if (ret)
                        break;
@@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        if (ret)
                goto out;
 
-       if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
-               ret = submit_fence_sync(submit);
-               if (ret)
-                       goto out;
-       }
+       ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
+       if (ret)
+               goto out;
 
        ret = submit_pin_objects(submit);
        if (ret)
index ffbff27600e0ff620fe623172e30783f53ce5fe7..6a887032c66ae08ea5354599b997640c7b005ac5 100644 (file)
@@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
                        msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
                msm_ringbuffer_destroy(gpu->rb);
        }
-       if (gpu->aspace) {
+
+       if (!IS_ERR_OR_NULL(gpu->aspace)) {
                gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
                        NULL, 0);
                msm_gem_address_space_put(gpu->aspace);
index 0366b8092f9772178334449ce121255087206ede..ec56794ad0399277693b9185c75b6abcf9241e4a 100644 (file)
@@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
 
                wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
 
+               /* Note that smp_load_acquire() is not strictly required
+                * as CIRC_SPACE_TO_END() does not access the tail more
+                * than once.
+                */
                n = min(sz, circ_space_to_end(&rd->fifo));
                memcpy(fptr, ptr, n);
 
-               fifo->head = (fifo->head + n) & (BUF_SZ - 1);
+               smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
                sz  -= n;
                ptr += n;
 
@@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf,
        if (ret)
                goto out;
 
+       /* Note that smp_load_acquire() is not strictly required
+        * as CIRC_CNT_TO_END() does not access the head more than
+        * once.
+        */
        n = min_t(int, sz, circ_count_to_end(&rd->fifo));
        if (copy_to_user(buf, fptr, n)) {
                ret = -EFAULT;
                goto out;
        }
 
-       fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
+       smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
        *ppos += n;
 
        wake_up_all(&rd->fifo_event);
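The two smp_store_release() conversions ensure the data copy is ordered before the index update that publishes it. The in-code comments explain why the matching acquire is optional here; for reference, the fully paired single-producer/single-consumer form would look roughly like this (hypothetical ring, not msm_rd's):

    /* producer */
    memcpy(fifo->buf + (head & (SZ - 1)), src, n);
    smp_store_release(&fifo->head, (head + n) & (SZ - 1));  /* data before index */

    /* consumer */
    head = smp_load_acquire(&fifo->head);   /* index before data reads */
    n = CIRC_CNT(head, fifo->tail, SZ);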
index f7707849bb538697009ca3b68c8296100a67b2a4..2b12d82aac1509f7023b24e15d5f04fe7ecc8290 100644 (file)
@@ -223,7 +223,7 @@ void
 nouveau_fbcon_accel_save_disable(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon) {
+       if (drm->fbcon && drm->fbcon->helper.fbdev) {
                drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
                drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
        }
@@ -233,7 +233,7 @@ void
 nouveau_fbcon_accel_restore(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon) {
+       if (drm->fbcon && drm->fbcon->helper.fbdev) {
                drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
        }
 }
@@ -245,7 +245,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
        struct nouveau_fbdev *fbcon = drm->fbcon;
        if (fbcon && drm->channel) {
                console_lock();
-               fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+               if (fbcon->helper.fbdev)
+                       fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
                console_unlock();
                nouveau_channel_idle(drm->channel);
                nvif_object_fini(&fbcon->twod);
index 2dbf62a2ac413081f7a15cb7c8779667447015d4..e4751f92b342d60f44c7d1a73a981940f15cb400 100644 (file)
@@ -3265,11 +3265,14 @@ nv50_mstm = {
 void
 nv50_mstm_service(struct nv50_mstm *mstm)
 {
-       struct drm_dp_aux *aux = mstm->mgr.aux;
+       struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
        bool handled = true;
        int ret;
        u8 esi[8] = {};
 
+       if (!aux)
+               return;
+
        while (handled) {
                ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
                if (ret != 8) {
index 8e2e24a7477458d0ad2361885e79fca4f936068a..44e116f7880dd02e6754d3d328f1d909a0a7041a 100644 (file)
@@ -39,5 +39,5 @@ int
 g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
 {
        return nvkm_xtensa_new_(&g84_bsp, device, index,
-                               true, 0x103000, pengine);
+                               device->chipset != 0x92, 0x103000, pengine);
 }
index d06ad2c372bf30efb6b8ecc5978776def5721222..455da298227f65c2b4c2cfc6a2cedebe12661877 100644 (file)
@@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
                        mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
                }
 
+               mmu->func->flush(vm);
+
                nvkm_memory_del(&pgt);
        }
 }
index 6a573d21d3cc2ec91ea3d0d0e0ebecda20e4c534..658fa2d3e40c260d051d4299bda4eddb0af5abeb 100644 (file)
@@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
                return -EINVAL;
        }
 
+       /*
+        * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
+        * i.MX53 channel arbitration locking doesn't seem to work properly.
+        * Allow enabling the lock feature on IPUv3H / i.MX6 only.
+        */
+       if (bursts && ipu->ipu_type != IPUV3H)
+               return -EINVAL;
+
        for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
                if (channel->num == idmac_lock_en_info[i].chnum)
                        break;
index c35f74c830657f26a3e29c34f7cef7e9f864f71a..c860a7997cb59c981557e01021d6a4278b2470ea 100644 (file)
 #define  IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v)                ((v & 0x7) << 1)
 #define  IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v)   ((v & 0x3) << 4)
 
+#define IPU_PRE_STORE_ENG_STATUS                       0x120
+#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK   0xffff
+#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT  0
+#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK   0x3fff
+#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT  16
+#define  IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL      (1 << 30)
+#define  IPU_PRE_STORE_ENG_STATUS_STORE_FIELD          (1 << 31)
+
 #define IPU_PRE_STORE_ENG_SIZE                         0x130
 #define  IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v)         ((v & 0xffff) << 0)
 #define  IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v)                ((v & 0xffff) << 16)
@@ -93,6 +101,7 @@ struct ipu_pre {
        dma_addr_t              buffer_paddr;
        void                    *buffer_virt;
        bool                    in_use;
+       unsigned int            safe_window_end;
 };
 
 static DEFINE_MUTEX(ipu_pre_list_mutex);
@@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
        u32 active_bpp = info->cpp[0] >> 1;
        u32 val;
 
+       /* calculate safe window for ctrl register updates */
+       pre->safe_window_end = height - 2;
+
        writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
        writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
 
@@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
 
 void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
 {
+       unsigned long timeout = jiffies + msecs_to_jiffies(5);
+       unsigned short current_yblock;
+       u32 val;
+
        writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
+
+       do {
+               if (time_after(jiffies, timeout)) {
+                       dev_warn(pre->dev, "timeout waiting for PRE safe window\n");
+                       return;
+               }
+
+               val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS);
+               current_yblock =
+                       (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) &
+                       IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK;
+       } while (current_yblock == 0 || current_yblock >= pre->safe_window_end);
+
        writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
 }
 
index ecc9ea44dc50fc4f5b815564ae54f46884f93a2e..0013ca9f72c83e8f85b3c91aa62aaf619690f234 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_fourcc.h>
 #include <linux/clk.h>
 #include <linux/err.h>
+#include <linux/iopoll.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 #include <linux/module.h>
@@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
        val = IPU_PRG_REG_UPDATE_REG_UPDATE;
        writel(val, prg->regs + IPU_PRG_REG_UPDATE);
 
+       /* wait for both double buffers to be filled */
+       readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val,
+                          (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) &&
+                          (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)),
+                          5, 1000);
+
        clk_disable_unprepare(prg->clk_ipg);
 
        chan->enabled = true;
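readl_poll_timeout() comes from the <linux/iopoll.h> include added at the top of this file and packages the bounded-poll idiom that ipu-pre.c open-codes above. Its expansion is roughly (sketch; the real macro lives in include/linux/iopoll.h):

    /* readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) */
    timeout = ktime_add_us(ktime_get(), timeout_us);
    for (;;) {
            val = readl(addr);
            if (cond)
                    break;
            if (ktime_compare(ktime_get(), timeout) > 0) {
                    val = readl(addr);      /* one final sample */
                    break;
            }
            usleep_range(sleep_us, sleep_us * 2);
    }
    /* evaluates to 0 if cond held, -ETIMEDOUT otherwise; the caller
     * here ignores the result, so a timeout simply skips the wait. */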
index 0a3117cc29e70c54b3f9b269889d9b4ccf2fb56c..374301fcbc86791e5005ba3e1b45a35c496174ad 100644 (file)
@@ -281,6 +281,7 @@ config HID_ELECOM
        Support for ELECOM devices:
          - BM084 Bluetooth Mouse
          - DEFT Trackball (Wired and wireless)
+         - HUGE Trackball (Wired and wireless)
 
 config HID_ELO
        tristate "ELO USB 4000/4500 touchscreen"
index 9bc91160819b6eaeac1b0368bdbdfe15a1328422..330ca983828ba6d11feaf5538841a6810d84f5c6 100644 (file)
@@ -2032,6 +2032,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
 #endif
 #if IS_ENABLED(CONFIG_HID_ELO)
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
index e2c7465df69f3ae74c2cb1979c531b02e2934089..54aeea57d2099bd8c0a1acf41b9d2f0c21c58ee6 100644 (file)
@@ -3,6 +3,7 @@
  *  Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
  *  Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
  *  Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
+ *  Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
  */
 
 /*
@@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                break;
        case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
        case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
-               /* The DEFT trackball has eight buttons, but its descriptor only
-                * reports five, disabling the three Fn buttons on the top of
-                * the mouse.
+       case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
+       case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
+               /* The DEFT/HUGE trackball has eight buttons, but its descriptor
+                * only reports five, disabling the three Fn buttons on the top
+                * of the mouse.
                 *
                 * Apply the following diff to the descriptor:
                 *
@@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                 * End Collection,                     End Collection,
                 */
                if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
-                       hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
+                       hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
                        rdesc[13] = 8; /* Button/Variable Report Count */
                        rdesc[21] = 8; /* Button/Variable Usage Maximum */
                        rdesc[29] = 0; /* Button/Constant Report Count */
@@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, elecom_devices);
index a98919199858717ecd173ba2ef04f014edcb109f..be2e005c3c516c6cb522a68a891d75e48f8f800f 100644 (file)
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
 #define USB_DEVICE_ID_ELECOM_DEFT_WIRED        0x00fe
 #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS     0x00ff
+#define USB_DEVICE_ID_ELECOM_HUGE_WIRED        0x010c
+#define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS     0x010d
 
 #define USB_VENDOR_ID_DREAM_CHEEKY     0x1d34
 #define USB_DEVICE_ID_DREAM_CHEEKY_WN  0x0004
index 089bad8a9a21d6b35742df8819fabb4da5036730..045b5da9b992873ce74f5d401de34855b93a6f8e 100644 (file)
@@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid)
        unsigned int rsize = 0;
        char *rdesc;
        int ret, n;
+       int num_descriptors;
+       size_t offset = offsetof(struct hid_descriptor, desc);
 
        quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
                        le16_to_cpu(dev->descriptor.idProduct));
@@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid)
                return -ENODEV;
        }
 
+       if (hdesc->bLength < sizeof(struct hid_descriptor)) {
+               dbg_hid("hid descriptor is too short\n");
+               return -EINVAL;
+       }
+
        hid->version = le16_to_cpu(hdesc->bcdHID);
        hid->country = hdesc->bCountryCode;
 
-       for (n = 0; n < hdesc->bNumDescriptors; n++)
+       num_descriptors = min_t(int, hdesc->bNumDescriptors,
+              (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
+
+       for (n = 0; n < num_descriptors; n++)
                if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
                        rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
 
index efd5db743319282922a3fa9994bcee6c618781bb..894b67ac2cae509296cc6c421565df9b7ec48f7f 100644 (file)
@@ -640,6 +640,7 @@ void vmbus_close(struct vmbus_channel *channel)
                 */
                return;
        }
+       mutex_lock(&vmbus_connection.channel_mutex);
        /*
         * Close all the sub-channels first and then close the
         * primary channel.
@@ -648,16 +649,15 @@ void vmbus_close(struct vmbus_channel *channel)
                cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
                vmbus_close_internal(cur_channel);
                if (cur_channel->rescind) {
-                       mutex_lock(&vmbus_connection.channel_mutex);
-                       hv_process_channel_removal(cur_channel,
+                       hv_process_channel_removal(
                                           cur_channel->offermsg.child_relid);
-                       mutex_unlock(&vmbus_connection.channel_mutex);
                }
        }
        /*
         * Now close the primary.
         */
        vmbus_close_internal(channel);
+       mutex_unlock(&vmbus_connection.channel_mutex);
 }
 EXPORT_SYMBOL_GPL(vmbus_close);
 
index bcbb031f726313937ecd7bcd6feb2c859f161588..018d2e0f8ec57b1a35c4bbc977e22e0228004d22 100644 (file)
@@ -159,7 +159,7 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
 
 
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-
+       channel->rescind = true;
        list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
                                msglistentry) {
 
@@ -381,14 +381,21 @@ static void vmbus_release_relid(u32 relid)
                       true);
 }
 
-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
+void hv_process_channel_removal(u32 relid)
 {
        unsigned long flags;
-       struct vmbus_channel *primary_channel;
+       struct vmbus_channel *primary_channel, *channel;
 
-       BUG_ON(!channel->rescind);
        BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
 
+       /*
+        * Make sure channel is valid as we may have raced.
+        */
+       channel = relid2channel(relid);
+       if (!channel)
+               return;
+
+       BUG_ON(!channel->rescind);
        if (channel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(channel->target_cpu,
@@ -515,6 +522,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
        if (!fnew) {
                if (channel->sc_creation_callback != NULL)
                        channel->sc_creation_callback(newchannel);
+               newchannel->probe_done = true;
                return;
        }
 
@@ -834,7 +842,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 {
        struct vmbus_channel_rescind_offer *rescind;
        struct vmbus_channel *channel;
-       unsigned long flags;
        struct device *dev;
 
        rescind = (struct vmbus_channel_rescind_offer *)hdr;
@@ -873,16 +880,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                return;
        }
 
-       spin_lock_irqsave(&channel->lock, flags);
-       channel->rescind = true;
-       spin_unlock_irqrestore(&channel->lock, flags);
-
-       /*
-        * Now that we have posted the rescind state, perform
-        * rescind related cleanup.
-        */
-       vmbus_rescind_cleanup(channel);
-
        /*
         * Now wait for offer handling to complete.
         */
@@ -901,6 +898,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
        if (channel->device_obj) {
                if (channel->chn_rescind_callback) {
                        channel->chn_rescind_callback(channel);
+                       vmbus_rescind_cleanup(channel);
                        return;
                }
                /*
@@ -909,6 +907,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                 */
                dev = get_device(&channel->device_obj->device);
                if (dev) {
+                       vmbus_rescind_cleanup(channel);
                        vmbus_device_unregister(channel->device_obj);
                        put_device(dev);
                }
@@ -921,16 +920,16 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                 * 1. Close all sub-channels first
                 * 2. Then close the primary channel.
                 */
+               mutex_lock(&vmbus_connection.channel_mutex);
+               vmbus_rescind_cleanup(channel);
                if (channel->state == CHANNEL_OPEN_STATE) {
                        /*
                         * The channel is currently not open;
                         * it is safe for us to cleanup the channel.
                         */
-                       mutex_lock(&vmbus_connection.channel_mutex);
-                       hv_process_channel_removal(channel,
-                                               channel->offermsg.child_relid);
-                       mutex_unlock(&vmbus_connection.channel_mutex);
+                       hv_process_channel_removal(rescind->child_relid);
                }
+               mutex_unlock(&vmbus_connection.channel_mutex);
        }
 }
 
index a9d49f6f6501ccb1965217abb429d13571f26f12..937801ac2fe0eafb3d148072a604c4e50a159572 100644 (file)
@@ -768,8 +768,7 @@ static void vmbus_device_release(struct device *device)
        struct vmbus_channel *channel = hv_dev->channel;
 
        mutex_lock(&vmbus_connection.channel_mutex);
-       hv_process_channel_removal(channel,
-                                  channel->offermsg.child_relid);
+       hv_process_channel_removal(channel->offermsg.child_relid);
        mutex_unlock(&vmbus_connection.channel_mutex);
        kfree(hv_dev);
 
index 54a47b40546f69c7ea0d3dbf033c22c95f106516..f96830ffd9f1c1456965810fad723ab365a7f263 100644 (file)
@@ -1021,7 +1021,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
        }
 
        dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n",
-                       rinfo->sda_gpio, rinfo->scl_gpio);
+                       rinfo->scl_gpio, rinfo->sda_gpio);
 
        rinfo->prepare_recovery = i2c_imx_prepare_recovery;
        rinfo->unprepare_recovery = i2c_imx_unprepare_recovery;
@@ -1100,7 +1100,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
        }
 
        /* Request IRQ */
-       ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0,
+       ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED,
                                pdev->name, i2c_imx);
        if (ret) {
                dev_err(&pdev->dev, "can't claim irq %d\n", irq);
index 22ffcb73c185f592d8b4e6bdbb1ede45cbeb4951..b51adffa484109efb842bbe75afec593ac8a1731 100644 (file)
@@ -340,12 +340,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
                        data->word = dma_buffer[0] | (dma_buffer[1] << 8);
                        break;
                case I2C_SMBUS_BLOCK_DATA:
-               case I2C_SMBUS_I2C_BLOCK_DATA:
                        if (desc->rxbytes != dma_buffer[0] + 1)
                                return -EMSGSIZE;
 
                        memcpy(data->block, dma_buffer, desc->rxbytes);
                        break;
+               case I2C_SMBUS_I2C_BLOCK_DATA:
+                       memcpy(&data->block[1], dma_buffer, desc->rxbytes);
+                       data->block[0] = desc->rxbytes;
+                       break;
                }
                return 0;
        }
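The split matters because the two block protocols put different things in the DMA buffer: an SMBus block read returns the byte count in-band as dma_buffer[0], while an I2C block read returns payload only, so the driver has to synthesize the count itself. Schematically:

    /* SMBus block:  dma_buffer = [count][b0][b1]...  -> copy verbatim
     * I2C block:    dma_buffer = [b0][b1]...         -> payload only
     * data->block always stores [count][b0][b1]..., hence for I2C:
     */
    memcpy(&data->block[1], dma_buffer, desc->rxbytes);
    data->block[0] = desc->rxbytes;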
index 1ebb5e947e0b6625fcf0cda7a71f51e79ac29178..23c2ea2baedc07ee15dfab3e9ea0ce0629587374 100644 (file)
@@ -360,6 +360,7 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
        unsigned long fclk_rate = 12000000;
        unsigned long internal_clk = 0;
        struct clk *fclk;
+       int error;
 
        if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) {
                /*
@@ -378,6 +379,13 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
                 * do this bit unconditionally.
                 */
                fclk = clk_get(omap->dev, "fck");
+               if (IS_ERR(fclk)) {
+                       error = PTR_ERR(fclk);
+                       dev_err(omap->dev, "could not get fck: %i\n", error);
+
+                       return error;
+               }
+
                fclk_rate = clk_get_rate(fclk);
                clk_put(fclk);
 
@@ -410,6 +418,12 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
                else
                        internal_clk = 4000;
                fclk = clk_get(omap->dev, "fck");
+               if (IS_ERR(fclk)) {
+                       error = PTR_ERR(fclk);
+                       dev_err(omap->dev, "could not get fck: %i\n", error);
+
+                       return error;
+               }
                fclk_rate = clk_get_rate(fclk) / 1000;
                clk_put(fclk);
 
index 0ecdb47a23abcbf9691bf809b126d72d6c3a46f8..174579d32e5f39ecdc44d2c230b55fbfb5d073e2 100644 (file)
@@ -85,6 +85,9 @@
 /* SB800 constants */
 #define SB800_PIIX4_SMB_IDX            0xcd6
 
+#define KERNCZ_IMC_IDX                 0x3e
+#define KERNCZ_IMC_DATA                        0x3f
+
 /*
  * SB800 port is selected by bits 2:1 of the smb_en register (0x2c)
  * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f.
 #define SB800_PIIX4_PORT_IDX_ALT       0x2e
 #define SB800_PIIX4_PORT_IDX_SEL       0x2f
 #define SB800_PIIX4_PORT_IDX_MASK      0x06
+#define SB800_PIIX4_PORT_IDX_SHIFT     1
+
+/* On kerncz, SmBus0Sel is at bits 20:19 of PMx00 DecodeEn */
+#define SB800_PIIX4_PORT_IDX_KERNCZ            0x02
+#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ       0x18
+#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ      3
 
 /* insmod parameters */
 
@@ -149,6 +158,8 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
  */
 static DEFINE_MUTEX(piix4_mutex_sb800);
 static u8 piix4_port_sel_sb800;
+static u8 piix4_port_mask_sb800;
+static u8 piix4_port_shift_sb800;
 static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
        " port 0", " port 2", " port 3", " port 4"
 };
@@ -159,6 +170,7 @@ struct i2c_piix4_adapdata {
 
        /* SB800 */
        bool sb800_main;
+       bool notify_imc;
        u8 port;                /* Port number, shifted */
 };
 
@@ -347,7 +359,19 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
 
        /* Find which register is used for port selection */
        if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
-               piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+               switch (PIIX4_dev->device) {
+               case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+                       piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
+                       piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
+                       piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
+                       break;
+               case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
+               default:
+                       piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+                       piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+                       piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
+                       break;
+               }
        } else {
                mutex_lock(&piix4_mutex_sb800);
                outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
@@ -355,6 +379,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
                piix4_port_sel_sb800 = (port_sel & 0x01) ?
                                       SB800_PIIX4_PORT_IDX_ALT :
                                       SB800_PIIX4_PORT_IDX;
+               piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+               piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
                mutex_unlock(&piix4_mutex_sb800);
        }
 
@@ -572,6 +598,67 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
        return 0;
 }
 
+static uint8_t piix4_imc_read(uint8_t idx)
+{
+       outb_p(idx, KERNCZ_IMC_IDX);
+       return inb_p(KERNCZ_IMC_DATA);
+}
+
+static void piix4_imc_write(uint8_t idx, uint8_t value)
+{
+       outb_p(idx, KERNCZ_IMC_IDX);
+       outb_p(value, KERNCZ_IMC_DATA);
+}
+
+static int piix4_imc_sleep(void)
+{
+       int timeout = MAX_TIMEOUT;
+
+       if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
+               return -EBUSY;
+
+       /* clear response register */
+       piix4_imc_write(0x82, 0x00);
+       /* request ownership flag */
+       piix4_imc_write(0x83, 0xB4);
+       /* kick off IMC Mailbox command 96 */
+       piix4_imc_write(0x80, 0x96);
+
+       while (timeout--) {
+               if (piix4_imc_read(0x82) == 0xfa) {
+                       release_region(KERNCZ_IMC_IDX, 2);
+                       return 0;
+               }
+               usleep_range(1000, 2000);
+       }
+
+       release_region(KERNCZ_IMC_IDX, 2);
+       return -ETIMEDOUT;
+}
+
+static void piix4_imc_wakeup(void)
+{
+       int timeout = MAX_TIMEOUT;
+
+       if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
+               return;
+
+       /* clear response register */
+       piix4_imc_write(0x82, 0x00);
+       /* release ownership flag */
+       piix4_imc_write(0x83, 0xB5);
+       /* kick off IMC Mailbox command 96 */
+       piix4_imc_write(0x80, 0x96);
+
+       while (timeout--) {
+               if (piix4_imc_read(0x82) == 0xfa)
+                       break;
+               usleep_range(1000, 2000);
+       }
+
+       release_region(KERNCZ_IMC_IDX, 2);
+}
+
 /*
  * Handles access to multiple SMBus ports on the SB800.
  * The port is selected by bits 2:1 of the smb_en register (0x2c).
@@ -612,12 +699,47 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
                return -EBUSY;
        }
 
+       /*
+        * Notify the IMC (Integrated Micro Controller) if required.
+        * Among other responsibilities, the IMC is in charge of monitoring
+        * the system fans and temperature sensors, and acting accordingly.
+        * All this is done through SMBus and can/will collide
+        * with our transactions if they are long (BLOCK_DATA).
+        * Therefore we need to request the ownership flag during those
+        * transactions.
+        */
+       if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) {
+               int ret;
+
+               ret = piix4_imc_sleep();
+               switch (ret) {
+               case -EBUSY:
+                       dev_warn(&adap->dev,
+                                "IMC base address index region 0x%x already in use.\n",
+                                KERNCZ_IMC_IDX);
+                       break;
+               case -ETIMEDOUT:
+                       dev_warn(&adap->dev,
+                                "Failed to communicate with the IMC.\n");
+                       break;
+               default:
+                       break;
+               }
+
+               /* If IMC communication fails do not retry */
+               if (ret) {
+                       dev_warn(&adap->dev,
+                                "Continuing without IMC notification.\n");
+                       adapdata->notify_imc = false;
+               }
+       }
+
        outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
        smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
 
        port = adapdata->port;
-       if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port)
-               outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port,
+       if ((smba_en_lo & piix4_port_mask_sb800) != port)
+               outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
                       SB800_PIIX4_SMB_IDX + 1);
 
        retval = piix4_access(adap, addr, flags, read_write,
@@ -628,6 +750,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
        /* Release the semaphore */
        outb_p(smbslvcnt | 0x20, SMBSLVCNT);
 
+       if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc)
+               piix4_imc_wakeup();
+
        mutex_unlock(&piix4_mutex_sb800);
 
        return retval;
@@ -679,7 +804,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
 static struct i2c_adapter *piix4_aux_adapter;
 
 static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
-                            bool sb800_main, u8 port,
+                            bool sb800_main, u8 port, bool notify_imc,
                             const char *name, struct i2c_adapter **padap)
 {
        struct i2c_adapter *adap;
@@ -706,7 +831,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
 
        adapdata->smba = smba;
        adapdata->sb800_main = sb800_main;
-       adapdata->port = port << 1;
+       adapdata->port = port << piix4_port_shift_sb800;
+       adapdata->notify_imc = notify_imc;
 
        /* set up the sysfs linkage to our parent device */
        adap->dev.parent = &dev->dev;
@@ -728,14 +854,15 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
        return 0;
 }
 
-static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba)
+static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba,
+                                   bool notify_imc)
 {
        struct i2c_piix4_adapdata *adapdata;
        int port;
        int retval;
 
        for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) {
-               retval = piix4_add_adapter(dev, smba, true, port,
+               retval = piix4_add_adapter(dev, smba, true, port, notify_imc,
                                           piix4_main_port_names_sb800[port],
                                           &piix4_main_adapters[port]);
                if (retval < 0)
@@ -769,6 +896,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
             dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
             dev->revision >= 0x40) ||
            dev->vendor == PCI_VENDOR_ID_AMD) {
+               bool notify_imc = false;
                is_sb800 = true;
 
                if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) {
@@ -778,6 +906,20 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        return -EBUSY;
                }
 
+               if (dev->vendor == PCI_VENDOR_ID_AMD &&
+                   dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) {
+                       u8 imc;
+
+                       /*
+                        * Detect whether the IMC is active; this method is
+                        * described in coreboot's AMD IMC notes
+                        */
+                       pci_bus_read_config_byte(dev->bus, PCI_DEVFN(0x14, 3),
+                                                0x40, &imc);
+                       if (imc & 0x80)
+                               notify_imc = true;
+               }
+
                /* base address location etc changed in SB800 */
                retval = piix4_setup_sb800(dev, id, 0);
                if (retval < 0) {
@@ -789,7 +931,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
                 * Try to register multiplexed main SMBus adapter,
                 * give up if we can't
                 */
-               retval = piix4_add_adapters_sb800(dev, retval);
+               retval = piix4_add_adapters_sb800(dev, retval, notify_imc);
                if (retval < 0) {
                        release_region(SB800_PIIX4_SMB_IDX, 2);
                        return retval;
@@ -800,7 +942,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        return retval;
 
                /* Try to register main SMBus adapter, give up if we can't */
-               retval = piix4_add_adapter(dev, retval, false, 0, "",
+               retval = piix4_add_adapter(dev, retval, false, 0, false, "",
                                           &piix4_main_adapters[0]);
                if (retval < 0)
                        return retval;
@@ -827,7 +969,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (retval > 0) {
                /* Try to add the aux adapter if it exists,
                 * piix4_add_adapter will clean up if this fails */
-               piix4_add_adapter(dev, retval, false, 0,
+               piix4_add_adapter(dev, retval, false, 0, false,
                                  is_sb800 ? piix4_aux_port_name_sb800 : "",
                                  &piix4_aux_adapter);
        }
index 51f8215877f552ed168424107078ac786febaf3b..8e8874d23717ab5120c87f180146562be3e03ef6 100644 (file)
@@ -2773,14 +2773,16 @@ int __init amd_iommu_init_api(void)
 
 int __init amd_iommu_init_dma_ops(void)
 {
-       swiotlb        = iommu_pass_through ? 1 : 0;
+       swiotlb        = (iommu_pass_through || sme_me_mask) ? 1 : 0;
        iommu_detected = 1;
 
        /*
         * In case we don't initialize SWIOTLB (actually the common case
-        * when AMD IOMMU is enabled), make sure there are global
-        * dma_ops set as a fall-back for devices not handled by this
-        * driver (for example non-PCI devices).
+        * when AMD IOMMU is enabled and SME is not active), make sure there
+        * are global dma_ops set as a fall-back for devices not handled by
+        * this driver (for example non-PCI devices). When SME is active,
+        * make sure the swiotlb variable remains set so the global dma_ops
+        * continue to be SWIOTLB.
         */
        if (!swiotlb)
                dma_ops = &nommu_dma_ops;
@@ -3046,6 +3048,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
        mutex_unlock(&domain->api_lock);
 
        domain_flush_tlb_pde(domain);
+       domain_flush_complete(domain);
 
        return unmap_size;
 }
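
Two independent fixes in this file. First, SWIOTLB now stays initialized
whenever SME is active (sme_me_mask non-zero), since devices whose DMA
mask cannot reach encrypted memory have to bounce through unencrypted
SWIOTLB buffers, as the reworded comment explains. Second, in
amd_iommu_unmap() the added domain_flush_complete() waits for the IOTLB
flush issued by domain_flush_tlb_pde() to actually finish, so stale
translations cannot be used after the unmap returns. The first decision
in isolation, as a stand-alone sketch:

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the swiotlb decision above; iommu_pass_through and
 * sme_me_mask are the kernel globals named in the hunk. Either
 * condition means some device may need bounce buffering. */
static int want_swiotlb(bool iommu_pass_through, uint64_t sme_me_mask)
{
	return (iommu_pass_through || sme_me_mask) ? 1 : 0;
}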
index f596fcc3289830f5a74ec93d438aa626bebf42fe..25c2c75f5332efe3e98d7e1c290db71d515364f5 100644 (file)
@@ -709,7 +709,7 @@ static const struct dev_pm_ops sysmmu_pm_ops = {
                                pm_runtime_force_resume)
 };
 
-static const struct of_device_id sysmmu_of_match[] __initconst = {
+static const struct of_device_id sysmmu_of_match[] = {
        { .compatible   = "samsung,exynos-sysmmu", },
        { },
 };
index eed6c397d8400b0a25c57feb6ef23dc49eac71df..f8a808d45034e048f9b0bc2c764c51d95e22bdc9 100644 (file)
@@ -1797,12 +1797,19 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
         */
        switch (msg->msg[1]) {
        case CEC_MSG_GET_CEC_VERSION:
-       case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
        case CEC_MSG_ABORT:
        case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
-       case CEC_MSG_GIVE_PHYSICAL_ADDR:
        case CEC_MSG_GIVE_OSD_NAME:
+               /*
+                * These messages reply with a directed message, so ignore if
+                * the initiator is Unregistered.
+                */
+               if (!adap->passthrough && from_unregistered)
+                       return 0;
+               /* Fall through */
+       case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
        case CEC_MSG_GIVE_FEATURES:
+       case CEC_MSG_GIVE_PHYSICAL_ADDR:
                /*
                 * Skip processing these messages if the passthrough mode
                 * is on.
@@ -1810,7 +1817,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
                if (adap->passthrough)
                        goto skip_processing;
                /* Ignore if addressing is wrong */
-               if (is_broadcast || from_unregistered)
+               if (is_broadcast)
                        return 0;
                break;
 
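
The reordered switch above splits these request opcodes by how their
reply is addressed: GET_CEC_VERSION, ABORT, GIVE_DEVICE_POWER_STATUS and
GIVE_OSD_NAME are answered with directed messages, which cannot be sent
to an Unregistered initiator, so those requests are now dropped up front;
GIVE_DEVICE_VENDOR_ID, GIVE_FEATURES and GIVE_PHYSICAL_ADDR are answered
with broadcasts and are still serviced for Unregistered initiators. A
boolean sketch of the resulting filter (illustrative, not the driver's
actual control flow):

#include <stdbool.h>

static bool process_core_message(bool directed_reply, bool passthrough,
				 bool is_broadcast, bool from_unregistered)
{
	/* A directed reply cannot be addressed to Unregistered, so drop
	 * the request early unless passthrough hands it to userspace. */
	if (directed_reply && !passthrough && from_unregistered)
		return false;

	/* In passthrough mode the core skips processing entirely (the
	 * driver jumps to skip_processing rather than returning). */
	if (passthrough)
		return false;

	/* All of these requests must themselves be directed. */
	if (is_broadcast)
		return false;

	return true;
}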
index 2fcba16161685888164565bb24e0e917024092eb..9139d01ba7ed6c9470896dea8500a433d9c05240 100644 (file)
@@ -141,22 +141,39 @@ struct dvb_frontend_private {
 static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
                                        void (*release)(struct dvb_frontend *fe));
 
-static void dvb_frontend_free(struct kref *ref)
+static void __dvb_frontend_free(struct dvb_frontend *fe)
 {
-       struct dvb_frontend *fe =
-               container_of(ref, struct dvb_frontend, refcount);
        struct dvb_frontend_private *fepriv = fe->frontend_priv;
 
+       if (!fepriv)
+               return;
+
        dvb_free_device(fepriv->dvbdev);
 
        dvb_frontend_invoke_release(fe, fe->ops.release);
 
        kfree(fepriv);
+       fe->frontend_priv = NULL;
+}
+
+static void dvb_frontend_free(struct kref *ref)
+{
+       struct dvb_frontend *fe =
+               container_of(ref, struct dvb_frontend, refcount);
+
+       __dvb_frontend_free(fe);
 }
 
 static void dvb_frontend_put(struct dvb_frontend *fe)
 {
-       kref_put(&fe->refcount, dvb_frontend_free);
+       /*
+        * Check if the frontend was registered, as otherwise
+        * the kref has not been initialized yet.
+        */
+       if (fe->frontend_priv)
+               kref_put(&fe->refcount, dvb_frontend_free);
+       else
+               __dvb_frontend_free(fe);
 }
 
 static void dvb_frontend_get(struct dvb_frontend *fe)
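
The refactor above splits the actual free path out of the kref release
callback so that dvb_frontend_put() can also handle a frontend that was
never registered: registration is what initializes both the kref and
frontend_priv, so a non-NULL frontend_priv doubles as "the kref is
valid". A self-contained model of the pattern, with illustrative names:

#include <stdlib.h>

struct frontend {
	int refcount;	/* stands in for struct kref */
	void *priv;	/* set at registration, like frontend_priv */
};

static void frontend_free(struct frontend *fe)
{
	if (!fe->priv)
		return;
	free(fe->priv);
	fe->priv = NULL;
}

static void frontend_put(struct frontend *fe)
{
	if (fe->priv) {
		/* Registered: normal refcounted teardown. */
		if (--fe->refcount == 0)
			frontend_free(fe);
	} else {
		/* Never registered: the kref was never initialized, so
		 * free directly (a no-op here, mirroring the patch). */
		frontend_free(fe);
	}
}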
index 224283fe100a8fe6f6a3f17f52cfbff2a723b15e..4d086a7248e9b2508905cd026038793dc7882241 100644 (file)
@@ -55,29 +55,57 @@ struct dib3000mc_state {
 
 static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg)
 {
-       u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff };
-       u8 rb[2];
        struct i2c_msg msg[2] = {
-               { .addr = state->i2c_addr >> 1, .flags = 0,        .buf = wb, .len = 2 },
-               { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 },
+               { .addr = state->i2c_addr >> 1, .flags = 0,        .len = 2 },
+               { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .len = 2 },
        };
+       u16 word;
+       u8 *b;
+
+       b = kmalloc(4, GFP_KERNEL);
+       if (!b)
+               return 0;
+
+       b[0] = (reg >> 8) | 0x80;
+       b[1] = reg;
+       b[2] = 0;
+       b[3] = 0;
+
+       msg[0].buf = b;
+       msg[1].buf = b + 2;
 
        if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
                dprintk("i2c read error on %d\n",reg);
 
-       return (rb[0] << 8) | rb[1];
+       word = (b[2] << 8) | b[3];
+       kfree(b);
+
+       return word;
 }
 
 static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val)
 {
-       u8 b[4] = {
-               (reg >> 8) & 0xff, reg & 0xff,
-               (val >> 8) & 0xff, val & 0xff,
-       };
        struct i2c_msg msg = {
-               .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
+               .addr = state->i2c_addr >> 1, .flags = 0, .len = 4
        };
-       return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+       int rc;
+       u8 *b;
+
+       b = kmalloc(4, GFP_KERNEL);
+       if (!b)
+               return -ENOMEM;
+
+       b[0] = reg >> 8;
+       b[1] = reg;
+       b[2] = val >> 8;
+       b[3] = val;
+
+       msg.buf = b;
+
+       rc = i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+       kfree(b);
+
+       return rc;
 }
 
 static int dib3000mc_identify(struct dib3000mc_state *state)
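
Both register helpers above move their transfer buffers off the stack:
i2c_transfer() may hand msg.buf to a DMA engine, and stack memory is not
guaranteed to be DMA-safe (with VMAP_STACK it may not even be physically
contiguous), while kmalloc() memory always is. The general pattern,
sketched in kernel style with illustrative names:

/* DMA-safe i2c register read; types come from <linux/i2c.h>. */
static int read_word_dma_safe(struct i2c_adapter *adap, u8 addr,
			      u16 reg, u16 *val)
{
	struct i2c_msg msg[2] = {
		{ .addr = addr, .flags = 0,        .len = 2 },
		{ .addr = addr, .flags = I2C_M_RD, .len = 2 },
	};
	u8 *b;
	int ret = 0;

	b = kmalloc(4, GFP_KERNEL);	/* heap memory is DMA-capable */
	if (!b)
		return -ENOMEM;

	b[0] = reg >> 8;
	b[1] = reg;
	msg[0].buf = b;
	msg[1].buf = b + 2;

	if (i2c_transfer(adap, msg, 2) != 2)
		ret = -EREMOTEIO;
	else
		*val = (b[2] << 8) | b[3];

	kfree(b);
	return ret;
}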
index 7bec3e028beec10e188fea4d9f53cc40556f8ddf..5553b89b804e7d4219d6c767b96fbdb5006cce1b 100644 (file)
@@ -753,13 +753,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
                                    struct i2c_adapter *i2c,
                                    unsigned int pll_desc_id)
 {
-       u8 b1 [] = { 0 };
-       struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD,
-                              .buf = b1, .len = 1 };
+       u8 *b1;
+       struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .len = 1 };
        struct dvb_pll_priv *priv = NULL;
        int ret;
        const struct dvb_pll_desc *desc;
 
+       b1 = kmalloc(1, GFP_KERNEL);
+       if (!b1)
+               return NULL;
+
+       b1[0] = 0;
+       msg.buf = b1;
+
        if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
            (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
                pll_desc_id = id[dvb_pll_devcount];
@@ -773,15 +779,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
                        fe->ops.i2c_gate_ctrl(fe, 1);
 
                ret = i2c_transfer (i2c, &msg, 1);
-               if (ret != 1)
+               if (ret != 1) {
+                       kfree(b1);
                        return NULL;
+               }
                if (fe->ops.i2c_gate_ctrl)
                             fe->ops.i2c_gate_ctrl(fe, 0);
        }
 
        priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
-       if (priv == NULL)
+       if (!priv) {
+               kfree(b1);
                return NULL;
+       }
 
        priv->pll_i2c_address = pll_addr;
        priv->i2c = i2c;
@@ -811,6 +821,8 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
                                "insmod option" : "autodetected");
        }
 
+       kfree(b1);
+
        return fe;
 }
 EXPORT_SYMBOL(dvb_pll_attach);
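
Same stack-to-heap conversion as in dib3000mc, with the added wrinkle
that every early return now needs its own kfree(b1). A hedged sketch of
the alternative shape using the kernel's usual goto-unwind idiom, which
keeps the free on a single exit path (names mirror the hunk, but this is
not the committed code):

static struct dvb_frontend *pll_attach_sketch(struct dvb_frontend *fe,
					      int pll_addr,
					      struct i2c_adapter *i2c)
{
	struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .len = 1 };
	struct dvb_frontend *ret = NULL;
	struct dvb_pll_priv *priv;
	u8 *b1;

	b1 = kzalloc(1, GFP_KERNEL);
	if (!b1)
		return NULL;
	msg.buf = b1;

	if (i2c_transfer(i2c, &msg, 1) != 1)
		goto out;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto out;

	priv->pll_i2c_address = pll_addr;
	priv->i2c = i2c;
	fe->tuner_priv = priv;	/* illustrative; field name assumed */
	ret = fe;
out:
	kfree(b1);		/* single exit path frees the probe buffer */
	return ret;
}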
index 7e7cc49b867400093ba038baf9e14773970f15f4..3c4f7fa7b9d8ea06e7b1455ce3e0172d17322483 100644 (file)
@@ -112,7 +112,7 @@ config VIDEO_PXA27x
 
 config VIDEO_QCOM_CAMSS
        tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver"
-       depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
        depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
        select VIDEOBUF2_DMA_SG
        select V4L2_FWNODE
index b21b3c2dc77f2bb12f617f198a928a58cc17b592..b22d2dfcd3c29ec85c474948f67d0805571db3ab 100644 (file)
@@ -2660,7 +2660,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
  *
  * Return -EINVAL on error or zero on success
  */
-int vfe_set_selection(struct v4l2_subdev *sd,
+static int vfe_set_selection(struct v4l2_subdev *sd,
                             struct v4l2_subdev_pad_config *cfg,
                             struct v4l2_subdev_selection *sel)
 {
index 68933d20806338629a89bdde9a5b05219ef2f5ac..9b2a401a4891c49e1388783cbf4111bf6df26af6 100644 (file)
@@ -682,6 +682,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
                        hfi_session_abort(inst);
 
                load_scale_clocks(core);
+               INIT_LIST_HEAD(&inst->registeredbufs);
        }
 
        venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
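
The added INIT_LIST_HEAD() resets the registered-buffer list after the
session is aborted, so stale entries from the torn-down session cannot be
walked on the next streaming cycle. For reference, INIT_LIST_HEAD() just
makes the head point at itself, i.e. an empty list, roughly:

/* Roughly what <linux/list.h> does (the real macro uses WRITE_ONCE
 * for the forward pointer). */
static inline void init_list_head_sketch(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}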
diff --git